Dataset columns (from the viewer header):
  repo_name  — string, length 8 to 130
  hexsha     — sequence of commit SHAs (one per file)
  file_path  — sequence of file paths
  code       — sequence of full source-file contents
  apis       — sequence of API-call lists (one list per file)
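The schema above comes from the dataset viewer; the rows that follow are raw records. For orientation, here is a minimal Python sketch of loading and inspecting one such record with the Hugging Face datasets library; the dataset id "user/code-apis" is a placeholder assumption, since this dump does not name the dataset.

    # Minimal sketch, assuming these rows live in a Hugging Face dataset.
    # "user/code-apis" is a hypothetical id, not the dataset's real name.
    from datasets import load_dataset

    ds = load_dataset("user/code-apis", split="train")  # hypothetical id
    row = ds[0]
    print(row["repo_name"])  # e.g. "markveillette/high-fidelity-generative-compression"
    for path, apis in zip(row["file_path"], row["apis"]):
        print(path, "->", sorted(apis))

Each record pairs one repository with parallel per-file lists: a commit SHA, a file path, the source blob, and the list of library calls found in that file.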
markveillette/high-fidelity-generative-compression
[ "d88b4d7f1212efa8611e91737ff6bf00bbf36670" ]
[ "src/loss/perceptual_similarity/dist_model.py" ]
[ "\nfrom __future__ import absolute_import\n\nimport sys\nimport numpy as np\nimport torch\nfrom torch import nn\nimport os\nfrom collections import OrderedDict\nfrom torch.autograd import Variable\nimport itertools\nfrom .base_model import BaseModel\nfrom scipy.ndimage import zoom\nimport fractions\nimport functools\nimport skimage.transform\nfrom tqdm import tqdm\n\n\nfrom . import networks_basic as networks\nfrom . import perceptual_loss\n\nclass DistModel(BaseModel):\n def name(self):\n return self.model_name\n\n def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None,\n use_gpu=True, printNet=False, spatial=False, \n is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]):\n '''\n INPUTS\n model - ['net-lin'] for linearly calibrated network\n ['net'] for off-the-shelf network\n ['L2'] for L2 distance in Lab colorspace\n ['SSIM'] for ssim in RGB colorspace\n net - ['squeeze','alex','vgg']\n model_path - if None, will look in weights/[NET_NAME].pth\n colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM\n use_gpu - bool - whether or not to use a GPU\n printNet - bool - whether or not to print network architecture out\n spatial - bool - whether to output an array containing varying distances across spatial dimensions\n is_train - bool - [True] for training mode\n lr - float - initial learning rate\n beta1 - float - initial momentum term for adam\n version - 0.1 for latest, 0.0 was original (with a bug)\n gpu_ids - int array - [0] by default, gpus to use\n '''\n BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)\n\n self.model = model\n self.net = net\n self.is_train = is_train\n self.spatial = spatial\n self.gpu_ids = gpu_ids\n self.model_name = '%s [%s]'%(model,net)\n\n if(self.model == 'net-lin'): # pretrained net + linear layer\n self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,\n use_dropout=True, spatial=spatial, version=version, lpips=True)\n kw = {}\n if not use_gpu:\n kw['map_location'] = 'cpu'\n if(model_path is None):\n import inspect\n model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth'%(version,net)))\n\n if(not is_train):\n print('Loading model from: %s'%model_path)\n self.net.load_state_dict(torch.load(model_path, **kw), strict=False)\n\n elif(self.model=='net'): # pretrained network\n self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)\n elif(self.model in ['L2','l2']):\n self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing\n self.model_name = 'L2'\n elif(self.model in ['DSSIM','dssim','SSIM','ssim']):\n self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)\n self.model_name = 'SSIM'\n else:\n raise ValueError(\"Model [%s] not recognized.\" % self.model)\n\n self.parameters = list(self.net.parameters())\n\n if self.is_train: # training mode\n # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)\n self.rankLoss = networks.BCERankingLoss()\n self.parameters += list(self.rankLoss.net.parameters())\n self.lr = lr\n self.old_lr = lr\n self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))\n else: # test mode\n self.net.eval()\n\n if(use_gpu):\n self.net.to(gpu_ids[0])\n self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)\n if(self.is_train):\n self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0\n\n if(printNet):\n 
print('---------- Networks initialized -------------')\n networks.print_network(self.net)\n print('-----------------------------------------------')\n\n def forward(self, in0, in1, retPerLayer=False):\n ''' Function computes the distance between image patches in0 and in1\n INPUTS\n in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]\n OUTPUT\n computed distances between in0 and in1\n '''\n\n return self.net.forward(in0, in1, retPerLayer=retPerLayer)\n\n # ***** TRAINING FUNCTIONS *****\n def optimize_parameters(self):\n self.forward_train()\n self.optimizer_net.zero_grad()\n self.backward_train()\n self.optimizer_net.step()\n self.clamp_weights()\n\n def clamp_weights(self):\n for module in self.net.modules():\n if(hasattr(module, 'weight') and module.kernel_size==(1,1)):\n module.weight.data = torch.clamp(module.weight.data,min=0)\n\n def set_input(self, data):\n self.input_ref = data['ref']\n self.input_p0 = data['p0']\n self.input_p1 = data['p1']\n self.input_judge = data['judge']\n\n if(self.use_gpu):\n self.input_ref = self.input_ref.to(device=self.gpu_ids[0])\n self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])\n self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])\n self.input_judge = self.input_judge.to(device=self.gpu_ids[0])\n\n self.var_ref = Variable(self.input_ref,requires_grad=True)\n self.var_p0 = Variable(self.input_p0,requires_grad=True)\n self.var_p1 = Variable(self.input_p1,requires_grad=True)\n\n def forward_train(self): # run forward pass\n # print(self.net.module.scaling_layer.shift)\n # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item())\n\n self.d0 = self.forward(self.var_ref, self.var_p0)\n self.d1 = self.forward(self.var_ref, self.var_p1)\n self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)\n\n self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())\n\n self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)\n\n return self.loss_total\n\n def backward_train(self):\n torch.mean(self.loss_total).backward()\n\n def compute_accuracy(self,d0,d1,judge):\n ''' d0, d1 are Variables, judge is a Tensor '''\n d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()\n judge_per = judge.cpu().numpy().flatten()\n return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)\n\n def get_current_errors(self):\n retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),\n ('acc_r', self.acc_r)])\n\n for key in retDict.keys():\n retDict[key] = np.mean(retDict[key])\n\n return retDict\n\n def get_current_visuals(self):\n zoom_factor = 256/self.var_ref.data.size()[2]\n\n ref_img = util.tensor2im(self.var_ref.data)\n p0_img = util.tensor2im(self.var_p0.data)\n p1_img = util.tensor2im(self.var_p1.data)\n\n ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)\n p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)\n p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)\n\n return OrderedDict([('ref', ref_img_vis),\n ('p0', p0_img_vis),\n ('p1', p1_img_vis)])\n\n def save(self, path, label):\n if(self.use_gpu):\n self.save_network(self.net.module, path, '', label)\n else:\n self.save_network(self.net, path, '', label)\n self.save_network(self.rankLoss.net, path, 'rank', label)\n\n def update_learning_rate(self,nepoch_decay):\n lrd = self.lr / nepoch_decay\n lr = self.old_lr - lrd\n\n for param_group in self.optimizer_net.param_groups:\n param_group['lr'] = lr\n\n print('update lr [%s] decay: %f -> 
%f' % (type,self.old_lr, lr))\n self.old_lr = lr\n\ndef score_2afc_dataset(data_loader, func, name=''):\n ''' Function computes Two Alternative Forced Choice (2AFC) score using\n distance function 'func' in dataset 'data_loader'\n INPUTS\n data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside\n func - callable distance function - calling d=func(in0,in1) should take 2\n pytorch tensors with shape Nx3xXxY, and return numpy array of length N\n OUTPUTS\n [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators\n [1] - dictionary with following elements\n d0s,d1s - N arrays containing distances between reference patch to perturbed patches \n gts - N array in [0,1], preferred patch selected by human evaluators\n (closer to \"0\" for left patch p0, \"1\" for right patch p1,\n \"0.6\" means 60pct people preferred right patch, 40pct preferred left)\n scores - N array in [0,1], corresponding to what percentage function agreed with humans\n CONSTS\n N - number of test triplets in data_loader\n '''\n\n d0s = []\n d1s = []\n gts = []\n\n for data in tqdm(data_loader.load_data(), desc=name):\n d0s+=func(data['ref'],data['p0']).data.cpu().numpy().flatten().tolist()\n d1s+=func(data['ref'],data['p1']).data.cpu().numpy().flatten().tolist()\n gts+=data['judge'].cpu().numpy().flatten().tolist()\n\n d0s = np.array(d0s)\n d1s = np.array(d1s)\n gts = np.array(gts)\n scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5\n\n return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))\n\ndef score_jnd_dataset(data_loader, func, name=''):\n ''' Function computes JND score using distance function 'func' in dataset 'data_loader'\n INPUTS\n data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside\n func - callable distance function - calling d=func(in0,in1) should take 2\n pytorch tensors with shape Nx3xXxY, and return pytorch array of length N\n OUTPUTS\n [0] - JND score in [0,1], mAP score (area under precision-recall curve)\n [1] - dictionary with following elements\n ds - N array containing distances between two patches shown to human evaluator\n sames - N array containing fraction of people who thought the two patches were identical\n CONSTS\n N - number of test triplets in data_loader\n '''\n\n ds = []\n gts = []\n\n for data in tqdm(data_loader.load_data(), desc=name):\n ds+=func(data['p0'],data['p1']).data.cpu().numpy().tolist()\n gts+=data['same'].cpu().numpy().flatten().tolist()\n\n sames = np.array(gts)\n ds = np.array(ds)\n\n sorted_inds = np.argsort(ds)\n ds_sorted = ds[sorted_inds]\n sames_sorted = sames[sorted_inds]\n\n TPs = np.cumsum(sames_sorted)\n FPs = np.cumsum(1-sames_sorted)\n FNs = np.sum(sames_sorted)-TPs\n\n precs = TPs/(TPs+FPs)\n recs = TPs/(TPs+FNs)\n score = util.voc_ap(recs,precs)\n\n return(score, dict(ds=ds,sames=sames))\n" ]
[ [ "numpy.sum", "numpy.cumsum", "torch.load", "scipy.ndimage.zoom", "torch.clamp", "torch.mean", "torch.autograd.Variable", "numpy.argsort", "torch.optim.Adam", "numpy.array", "torch.nn.DataParallel", "numpy.mean" ] ]
SOPR-T/SOPR-T
[ "3242461fa8b3e917cde70be497beb1158a7b27e6", "3242461fa8b3e917cde70be497beb1158a7b27e6" ]
[ "d3rlpy-master/tests/models/torch/test_dynamics.py", "src/train_policy.py" ]
[ "import pytest\nimport torch\n\nfrom d3rlpy.models.encoders import DefaultEncoderFactory\nfrom d3rlpy.models.torch.dynamics import (\n ProbabilisticDynamicsModel,\n ProbabilisticEnsembleDynamicsModel,\n _compute_ensemble_variance,\n)\n\nfrom .model_test import DummyEncoder, check_parameter_updates\n\n\[email protected](\"batch_size\", [32])\[email protected](\"observation_shape\", [(100,)])\[email protected](\"n_ensembles\", [5])\[email protected](\"variance_type\", [\"max\", \"data\"])\ndef test_compute_ensemble_variance(\n batch_size, observation_shape, n_ensembles, variance_type\n):\n observations = torch.rand((batch_size, n_ensembles) + observation_shape)\n rewards = torch.rand(batch_size, n_ensembles, 1)\n variances = torch.rand(batch_size, n_ensembles, 1)\n\n if variance_type == \"max\":\n ref = variances.max(dim=1).values\n elif variance_type == \"data\":\n data = torch.cat([observations, rewards], dim=2)\n ref = (data.std(dim=1) ** 2).sum(dim=1, keepdims=True)\n\n variances = _compute_ensemble_variance(\n observations, rewards, variances, variance_type\n )\n\n assert variances.shape == (batch_size, 1)\n assert torch.allclose(variances, ref)\n\n\[email protected](\"feature_size\", [100])\[email protected](\"action_size\", [2])\[email protected](\"batch_size\", [32])\ndef test_probabilistic_dynamics_model(feature_size, action_size, batch_size):\n encoder = DummyEncoder(feature_size, action_size, True)\n dynamics = ProbabilisticDynamicsModel(encoder)\n\n # check output shape\n x = torch.rand(batch_size, feature_size)\n action = torch.rand(batch_size, action_size)\n pred_x, pred_reward = dynamics(x, action)\n assert pred_x.shape == (batch_size, feature_size)\n assert pred_reward.shape == (batch_size, 1)\n\n # check variance\n _, _, variance = dynamics.predict_with_variance(x, action)\n assert variance.shape == (batch_size, 1)\n\n # TODO: check error\n reward = torch.rand(batch_size, 1)\n loss = dynamics.compute_error(x, action, reward, x)\n assert loss.shape == (batch_size, 1)\n\n # check layer connection\n check_parameter_updates(dynamics, (x, action, reward, x))\n\n\[email protected](\"feature_size\", [100])\[email protected](\"action_size\", [2])\[email protected](\"batch_size\", [32])\[email protected](\"n_ensembles\", [5])\[email protected](\"variance_type\", [\"max\", \"data\"])\ndef test_probabilistic_ensemble_dynamics_dynamics_model(\n feature_size, action_size, batch_size, n_ensembles, variance_type\n):\n encoder = DummyEncoder(feature_size, action_size, True)\n models = []\n for _ in range(n_ensembles):\n models.append(ProbabilisticDynamicsModel(encoder))\n\n dynamics = ProbabilisticEnsembleDynamicsModel(models)\n\n # check output shape\n x = torch.rand(batch_size, feature_size)\n action = torch.rand(batch_size, action_size)\n pred_x, pred_reward = dynamics(x, action)\n assert pred_x.shape == (batch_size, n_ensembles, feature_size)\n assert pred_reward.shape == (batch_size, n_ensembles, 1)\n\n # check variance without indices\n pred_x, pred_reward, variances = dynamics.predict_with_variance(\n x, action, variance_type=variance_type\n )\n assert pred_x.shape == (batch_size, n_ensembles, feature_size)\n assert pred_reward.shape == (batch_size, n_ensembles, 1)\n assert variances.shape == (batch_size, 1)\n\n # check variance with indices\n indices = torch.randint(n_ensembles, size=(batch_size,))\n pred_x, pred_reward, variances = dynamics.predict_with_variance(\n x, action, variance_type=variance_type, indices=indices\n )\n assert pred_x.shape == (batch_size, feature_size)\n 
assert pred_reward.shape == (batch_size, 1)\n assert variances.shape == (batch_size, 1)\n\n # TODO: check error\n reward = torch.rand(batch_size, 1)\n loss = dynamics.compute_error(x, action, reward, x)\n\n # check layer connection\n check_parameter_updates(dynamics, (x, action, reward, x))\n", "import argparse\nimport numpy as np\nimport os\nimport torch\n\nimport offline_agent\nimport online_agent\nfrom utils.constants import env_list\n\n\nif __name__ == \"__main__\":\n\t\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--env\", default=\"HalfCheetah-v2\") # OpenAI gym environment name\n\tparser.add_argument(\"--seed\", default=0, type=int) # Sets Gym, PyTorch and Numpy seeds\n\tparser.add_argument(\"--T\", type=int, default=100)\t\t\t # epochs for offline algorithms and steps for online algorithms\n\tparser.add_argument(\"--save_interval\", default=1, type=float) # For online algos, this means the intervals of saving steps\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # and for offline algos, this means the intervals of saving epochs\n\tparser.add_argument(\"--batch_size\", default=128, type=int) # Mini batch size for networks\n\tparser.add_argument(\"--algo\", type=str, default=None, \t \t # select algos\n\t\t\t\t\t\tchoices=['BCloning', 'BCQ', 'offDDPG', 'BEAR', 'CQL', 'onlineDDPG', 'onlineSAC', 'MOPO', 'CRR']) \n\tparser.add_argument('--model_path', type=str, default='../models_try/')\t# where to save model\n\tparser.add_argument('--buffer_path', type=str, default='D4RLdata')\t\t# where is the data for offline training\n\targs = parser.parse_args()\n\n\tprint(\"---------------------------------------\")\n\tprint(f'setting: training {args.algo}, Env: {args.env}, seed: {args.seed}')\n\tprint(\"---------------------------------------\")\n\n\tif not os.path.exists(args.model_path):\n\t\tos.makedirs(args.model_path, exist_ok=True)\n\n\ttorch.manual_seed(args.seed)\n\tnp.random.seed(args.seed)\n\t\n\tstate_dim = env_list[args.env]['state_dim'] #env.observation_space.shape[0]\n\taction_dim = env_list[args.env]['action_dim'] #env.action_space.shape[0] \n\tmax_action = env_list[args.env]['max_action'] #float(env.action_space.high[0])\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\t\n\tonline = args.algo in ['onlineDDPG', 'onlineSAC']\n\n\tif online:\n\t\ttrainer = online_agent.OnlineAgent(state_dim, action_dim, batch_size=args.batch_size, algo=args.algo, device=device)\n\telse:\n\t\ttrainer = offline_agent.OfflineAgent(state_dim, action_dim, batch_size=args.batch_size, algo=args.algo, device=device)\n\ttrainer.train(args, device=device)\n\n\t\n\t# os.makedirs(f'/mnt/exps/models/{args.model_path}/', exist_ok=True)\n\tos.system(f'mv {args.model_path}/{args.algo} /mnt/exps/models_our/{args.model_path}/')\n\tif args.algo == 'MOPO':\n\t\tos.system(f'mv {args.model_path}/MOPO_SAC/ /mnt/exps/models_retry/{args.model_path}/')\n" ]
[ [ "torch.rand", "torch.cat", "torch.randint", "torch.allclose" ], [ "torch.manual_seed", "torch.cuda.is_available", "numpy.random.seed" ] ]
LockdownInnovators/CodeNames
[ "b82fc9c85d4887ae81f331de6f2058e5e2cdccd9" ]
[ "engine.py" ]
[ "from __future__ import print_function, division\n\nimport itertools\nimport re\nimport sys\nimport os\nimport platform\n\nimport numpy as np\n\nimport model\nfrom config import config\n\nCLUE_PATTERN = r'^([a-zA-Z]+) ({0})$'\nUNLIMITED = \"unlimited\"\n\n\n# noinspection PyAttributeOutsideInit\nclass GameEngine(object):\n\n def __init__(self, seed=None, expert=False, word2vec_models=None):\n\n # Load our word list if necessary.\n # TODO: Max length of 11 is hardcoded here and in print_board()\n if word2vec_models is None:\n word2vec_models = {}\n with open(config.word_list) as f:\n _words = [line.rstrip().lower().replace(' ', '_') for line in f.readlines()]\n self.words = np.array(_words)\n\n # Initialize our word embedding models.\n self.models = {k: model.WordEmbedding(w2v) for k, w2v in word2vec_models.items()}\n\n # Initialize random numbers.\n self.generator = np.random.RandomState(seed=seed)\n\n # Register expert mode\n self.expert = expert\n self.unfound_words = (set(), set())\n\n # Useful regular expressions.\n if self.expert:\n self.valid_clue = re.compile(CLUE_PATTERN.format(\"[0-9]|\" + UNLIMITED))\n else:\n self.valid_clue = re.compile(CLUE_PATTERN.format(\"[0-9]\"))\n\n def initialize_random_game(self, size=5):\n\n self.size = size\n\n # Shuffle the wordlist.\n shuffle = self.generator.choice(\n len(self.words), size * size, replace=False)\n self.board = self.words[shuffle]\n\n # Specify the layout for this game.\n assignments = self.generator.permutation(size * size)\n self.owner = np.empty(size * size, int)\n self.owner[assignments[0]] = 0 # assassin\n self.owner[assignments[1:10]] = 1 # first player: 9 words\n self.owner[assignments[10:18]] = 2 # second player: 8 words\n self.owner[assignments[18:]] = 3 # bystander: 7 words\n\n self.assassin_word = self.board[self.owner == 0]\n\n # All cards are initially visible.\n self.visible = np.ones_like(self.owner, dtype=bool)\n\n self.num_turns = -1\n\n def initialize_from_words(self, initial_words, size=5):\n \"\"\"\n The initial_words parameter should be in the format:\n\n ASSASSIN;TEAM1;TEAM2;NEUTRAL\n\n where each group consists of comma-separated words from the word list.\n\n The total number of words must be <= size * size. Any missing words\n are considered to be already covered and neutral.\n \"\"\"\n self.size = size\n\n word_groups = initial_words.split(';')\n if len(word_groups) != 4:\n raise ValueError('Expected 4 groups separated by semicolon.')\n\n board, owner, visible = [], [], []\n for group_index, word_group in enumerate(word_groups):\n words = word_group.split(',')\n for word in words:\n word = word.lower().replace(' ', '_')\n if word not in self.words:\n raise ValueError('Invalid word \"{0}\".'.format(word))\n if word in board:\n raise ValueError('Duplicate word \"{0}\".'.format(word))\n board.append(word)\n owner.append(group_index)\n visible.append(True)\n if len(board) > size * size:\n raise ValueError('Too many words. 
Expected <= {0}.'.format(size * size))\n # Add dummy hidden words if necessary.\n while len(board) < size * size:\n board.append('---')\n owner.append(3)\n visible.append(False)\n\n self.board = np.array(board)\n self.owner = np.array(owner)\n self.visible = np.array(visible)\n\n # Perform a random shuffle of the board.\n shuffle = self.generator.permutation(size * size)\n self.board = self.board[shuffle]\n self.owner = self.owner[shuffle]\n self.visible = self.visible[shuffle]\n\n self.assassin_word = self.board[self.owner == 0]\n self.num_turns = -1\n\n def print_board(self, spymaster=False, clear_screen=True):\n\n if clear_screen:\n if platform.system() == 'Windows':\n os.system('cls')\n else:\n print(chr(27) + '[2J')\n\n board = self.board.reshape(self.size, self.size)\n owner = self.owner.reshape(self.size, self.size)\n visible = self.visible.reshape(self.size, self.size)\n\n for row in range(self.size):\n for col in range(self.size):\n word = board[row, col]\n tag = '#<>-'[owner[row, col]]\n if not visible[row, col]:\n word = tag * 11\n elif not spymaster:\n tag = ' '\n if not spymaster or owner[row, col] in (0, 1, 2):\n word = word.upper()\n print('{0}{1:11s} '.format(tag, word), end='')\n print('')\n\n def play_computer_spymaster(self, gamma=1.0, verbose=True):\n\n say('Thinking...')\n sys.stdout.flush()\n\n # Loop over all permutations of words.\n num_words = len(self.player_words)\n best_score, saved_clues = [], []\n for count in range(max(num_words, 2), 0, -1):\n # Multiply similarity scores by this factor for any clue\n # corresponding to this many words.\n bonus_factor = count ** gamma\n for group in itertools.combinations(range(num_words), count):\n words = self.player_words[list(group)]\n clue, score = self.models[f'{self.player + 1} Master'].get_clue(clue_words=words,\n pos_words=self.player_words,\n neg_words=np.concatenate((\n self.opponent_words,\n self.neutral_words)),\n veto_words=self.assassin_word)\n if clue:\n best_score.append(score * bonus_factor)\n saved_clues.append((clue, words))\n num_clues = len(saved_clues)\n order = sorted(range(num_clues), key=lambda k: best_score[k], reverse=True)\n\n if verbose:\n self.print_board(spymaster=True)\n for i in order[:10]:\n clue, words = saved_clues[i]\n say(u'{0:.3f} {1} = {2}'.format(best_score[i], ' + '.join([w.upper() for w in words]), clue))\n\n clue, words = saved_clues[order[0]]\n self.unfound_words[self.player].update(words)\n if self.expert and self._should_say_unlimited(nb_clue_words=len(words)):\n return clue, UNLIMITED\n else:\n return clue, len(words)\n\n def _should_say_unlimited(self, nb_clue_words, threshold_opponent=2):\n \"\"\"\n Announce \"unlimited\" if :\n (1) the opposing team risks winning with their next clue,\n (2) and our +1 guess isn't enough to catch up during this clue,\n (3) but all the words hinted by the current and previous clues\n are enough to catch up and win\n \"\"\"\n return (len(self.opponent_words) <= threshold_opponent # (1)\n and nb_clue_words + 1 < len(self.player_words) # (2)\n and self.unfound_words[self.player]\n == set(self.player_words)) # (3)\n\n def play_human_spymaster(self):\n\n self.print_board(spymaster=True)\n\n while True:\n clue = ask('{0} Enter your clue: '.format(self.player_label))\n matched = self.valid_clue.match(clue)\n if matched:\n word, count = matched.groups()\n if count != UNLIMITED:\n count = int(count)\n return word, count\n say('Invalid clue, should be WORD COUNT.')\n\n def play_human_team(self, word, count):\n\n num_guesses = 0\n while (self.expert and 
count == UNLIMITED) or num_guesses < count + 1:\n self.print_board(clear_screen=(num_guesses == 0))\n say(u'{0} your clue is: {1} {2}'.format(self.player_label, word, count))\n\n num_guesses += 1\n while True:\n guess = ask('{0} enter your guess #{1}: '.format(self.player_label, num_guesses))\n guess = guess.strip().lower().replace(' ', '_')\n if guess == '':\n # Team does not want to make any more guesses.\n return True\n if guess in self.board[self.visible]:\n break\n say('Invalid guess, should be a visible word.')\n\n loc = np.where(self.board == guess)[0]\n self.visible[loc] = False\n\n if guess == self.assassin_word:\n say('{0} You guessed the assasin - game over!'.format(self.player_label))\n return False\n\n if guess in self.player_words:\n self.unfound_words[self.player].discard(guess)\n if num_guesses == len(self.player_words):\n say('{0} You won!!!'.format(self.player_label))\n return False\n else:\n ask('{0} Congratulations, keep going! (hit ENTER)\\n'.format(self.player_label))\n else:\n if guess in self.opponent_words:\n ask('{0} Sorry, word from opposing team! (hit ENTER)\\n'.format(self.player_label))\n else:\n ask('{0} Sorry, bystander! (hit ENTER)\\n'.format(self.player_label))\n break\n\n return True\n\n def play_computer_team(self, word, count):\n num_guesses = 0\n say(u'{0} (computer) your clue is: {1} {2}'.format(self.player_label, word, count))\n guesses = self.models[f'{self.player + 1} Guesser'].get_closest_board_words_to(word, count, self.player_words)\n for guess in guesses:\n num_guesses += 1\n say(f'Computer guess #{num_guesses}: {guess}')\n loc = np.where(self.board == guess)[0]\n self.visible[loc] = False\n\n if guess == self.assassin_word:\n say('{0} (computer) guessed the assasin - game over!'.format(self.player_label))\n return False\n\n if guess in self.player_words:\n self.unfound_words[self.player].discard(guess)\n if num_guesses == len(self.player_words):\n say('{0} (computer) You won!!!'.format(self.player_label))\n return False\n else:\n ask('{0} Congratulations computer, keep going! (hit ENTER)\\n'.format(self.player_label))\n else:\n if guess in self.opponent_words:\n ask('{0} Sorry computer, word from opposing team! (hit ENTER)\\n'.format(self.player_label))\n else:\n ask('{0} Sorry computer, bystander! 
(hit ENTER)\\n'.format(self.player_label))\n break\n\n return True\n\n def next_turn(self):\n self.num_turns += 1\n\n self.player = self.num_turns % 2\n self.opponent = (self.player + 1) % 2\n\n self.player_label = '<>'[self.player] * 3\n self.player_words = self.board[(self.owner == self.player + 1) & self.visible]\n self.opponent_words = self.board[(self.owner == self.opponent + 1) & self.visible]\n self.neutral_words = self.board[(self.owner == 3) & self.visible]\n\n def play_turn(self, spymaster='human', team='human'):\n\n self.next_turn()\n\n if spymaster == 'human':\n word, count = self.play_human_spymaster()\n else:\n word, count = self.play_computer_spymaster()\n\n if team == 'human':\n ongoing = self.play_human_team(word, count)\n else:\n ongoing = self.play_computer_team(word, count)\n\n return ongoing\n\n def play_game(self, spymaster1='human', team1='human',\n spymaster2='human', team2='human', init=None):\n\n if init is None:\n self.initialize_random_game()\n else:\n self.initialize_from_words(init)\n\n while True:\n if not self.play_turn(spymaster1, team1): break\n if not self.play_turn(spymaster2, team2): break\n\n\ndef say(message):\n print((message + '\\n').encode('utf8'))\n\n\ndef ask(message):\n try:\n return input(message)\n except KeyboardInterrupt:\n say('\\nBye.')\n sys.exit(0)\n" ]
[ [ "numpy.empty", "numpy.concatenate", "numpy.ones_like", "numpy.random.RandomState", "numpy.array", "numpy.where" ] ]
aaxwaz/youtube-8m
[ "3c3ceae83173d6b9eaef6072308a2804ba56bcf5" ]
[ "other_train/train_loadCorrMat.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Binary for training Tensorflow models on the YouTube-8M dataset.\"\"\"\n\nimport json\nimport os\nimport time\n\nimport eval_util\nimport export_model\nimport losses\nimport frame_level_models\nimport video_level_models\nimport readers\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow import app\nfrom tensorflow import flags\nfrom tensorflow import gfile\nfrom tensorflow import logging\nfrom tensorflow.python.client import device_lib\nimport utils\nimport numpy as np \n\nFLAGS = flags.FLAGS\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\nif __name__ == \"__main__\":\n # Dataset flags.\n flags.DEFINE_string(\"train_dir\", \"/tmp/yt8m_model/\",\n \"The directory to save the model files in.\")\n flags.DEFINE_string(\n \"train_data_pattern\", \"\",\n \"File glob for the training dataset. If the files refer to Frame Level \"\n \"features (i.e. tensorflow.SequenceExample), then set --reader_type \"\n \"format. The (Sequence)Examples are expected to have 'rgb' byte array \"\n \"sequence feature as well as a 'labels' int64 context feature.\")\n flags.DEFINE_string(\"feature_names\", \"mean_rgb\", \"Name of the feature \"\n \"to use for training.\")\n flags.DEFINE_string(\"feature_sizes\", \"1024\", \"Length of the feature vectors.\")\n\n # Model flags.\n flags.DEFINE_bool(\n \"frame_features\", False,\n \"If set, then --train_data_pattern must be frame-level features. \"\n \"Otherwise, --train_data_pattern must be aggregated video-level \"\n \"features. The model must also be set appropriately (i.e. to read 3D \"\n \"batches VS 4D batches.\")\n flags.DEFINE_string(\n \"model\", \"LogisticModel\",\n \"Which architecture to use for the model. Models are defined \"\n \"in models.py.\")\n flags.DEFINE_bool(\n \"start_new_model\", False,\n \"If set, this will not resume from a checkpoint and will instead create a\"\n \" new model instance.\")\n\n # Training flags.\n flags.DEFINE_integer(\"num_gpu\", 1,\n \"The maximum number of GPU devices to use for training. 
\"\n \"Flag only applies if GPUs are installed\")\n flags.DEFINE_integer(\"batch_size\", 1024,\n \"How many examples to process per batch for training.\")\n flags.DEFINE_string(\"label_loss\", \"CrossEntropyLoss\",\n \"Which loss function to use for training the model.\")\n flags.DEFINE_float(\n \"regularization_penalty\", 1.0,\n \"How much weight to give to the regularization loss (the label loss has \"\n \"a weight of 1).\")\n flags.DEFINE_float(\"base_learning_rate\", 0.01,\n \"Which learning rate to start with.\")\n flags.DEFINE_float(\"learning_rate_decay\", 0.95,\n \"Learning rate decay factor to be applied every \"\n \"learning_rate_decay_examples.\")\n flags.DEFINE_float(\"learning_rate_decay_examples\", 4000000,\n \"Multiply current learning rate by learning_rate_decay \"\n \"every learning_rate_decay_examples.\")\n flags.DEFINE_integer(\"num_epochs\", 5,\n \"How many passes to make over the dataset before \"\n \"halting training.\")\n flags.DEFINE_integer(\"max_steps\", None,\n \"The maximum number of iterations of the training loop.\")\n flags.DEFINE_integer(\"export_model_steps\", 10000000000,\n \"The period, in number of steps, with which the model \"\n \"is exported for batch prediction.\")\n flags.DEFINE_float(\"save_checkpoint_every_n_hour\", 0.4,\n \"Save the checkpoint every n hours.\")\n flags.DEFINE_integer(\"validate_every_n_training_steps\", 100, \n \"eval on training for every n steps\")\n\n\n # Other flags.\n flags.DEFINE_integer(\"num_readers\", 12,\n \"How many threads to use for reading input files.\")\n flags.DEFINE_string(\"optimizer\", \"AdamOptimizer\",\n \"What optimizer class to use.\")\n flags.DEFINE_float(\"clip_gradient_norm\", 1.0, \"Norm to clip gradients to.\")\n flags.DEFINE_bool(\n \"log_device_placement\", False,\n \"Whether to write the device on which every op will run into the \"\n \"logs on startup.\")\n\ndef validate_class_name(flag_value, category, modules, expected_superclass):\n \"\"\"Checks that the given string matches a class of the expected type.\n\n Args:\n flag_value: A string naming the class to instantiate.\n category: A string used further describe the class in error messages\n (e.g. 'model', 'reader', 'loss').\n modules: A list of modules to search for the given class.\n expected_superclass: A class that the given class should inherit from.\n\n Raises:\n FlagsError: If the given class could not be found or if the first class\n found with that name doesn't inherit from the expected superclass.\n\n Returns:\n True if a class was found that matches the given constraints.\n \"\"\"\n candidates = [getattr(module, flag_value, None) for module in modules]\n for candidate in candidates:\n if not candidate:\n continue\n if not issubclass(candidate, expected_superclass):\n raise flags.FlagsError(\"%s '%s' doesn't inherit from %s.\" %\n (category, flag_value,\n expected_superclass.__name__))\n return True\n raise flags.FlagsError(\"Unable to find %s '%s'.\" % (category, flag_value))\n\ndef get_input_data_tensors(reader,\n data_pattern,\n batch_size=1000,\n num_epochs=None,\n num_readers=1):\n \"\"\"Creates the section of the graph which reads the training data.\n\n Args:\n reader: A class which parses the training data.\n data_pattern: A 'glob' style path to the data files.\n batch_size: How many examples to process at a time.\n num_epochs: How many passes to make over the training data. 
Set to 'None'\n to run indefinitely.\n num_readers: How many I/O threads to use.\n\n Returns:\n A tuple containing the features tensor, labels tensor, and optionally a\n tensor containing the number of frames per video. The exact dimensions\n depend on the reader being used.\n\n Raises:\n IOError: If no files matching the given pattern were found.\n \"\"\"\n logging.info(\"Using batch size of \" + str(batch_size) + \" for training.\")\n with tf.name_scope(\"train_input\"):\n files = gfile.Glob(data_pattern)\n if not files:\n raise IOError(\"Unable to find training files. data_pattern='\" +\n data_pattern + \"'.\")\n logging.info(\"Number of training files: %s.\", str(len(files)))\n filename_queue = tf.train.string_input_producer(\n files, num_epochs=num_epochs, shuffle=True)\n training_data = [\n reader.prepare_reader(filename_queue) for _ in range(num_readers)\n ]\n\n return tf.train.shuffle_batch_join(\n training_data,\n batch_size=batch_size,\n capacity=batch_size * 5,\n min_after_dequeue=batch_size,\n allow_smaller_final_batch=True,\n enqueue_many=True)\n\n\ndef find_class_by_name(name, modules):\n \"\"\"Searches the provided modules for the named class and returns it.\"\"\"\n modules = [getattr(module, name, None) for module in modules]\n return next(a for a in modules if a)\n\ndef build_graph(reader,\n model,\n train_data_pattern,\n label_loss_fn=losses.CrossEntropyLoss(),\n batch_size=1000,\n base_learning_rate=0.01,\n learning_rate_decay_examples=1000000,\n learning_rate_decay=0.95,\n optimizer_class=tf.train.AdamOptimizer,\n clip_gradient_norm=1.0,\n regularization_penalty=1,\n num_readers=1,\n num_epochs=None, \n corr_mat=None):\n \"\"\"Creates the Tensorflow graph.\n\n This will only be called once in the life of\n a training model, because after the graph is created the model will be\n restored from a meta graph file rather than being recreated.\n\n Args:\n reader: The data file reader. It should inherit from BaseReader.\n model: The core model (e.g. logistic or neural net). It should inherit\n from BaseModel.\n train_data_pattern: glob path to the training data files.\n label_loss_fn: What kind of loss to apply to the model. It should inherit\n from BaseLoss.\n batch_size: How many examples to process at a time.\n base_learning_rate: What learning rate to initialize the optimizer with.\n optimizer_class: Which optimization algorithm to use.\n clip_gradient_norm: Magnitude of the gradient to clip to.\n regularization_penalty: How much weight to give the regularization loss\n compared to the label loss.\n num_readers: How many threads to use for I/O operations.\n num_epochs: How many passes to make over the data. 'None' means an\n unlimited number of passes.\n \"\"\"\n\n global_step = tf.Variable(0, trainable=False, name=\"global_step\")\n\n local_device_protos = device_lib.list_local_devices()\n gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']\n gpus = gpus[:FLAGS.num_gpu]\n num_gpus = len(gpus)\n\n if num_gpus > 0:\n logging.info(\"Using the following GPUs to train: \" + str(gpus))\n num_towers = num_gpus\n device_string = '/gpu:%d'\n else:\n logging.info(\"No GPUs found. 
Training on CPU.\")\n num_towers = 1\n device_string = '/cpu:%d'\n\n learning_rate = tf.train.exponential_decay(\n base_learning_rate,\n global_step * batch_size * num_towers,\n learning_rate_decay_examples,\n learning_rate_decay,\n staircase=True)\n tf.summary.scalar('learning_rate', learning_rate)\n\n optimizer = optimizer_class(learning_rate)\n unused_video_id, model_input_raw, labels_batch, num_frames = (\n get_input_data_tensors(\n reader,\n train_data_pattern,\n batch_size=batch_size * num_towers,\n num_readers=num_readers,\n num_epochs=num_epochs))\n tf.summary.histogram(\"model/input_raw\", model_input_raw)\n\n feature_dim = len(model_input_raw.get_shape()) - 1\n\n model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)\n\n tower_inputs = tf.split(model_input, num_towers)\n tower_labels = tf.split(labels_batch, num_towers)\n tower_num_frames = tf.split(num_frames, num_towers)\n tower_gradients = []\n tower_predictions = []\n tower_label_losses = []\n tower_reg_losses = []\n\n for i in range(num_towers):\n # For some reason these 'with' statements can't be combined onto the same\n # line. They have to be nested.\n with tf.device(device_string % i):\n with (tf.variable_scope((\"tower\"), reuse=True if i > 0 else None)):\n with (slim.arg_scope([slim.model_variable, slim.variable], device=\"/cpu:0\" if num_gpus!=1 else \"/gpu:0\")):\n result = model.create_model(\n tower_inputs[i],\n num_frames=tower_num_frames[i],\n vocab_size=reader.num_classes,\n corr_mat_init=corr_mat,\n labels=tower_labels[i])\n for variable in slim.get_model_variables():\n tf.summary.histogram(variable.op.name, variable)\n\n predictions0 = result[\"predictions0\"]\n predictions = result[\"predictions\"]\n \n tower_predictions.append(predictions)\n\n label_loss = label_loss_fn.calculate_loss(predictions0, tower_labels[i])\n\n if \"regularization_loss\" in result.keys():\n reg_loss = result[\"regularization_loss\"]\n else:\n reg_loss = tf.constant(0.0)\n\n reg_losses = tf.losses.get_regularization_losses()\n if reg_losses:\n reg_loss += tf.add_n(reg_losses)\n\n tower_reg_losses.append(reg_loss)\n\n # Adds update_ops (e.g., moving average updates in batch normalization) as\n # a dependency to the train_op.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n if \"update_ops\" in result.keys():\n update_ops += result[\"update_ops\"]\n if update_ops:\n with tf.control_dependencies(update_ops):\n barrier = tf.no_op(name=\"gradient_barrier\")\n with tf.control_dependencies([barrier]):\n label_loss = tf.identity(label_loss)\n\n tower_label_losses.append(label_loss)\n\n # Incorporate the L2 weight penalties etc.\n final_loss = regularization_penalty * reg_loss + label_loss\n gradients = optimizer.compute_gradients(final_loss,\n colocate_gradients_with_ops=False)\n tower_gradients.append(gradients)\n\n\n label_loss = tf.reduce_mean(tf.stack(tower_label_losses))\n tf.summary.scalar(\"label_loss\", label_loss)\n if regularization_penalty != 0:\n reg_loss = tf.reduce_mean(tf.stack(tower_reg_losses))\n tf.summary.scalar(\"reg_loss\", reg_loss)\n merged_gradients = utils.combine_gradients(tower_gradients)\n\n if clip_gradient_norm > 0:\n with tf.name_scope('clip_grads'):\n merged_gradients = utils.clip_gradient_norms(merged_gradients, clip_gradient_norm)\n\n train_op = optimizer.apply_gradients(merged_gradients, global_step=global_step)\n\n tf.add_to_collection(\"global_step\", global_step)\n tf.add_to_collection(\"loss\", label_loss)\n tf.add_to_collection(\"predictions\", tf.concat(tower_predictions, 0))\n 
tf.add_to_collection(\"input_batch_raw\", model_input_raw)\n tf.add_to_collection(\"input_batch\", model_input)\n tf.add_to_collection(\"num_frames\", num_frames)\n tf.add_to_collection(\"labels\", tf.cast(labels_batch, tf.float32))\n tf.add_to_collection(\"train_op\", train_op)\n\nclass Trainer(object):\n \"\"\"A Trainer to train a Tensorflow graph.\"\"\"\n\n def __init__(self, cluster, task, train_dir, model, reader, model_exporter,\n log_device_placement=True, max_steps=None,\n export_model_steps=1000, corr_mat = None):\n \"\"\"\"Creates a Trainer.\n\n Args:\n cluster: A tf.train.ClusterSpec if the execution is distributed.\n None otherwise.\n task: A TaskSpec describing the job type and the task index.\n \"\"\"\n\n self.cluster = cluster\n self.task = task\n self.is_master = (task.type == \"master\" and task.index == 0)\n self.train_dir = train_dir\n self.config = tf.ConfigProto(\n allow_soft_placement=True,log_device_placement=log_device_placement)\n self.model = model\n self.reader = reader\n self.model_exporter = model_exporter\n self.max_steps = max_steps\n self.max_steps_reached = False\n self.export_model_steps = export_model_steps\n self.last_model_export_step = 0\n self.corr_mat = corr_mat\n\n# if self.is_master and self.task.index > 0:\n# raise StandardError(\"%s: Only one replica of master expected\",\n# task_as_string(self.task))\n\n def run(self, start_new_model=False):\n \"\"\"Performs training on the currently defined Tensorflow graph.\n\n Returns:\n A tuple of the training Hit@1 and the training PERR.\n \"\"\"\n if self.is_master and start_new_model:\n self.remove_training_directory(self.train_dir)\n\n if not os.path.exists(self.train_dir):\n os.makedirs(self.train_dir)\n\n model_flags_dict = {\n \"model\": FLAGS.model,\n \"feature_sizes\": FLAGS.feature_sizes,\n \"feature_names\": FLAGS.feature_names,\n \"frame_features\": FLAGS.frame_features,\n \"label_loss\": FLAGS.label_loss,\n }\n flags_json_path = os.path.join(FLAGS.train_dir, \"model_flags.json\")\n if os.path.exists(flags_json_path):\n existing_flags = json.load(open(flags_json_path))\n if existing_flags != model_flags_dict:\n logging.error(\"Model flags do not match existing file %s. 
Please \"\n \"delete the file, change --train_dir, or pass flag \"\n \"--start_new_model\",\n flags_json_path)\n logging.error(\"Ran model with flags: %s\", str(model_flags_dict))\n logging.error(\"Previously ran with flags: %s\", str(existing_flags))\n exit(1)\n else:\n # Write the file.\n with open(flags_json_path, \"w\") as fout:\n\n fout.write(json.dumps(model_flags_dict))\n\n target, device_fn = self.start_server_if_distributed()\n\n meta_filename = self.get_meta_filename(start_new_model, self.train_dir)\n\n with tf.Graph().as_default() as graph:\n if meta_filename:\n saver = self.recover_model(meta_filename)\n\n with tf.device(device_fn):\n if not meta_filename:\n saver = self.build_model(self.model, self.reader, self.corr_mat)\n\n global_step = tf.get_collection(\"global_step\")[0]\n loss = tf.get_collection(\"loss\")[0]\n predictions = tf.get_collection(\"predictions\")[0]\n labels = tf.get_collection(\"labels\")[0]\n train_op = tf.get_collection(\"train_op\")[0]\n init_op = tf.global_variables_initializer()\n\n sv = tf.train.Supervisor(\n graph,\n logdir=self.train_dir,\n init_op=init_op,\n is_chief=self.is_master,\n global_step=global_step,\n #save_model_secs=15 * 60,\n save_model_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),\n #save_summaries_secs=120,\n save_summaries_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),\n saver=saver)\n logging.info(\"%s: Starting managed session.\", task_as_string(self.task))\n with sv.managed_session(target, config=self.config) as sess:\n try:\n logging.info(\"%s: Entering training loop.\", task_as_string(self.task))\n while (not sv.should_stop()) and (not self.max_steps_reached):\n batch_start_time = time.time()\n _, global_step_val, loss_val, predictions_val, labels_val = sess.run(\n [train_op, global_step, loss, predictions, labels])\n seconds_per_batch = time.time() - batch_start_time\n examples_per_second = labels_val.shape[0] / seconds_per_batch\n\n if self.max_steps and self.max_steps <= global_step_val:\n self.max_steps_reached = True\n\n #if self.is_master and global_step_val % 10 == 0 and self.train_dir:\n if self.is_master and global_step_val % FLAGS.validate_every_n_training_steps == 0 and self.train_dir:\n eval_start_time = time.time()\n hit_at_one = eval_util.calculate_hit_at_one(predictions_val, labels_val)\n perr = eval_util.calculate_precision_at_equal_recall_rate(predictions_val,\n labels_val)\n gap = eval_util.calculate_gap(predictions_val, labels_val)\n eval_end_time = time.time()\n eval_time = eval_end_time - eval_start_time\n\n logging.info(\"training step \" + str(global_step_val) + \" | Loss: \" + (\"%.2f\" % loss_val) +\n \" Examples/sec: \" + (\"%.2f\" % examples_per_second) + \" | Hit@1: \" +\n (\"%.2f\" % hit_at_one) + \" PERR: \" + (\"%.2f\" % perr) +\n \" GAP: \" + (\"%.2f\" % gap))\n\n sv.summary_writer.add_summary(\n utils.MakeSummary(\"model/Training_Hit@1\", hit_at_one),\n global_step_val)\n sv.summary_writer.add_summary(\n utils.MakeSummary(\"model/Training_Perr\", perr), global_step_val)\n sv.summary_writer.add_summary(\n utils.MakeSummary(\"model/Training_GAP\", gap), global_step_val)\n sv.summary_writer.add_summary(\n utils.MakeSummary(\"global_step/Examples/Second\",\n examples_per_second), global_step_val)\n sv.summary_writer.flush()\n\n with open(FLAGS.train_dir + '/global_step_{%d}_training_GAP_{%.6f}.txt' % (global_step_val, gap), 'w') as f:\n f.write('\\n')\n\n # Exporting the model every x steps\n time_to_export = ((self.last_model_export_step == 0) or\n (global_step_val - 
self.last_model_export_step\n >= self.export_model_steps))\n\n if self.is_master and time_to_export:\n self.export_model(global_step_val, sv.saver, sv.save_path, sess)\n self.last_model_export_step = global_step_val\n else:\n #logging.info(\"training step \" + str(global_step_val) + \" | Loss: \" +\n #(\"%.2f\" % loss_val) + \" Examples/sec: \" + (\"%.2f\" % examples_per_second))\n continue\n except tf.errors.OutOfRangeError:\n logging.info(\"%s: Done training -- epoch limit reached.\",\n task_as_string(self.task))\n\n logging.info(\"%s: Exited training loop.\", task_as_string(self.task))\n sv.Stop()\n\n def export_model(self, global_step_val, saver, save_path, session):\n\n # If the model has already been exported at this step, return.\n if global_step_val == self.last_model_export_step:\n return\n\n last_checkpoint = saver.save(session, save_path, global_step_val)\n\n model_dir = \"{0}/export/step_{1}\".format(self.train_dir, global_step_val)\n logging.info(\"%s: Exporting the model at step %s to %s.\",\n task_as_string(self.task), global_step_val, model_dir)\n\n self.model_exporter.export_model(\n model_dir=model_dir,\n global_step_val=global_step_val,\n last_checkpoint=last_checkpoint)\n\n def start_server_if_distributed(self):\n \"\"\"Starts a server if the execution is distributed.\"\"\"\n\n if self.cluster:\n logging.info(\"%s: Starting trainer within cluster %s.\",\n task_as_string(self.task), self.cluster.as_dict())\n server = start_server(self.cluster, self.task)\n target = server.target\n device_fn = tf.train.replica_device_setter(\n ps_device=\"/job:ps\",\n worker_device=\"/job:%s/task:%d\" % (self.task.type, self.task.index),\n cluster=self.cluster)\n else:\n target = \"\"\n device_fn = \"\"\n return (target, device_fn)\n\n def remove_training_directory(self, train_dir):\n \"\"\"Removes the training directory.\"\"\"\n try:\n logging.info(\n \"%s: Removing existing train directory.\",\n task_as_string(self.task))\n gfile.DeleteRecursively(train_dir)\n except:\n logging.error(\n \"%s: Failed to delete directory \" + train_dir +\n \" when starting a new model. Please delete it manually and\" +\n \" try again.\", task_as_string(self.task))\n\n def get_meta_filename(self, start_new_model, train_dir):\n if start_new_model:\n logging.info(\"%s: Flag 'start_new_model' is set. Building a new model.\",\n task_as_string(self.task))\n return None\n\n latest_checkpoint = tf.train.latest_checkpoint(train_dir)\n if not latest_checkpoint:\n logging.info(\"%s: No checkpoint file found. Building a new model.\",\n task_as_string(self.task))\n return None\n\n meta_filename = latest_checkpoint + \".meta\"\n if not gfile.Exists(meta_filename):\n logging.info(\"%s: No meta graph file found. 
Building a new model.\",\n task_as_string(self.task))\n return None\n else:\n return meta_filename\n\n def recover_model(self, meta_filename):\n logging.info(\"%s: Restoring from meta graph file %s\",\n task_as_string(self.task), meta_filename)\n return tf.train.import_meta_graph(meta_filename)\n\n def build_model(self, model, reader, corr_mat = None):\n \"\"\"Find the model and build the graph.\"\"\"\n\n label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()\n optimizer_class = find_class_by_name(FLAGS.optimizer, [tf.train])\n\n build_graph(reader=reader,\n model=model,\n optimizer_class=optimizer_class,\n clip_gradient_norm=FLAGS.clip_gradient_norm,\n train_data_pattern=FLAGS.train_data_pattern,\n label_loss_fn=label_loss_fn,\n base_learning_rate=FLAGS.base_learning_rate,\n learning_rate_decay=FLAGS.learning_rate_decay,\n learning_rate_decay_examples=FLAGS.learning_rate_decay_examples,\n regularization_penalty=FLAGS.regularization_penalty,\n num_readers=FLAGS.num_readers,\n batch_size=FLAGS.batch_size,\n num_epochs=FLAGS.num_epochs, \n corr_mat = corr_mat)\n\n return tf.train.Saver(max_to_keep=0, keep_checkpoint_every_n_hours=FLAGS.save_checkpoint_every_n_hour)\n\n\ndef get_reader():\n # Convert feature_names and feature_sizes to lists of values.\n feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(\n FLAGS.feature_names, FLAGS.feature_sizes)\n\n if FLAGS.frame_features:\n reader = readers.YT8MFrameFeatureReader(\n feature_names=feature_names, feature_sizes=feature_sizes)\n else:\n reader = readers.YT8MAggregatedFeatureReader(\n feature_names=feature_names, feature_sizes=feature_sizes)\n\n return reader\n\n\nclass ParameterServer(object):\n \"\"\"A parameter server to serve variables in a distributed execution.\"\"\"\n\n def __init__(self, cluster, task):\n \"\"\"Creates a ParameterServer.\n\n Args:\n cluster: A tf.train.ClusterSpec if the execution is distributed.\n None otherwise.\n task: A TaskSpec describing the job type and the task index.\n \"\"\"\n\n self.cluster = cluster\n self.task = task\n\n def run(self):\n \"\"\"Starts the parameter server.\"\"\"\n\n logging.info(\"%s: Starting parameter server within cluster %s.\",\n task_as_string(self.task), self.cluster.as_dict())\n server = start_server(self.cluster, self.task)\n server.join()\n\n\ndef start_server(cluster, task):\n \"\"\"Creates a Server.\n\n Args:\n cluster: A tf.train.ClusterSpec if the execution is distributed.\n None otherwise.\n task: A TaskSpec describing the job type and the task index.\n \"\"\"\n\n if not task.type:\n raise ValueError(\"%s: The task type must be specified.\" %\n task_as_string(task))\n if task.index is None:\n raise ValueError(\"%s: The task index must be specified.\" %\n task_as_string(task))\n\n # Create and start a server.\n return tf.train.Server(\n tf.train.ClusterSpec(cluster),\n protocol=\"grpc\",\n job_name=task.type,\n task_index=task.index)\n\ndef task_as_string(task):\n return \"/job:%s/task:%s\" % (task.type, task.index)\n\ndef main(unused_argv):\n # Load the environment.\n env = json.loads(os.environ.get(\"TF_CONFIG\", \"{}\"))\n\n # Load the cluster data from the environment.\n cluster_data = env.get(\"cluster\", None)\n cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None\n\n # Load the task data from the environment.\n task_data = env.get(\"task\", None) or {\"type\": \"master\", \"index\": 0}\n task = type(\"TaskSpec\", (object,), task_data)\n\n # Logging the version.\n logging.set_verbosity(tf.logging.INFO)\n logging.info(\"%s: 
Tensorflow version: %s.\",\n task_as_string(task), tf.__version__)\n\n # Dispatch to a master, a worker, or a parameter server.\n if not cluster or task.type == \"master\" or task.type == \"worker\":\n model = find_class_by_name(FLAGS.model,\n [frame_level_models, video_level_models])()\n\n reader = get_reader()\n\n model_exporter = export_model.ModelExporter(\n frame_features=FLAGS.frame_features,\n model=model,\n reader=reader)\n\n mat_dir = '/home/weimin/yt8m/code/youtube-8m/'\n with open(mat_dir + 'corr_mat.npz', 'rb') as f:\n corr_mat = np.load(f)\n\n Trainer(cluster, task, FLAGS.train_dir, model, reader, model_exporter,\n FLAGS.log_device_placement, FLAGS.max_steps,\n FLAGS.export_model_steps, corr_mat).run(start_new_model=FLAGS.start_new_model)\n\n elif task.type == \"ps\":\n ParameterServer(cluster, task).run()\n else:\n raise ValueError(\"%s: Invalid task_type: %s.\" %\n (task_as_string(task), task.type))\n\nif __name__ == \"__main__\":\n app.run()\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.losses.get_regularization_losses", "tensorflow.flags.DEFINE_float", "tensorflow.no_op", "tensorflow.logging.set_verbosity", "tensorflow.flags.FlagsError", "tensorflow.variable_scope", "tensorflow.logging.error", "tensorflow.name_scope", "tensorflow.concat", "tensorflow.Variable", "tensorflow.train.replica_device_setter", "tensorflow.identity", "tensorflow.split", "tensorflow.summary.histogram", "tensorflow.global_variables_initializer", "tensorflow.device", "tensorflow.gfile.Glob", "tensorflow.train.shuffle_batch_join", "tensorflow.Graph", "tensorflow.train.exponential_decay", "tensorflow.contrib.slim.get_model_variables", "tensorflow.constant", "tensorflow.add_to_collection", "tensorflow.train.import_meta_graph", "numpy.load", "tensorflow.flags.DEFINE_integer", "tensorflow.stack", "tensorflow.add_n", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.app.run", "tensorflow.get_collection", "tensorflow.train.string_input_producer", "tensorflow.contrib.slim.arg_scope", "tensorflow.cast", "tensorflow.train.Saver", "tensorflow.flags.DEFINE_string", "tensorflow.ConfigProto", "tensorflow.nn.l2_normalize", "tensorflow.control_dependencies", "tensorflow.logging.info", "tensorflow.flags.DEFINE_bool", "tensorflow.train.ClusterSpec", "tensorflow.train.latest_checkpoint", "tensorflow.gfile.Exists", "tensorflow.gfile.DeleteRecursively" ] ]
sergimasot/PycQED_py3
[ "54ad1b14929ffe5cc87cf59423a970e4b9baa3e1" ]
[ "pycqed/measurement/waveform_control/pulsar.py" ]
[ "# Originally by Wolfgang Pfaff\n# Modified by Adriaan Rol 9/2015\n# Modified by Ants Remm 5/2017\n# Modified by Michael Kerschbaum 5/2019\nimport os\nimport shutil\nimport ctypes\nimport numpy as np\nimport logging\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.instrument.parameter import (\n ManualParameter, InstrumentRefParameter)\nimport qcodes.utils.validators as vals\nimport time\n\nfrom pycqed.instrument_drivers.virtual_instruments.virtual_awg5014 import \\\n VirtualAWG5014\nfrom pycqed.instrument_drivers.virtual_instruments.virtual_AWG8 import \\\n VirtualAWG8\n# exception catching removed because it does not work in python versions before\n# 3.6\ntry:\n from qcodes.instrument_drivers.tektronix.AWG5014 import Tektronix_AWG5014\nexcept Exception:\n Tektronix_AWG5014 = type(None)\ntry:\n from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.\\\n UHFQuantumController import UHFQC\nexcept Exception:\n UHFQC = type(None)\ntry:\n from pycqed.instrument_drivers.physical_instruments.ZurichInstruments. \\\n ZI_HDAWG8 import ZI_HDAWG8\nexcept Exception:\n ZI_HDAWG8 = type(None)\nlog = logging.getLogger(__name__)\n\nfrom pycqed.instrument_drivers.physical_instruments.ZurichInstruments. \\\n dummy_UHFQC import dummy_UHFQC\n\nclass UHFQCPulsar:\n \"\"\"\n Defines the Zurich Instruments UHFQC specific functionality for the Pulsar\n class\n \"\"\"\n _supportedAWGtypes = (UHFQC, dummy_UHFQC)\n \n _uhf_sequence_string_template = (\n \"const WINT_EN = 0x03ff0000;\\n\"\n \"const WINT_TRIG = 0x00000010;\\n\"\n \"const IAVG_TRIG = 0x00000020;\\n\"\n \"var RO_TRIG;\\n\"\n \"if (getUserReg(1)) {{\\n\"\n \" RO_TRIG = WINT_EN + IAVG_TRIG;\\n\"\n \"}} else {{\\n\"\n \" RO_TRIG = WINT_EN + WINT_TRIG;\\n\"\n \"}}\\n\"\n \"setTrigger(WINT_EN);\\n\"\n \"\\n\"\n \"{wave_definitions}\\n\"\n \"\\n\"\n \"var loop_cnt = getUserReg(0);\\n\"\n \"\\n\"\n \"repeat (loop_cnt) {{\\n\"\n \" {playback_string}\\n\"\n \"}}\\n\"\n )\n\n def _create_awg_parameters(self, awg, channel_name_map):\n if not isinstance(awg, UHFQCPulsar._supportedAWGtypes):\n return super()._create_awg_parameters(awg, channel_name_map)\n \n name = awg.name\n\n self.add_parameter('{}_reuse_waveforms'.format(awg.name),\n initial_value=True, vals=vals.Bool(),\n parameter_class=ManualParameter)\n self.add_parameter('{}_minimize_sequencer_memory'.format(awg.name),\n initial_value=True, vals=vals.Bool(),\n parameter_class=ManualParameter,\n docstring=\"Minimizes the sequencer \"\n \"memory by repeating specific sequence \"\n \"patterns (eg. readout) passed in \"\n \"'repeat dictionary'\")\n self.add_parameter('{}_enforce_single_element'.format(awg.name),\n initial_value=False, vals=vals.Bool(),\n parameter_class=ManualParameter,\n docstring=\"Group all the pulses on this AWG into \"\n \"a single element. 
Useful for making sure \"\n \"that the master AWG has only one waveform\"\n \" per segment.\")\n self.add_parameter('{}_granularity'.format(awg.name),\n get_cmd=lambda: 16)\n self.add_parameter('{}_element_start_granularity'.format(awg.name),\n initial_value=8/(1.8e9),\n parameter_class=ManualParameter)\n self.add_parameter('{}_min_length'.format(awg.name),\n get_cmd=lambda: 16 /(1.8e9))\n self.add_parameter('{}_inter_element_deadtime'.format(awg.name),\n # get_cmd=lambda: 80 / 2.4e9)\n get_cmd=lambda: 8 / (1.8e9))\n # get_cmd=lambda: 0 / 2.4e9)\n self.add_parameter('{}_precompile'.format(awg.name), \n initial_value=False, vals=vals.Bool(),\n label='{} precompile segments'.format(awg.name),\n parameter_class=ManualParameter)\n self.add_parameter('{}_delay'.format(awg.name), \n initial_value=0, label='{} delay'.format(name), \n unit='s', parameter_class=ManualParameter,\n docstring='Global delay applied to this '\n 'channel. Positive values move pulses'\n ' on this channel forward in time')\n self.add_parameter('{}_trigger_channels'.format(awg.name), \n initial_value=[],\n label='{} trigger channel'.format(awg.name), \n parameter_class=ManualParameter)\n self.add_parameter('{}_active'.format(awg.name), initial_value=True,\n label='{} active'.format(awg.name),\n vals=vals.Bool(),\n parameter_class=ManualParameter)\n self.add_parameter('{}_compensation_pulse_min_length'.format(name), \n initial_value=0, unit='s',\n parameter_class=ManualParameter)\n self.add_parameter('{}_trigger_source'.format(awg.name), \n initial_value='Dig1',\n vals=vals.Enum('Dig1', 'Dig2', 'DIO'),\n parameter_class=ManualParameter, \n docstring='Defines for which trigger source \\\n the AWG should wait, before playing \\\n the next waveform. Allowed values \\\n are: \"Dig1\", \"Dig2\", \"DIO\"')\n\n for ch_nr in range(2):\n id = 'ch{}'.format(ch_nr + 1)\n name = channel_name_map.get(id, awg.name + '_' + id)\n self._uhfqc_create_channel_parameters(id, name, awg)\n self.channels.add(name)\n\n def _uhfqc_create_channel_parameters(self, id, name, awg):\n self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)\n self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)\n self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'analog')\n self.add_parameter('{}_amp'.format(name),\n label='{} amplitude'.format(name), unit='V',\n set_cmd=self._uhfqc_setter(awg, id, 'amp'),\n get_cmd=self._uhfqc_getter(awg, id, 'amp'),\n vals=vals.Numbers(0.075, 1.5),\n initial_value=0.75)\n self.add_parameter('{}_offset'.format(name),\n label='{} offset'.format(name), unit='V',\n set_cmd=self._uhfqc_setter(awg, id, 'offset'),\n get_cmd=self._uhfqc_getter(awg, id, 'offset'),\n vals=vals.Numbers(-1.5, 1.5),\n initial_value=0)\n self.add_parameter('{}_distortion'.format(name),\n label='{} distortion mode'.format(name),\n initial_value='off',\n vals=vals.Enum('off', 'precalculate'),\n parameter_class=ManualParameter)\n self.add_parameter('{}_distortion_dict'.format(name),\n label='{} distortion dictionary'.format(name),\n vals=vals.Dict(),\n parameter_class=ManualParameter)\n self.add_parameter('{}_charge_buildup_compensation'.format(name),\n parameter_class=ManualParameter,\n vals=vals.Bool(), initial_value=False)\n self.add_parameter('{}_compensation_pulse_scale'.format(name),\n parameter_class=ManualParameter,\n vals=vals.Numbers(0., 1.), initial_value=0.5)\n self.add_parameter('{}_compensation_pulse_delay'.format(name), \n initial_value=0, unit='s',\n parameter_class=ManualParameter)\n 
self.add_parameter('{}_compensation_pulse_gaussian_filter_sigma'.format(name),\n initial_value=0, unit='s',\n parameter_class=ManualParameter)\n\n @staticmethod\n def _uhfqc_setter(obj, id, par):\n if par == 'offset':\n def s(val):\n obj.set('sigouts_{}_offset'.format(int(id[2])-1), val)\n elif par == 'amp':\n def s(val):\n obj.set('sigouts_{}_range'.format(int(id[2])-1), val)\n else:\n raise NotImplementedError('Unknown parameter {}'.format(par))\n return s\n\n def _uhfqc_getter(self, obj, id, par):\n if par == 'offset':\n def g():\n return obj.get('sigouts_{}_offset'.format(int(id[2])-1))\n elif par == 'amp':\n def g():\n if self._awgs_prequeried_state:\n return obj.parameters['sigouts_{}_range' \\\n .format(int(id[2])-1)].get_latest()/2\n else:\n return obj.get('sigouts_{}_range' \\\n .format(int(id[2])-1))/2\n else:\n raise NotImplementedError('Unknown parameter {}'.format(par))\n return g \n\n def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):\n if not isinstance(obj, UHFQCPulsar._supportedAWGtypes):\n return super()._program_awg(obj, awg_sequence, waveforms, repeat_pattern)\n\n if not self._zi_waves_cleared:\n _zi_clear_waves()\n self._zi_waves_cleared = True\n waves_to_upload = {h: waveforms[h]\n for codewords in awg_sequence.values() \n if codewords is not None\n for cw, chids in codewords.items()\n if cw != 'metadata'\n for h in chids.values()}\n self._zi_write_waves(waves_to_upload)\n\n defined_waves = set()\n wave_definitions = []\n playback_strings = []\n\n ch_has_waveforms = {'ch1': False, 'ch2': False}\n\n current_segment = 'no_segment'\n\n def play_element(element, playback_strings, wave_definitions):\n if awg_sequence[element] is None:\n current_segment = element\n playback_strings.append(f'// Segment {current_segment}')\n return playback_strings, wave_definitions\n playback_strings.append(f'// Element {element}')\n\n metadata = awg_sequence[element].pop('metadata', {})\n if list(awg_sequence[element].keys()) != ['no_codeword']:\n raise NotImplementedError('UHFQC sequencer does currently\\\n not support codewords!')\n chid_to_hash = awg_sequence[element]['no_codeword']\n\n wave = (chid_to_hash.get('ch1', None), None,\n chid_to_hash.get('ch2', None), None)\n wave_definitions += self._zi_wave_definition(wave,\n defined_waves)\n\n acq = metadata.get('acq', False)\n playback_strings += self._zi_playback_string(name=obj.name,\n device='uhf', \n wave=wave, \n acq=acq)\n\n ch_has_waveforms['ch1'] |= wave[0] is not None\n ch_has_waveforms['ch2'] |= wave[2] is not None\n return playback_strings, wave_definitions\n\n if repeat_pattern is None:\n for element in awg_sequence:\n playback_strings, wave_definitions = play_element(element,\n playback_strings,\n wave_definitions)\n else:\n real_indicies = []\n for index, element in enumerate(awg_sequence):\n if awg_sequence[element] is not None:\n real_indicies.append(index)\n el_total = len(real_indicies)\n\n def repeat_func(n, el_played, index, playback_strings, wave_definitions):\n if isinstance(n, tuple):\n el_played_list = []\n if n[0] > 1:\n playback_strings.append('repeat ('+str(n[0])+') {')\n for t in n[1:]:\n el_cnt, playback_strings, wave_definitions = repeat_func(t,\n el_played,\n index + np.sum(\n el_played_list),\n playback_strings,\n wave_definitions)\n el_played_list.append(el_cnt)\n if n[0] > 1:\n playback_strings.append('}')\n return int(n[0] * np.sum(el_played_list)), playback_strings, wave_definitions\n else:\n for k in range(n):\n el_index = real_indicies[int(index)+k]\n element = 
list(awg_sequence.keys())[el_index]\n playback_strings, wave_definitions = play_element(element,\n playback_strings,\n wave_definitions)\n el_played = el_played + 1\n return el_played, playback_strings, wave_definitions\n\n\n\n el_played, playback_strings, wave_definitions = repeat_func(repeat_pattern, 0, 0,\n playback_strings, wave_definitions)\n\n\n if int(el_played) != int(el_total):\n log.error(el_played, ' is not ', el_total)\n raise ValueError('Check number of sequences in repeat pattern')\n\n\n if not (ch_has_waveforms['ch1'] or ch_has_waveforms['ch2']):\n return\n self.awgs_with_waveforms(obj.name)\n \n awg_str = self._uhf_sequence_string_template.format(\n wave_definitions='\\n'.join(wave_definitions),\n playback_string='\\n '.join(playback_strings),\n )\n\n # Necessary hack to pass the UHFQC drivers sanity check \n # in acquisition_initialize()\n obj._awg_program_features['loop_cnt'] = True\n obj._awg_program_features['avg_cnt'] = False\n # Hack needed to have \n obj._awg_needs_configuration[0] = False\n obj._awg_program[0] = True\n\n obj.configure_awg_from_string(awg_nr=0, program_string=awg_str, timeout=600)\n\n def _is_awg_running(self, obj):\n if not isinstance(obj, UHFQCPulsar._supportedAWGtypes):\n return super()._is_awg_running(obj)\n return obj.awgs_0_enable() != 0\n\n def _clock(self, obj, cid=None):\n if not isinstance(obj, UHFQCPulsar._supportedAWGtypes):\n return super()._clock(obj)\n return obj.clock_freq()\n\nclass HDAWG8Pulsar:\n \"\"\"\n Defines the Zurich Instruments HDAWG8 specific functionality for the Pulsar\n class\n \"\"\"\n _supportedAWGtypes = (ZI_HDAWG8, VirtualAWG8, )\n\n _hdawg_sequence_string_template = (\n \"{wave_definitions}\\n\"\n \"\\n\"\n \"{codeword_table_defs}\\n\"\n \"\\n\"\n \"while (1) {{\\n\"\n \" {playback_string}\\n\"\n \"}}\\n\"\n )\n\n def _create_awg_parameters(self, awg, channel_name_map):\n if not isinstance(awg, HDAWG8Pulsar._supportedAWGtypes):\n return super()._create_awg_parameters(awg, channel_name_map)\n \n name = awg.name\n\n self.add_parameter('{}_reuse_waveforms'.format(awg.name),\n initial_value=True, vals=vals.Bool(),\n parameter_class=ManualParameter)\n self.add_parameter('{}_minimize_sequencer_memory'.format(awg.name),\n initial_value=False, vals=vals.Bool(),\n parameter_class=ManualParameter,\n docstring=\"Minimizes the sequencer \"\n \"memory by repeating specific sequence \"\n \"patterns (eg. readout) passed in \"\n \"'repeat dictionary'\")\n self.add_parameter('{}_enforce_single_element'.format(awg.name),\n initial_value=False, vals=vals.Bool(),\n parameter_class=ManualParameter,\n docstring=\"Group all the pulses on this AWG into \"\n \"a single element. 
Useful for making sure \"\n \"that the master AWG has only one waveform\"\n \" per segment.\")\n self.add_parameter('{}_granularity'.format(awg.name),\n get_cmd=lambda: 16)\n self.add_parameter('{}_element_start_granularity'.format(awg.name),\n initial_value=8/(2.4e9),\n parameter_class=ManualParameter)\n self.add_parameter('{}_min_length'.format(awg.name),\n initial_value=16 /(2.4e9),\n parameter_class=ManualParameter)\n self.add_parameter('{}_inter_element_deadtime'.format(awg.name),\n # get_cmd=lambda: 80 / 2.4e9)\n get_cmd=lambda: 8 / (2.4e9))\n # get_cmd=lambda: 0 / 2.4e9)\n self.add_parameter('{}_precompile'.format(awg.name), \n initial_value=False, vals=vals.Bool(),\n label='{} precompile segments'.format(awg.name),\n parameter_class=ManualParameter)\n self.add_parameter('{}_delay'.format(awg.name), \n initial_value=0, label='{} delay'.format(name), \n unit='s', parameter_class=ManualParameter,\n docstring='Global delay applied to this '\n 'channel. Positive values move pulses'\n ' on this channel forward in time')\n self.add_parameter('{}_trigger_channels'.format(awg.name), \n initial_value=[],\n label='{} trigger channel'.format(awg.name), \n parameter_class=ManualParameter)\n self.add_parameter('{}_active'.format(awg.name), initial_value=True,\n label='{} active'.format(awg.name),\n vals=vals.Bool(),\n parameter_class=ManualParameter)\n self.add_parameter('{}_compensation_pulse_min_length'.format(name), \n initial_value=0, unit='s',\n parameter_class=ManualParameter)\n self.add_parameter('{}_trigger_source'.format(awg.name), \n initial_value='Dig1',\n vals=vals.Enum('Dig1', 'DIO', 'ZSync'),\n parameter_class=ManualParameter, \n docstring='Defines for which trigger source \\\n the AWG should wait, before playing \\\n the next waveform. Allowed values \\\n are: \"Dig1\", \"DIO\", \"ZSync\"')\n\n for ch_nr in range(8):\n id = 'ch{}'.format(ch_nr + 1)\n name = channel_name_map.get(id, awg.name + '_' + id)\n self._hdawg_create_analog_channel_parameters(id, name, awg)\n self.channels.add(name)\n id = 'ch{}m'.format(ch_nr + 1)\n name = channel_name_map.get(id, awg.name + '_' + id)\n self._hdawg_create_marker_channel_parameters(id, name, awg)\n self.channels.add(name)\n\n def _hdawg_create_analog_channel_parameters(self, id, name, awg):\n self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)\n self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)\n self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'analog')\n self.add_parameter('{}_offset'.format(name),\n label='{} offset'.format(name), unit='V',\n set_cmd=self._hdawg_setter(awg, id, 'offset'),\n get_cmd=self._hdawg_getter(awg, id, 'offset'),\n vals=vals.Numbers())\n self.add_parameter('{}_amp'.format(name),\n label='{} amplitude'.format(name), unit='V',\n set_cmd=self._hdawg_setter(awg, id, 'amp'),\n get_cmd=self._hdawg_getter(awg, id, 'amp'),\n vals=vals.Numbers(0.01, 5.0))\n self.add_parameter('{}_distortion'.format(name),\n label='{} distortion mode'.format(name),\n initial_value='off',\n vals=vals.Enum('off', 'precalculate'),\n parameter_class=ManualParameter)\n self.add_parameter('{}_distortion_dict'.format(name),\n label='{} distortion dictionary'.format(name),\n vals=vals.Dict(),\n parameter_class=ManualParameter)\n self.add_parameter('{}_charge_buildup_compensation'.format(name),\n parameter_class=ManualParameter,\n vals=vals.Bool(), initial_value=False)\n self.add_parameter('{}_compensation_pulse_scale'.format(name),\n parameter_class=ManualParameter,\n vals=vals.Numbers(0., 1.), 
initial_value=0.5)\n self.add_parameter('{}_compensation_pulse_delay'.format(name), \n initial_value=0, unit='s',\n parameter_class=ManualParameter)\n self.add_parameter('{}_compensation_pulse_gaussian_filter_sigma'.format(name),\n initial_value=0, unit='s',\n parameter_class=ManualParameter)\n self.add_parameter('{}_internal_modulation'.format(name), \n initial_value=False, vals=vals.Bool(),\n parameter_class=ManualParameter)\n \n def _hdawg_create_marker_channel_parameters(self, id, name, awg):\n self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)\n self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)\n self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'marker')\n self.add_parameter('{}_offset'.format(name),\n label='{} offset'.format(name), unit='V',\n set_cmd=self._hdawg_setter(awg, id, 'offset'),\n get_cmd=self._hdawg_getter(awg, id, 'offset'),\n vals=vals.Numbers())\n self.add_parameter('{}_amp'.format(name),\n label='{} amplitude'.format(name), unit='V',\n set_cmd=self._hdawg_setter(awg, id, 'amp'),\n get_cmd=self._hdawg_getter(awg, id, 'amp'),\n vals=vals.Numbers(0.01, 5.0))\n \n @staticmethod\n def _hdawg_setter(obj, id, par):\n if par == 'offset':\n if id[-1] != 'm':\n def s(val):\n obj.set('sigouts_{}_offset'.format(int(id[2])-1), val)\n else:\n s = None\n elif par == 'amp':\n if id[-1] != 'm':\n def s(val):\n obj.set('sigouts_{}_range'.format(int(id[2])-1), 2*val)\n else:\n s = None\n else:\n raise NotImplementedError('Unknown parameter {}'.format(par))\n return s\n\n def _hdawg_getter(self, obj, id, par):\n if par == 'offset':\n if id[-1] != 'm':\n def g():\n return obj.get('sigouts_{}_offset'.format(int(id[2])-1))\n else:\n return lambda: 0\n elif par == 'amp':\n if id[-1] != 'm':\n def g():\n if self._awgs_prequeried_state:\n return obj.parameters['sigouts_{}_range' \\\n .format(int(id[2])-1)].get_latest()/2\n else:\n return obj.get('sigouts_{}_range' \\\n .format(int(id[2])-1))/2\n else:\n return lambda: 1\n else:\n raise NotImplementedError('Unknown parameter {}'.format(par))\n return g \n\n def get_divisor(self, chid, awg):\n '''\n Divisor is 1 for non modulated channels and 2 for modulated non \n marker channels.\n '''\n\n if chid[-1]=='m':\n return 1\n\n name = self._id_channel(chid, awg)\n if self.get(f\"{name}_internal_modulation\"):\n return 2\n else: \n return 1\n\n \n def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):\n if not isinstance(obj, HDAWG8Pulsar._supportedAWGtypes):\n return super()._program_awg(obj, awg_sequence, waveforms, repeat_pattern)\n \n if not self._zi_waves_cleared:\n _zi_clear_waves()\n self._zi_waves_cleared = True\n \n chids = [f'ch{i+1}{m}' for i in range(8) for m in ['','m']]\n divisor = {chid: self.get_divisor(chid, obj.name) for chid in chids}\n \n waves_to_upload = {h: divisor[chid]*waveforms[h][::divisor[chid]]\n for codewords in awg_sequence.values() \n if codewords is not None \n for cw, chids in codewords.items() \n if cw != 'metadata'\n for chid, h in chids.items()}\n self._zi_write_waves(waves_to_upload)\n \n ch_has_waveforms = {'ch{}{}'.format(i + 1, m): False \n for i in range(8) for m in ['','m']}\n\n for awg_nr in self._hdawg_active_awgs(obj):\n defined_waves = set()\n codeword_table = {}\n wave_definitions = []\n codeword_table_defs = []\n playback_strings = []\n interleaves = []\n\n prev_dio_valid_polarity = obj.get(\n 'awgs_{}_dio_valid_polarity'.format(awg_nr))\n \n added_cw = set()\n ch1id = 'ch{}'.format(awg_nr * 2 + 1)\n ch1mid = 'ch{}m'.format(awg_nr * 2 + 
1)\n ch2id = 'ch{}'.format(awg_nr * 2 + 2)\n ch2mid = 'ch{}m'.format(awg_nr * 2 + 2)\n chids = [ch1id, ch2id]\n\n channels = [self._id_channel(chid, obj.name) for chid in chids]\n\n codeword_el = set()\n if all([self.get(\n f'{chan}_internal_modulation') for chan in channels]):\n internal_mod = True\n elif not any([self.get(\n f'{chan}_internal_modulation') for chan in channels]):\n internal_mod = False\n else:\n raise NotImplementedError('Internal modulation can only be' \n 'specified per sub AWG!')\n\n counter = 1\n current_segment = 'no_segment'\n for element in awg_sequence:\n if awg_sequence[element] is None:\n current_segment = element\n playback_strings.append(f'// Segment {current_segment}')\n continue\n playback_strings.append(f'// Element {element}')\n \n metadata = awg_sequence[element].pop('metadata', {})\n \n nr_cw = len(set(awg_sequence[element].keys()) - \\\n {'no_codeword'})\n\n if nr_cw == 1:\n log.warning(\n f'Only one codeword has been set for {element}')\n else:\n for cw in awg_sequence[element]:\n if cw == 'no_codeword':\n if nr_cw != 0:\n continue\n chid_to_hash = awg_sequence[element][cw]\n wave = tuple(chid_to_hash.get(ch, None)\n for ch in [ch1id, ch1mid, ch2id, ch2mid])\n wave_definitions += self._zi_wave_definition(wave,\n defined_waves)\n \n if nr_cw != 0:\n w1, w2 = self._zi_waves_to_wavenames(wave)\n if cw not in codeword_table:\n codeword_table_defs += \\\n self._zi_codeword_table_entry(cw, wave)\n codeword_table[cw] = (w1, w2)\n elif codeword_table[cw] != (w1, w2) \\\n and self.reuse_waveforms():\n log.warning('Same codeword used for different '\n 'waveforms. Using first waveform. '\n f'Ignoring element {element}.')\n\n ch_has_waveforms[ch1id] |= wave[0] is not None\n ch_has_waveforms[ch1mid] |= wave[1] is not None\n ch_has_waveforms[ch2id] |= wave[2] is not None\n ch_has_waveforms[ch2mid] |= wave[3] is not None\n\n if not internal_mod:\n playback_strings += self._zi_playback_string(name=obj.name,\n device='hdawg', wave=wave, codeword=(nr_cw != 0),\n append_zeros=self.append_zeros())\n else:\n pb_string, interleave_string = \\\n self._zi_interleaved_playback_string(name=obj.name, \n device='hdawg', counter=counter, wave=wave, \n codeword=(nr_cw != 0)) \n counter += 1\n playback_strings += pb_string\n interleaves += interleave_string\n \n if not any([ch_has_waveforms[ch] \n for ch in [ch1id, ch1mid, ch2id, ch2mid]]):\n continue\n \n awg_str = self._hdawg_sequence_string_template.format(\n wave_definitions='\\n'.join(wave_definitions+interleaves),\n codeword_table_defs='\\n'.join(codeword_table_defs),\n playback_string='\\n '.join(playback_strings))\n\n # Hack needed to pass the sanity check of the ZI_base_instrument\n # class in \n obj._awg_needs_configuration[awg_nr] = False\n obj._awg_program[awg_nr] = True\n\n obj.configure_awg_from_string(awg_nr, awg_str, timeout=600)\n\n obj.set('awgs_{}_dio_valid_polarity'.format(awg_nr),\n prev_dio_valid_polarity)\n\n for ch in range(8):\n obj.set('sigouts_{}_on'.format(ch), ch_has_waveforms[f'ch{ch+1}'])\n\n if any(ch_has_waveforms.values()):\n self.awgs_with_waveforms(obj.name)\n\n def _is_awg_running(self, obj):\n if not isinstance(obj, HDAWG8Pulsar._supportedAWGtypes):\n return super()._is_awg_running(obj)\n\n return any([obj.get('awgs_{}_enable'.format(awg_nr)) for awg_nr in\n self._hdawg_active_awgs(obj)])\n\n def _clock(self, obj, cid):\n if not isinstance(obj, HDAWG8Pulsar._supportedAWGtypes):\n return super()._clock(obj, cid)\n return obj.clock_freq()\n\n def _hdawg_active_awgs(self, obj):\n return 
[0,1,2,3]\n\nclass AWG5014Pulsar:\n \"\"\"\n Defines the Tektronix AWG5014 specific functionality for the Pulsar class\n \"\"\"\n _supportedAWGtypes = (Tektronix_AWG5014, VirtualAWG5014, )\n\n def _create_awg_parameters(self, awg, channel_name_map):\n if not isinstance(awg, AWG5014Pulsar._supportedAWGtypes):\n return super()._create_awg_parameters(awg, channel_name_map)\n \n self.add_parameter('{}_reuse_waveforms'.format(awg.name),\n initial_value=True, vals=vals.Bool(),\n parameter_class=ManualParameter)\n self.add_parameter('{}_minimize_sequencer_memory'.format(awg.name),\n initial_value=False, vals=vals.Bool(),\n parameter_class=ManualParameter,\n docstring=\"Minimizes the sequencer \"\n \"memory by repeating specific sequence \"\n \"patterns (eg. readout) passed in \"\n \"'repeat dictionary'\")\n self.add_parameter('{}_enforce_single_element'.format(awg.name),\n initial_value=False, vals=vals.Bool(),\n parameter_class=ManualParameter,\n docstring=\"Group all the pulses on this AWG into \"\n \"a single element. Useful for making sure \"\n \"that the master AWG has only one waveform\"\n \" per segment.\")\n self.add_parameter('{}_granularity'.format(awg.name),\n get_cmd=lambda: 4)\n self.add_parameter('{}_element_start_granularity'.format(awg.name),\n initial_value=4/(1.2e9),\n parameter_class=ManualParameter)\n self.add_parameter('{}_min_length'.format(awg.name),\n get_cmd=lambda: 256/(1.2e9)) # Can not be triggered \n # faster than 210 ns.\n self.add_parameter('{}_inter_element_deadtime'.format(awg.name),\n get_cmd=lambda: 0)\n self.add_parameter('{}_precompile'.format(awg.name), \n initial_value=False, \n label='{} precompile segments'.format(awg.name),\n parameter_class=ManualParameter, vals=vals.Bool())\n self.add_parameter('{}_delay'.format(awg.name), initial_value=0,\n label='{} delay'.format(awg.name), unit='s',\n parameter_class=ManualParameter,\n docstring=\"Global delay applied to this channel. 
\"\n \"Positive values move pulses on this \"\n \"channel forward in time\")\n self.add_parameter('{}_trigger_channels'.format(awg.name), \n initial_value=[],\n label='{} trigger channels'.format(awg.name), \n parameter_class=ManualParameter)\n self.add_parameter('{}_active'.format(awg.name), initial_value=True,\n label='{} active'.format(awg.name), \n vals=vals.Bool(),\n parameter_class=ManualParameter)\n self.add_parameter('{}_compensation_pulse_min_length'.format(awg.name), \n initial_value=0, unit='s',\n parameter_class=ManualParameter)\n\n for ch_nr in range(4):\n id = 'ch{}'.format(ch_nr + 1)\n name = channel_name_map.get(id, awg.name + '_' + id)\n self._awg5014_create_analog_channel_parameters(id, name, awg)\n self.channels.add(name)\n id = 'ch{}m1'.format(ch_nr + 1)\n name = channel_name_map.get(id, awg.name + '_' + id)\n self._awg5014_create_marker_channel_parameters(id, name, awg)\n self.channels.add(name)\n id = 'ch{}m2'.format(ch_nr + 1)\n name = channel_name_map.get(id, awg.name + '_' + id)\n self._awg5014_create_marker_channel_parameters(id, name, awg)\n self.channels.add(name)\n\n def _awg5014_create_analog_channel_parameters(self, id, name, awg):\n self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)\n self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)\n self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'analog')\n self.add_parameter('{}_offset_mode'.format(name), \n parameter_class=ManualParameter, \n vals=vals.Enum('software', 'hardware'))\n offset_mode_func = self.parameters['{}_offset_mode'.format(name)]\n self.add_parameter('{}_offset'.format(name),\n label='{} offset'.format(name), unit='V',\n set_cmd=self._awg5014_setter(awg, id, 'offset', \n offset_mode_func),\n get_cmd=self._awg5014_getter(awg, id, 'offset', \n offset_mode_func),\n vals=vals.Numbers())\n self.add_parameter('{}_amp'.format(name),\n label='{} amplitude'.format(name), unit='V',\n set_cmd=self._awg5014_setter(awg, id, 'amp'),\n get_cmd=self._awg5014_getter(awg, id, 'amp'),\n vals=vals.Numbers(0.01, 2.25))\n self.add_parameter('{}_distortion'.format(name),\n label='{} distortion mode'.format(name),\n initial_value='off',\n vals=vals.Enum('off', 'precalculate'),\n parameter_class=ManualParameter)\n self.add_parameter('{}_distortion_dict'.format(name),\n label='{} distortion dictionary'.format(name),\n vals=vals.Dict(),\n parameter_class=ManualParameter)\n self.add_parameter('{}_charge_buildup_compensation'.format(name),\n parameter_class=ManualParameter,\n vals=vals.Bool(), initial_value=False)\n self.add_parameter('{}_compensation_pulse_scale'.format(name),\n parameter_class=ManualParameter,\n vals=vals.Numbers(0., 1.), initial_value=0.5)\n self.add_parameter('{}_compensation_pulse_delay'.format(name), \n initial_value=0, unit='s',\n parameter_class=ManualParameter)\n self.add_parameter('{}_compensation_pulse_gaussian_filter_sigma'.format(name),\n initial_value=0, unit='s',\n parameter_class=ManualParameter)\n \n def _awg5014_create_marker_channel_parameters(self, id, name, awg):\n self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)\n self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)\n self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'marker')\n self.add_parameter('{}_offset'.format(name),\n label='{} offset'.format(name), unit='V',\n set_cmd=self._awg5014_setter(awg, id, 'offset'),\n get_cmd=self._awg5014_getter(awg, id, 'offset'),\n vals=vals.Numbers(-2.7, 2.7))\n self.add_parameter('{}_amp'.format(name),\n 
label='{} amplitude'.format(name), unit='V',\n set_cmd=self._awg5014_setter(awg, id, 'amp'),\n get_cmd=self._awg5014_getter(awg, id, 'amp'),\n vals=vals.Numbers(-5.4, 5.4))\n\n @staticmethod\n def _awg5014_setter(obj, id, par, offset_mode_func=None):\n if id in ['ch1', 'ch2', 'ch3', 'ch4']:\n if par == 'offset':\n def s(val):\n if offset_mode_func() == 'software':\n obj.set('{}_offset'.format(id), val)\n elif offset_mode_func() == 'hardware':\n obj.set('{}_DC_out'.format(id), val)\n else:\n raise ValueError('Invalid offset mode for AWG5014: '\n '{}'.format(offset_mode_func()))\n elif par == 'amp':\n def s(val):\n obj.set('{}_amp'.format(id), 2*val)\n else:\n raise NotImplementedError('Unknown parameter {}'.format(par))\n else:\n id_raw = id[:3] + '_' + id[3:] # convert ch1m1 to ch1_m1\n if par == 'offset':\n def s(val):\n h = obj.get('{}_high'.format(id_raw))\n l = obj.get('{}_low'.format(id_raw))\n obj.set('{}_high'.format(id_raw), val + h - l)\n obj.set('{}_low'.format(id_raw), val)\n elif par == 'amp':\n def s(val):\n l = obj.get('{}_low'.format(id_raw))\n obj.set('{}_high'.format(id_raw), l + val)\n else:\n raise NotImplementedError('Unknown parameter {}'.format(par))\n return s\n\n def _awg5014_getter(self, obj, id, par, offset_mode_func=None):\n if id in ['ch1', 'ch2', 'ch3', 'ch4']:\n if par == 'offset':\n def g():\n if offset_mode_func() == 'software':\n return obj.get('{}_offset'.format(id))\n elif offset_mode_func() == 'hardware':\n return obj.get('{}_DC_out'.format(id))\n else:\n raise ValueError('Invalid offset mode for AWG5014: '\n '{}'.format(offset_mode_func()))\n \n elif par == 'amp':\n def g():\n if self._awgs_prequeried_state:\n return obj.parameters['{}_amp'.format(id)] \\\n .get_latest()/2\n else:\n return obj.get('{}_amp'.format(id))/2\n else:\n raise NotImplementedError('Unknown parameter {}'.format(par))\n else:\n id_raw = id[:3] + '_' + id[3:] # convert ch1m1 to ch1_m1\n if par == 'offset':\n def g():\n return obj.get('{}_low'.format(id_raw))\n elif par == 'amp':\n def g():\n if self._awgs_prequeried_state:\n h = obj.get('{}_high'.format(id_raw))\n l = obj.get('{}_low'.format(id_raw))\n else:\n h = obj.parameters['{}_high'.format(id_raw)]\\\n .get_latest()\n l = obj.parameters['{}_low'.format(id_raw)]\\\n .get_latest()\n return h - l\n else:\n raise NotImplementedError('Unknown parameter {}'.format(par))\n return g\n\n def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):\n if not isinstance(obj, AWG5014Pulsar._supportedAWGtypes):\n return super()._program_awg(obj, awg_sequence, waveforms, repeat_pattern)\n\n pars = {\n 'ch{}_m{}_low'.format(ch + 1, m + 1)\n for ch in range(4) for m in range(2)\n }\n pars |= {\n 'ch{}_m{}_high'.format(ch + 1, m + 1)\n for ch in range(4) for m in range(2)\n }\n pars |= {\n 'ch{}_offset'.format(ch + 1) for ch in range(4)\n }\n old_vals = {}\n for par in pars:\n old_vals[par] = obj.get(par)\n\n packed_waveforms = {}\n wfname_l = []\n\n grp_has_waveforms = {f'ch{i+1}': False for i in range(4)}\n\n for element in awg_sequence:\n if awg_sequence[element] is None:\n continue\n metadata = awg_sequence[element].pop('metadata', {})\n if list(awg_sequence[element].keys()) != ['no_codeword']:\n raise NotImplementedError('AWG5014 sequencer does '\n 'not support codewords!')\n chid_to_hash = awg_sequence[element]['no_codeword']\n\n if not any(chid_to_hash):\n continue # no waveforms\n \n maxlen = max([len(waveforms[h]) for h in chid_to_hash.values()])\n maxlen = max(maxlen, 256)\n\n wfname_l.append([])\n for grp in [f'ch{i + 
1}' for i in range(4)]:\n wave = (chid_to_hash.get(grp, None),\n chid_to_hash.get(grp + 'm1', None), \n chid_to_hash.get(grp + 'm2', None))\n grp_has_waveforms[grp] |= (wave != (None, None, None))\n wfname = self._hash_to_wavename((maxlen, wave))\n grp_wfs = [np.pad(waveforms.get(h, [0]), \n (0, maxlen - len(waveforms.get(h, [0]))), \n 'constant', constant_values=0) for h in wave]\n packed_waveforms[wfname] = obj.pack_waveform(*grp_wfs)\n wfname_l[-1].append(wfname)\n if any([wf[0] != 0 for wf in grp_wfs]):\n log.warning(f'Element {element} starts with non-zero ' \n f'entry on {obj.name}.')\n\n if not any(grp_has_waveforms.values()):\n for grp in ['ch1', 'ch2', 'ch3', 'ch4']:\n obj.set('{}_state'.format(grp), grp_has_waveforms[grp])\n return None\n\n self.awgs_with_waveforms(obj.name)\n\n nrep_l = [1] * len(wfname_l)\n goto_l = [0] * len(wfname_l)\n goto_l[-1] = 1\n wait_l = [1] * len(wfname_l)\n logic_jump_l = [0] * len(wfname_l)\n\n filename = 'pycqed_pulsar.awg'\n\n awg_file = obj.generate_awg_file(packed_waveforms, np.array(wfname_l).transpose().copy(),\n nrep_l, wait_l, goto_l, logic_jump_l,\n self._awg5014_chan_cfg(obj.name))\n obj.send_awg_file(filename, awg_file)\n obj.load_awg_file(filename)\n\n for par in pars:\n obj.set(par, old_vals[par])\n\n time.sleep(.1)\n # Waits for AWG to be ready\n obj.is_awg_ready()\n\n for grp in ['ch1', 'ch2', 'ch3', 'ch4']:\n obj.set('{}_state'.format(grp), 1*grp_has_waveforms[grp])\n\n hardware_offsets = 0\n for grp in ['ch1', 'ch2', 'ch3', 'ch4']:\n cname = self._id_channel(grp, obj.name)\n offset_mode = self.get('{}_offset_mode'.format(cname))\n if offset_mode == 'hardware':\n hardware_offsets = 1\n obj.DC_output(hardware_offsets)\n\n return awg_file\n\n def _is_awg_running(self, obj):\n if not isinstance(obj, AWG5014Pulsar._supportedAWGtypes):\n return super()._is_awg_running(obj)\n\n return obj.get_state() != 'Idle'\n\n def _clock(self, obj, cid=None):\n if not isinstance(obj, AWG5014Pulsar._supportedAWGtypes):\n return super()._clock(obj, cid)\n return obj.clock_freq()\n\n @staticmethod\n def _awg5014_group_ids(cid):\n \"\"\"\n Returns all id-s corresponding to a single channel group.\n For example `Pulsar._awg5014_group_ids('ch2')` returns `['ch2',\n 'ch2m1', 'ch2m2']`.\n\n Args:\n cid: An id of one of the AWG5014 channels.\n\n Returns: A list of id-s corresponding to the same group as `cid`.\n \"\"\"\n return [cid[:3], cid[:3] + 'm1', cid[:3] + 'm2'] \n\n def _awg5014_chan_cfg(self, awg):\n channel_cfg = {}\n for channel in self.channels:\n if self.get('{}_awg'.format(channel)) != awg:\n continue\n cid = self.get('{}_id'.format(channel))\n amp = self.get('{}_amp'.format(channel))\n off = self.get('{}_offset'.format(channel))\n if self.get('{}_type'.format(channel)) == 'analog':\n offset_mode = self.get('{}_offset_mode'.format(channel))\n channel_cfg['ANALOG_METHOD_' + cid[2]] = 1\n channel_cfg['ANALOG_AMPLITUDE_' + cid[2]] = amp * 2\n if offset_mode == 'software':\n channel_cfg['ANALOG_OFFSET_' + cid[2]] = off\n channel_cfg['DC_OUTPUT_LEVEL_' + cid[2]] = 0\n channel_cfg['EXTERNAL_ADD_' + cid[2]] = 0\n else:\n channel_cfg['ANALOG_OFFSET_' + cid[2]] = 0\n channel_cfg['DC_OUTPUT_LEVEL_' + cid[2]] = off\n channel_cfg['EXTERNAL_ADD_' + cid[2]] = 1\n else:\n channel_cfg['MARKER1_METHOD_' + cid[2]] = 2\n channel_cfg['MARKER2_METHOD_' + cid[2]] = 2\n channel_cfg['MARKER{}_LOW_{}'.format(cid[-1], cid[2])] = \\\n off\n channel_cfg['MARKER{}_HIGH_{}'.format(cid[-1], cid[2])] = \\\n off + amp\n channel_cfg['CHANNEL_STATE_' + cid[2]] = 0\n\n for channel 
in self.channels:\n if self.get('{}_awg'.format(channel)) != awg:\n continue\n if self.get('{}_active'.format(awg)):\n cid = self.get('{}_id'.format(channel))\n channel_cfg['CHANNEL_STATE_' + cid[2]] = 1\n return channel_cfg\n\n\nclass Pulsar(AWG5014Pulsar, HDAWG8Pulsar, UHFQCPulsar, Instrument):\n \"\"\"\n A meta-instrument responsible for all communication with the AWGs.\n Contains information about all the available awg-channels in the setup.\n Starting, stopping and programming and changing the parameters of the AWGs\n should be done through Pulsar. Supports Tektronix AWG5014 and partially\n ZI UHFLI.\n\n Args:\n master_awg: Name of the AWG that triggers all the other AWG-s and\n should be started last (after other AWG-s are already\n waiting for a trigger.\n \"\"\"\n def __init__(self, name='Pulsar', master_awg=None):\n super().__init__(name)\n\n self.add_parameter('master_awg', \n parameter_class=InstrumentRefParameter,\n initial_value=master_awg)\n self.add_parameter('inter_element_spacing',\n vals=vals.MultiType(vals.Numbers(0),\n vals.Enum('auto')),\n set_cmd=self._set_inter_element_spacing,\n get_cmd=self._get_inter_element_spacing)\n self.add_parameter('reuse_waveforms', initial_value=False,\n parameter_class=ManualParameter, vals=vals.Bool())\n self.add_parameter('append_zeros', initial_value=0, vals=vals.Ints(),\n parameter_class=ManualParameter)\n self.add_parameter('flux_crosstalk_cancellation', initial_value=False,\n parameter_class=ManualParameter, vals=vals.Bool())\n self.add_parameter('flux_channels', initial_value=[],\n parameter_class=ManualParameter, vals=vals.Lists())\n self.add_parameter('flux_crosstalk_cancellation_mtx',\n initial_value=None, parameter_class=ManualParameter)\n self.add_parameter('flux_crosstalk_cancellation_shift_mtx',\n initial_value=None, parameter_class=ManualParameter)\n\n self._inter_element_spacing = 'auto'\n self.channels = set() # channel names\n self.awgs = set() # AWG names\n self.last_sequence = None\n self.last_elements = None\n self._awgs_with_waveforms = set()\n\n self._awgs_prequeried_state = False\n\n self._zi_waves_cleared = False\n self._hash_to_wavename_table = {}\n\n self.num_seg = 0\n\n Pulsar._instance = self\n\n @staticmethod\n def get_instance():\n return Pulsar._instance\n\n # channel handling\n def define_awg_channels(self, awg, channel_name_map=None):\n \"\"\"\n The AWG object must be created before creating channels for that AWG\n\n Args:\n awg: AWG object to add to the pulsar.\n channel_name_map: A dictionary that maps channel ids to channel\n names. (default {})\n \"\"\"\n if channel_name_map is None:\n channel_name_map = {}\n\n for channel_name in channel_name_map.values():\n if channel_name in self.channels:\n raise KeyError(\"Channel named '{}' already defined\".format(\n channel_name))\n if awg.name in self.awgs:\n raise KeyError(\"AWG '{}' already added to pulsar\".format(awg.name))\n\n fail = None\n super()._create_awg_parameters(awg, channel_name_map)\n # try:\n # super()._create_awg_parameters(awg, channel_name_map)\n # except AttributeError as e:\n # fail = e\n # if fail is not None:\n # raise TypeError('Unsupported AWG instrument: {}. 
'\n # .format(awg.name) + str(fail))\n \n self.awgs.add(awg.name)\n\n def find_awg_channels(self, awg):\n channel_list = []\n for channel in self.channels:\n if self.get('{}_awg'.format(channel)) == awg:\n channel_list.append(channel)\n\n return channel_list\n\n def AWG_obj(self, **kw):\n \"\"\"\n Return the AWG object corresponding to a channel or an AWG name.\n\n Args:\n awg: Name of the AWG Instrument.\n channel: Name of the channel\n\n Returns: An instance of Instrument class corresponding to the AWG\n requested.\n \"\"\"\n awg = kw.get('awg', None)\n chan = kw.get('channel', None)\n if awg is not None and chan is not None:\n raise ValueError('Both `awg` and `channel` arguments passed to '\n 'Pulsar.AWG_obj()')\n elif awg is None and chan is not None:\n name = self.get('{}_awg'.format(chan))\n elif awg is not None and chan is None:\n name = awg\n else:\n raise ValueError('Either `awg` or `channel` argument needs to be '\n 'passed to Pulsar.AWG_obj()')\n return Instrument.find_instrument(name)\n\n def clock(self, channel=None, awg=None):\n \"\"\"\n Returns the clock rate of channel or AWG 'instrument_ref' \n Args:\n isntrument_ref: name of the channel or AWG\n Returns: clock rate in samples per second\n \"\"\"\n if channel is not None and awg is not None:\n raise ValueError('Both channel and awg arguments passed to '\n 'Pulsar.clock()')\n if channel is None and awg is None:\n raise ValueError('Neither channel nor awg arguments passed to '\n 'Pulsar.clock()')\n\n if channel is not None:\n awg = self.get('{}_awg'.format(channel))\n \n if self._awgs_prequeried_state:\n return self._clocks[awg]\n else:\n fail = None\n obj = self.AWG_obj(awg=awg)\n try:\n return super()._clock(obj)\n except AttributeError as e:\n fail = e\n if fail is not None:\n raise TypeError('Unsupported AWG instrument: {} of type {}. '\n .format(obj.name, type(obj)) + str(fail))\n\n def active_awgs(self):\n \"\"\"\n Returns:\n A set of the names of the active AWGs registered\n\n Inactive AWGs don't get started or stopped. Also the waveforms on\n inactive AWGs don't get updated.\n \"\"\"\n return {awg for awg in self.awgs if self.get('{}_active'.format(awg))}\n\n def awgs_with_waveforms(self, awg=None):\n \"\"\"\n Adds an awg to the set of AWGs with waveforms programmed, or returns \n set of said AWGs.\n \"\"\"\n if awg == None:\n return self._awgs_with_waveforms\n else:\n self._awgs_with_waveforms.add(awg)\n\n def start(self, exclude=None):\n \"\"\"\n Start the active AWGs. If multiple AWGs are used in a setup where the\n slave AWGs are triggered by the master AWG, then the slave AWGs must be\n running and waiting for trigger when the master AWG is started to\n ensure synchronous playback.\n \"\"\"\n if exclude is None:\n exclude = []\n\n # Start only the AWGs which have at least one channel programmed, i.e.\n # where at least one channel has state = 1. 
\n awgs_with_waveforms = self.awgs_with_waveforms()\n used_awgs = set(self.active_awgs()) & awgs_with_waveforms\n \n for awg in used_awgs:\n self._stop_awg(awg)\n\n if self.master_awg() is None:\n for awg in used_awgs:\n if awg not in exclude:\n self._start_awg(awg)\n else:\n if self.master_awg() not in exclude:\n self.master_awg.get_instr().stop()\n for awg in used_awgs:\n if awg != self.master_awg() and awg not in exclude:\n self._start_awg(awg)\n tstart = time.time()\n for awg in used_awgs:\n if awg == self.master_awg() or awg in exclude:\n continue\n good = False\n while not (good or time.time() > tstart + 10):\n if self._is_awg_running(awg):\n good = True\n else:\n time.sleep(0.1)\n if not good:\n raise Exception('AWG {} did not start in 10s'\n .format(awg))\n if self.master_awg() not in exclude:\n self.master_awg.get_instr().start()\n\n def stop(self):\n \"\"\"\n Stop all active AWGs.\n \"\"\"\n\n awgs_with_waveforms = set(self.awgs_with_waveforms())\n used_awgs = set(self.active_awgs()) & awgs_with_waveforms\n\n for awg in used_awgs:\n self._stop_awg(awg)\n \n def program_awgs(self, sequence, awgs='all'):\n\n # Stores the last uploaded sequence for easy access and plotting\n self.last_sequence = sequence\n\n if awgs == 'all':\n awgs = self.active_awgs()\n\n # initializes the set of AWGs with waveforms\n self._awgs_with_waveforms -= awgs\n\n\n # prequery all AWG clock values and AWG amplitudes\n self.AWGs_prequeried(True)\n\n log.info(f'Starting compilation of sequence {sequence.name}')\n t0 = time.time()\n waveforms, awg_sequences = sequence.generate_waveforms_sequences()\n log.info(f'Finished compilation of sequence {sequence.name} in '\n f'{time.time() - t0}')\n\n\n channels_used = self._channels_in_awg_sequences(awg_sequences)\n repeat_dict = self._generate_awg_repeat_dict(sequence.repeat_patterns,\n channels_used)\n self._zi_waves_cleared = False\n self._hash_to_wavename_table = {}\n\n for awg in awgs:\n log.info(f'Started programming {awg}')\n t0 = time.time()\n if awg in repeat_dict.keys():\n self._program_awg(self.AWG_obj(awg=awg),\n awg_sequences.get(awg, {}), waveforms,\n repeat_pattern=repeat_dict[awg])\n else:\n self._program_awg(self.AWG_obj(awg=awg),\n awg_sequences.get(awg, {}), waveforms)\n log.info(f'Finished programming {awg} in {time.time() - t0}')\n \n self.num_seg = len(sequence.segments)\n self.AWGs_prequeried(False)\n\n def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):\n \"\"\"\n Program the AWG with a sequence of segments.\n\n Args:\n obj: the instance of the AWG to program\n sequence: the `Sequence` object that determines the segment order,\n repetition and trigger wait\n el_wfs: A dictionary from element name to a dictionary from channel\n id to the waveform.\n loop: Boolean flag, whether the segments should be looped over.\n Default is `True`.\n \"\"\"\n # fail = None\n # try:\n # super()._program_awg(obj, awg_sequence, waveforms)\n # except AttributeError as e:\n # fail = e\n # if fail is not None:\n # raise TypeError('Unsupported AWG instrument: {} of type {}. 
'\n # .format(obj.name, type(obj)) + str(fail))\n if repeat_pattern is not None:\n super()._program_awg(obj, awg_sequence, waveforms,\n repeat_pattern=repeat_pattern)\n else:\n super()._program_awg(obj, awg_sequence, waveforms)\n\n def _hash_to_wavename(self, h):\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n if h not in self._hash_to_wavename_table:\n hash_int = abs(hash(h))\n wname = ''.join(to_base(hash_int, len(alphabet), alphabet))[::-1]\n while wname in self._hash_to_wavename_table.values():\n hash_int += 1\n wname = ''.join(to_base(hash_int, len(alphabet), alphabet)) \\\n [::-1]\n self._hash_to_wavename_table[h] = wname\n return self._hash_to_wavename_table[h]\n\n def _zi_wave_definition(self, wave, defined_waves=None):\n if defined_waves is None:\n defined_waves = set()\n wave_definition = []\n w1, w2 = self._zi_waves_to_wavenames(wave)\n for analog, marker, wc in [(wave[0], wave[1], w1), \n (wave[2], wave[3], w2)]:\n if analog is not None:\n wa = self._hash_to_wavename(analog)\n if wa not in defined_waves:\n wave_definition.append(f'wave {wa} = \"{wa}\";')\n defined_waves.add(wa)\n if marker is not None: \n wm = self._hash_to_wavename(marker)\n if wm not in defined_waves:\n wave_definition.append(f'wave {wm} = \"{wm}\";')\n defined_waves.add(wm)\n if analog is not None and marker is not None:\n if wc not in defined_waves:\n wave_definition.append(f'wave {wc} = {wa} + {wm};')\n defined_waves.add(wc)\n return wave_definition\n\n def _zi_playback_string(self, name, device, wave, acq=False, codeword=False,\n append_zeros=0):\n playback_string = []\n w1, w2 = self._zi_waves_to_wavenames(wave)\n\n trig_source = self.get('{}_trigger_source'.format(name))\n if trig_source == 'Dig1':\n playback_string.append(\n 'waitDigTrigger(1{});'.format(', 1' if device == 'uhf' else ''))\n elif trig_source == 'Dig2':\n playback_string.append('waitDigTrigger(2,1);')\n else:\n playback_string.append(f'wait{trig_source}Trigger();')\n\n if codeword and not (w1 is None and w2 is None):\n playback_string.append('playWaveDIO();')\n else:\n if w1 is None and w2 is not None:\n # This hack is needed due to a bug on the HDAWG.\n # Remove this if case once the bug is fixed.\n playback_string.append(f'playWave(marker(1,0)*0*{w2}, {w2});')\n elif w1 is not None and w2 is None:\n # This hack is needed due to a bug on the HDAWG.\n # Remove this if case once the bug is fixed.\n playback_string.append(f'playWave({w1}, marker(1,0)*0*{w1});')\n elif w1 is not None or w2 is not None:\n playback_string.append('playWave({});'.format(\n _zi_wavename_pair_to_argument(w1, w2)))\n if acq:\n playback_string.append('setTrigger(RO_TRIG);')\n playback_string.append('setTrigger(WINT_EN);')\n if append_zeros:\n playback_string.append(f'playZero({append_zeros});')\n return playback_string\n\n def _zi_interleaved_playback_string(self, name, device, counter, \n wave, acq=False, codeword=False):\n playback_string = []\n w1, w2 = self._zi_waves_to_wavenames(wave)\n if w1 is None or w2 is None:\n raise ValueError('When using HDAWG modulation both I and Q need ' \n 'to be defined')\n \n wname = f'wave{counter}'\n interleaves = [f'wave {wname} = interleave({w1}, {w2});']\n\n if not codeword:\n if not acq:\n playback_string.append(f'prefetch({wname},{wname});')\n \n trig_source = self.get('{}_trigger_source'.format(name))\n if trig_source == 'Dig1':\n playback_string.append(\n 'waitDigTrigger(1{});'.format(', 1' if device == 'uhf' else ''))\n elif trig_source == 'Dig2':\n playback_string.append('waitDigTrigger(2,1);')\n else:\n 
playback_string.append(f'wait{trig_source}Trigger();')\n\n if codeword:\n # playback_string.append('playWaveDIO();')\n raise NotImplementedError('Modulation in combination with codeword'\n 'pulses has not yet been implemented!')\n else:\n playback_string.append(f'playWave({wname},{wname});')\n if acq:\n playback_string.append('setTrigger(RO_TRIG);')\n playback_string.append('setTrigger(WINT_EN);')\n return playback_string, interleaves\n\n def _zi_codeword_table_entry(self, codeword, wave):\n w1, w2 = self._zi_waves_to_wavenames(wave)\n if w1 is None and w2 is not None:\n # This hack is needed due to a bug on the HDAWG. \n # Remove this if case once the bug is fixed.\n return [f'setWaveDIO({codeword}, zeros(1) + marker(1, 0), {w2});']\n elif not (w1 is None and w2 is None):\n return ['setWaveDIO({}, {});'.format(codeword, \n _zi_wavename_pair_to_argument(w1, w2))]\n else:\n return []\n\n def _zi_waves_to_wavenames(self, wave):\n wavenames = []\n for analog, marker in [(wave[0], wave[1]), (wave[2], wave[3])]:\n if analog is None and marker is None:\n wavenames.append(None)\n elif analog is None and marker is not None:\n wavenames.append(self._hash_to_wavename(marker))\n elif analog is not None and marker is None:\n wavenames.append(self._hash_to_wavename(analog))\n else:\n wavenames.append(self._hash_to_wavename((analog, marker)))\n return wavenames\n\n def _zi_write_waves(self, waveforms):\n wave_dir = _zi_wave_dir()\n for h, wf in waveforms.items():\n filename = os.path.join(wave_dir, self._hash_to_wavename(h)+'.csv')\n fmt = '%.18e' if wf.dtype == np.float else '%d'\n np.savetxt(filename, wf, delimiter=\",\", fmt=fmt)\n\n def _start_awg(self, awg):\n obj = self.AWG_obj(awg=awg)\n obj.start()\n\n def _stop_awg(self, awg):\n obj = self.AWG_obj(awg=awg)\n obj.stop()\n\n def _is_awg_running(self, awg):\n fail = None\n obj = self.AWG_obj(awg=awg)\n try:\n return super()._is_awg_running(obj)\n except AttributeError as e:\n fail = e\n if fail is not None:\n raise TypeError('Unsupported AWG instrument: {} of type {}. '\n .format(obj.name, type(obj)) + str(fail))\n\n def _set_inter_element_spacing(self, val):\n self._inter_element_spacing = val\n\n def _get_inter_element_spacing(self):\n if self._inter_element_spacing != 'auto':\n return self._inter_element_spacing\n else:\n max_spacing = 0\n for awg in self.awgs:\n max_spacing = max(max_spacing, self.get(\n '{}_inter_element_deadtime'.format(awg)))\n return max_spacing\n\n def AWGs_prequeried(self, status=None):\n if status is None:\n return self._awgs_prequeried_state\n elif status:\n self._awgs_prequeried_state = False\n self._clocks = {}\n for awg in self.awgs:\n self._clocks[awg] = self.clock(awg=awg)\n for c in self.channels:\n # prequery also the output amplitude values\n self.get(c + '_amp')\n self._awgs_prequeried_state = True\n else:\n self._awgs_prequeried_state = False\n\n def _id_channel(self, cid, awg):\n \"\"\"\n Returns the channel name corresponding to the channel with id `cid` on\n the AWG `awg`.\n\n Args:\n cid: An id of one of the channels.\n awg: The name of the AWG.\n\n Returns: The corresponding channel name. If the channel is not found,\n returns `None`.\n \"\"\"\n for cname in self.channels:\n if self.get('{}_awg'.format(cname)) == awg and \\\n self.get('{}_id'.format(cname)) == cid:\n return cname\n return None\n\n @staticmethod\n def _channels_in_awg_sequences(awg_sequences):\n \"\"\"\n identifies all channels used in the given awg keyed sequence\n :param awg_sequences (dict): awg sequences keyed by awg name, i.e. 
as\n returned by sequence.generate_sequence_waveforms()\n :return: dictionary keyed by awg of with all channel used during the sequence\n \"\"\"\n channels_used = dict()\n for awg in awg_sequences:\n channels_used[awg] = set()\n for segname in awg_sequences[awg]:\n if awg_sequences[awg][segname] is None:\n continue\n elements = awg_sequences[awg][segname]\n for cw in elements:\n if cw != \"metadata\":\n channels_used[awg] |= elements[cw].keys()\n return channels_used\n\n def _generate_awg_repeat_dict(self, repeat_dict_per_ch, channels_used):\n \"\"\"\n Translates a repeat dictionary keyed by channels to a repeat dictionary\n keyed by awg. Checks whether all channels in channels_used have an entry.\n :param repeat_dict_per_ch: keys: channels_id, values: repeat pattern\n :param channels_used (dict): list of channel used on each awg\n :return:\n \"\"\"\n awg_ch_repeat_dict = dict()\n repeat_dict_per_awg = dict()\n for cname in repeat_dict_per_ch:\n awg = self.get(f\"{cname}_awg\")\n chid = self.get(f\"{cname}_id\")\n\n if not awg in awg_ch_repeat_dict.keys():\n awg_ch_repeat_dict[awg] = []\n awg_ch_repeat_dict[awg].append(chid)\n if repeat_dict_per_awg.get(awg, repeat_dict_per_ch[cname]) \\\n != repeat_dict_per_ch[cname]:\n raise NotImplementedError(f\"Repeat pattern on {cname} is \"\n f\"different from at least one other channel on {awg}:\"\n f\"{repeat_dict_per_ch[cname]} vs {repeat_dict_per_awg[awg]}\")\n repeat_dict_per_awg[awg] = repeat_dict_per_ch[cname]\n \n for awg_repeat, chs_repeat in awg_ch_repeat_dict.items():\n for ch in channels_used[awg_repeat]:\n assert ch in chs_repeat, f\"Repeat pattern \" \\\n f\"provided for {awg_repeat} but no pattern was given on \" \\\n f\"{ch}. All used channels on the same awg must have a \" \\\n f\"repeat pattern.\"\n\n return repeat_dict_per_awg\n\n\ndef to_base(n, b, alphabet=None, prev=None):\n if prev is None: prev = []\n if n == 0: \n if alphabet is None: return prev\n else: return [alphabet[i] for i in prev]\n return to_base(n//b, b, alphabet, prev+[n%b])\n\ndef _zi_wave_dir():\n if os.name == 'nt':\n dll = ctypes.windll.shell32\n buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH + 1)\n if dll.SHGetSpecialFolderPathW(None, buf, 0x0005, False):\n _basedir = buf.value\n else:\n log.warning('Could not extract my documents folder')\n else:\n _basedir = os.path.expanduser('~')\n wave_dir = os.path.join(_basedir, 'Zurich Instruments', 'LabOne',\n 'WebServer', 'awg', 'waves')\n if not os.path.exists(wave_dir):\n os.makedirs(wave_dir)\n return wave_dir\n\n\ndef _zi_clear_waves():\n wave_dir = _zi_wave_dir()\n for f in os.listdir(wave_dir):\n if f.endswith(\".csv\"):\n os.remove(os.path.join(wave_dir, f))\n elif f.endswith('.cache'):\n shutil.rmtree(os.path.join(wave_dir, f))\n\n\ndef _zi_wavename_pair_to_argument(w1, w2):\n if w1 is not None and w2 is not None:\n return f'{w1}, {w2}'\n elif w1 is not None and w2 is None:\n return f'1, {w1}'\n elif w1 is None and w2 is not None:\n return f'2, {w2}'\n else:\n return ''" ]
[ [ "numpy.array", "numpy.savetxt", "numpy.sum" ] ]
PeterXingke/HugeCTR
[ "d7552c4c5f93ff18ded961645cac82d5d8b5b785" ]
[ "sparse_operation_kit/unit_test/test_scripts/tf2/test_sparse_emb_demo_model_multi_worker.py" ]
[ "\"\"\"\n Copyright (c) 2021, NVIDIA CORPORATION.\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport argparse\n\nimport sys, os\nsys.path.append(os.path.abspath(os.path.join(\n os.path.dirname(os.path.abspath(__file__)), r\"../../../\")))\nimport sparse_operation_kit as sok\nimport tensorflow as tf\n\nimport numpy as np\nimport os, json\nimport pickle\nimport utils\n\nfrom test_sparse_emb_demo_model_single_worker import SOKDemo, test_tf_demo, check_saved_embedding_variables\n\ndef test_sok_demo(args, init_tensors, *random_samples):\n port = 12345\n os.environ[\"TF_CONFIG\"] = json.dumps({\n 'cluster': {\"worker\": [args.ips[i] + \":\" + str(port + i) for i in range(args.worker_num)] },\n 'task': {\"type\": 'worker', \"index\": args.task_id}\n })\n strategy = tf.distribute.MultiWorkerMirroredStrategy()\n with strategy.scope():\n result = sok.Init(global_batch_size=args.global_batch_size)\n\n plugin_demo = SOKDemo(combiner=args.combiner, \n max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,\n slot_num=args.slot_num, max_nnz=args.max_nnz,\n embedding_vec_size=args.embedding_vec_size)\n\n emb_opt = utils.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)\n dense_opt = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)\n\n plugin_saver = sok.Saver()\n if (1 == args.restore_params):\n filepath = r\"./embedding_variables\"\n plugin_saver.restore_from_file(plugin_demo.embedding_layer.embedding_variable, filepath)\n else:\n status = plugin_saver.load_embedding_values(plugin_demo.embedding_layer.embedding_variable, init_tensors)\n\n loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)\n def _replica_loss(labels, logits):\n loss = loss_fn(labels, logits)\n return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)\n\n @tf.function\n def _train_step(inputs, labels):\n with tf.GradientTape() as tape:\n logit, embedding_vector = plugin_demo(inputs, training=True)\n loss = _replica_loss(labels, logit)\n embedding_variables, other_variable = sok.split_embedding_variable_from_others(plugin_demo.trainable_variables)\n grads, emb_grads = tape.gradient(loss, [other_variable, embedding_variables])\n if \"plugin\" not in args.optimizer:\n with sok.OptimizerScope(embedding_variables):\n emb_opt.apply_gradients(zip(emb_grads, embedding_variables),\n experimental_aggregate_gradients=False)\n else:\n emb_opt.apply_gradients(zip(emb_grads, embedding_variables),\n experimental_aggregate_gradients=False)\n dense_opt.apply_gradients(zip(grads, other_variable))\n return logit, embedding_vector\n\n sok_results = list()\n\n def _dataset_fn(input_context):\n replica_batch_size = input_context.get_per_replica_batch_size(args.global_batch_size)\n dataset = utils.tf_dataset(*random_samples, batchsize=replica_batch_size, to_sparse_tensor=True, repeat=1)\n # because each worker has its own data source, so that no need to shard the dataset.\n return dataset\n\n dataset = 
strategy.distribute_datasets_from_function(_dataset_fn)\n\n for i, (sparse_tensors, replica_labels) in enumerate(dataset):\n print(\"-\" * 30, \"step \", str(i), \"-\" * 30)\n logit, embedding_vector = strategy.run(_train_step, args=(sparse_tensors, replica_labels))\n print(\"[INFO]: embedding_vector\\n\", embedding_vector)\n sok_results.append(embedding_vector)\n # FIXME: when the forward computation is too fast, there\n # may exist some conficts with datareader, which cause the program hang.\n import time\n time.sleep(0.2) # seconds\n\n # save params to file.\n if 1 == args.save_params:\n filepath = r\"./embedding_variables/\"\n utils.try_make_dirs(filepath, chief=(True if args.task_id == 0 else False))\n\n plugin_saver.dump_to_file(plugin_demo.embedding_layer.embedding_variable, filepath)\n\n return sok_results, plugin_demo.embedding_layer.embedding_variable.values[0].m_var_name\n\ndef compare_sok_with_tf(args):\n if (args.global_batch_size % args.local_gpu_num != 0):\n raise ValueError(\"global_batch_size: %d is not divisible by local_gpu_num: %d\"\n %(args.global_batch_size, args.local_gpu_num))\n if (args.global_batch_size % args.worker_num != 0):\n raise ValueError(\"global_batch_size: %d is not divisible by worker_num: %d\"\n %(args.global_batch_size, args.worker_num))\n\n # each worker generate different dataset\n if args.generate_new_datas:\n worker_batch_size = args.global_batch_size // args.worker_num\n random_samples_local = utils.generate_random_samples(num_of_samples=worker_batch_size * args.iter_num,\n vocabulary_size=args.local_gpu_num * args.max_vocabulary_size_per_gpu * args.worker_num,\n slot_num=args.slot_num,\n max_nnz=args.max_nnz)\n utils.save_to_file(r\"./random_samples_\" + str(args.task_id) + r\".file\", *random_samples_local)\n else:\n random_samples_local = utils.restore_from_file(r\"./random_samples_\" + str(args.task_id) + r\".file\")\n\n if (0 == args.restore_params):\n # each worker generate same init tensors, because each worker will do the filtering by itself.\n init_tensors = utils.get_ones_tensor(max_vocab_size_per_gpu=args.max_vocabulary_size_per_gpu,\n embedding_vec_size=args.embedding_vec_size,\n num=args.local_gpu_num * args.worker_num)\n else:\n filepath = r\"./embedding_variables\"\n tf_values_filename = os.path.join(filepath, r\"tf_variable.file\")\n init_tensors = utils.restore_from_file(tf_values_filename)\n\n sok_results_local, embedding_variable_name = test_sok_demo(args, init_tensors, *random_samples_local)\n # save the forward embedding vector from different worker to file\n utils.save_to_file(r\"./sok_embedding_vectors_\" + str(args.task_id) + r\".file\", *sok_results_local)\n\n # aggregate dataset from different worker\n dataset_filenames = [r\"./random_samples_\" + str(task_id) + r\".file\"\n for task_id in range(args.worker_num)]\n random_samples_total = [list() for _ in range(args.iter_num)]\n random_labels_total = [list() for _ in range(args.iter_num)]\n local_batch_size = args.global_batch_size // args.worker_num\n for work_id in range(args.worker_num):\n samples, labels = utils.restore_from_file(dataset_filenames[work_id])\n for i in range(args.iter_num):\n random_samples_total[i].extend(samples[i * local_batch_size : (i + 1) * local_batch_size])\n random_labels_total[i].extend(labels[i * local_batch_size : (i + 1) * local_batch_size])\n random_samples_total = np.concatenate(random_samples_total, axis=0)\n random_labels_total = np.concatenate(random_labels_total, axis=0)\n\n tf_results = test_tf_demo(args, init_tensors, 
random_samples_total, random_labels_total)\n\n    # aggregate the forward embedding vectors from all workers\n    sok_results_filenames = [r\"./sok_embedding_vectors_\" + str(task_id) + r\".file\"\n                             for task_id in range(args.worker_num)]\n    sok_results_total = list()\n    for file_name in sok_results_filenames:\n        sok_results_local = utils.restore_from_file(file_name)\n        sok_results_total.append(sok_results_local)\n\n    if (len(sok_results_total[0]) != len(tf_results)):\n        raise ValueError(\"The length of results obtained from sok: %d is not equal to that of tensorflow: %d.\"\n                         %(len(sok_results_total[0]), len(tf_results)))\n    if (len(tf_results) != args.iter_num):\n        raise ValueError(\"The length of embedding vectors: %d is not equal to iteration number: %d.\"\n                         %(len(tf_results), args.iter_num))\n\n    # for i, sok_vector in enumerate(sok_results_total):\n    for i in range(args.iter_num):\n        if args.local_gpu_num != 1:\n            sok_vector = tf.concat([tf.concat(sok_results_total[task_id][i].values, axis=0)\n                                    for task_id in range(args.worker_num)], axis=0)\n        else:\n            sok_vector = tf.concat([sok_results_total[task_id][i]\n                                    for task_id in range(args.worker_num)], axis=0)\n        tf.debugging.assert_near(tf.reshape(sok_vector, \n                                            shape=[-1, tf.shape(sok_vector)[-1]]),\n                                 tf_results[i],\n                                 atol=1e-4,\n                                 rtol=1e-4)\n\n    print(\"\\n[INFO]: With MultiWorkerMirroredStrategy, the embedding vectors obtained from \" +\\\n          \"sparse operation kit and tensorflow are consistent for %d iterations.\"\n          %args.iter_num)\n    \n    if (1 == args.save_params):\n        check_saved_embedding_variables(args, embedding_variable_name)\n\ndef get_task_id(ips):\n    local_ip = utils.get_local_ip()\n    for i in range(len(ips)):\n        if ips[i] == local_ip:\n            return i\n    raise ValueError(\"Cannot find local_ip: %s in ips list: [%s]\"\n                     %(local_ip, \", \".join(ips)))\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='test demo model with multiple workers.')\n    parser.add_argument('--local_gpu_num', type=int,\n                        help='the number of GPUs used to do parallel training.',\n                        required=False, default=8)\n    parser.add_argument('--iter_num', type=int,\n                        help='the number of testing iterations.',\n                        required=False, default=100)\n    parser.add_argument('--max_vocabulary_size_per_gpu', type=int,\n                        required=False, default=128)\n    parser.add_argument('--slot_num', type=int,\n                        help='the number of feature fields',\n                        required=False, default=1)\n    parser.add_argument('--max_nnz', type=int,\n                        help='the maximum number of keys in one slot',\n                        required=False, default=1)\n    parser.add_argument('--embedding_vec_size', type=int,\n                        help='the dimension of embedding vector',\n                        required=False, default=1)\n    parser.add_argument('--combiner', type=str,\n                        help='the combiner used to do reduction for sparse embedding layer. ' +\\\n                             'It is only respected in sparse embedding layer.',\n                        required=False, default='mean', choices=['mean', 'sum'])\n    parser.add_argument('--global_batch_size', type=int, required=False, default=16)\n    parser.add_argument('--optimizer', type=str,\n                        help=\"which optimizer to use\",\n                        required=False, default='plugin_adam',\n                        choices=['plugin_adam', 'adam', 'sgd'])\n    parser.add_argument('--ips', type=str, nargs=\"+\",\n                        help=\"the IP address of each worker.\",\n                        required=False, default=\"0.0.0.0\")\n    parser.add_argument('--generate_new_datas', type=int, choices=[0, 1],\n                        help='whether to generate new random samples',\n                        required=False, default=1)\n    parser.add_argument('--save_params', type=int, choices=[0, 1],\n                        help='whether to save the trained parameters.',\n                        required=False, default=0)\n    parser.add_argument('--restore_params', type=int, choices=[0, 1],\n                        help='whether to restore from saved files. '+\\\n                             'By default, the testing program will generate random ' +\\\n                             'initial values to initialize trainable parameters '+\\\n                             'rather than restore trainable parameters from file.',\n                        required=False, default=0)\n    args = parser.parse_args()\n\n    if not isinstance(args.ips, list):\n        args.ips = [args.ips]\n\n    args.worker_num = len(args.ips)\n    if utils.all_ips_in_local(args.ips):\n        processes = list()\n        for task_id in range(args.worker_num):\n            available_gpus = \",\".join([str(args.local_gpu_num * task_id + i)\n                                       for i in range(args.local_gpu_num)])\n            print(\"[INFO]: on task: %d, its available GPUs are: %s\" %(task_id, available_gpus))\n            os.environ[\"CUDA_VISIBLE_DEVICES\"] = available_gpus\n            process = utils.TestProcess(func=compare_sok_with_tf, task_id=task_id, arguments=args)\n            process.start()\n            processes.append(process)\n\n        for process in processes:\n            process.join()\n    else:\n        args.task_id = get_task_id(args.ips)\n\n        os.environ['CUDA_VISIBLE_DEVICES'] = \",\".join([str(i) for i in range(args.local_gpu_num)])\n\n        compare_sok_with_tf(args)\n    \n    \n    " ]
[ [ "tensorflow.shape", "tensorflow.distribute.MultiWorkerMirroredStrategy", "tensorflow.nn.compute_average_loss", "tensorflow.GradientTape", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.concat", "numpy.concatenate" ] ]
avpak/okama
[ "b3c4f6b7dfcc314d3171f20b3bc95cfa04268c1a" ]
[ "tests/test_frontier.py" ]
[ "import pytest\nfrom pytest import approx\nfrom pytest import mark\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom okama import EfficientFrontier\n\n\[email protected]\ndef test_init_efficient_frontier():\n with pytest.raises(Exception, match=r'The number of symbols cannot be less than two'):\n EfficientFrontier(symbols=['MCFTR.INDX'])\n\n\[email protected]\ndef test_bounds_setter_failing(init_efficient_frontier):\n with pytest.raises(Exception, match=r'The number of symbols \\(2\\) and the length of bounds \\(3\\) should be equal.'):\n init_efficient_frontier.bounds = ((0, 1.), (0.5, 1.), (0, 0.5))\n\n\[email protected]\ndef test_gmv(init_efficient_frontier):\n assert_allclose(init_efficient_frontier.gmv_weights, np.array([0.67501259, 0.32498741]), rtol=1e-2, atol=1e-2)\n\n\[email protected]\ndef test_gmv_monthly(init_efficient_frontier):\n assert init_efficient_frontier.gmv_monthly[0] == approx(0.026076618401825784, rel=1e-2)\n\n\[email protected]\ndef test_gmv_annualized(init_efficient_frontier):\n assert init_efficient_frontier.gmv_annualized[0] == approx(0.10198459385117883, rel=1e-2)\n\n\[email protected]\ndef test_optimize_return(init_efficient_frontier):\n assert init_efficient_frontier.optimize_return(option='max')['Mean_return_monthly'] == approx(0.015324, rel=1e-2)\n assert init_efficient_frontier.optimize_return(option='min')['Mean_return_monthly'] == approx(0.008803, rel=1e-2)\n\n\[email protected]\ndef test_minimize_risk(init_efficient_frontier):\n assert init_efficient_frontier.minimize_risk(target_return=0.015324, monthly_return=True)['SBMX.MOEX'] == approx(1, rel=1e-2)\n assert init_efficient_frontier.minimize_risk(target_return=0.139241, monthly_return=False)['SBMX.MOEX'] == approx(0.32498, rel=1e-2)\n\n\[email protected]\ndef test_minimize_risk_bounds(init_efficient_frontier_bounds):\n assert init_efficient_frontier_bounds.minimize_risk(target_return=0.015324, monthly_return=True)['SBMX.MOEX'] == approx(1, rel=1e-2)\n assert init_efficient_frontier_bounds.minimize_risk(target_return=0.1548, monthly_return=False)['SBMX.MOEX'] == approx(0.50030, rel=1e-2)\n\n\[email protected]\ndef test_mean_return_range(init_efficient_frontier):\n assert_allclose(init_efficient_frontier.mean_return_range, np.array([0.008803, 0.015325]), rtol=1e-2)\n\n\[email protected]\ndef test_mean_return_range_bounds(init_efficient_frontier_bounds):\n assert_allclose(init_efficient_frontier_bounds.mean_return_range, np.array([0.012064, 0.015325]), rtol=1e-2)\n\n\[email protected]\ndef test_ef_points(init_efficient_frontier):\n assert init_efficient_frontier.ef_points['Mean return'].iloc[-1] == approx(0.20007879286573038, rel=1e-2)\n\n\n\n\n\n\n" ]
[ [ "numpy.array" ] ]
KKanda900/Model-Maker
[ "e73c6e1d47b9682657694e4f56ee96a34e3a29ea" ]
[ "Multi_Classification/Multi_Image_Classification.py" ]
[ "# Primary Python Files for Image Classification\nimport numpy as np \nimport pandas as pd \nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # dont show any tensorflow warning messages\nimport cv2\n\n# Keras libraries used for making the model and tensorflow\nimport tensorflow, keras\nfrom tensorflow.keras.utils import to_categorical\nfrom keras.layers import Dense,Conv2D,Flatten,MaxPool2D,Dropout\nfrom keras.models import Sequential\n\n# Sklearn library for splitting the data precisely\nfrom sklearn.model_selection import train_test_split\n\n'''\nMulti_Image_Classification Class\n\nDescription: \n1. Identify different sets of images based on the labels you provide.\n2. Works based off a sequential model.\n3. Uses a Convolutional Neural Network.\n'''\nclass Multi_Image_Classification:\n\n # ------------------------------ Generic Fields Needed for Training ---------------------------------- #\n shape = (200,200) # predefine a established shape for training and resizing the images (default)\n labels = [] # define the labels to train on\n \n # --------------------------- Training Tools ---------------------------------- #\n train_path = './Multi_Classification/train' # define the path where the training images are located\n train_labels = None # define the labels (same as testing)\n train_images = None # define the images with the training \n x_train = None # split the training images for training\n y_train = None # split the training labels for training\n \n # ------------------------- Testing Tools -------------------------------------- #\n test_path = './Multi_Classification/test' # define the path where the testing images are located\n x_val = None # split the training images for testing\n y_val = None # split the training labels for testing\n test_labels = None # define the testing labels (same as training)\n test_images = None # define the testing images \n \n # ----------------------------------- Main Model Tools ------------------------------- #\n epoch = 50 # default epoch \n batch_size = 10 # default batch size\n model = None # define the model (Sequential for Image Classification)\n\n # ------------------------- Define the Functions for Making the model ---------------------- #\n\n # define the labels and images depending on the directory path\n def set_data(self, directory_path):\n data_labels = [] # define the set of labels according to the name of the file\n data_images = [] # define the images\n \n # iterate through all the images in the directory\n for filename in os.listdir(directory_path): \n # Get the values of the images at the directory path\n img = cv2.imread(os.path.join(directory_path, filename))\n # Spliting file names and storing the labels for image in list\n data_labels.append(filename.split('_')[0])\n # Resize all images to a specific shape\n img = cv2.resize(img, self.shape)\n data_images.append(img) # append the image\n \n data_labels = pd.get_dummies(data_labels).values # Get the categorical data\n data_images = np.array(data_images) # Define the image array as a np array for fitting\n\n return data_labels, data_images # return the labels, images for the specific directory\n\n # define the tools for utilzing on creation of the object\n def __init__(self, create_model, labels, shape, epoch, batch_size):\n np.random.seed(1) # sets the random seed of the NumPy pseudo-random number generator\n\n self.shape = shape # let the user enter the shape of the images to be formed (default 200x200)\n\n # let the user define the labels for their model they want to 
create\n self.labels = labels # default values\n\n # define the training images and labels\n self.train_labels, self.train_images = self.set_data(self.train_path) \n\n # Splitting Training data into train and validation dataset\n self.x_train,self.x_val,self.y_train,self.y_val = train_test_split(self.train_images,self.train_labels,random_state=1)\n \n # define the test labels and images\n self.test_labels, self.test_images = self.set_data(self.test_path)\n \n # define the model for predicition \n if create_model == True:\n self.model = self.create_model(epoch, batch_size, self.x_train, self.y_train, self.x_val, self.y_val)\n\n # create the model to be used for predicition\n def create_model(self, epoch, batch_size, x_train, y_train, x_val, y_val):\n model = Sequential() # define the model as sequential\n \n model.add(Conv2D(kernel_size=(3,3), filters=32, activation='tanh', input_shape=(200,200,3,))) # define the first layer\n model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the second layer\n model.add(MaxPool2D(2,2)) # define the third layer\n model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the fourth layer\n model.add(MaxPool2D(2,2)) # define the fifth layer\n model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the sixth layer\n model.add(Flatten()) # define the seventh layer\n model.add(Dense(20,activation='relu')) # define the eigth layer\n model.add(Dense(15,activation='relu')) # define the ninth layer\n model.add(Dense(len(self.labels),activation = 'softmax')) # define the tenth layer (according to the number of labels for the model)\n \n model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam') # compile the models with categorical because we are working with multiple labels\n history = model.fit(x_train,y_train,epochs=epoch,batch_size=batch_size,validation_data=(x_val,y_val)) # train the model\n \n # after the training is done, define a dictionary that holds the model and history from the training\n complete_model = {} # define the dictionary\n complete_model['model'] = model # define the model with its key\n complete_model['history'] = history # define the history with its key\n complete_model['labels'] = self.labels # save the labels into the dictionary\n \n return complete_model # return the model at the end\n\n # function to save the model that was created in the create_model function\n def save_model(self, model_name, model):\n model.save('./Models/{}.h5'.format(model_name)) # save the model in the models directory\n\n # function to save the model's labels to be used later\n def save_labels(self, labels, model_name):\n f = open('./Models/{}_Labels.txt'.format(model_name), 'a') # create the .txt file that will contain the labels of the model\n # iterate through the labels when the model was first created\n for i in range(len(labels)):\n f.write(\"{}\\n\".format(labels[i])) # write the labels to the file\n f.close() # after iterating through all the labels, close the file so the space can be free\n\n # ------------------------------------------------------ Define the functions used for classifiying --------------------------------------------- #\n \n # classifies images based on the model and the selected image\n def classify_image(self, image, model):\n \n checkImage = image[0] # get the image\n checklabel = image[0] # get the label of the image\n\n predict = model.predict(np.array(checkImage)) # get the predicition \n predicted_label = self.labels[np.argmax(predict)] # get 
the predicted label\n \n return predicted_label # return the predicted label from the labels provided by the user\n\n\n\n\n\n" ]
[ [ "numpy.random.seed", "numpy.argmax", "numpy.array", "sklearn.model_selection.train_test_split", "pandas.get_dummies" ] ]
BeeQC/ANODE-reproducibility
[ "9d6b5a297302cdaa0bbc3908de1a94f3c28c0606" ]
[ "experiments/experiments_img.py" ]
[ "import json\nimport matplotlib\nmatplotlib.use('Agg') # This is hacky (useful for running on VMs)\nimport numpy as np\nimport os\nimport time\nimport torch\nfrom anode.models import ODENet\nfrom anode.conv_models import ConvODENet\nfrom anode.discrete_models import ResNet\nfrom anode.training import Trainer\nfrom experiments.dataloaders import mnist, cifar10, tiny_imagenet\nfrom viz.plots import histories_plt\n\n\ndef run_and_save_experiments_img(device, path_to_config):\n \"\"\"Runs and saves experiments as they are produced (so results are still\n saved even if NFEs become excessively large or underflow occurs).\n\n Parameters\n ----------\n device : torch.device\n\n path_to_config : string\n Path to config json file.\n \"\"\"\n # Open config file\n with open(path_to_config) as config_file:\n config = json.load(config_file)\n\n # Create a folder to store experiment results\n timestamp = time.strftime(\"%Y-%m-%d_%H-%M\")\n directory = \"img_results_{}_{}\".format(timestamp, config[\"id\"])\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Save config file in experiment directory\n with open(directory + '/config.json', 'w') as config_file:\n json.dump(config, config_file)\n\n num_reps = config[\"num_reps\"]\n dataset = config[\"dataset\"]\n model_configs = config[\"model_configs\"]\n training_config = config[\"training_config\"]\n\n results = {\"dataset\": dataset, \"model_info\": []}\n\n if dataset == 'mnist':\n data_loader, test_loader = mnist(training_config[\"batch_size\"])\n img_size = (1, 28, 28)\n output_dim = 10\n\n if dataset == 'cifar10':\n data_loader, test_loader = cifar10(training_config[\"batch_size\"])\n img_size = (3, 32, 32)\n output_dim = 10\n\n if dataset == 'imagenet':\n data_loader = tiny_imagenet(training_config[\"batch_size\"])\n img_size = (3, 64, 64)\n output_dim = 200\n\n only_success = True # Boolean to keep track of any experiments failing\n\n for i, model_config in enumerate(model_configs):\n results[\"model_info\"].append({})\n # Keep track of losses and nfes\n accuracy_histories = []\n epoch_accuracy_histories = []\n loss_histories = []\n nfe_histories = []\n bnfe_histories = []\n total_nfe_histories = []\n epoch_loss_histories = []\n epoch_nfe_histories = []\n epoch_bnfe_histories = []\n epoch_total_nfe_histories = []\n # Keep track of models potentially failing\n model_stats = {\n \"exceeded\": {\"count\": 0, \"final_losses\": [], \"final_nfes\": [],\n \"final_bnfes\": []},\n \"underflow\": {\"count\": 0, \"final_losses\": [], \"final_nfes\": [],\n \"final_bnfes\": []},\n \"success\": {\"count\": 0, \"final_losses\": [], \"final_nfes\": [],\n \"final_bnfes\": []}\n }\n\n if model_config[\"validation\"]:\n epoch_loss_val_histories = []\n\n is_ode = model_config[\"type\"] == \"odenet\" or model_config[\"type\"] == \"anode\"\n\n for j in range(num_reps):\n print(\"{}/{} model, {}/{} rep\".format(i + 1, len(model_configs), j + 1, num_reps))\n\n if is_ode:\n if model_config[\"type\"] == \"odenet\":\n augment_dim = 0\n else:\n augment_dim = model_config[\"augment_dim\"]\n\n model = ConvODENet(device, img_size, model_config[\"num_filters\"],\n output_dim=output_dim,\n augment_dim=augment_dim,\n time_dependent=model_config[\"time_dependent\"],\n non_linearity=model_config[\"non_linearity\"],\n adjoint=True)\n else:\n model = ResNet(data_dim, model_config[\"hidden_dim\"],\n model_config[\"num_layers\"],\n output_dim=output_dim,\n is_img=True)\n\n model.to(device)\n\n optimizer = torch.optim.Adam(model.parameters(),\n lr=model_config[\"lr\"],\n 
weight_decay=model_config[\"weight_decay\"])\n\n trainer = Trainer(model, optimizer, device,\n classification=True,\n print_freq=training_config[\"print_freq\"],\n record_freq=training_config[\"record_freq\"],\n verbose=True,\n save_dir=(directory, '{}_{}'.format(i, j)))\n\n accuracy_histories.append([])\n epoch_accuracy_histories.append([])\n loss_histories.append([])\n epoch_loss_histories.append([])\n nfe_histories.append([])\n epoch_nfe_histories.append([])\n bnfe_histories.append([])\n epoch_bnfe_histories.append([])\n total_nfe_histories.append([])\n epoch_total_nfe_histories.append([])\n\n if model_config[\"validation\"]:\n epoch_loss_val_histories.append([])\n\n # Train one epoch at a time, as NODEs can underflow or exceed the\n # maximum NFEs\n for epoch in range(training_config[\"epochs\"]):\n print(\"\\nEpoch {}\".format(epoch + 1))\n try:\n trainer.train(data_loader, 1)\n end_training = False\n except AssertionError as e:\n only_success = False\n # Assertion error means we either underflowed or exceeded\n # the maximum number of steps\n error_message = e.args[0]\n # Error message in torchdiffeq for max_num_steps starts\n # with 'max_num_steps'\n if error_message.startswith(\"max_num_steps\"):\n print(\"Maximum number of steps exceeded\")\n file_name_root = 'exceeded'\n elif error_message.startswith(\"underflow\"):\n print(\"Underflow\")\n file_name_root = 'underflow'\n else:\n print(\"Unknown assertion error\")\n file_name_root = 'unknown'\n\n model_stats[file_name_root][\"count\"] += 1\n\n if len(trainer.buffer['loss']):\n final_loss = np.mean(trainer.buffer['loss'])\n else:\n final_loss = None\n model_stats[file_name_root][\"final_losses\"].append(final_loss)\n\n if len(trainer.buffer['nfe']):\n final_nfes = np.mean(trainer.buffer['nfe'])\n else:\n final_nfes = None\n model_stats[file_name_root][\"final_nfes\"].append(final_nfes)\n\n if len(trainer.buffer['bnfe']):\n final_bnfes = np.mean(trainer.buffer['bnfe'])\n else:\n final_bnfes = None\n model_stats[file_name_root][\"final_bnfes\"].append(final_bnfes)\n\n # Save final NFEs before error happened\n with open(directory + '/{}_{}_{}.json'.format(file_name_root, i, j), 'w') as f:\n json.dump({\"forward\": trainer.nfe_buffer, \"backward\": trainer.bnfe_buffer}, f)\n\n end_training = True\n\n # Save info at every epoch\n accuracy_histories[-1] = trainer.histories['accuracy_history']\n epoch_accuracy_histories[-1] = trainer.histories['epoch_accuracy_history']\n loss_histories[-1] = trainer.histories['loss_history']\n epoch_loss_histories[-1] = trainer.histories['epoch_loss_history']\n if is_ode:\n nfe_histories[-1] = trainer.histories['nfe_history']\n epoch_nfe_histories[-1] = trainer.histories['epoch_nfe_history']\n bnfe_histories[-1] = trainer.histories['bnfe_history']\n epoch_bnfe_histories[-1] = trainer.histories['epoch_bnfe_history']\n total_nfe_histories[-1] = trainer.histories['total_nfe_history']\n epoch_total_nfe_histories[-1] = trainer.histories['epoch_total_nfe_history']\n\n if model_config[\"validation\"]:\n epoch_loss_val = dataset_mean_loss(trainer, test_loader, device)\n if epoch == 0:\n epoch_loss_val_histories[-1] = [epoch_loss_val]\n else:\n epoch_loss_val_histories[-1].append(epoch_loss_val)\n\n results[\"model_info\"][-1][\"type\"] = model_config[\"type\"]\n results[\"model_info\"][-1][\"loss_history\"] = loss_histories\n results[\"model_info\"][-1][\"accuracy_history\"] = accuracy_histories\n results[\"model_info\"][-1][\"epoch_accuracy_history\"] = epoch_accuracy_histories\n 
results[\"model_info\"][-1][\"epoch_loss_history\"] = epoch_loss_histories\n if model_config[\"validation\"]:\n results[\"model_info\"][-1][\"epoch_loss_val_history\"] = epoch_loss_val_histories\n\n if is_ode:\n results[\"model_info\"][-1][\"epoch_nfe_history\"] = epoch_nfe_histories\n results[\"model_info\"][-1][\"nfe_history\"] = nfe_histories\n results[\"model_info\"][-1][\"epoch_bnfe_history\"] = epoch_bnfe_histories\n results[\"model_info\"][-1][\"bnfe_history\"] = bnfe_histories\n results[\"model_info\"][-1][\"epoch_total_nfe_history\"] = epoch_total_nfe_histories\n results[\"model_info\"][-1][\"total_nfe_history\"] = total_nfe_histories\n\n # Save losses and nfes at every epoch\n with open(directory + '/losses_and_nfes.json', 'w') as f:\n json.dump(results['model_info'], f)\n\n # If training failed, move on to next rep\n if end_training:\n break\n\n # If we reached end of training, increment success counter\n if epoch == training_config[\"epochs\"] - 1:\n model_stats[\"success\"][\"count\"] += 1\n\n if len(trainer.buffer['loss']):\n final_loss = np.mean(trainer.buffer['loss'])\n else:\n final_loss = None\n model_stats[\"success\"][\"final_losses\"].append(final_loss)\n\n if len(trainer.buffer['nfe']):\n final_nfes = np.mean(trainer.buffer['nfe'])\n else:\n final_nfes = None\n model_stats[\"success\"][\"final_nfes\"].append(final_nfes)\n\n if len(trainer.buffer['bnfe']):\n final_bnfes = np.mean(trainer.buffer['bnfe'])\n else:\n final_bnfes = None\n model_stats[\"success\"][\"final_bnfes\"].append(final_bnfes)\n\n # Save model stats\n with open(directory + '/model_stats{}.json'.format(i), 'w') as f:\n json.dump(model_stats, f)\n\n # Create plots\n\n # Extract size of augmented dims\n augment_labels = ['p = 0' if model_config['type'] == 'odenet' else 'p = {}'.format(model_config['augment_dim'])\n for model_config in config['model_configs']]\n # Create losses figure\n # Note that we can only calculate mean loss if all models trained to\n # completion. 
Therefore we only include mean if only_success is True\n    histories_plt(results[\"model_info\"], plot_type='loss', labels=augment_labels,\n                  include_mean=only_success, save_fig=directory + '/losses.png')\n    histories_plt(results[\"model_info\"], plot_type='loss', labels=augment_labels,\n                  include_mean=only_success, shaded_err=True, save_fig=directory + '/losses_shaded.png')\n\n    # Create NFE plots if ODE model is included\n    contains_ode = False\n    for model_config in config[\"model_configs\"]:\n        if model_config[\"type\"] == \"odenet\" or model_config[\"type\"] == \"anode\":\n            contains_ode = True\n            break\n\n    if contains_ode:\n        # If adjoint method was used, plot forwards, backwards and total nfes\n        if trainer.model.odeblock.adjoint:\n            nfe_types = ['nfe', 'bnfe', 'total_nfe']\n        else:\n            nfe_types = ['nfe']\n\n        for nfe_type in nfe_types:\n            histories_plt(results[\"model_info\"], plot_type='nfe', labels=augment_labels,\n                          include_mean=only_success, nfe_type=nfe_type,\n                          save_fig=directory + '/{}s.png'.format(nfe_type))\n            histories_plt(results[\"model_info\"], plot_type='nfe', labels=augment_labels,\n                          include_mean=only_success, shaded_err=True, nfe_type=nfe_type,\n                          save_fig=directory + '/{}s_shaded.png'.format(nfe_type))\n            histories_plt(results[\"model_info\"], plot_type='nfe_vs_loss', labels=augment_labels,\n                          include_mean=only_success, nfe_type=nfe_type,\n                          save_fig=directory + '/{}_vs_loss.png'.format(nfe_type))\n\n\ndef dataset_mean_loss(trainer, data_loader, device):\n    \"\"\"Returns mean loss of model on a dataset. Useful for calculating\n    validation loss.\n\n    Parameters\n    ----------\n    trainer : training.Trainer instance\n        Trainer instance for model we want to evaluate.\n\n    data_loader : torch.utils.data.DataLoader\n\n    device : torch.device\n    \"\"\"\n    epoch_loss = 0.\n    for x_batch, y_batch in data_loader:\n        x_batch = x_batch.to(device)\n        y_batch = y_batch.to(device)\n        y_pred = trainer.model(x_batch)\n        loss = trainer._loss(y_pred, y_batch)\n        epoch_loss += loss.item()\n    return epoch_loss / len(data_loader)\n" ]
[ [ "matplotlib.use", "numpy.mean" ] ]
physwkim/silx
[ "e3f39babad34c97db8ec5dfbb8e92287ce059f70" ]
[ "silx/gui/plot/actions/io.py" ]
[ "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2004-2020 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"\n:mod:`silx.gui.plot.actions.io` provides a set of QAction relative of inputs\nand outputs for a :class:`.PlotWidget`.\n\nThe following QAction are available:\n\n- :class:`CopyAction`\n- :class:`PrintAction`\n- :class:`SaveAction`\n\"\"\"\n\nfrom __future__ import division\n\n__authors__ = [\"V.A. Sole\", \"T. Vincent\", \"P. Knobel\"]\n__license__ = \"MIT\"\n__date__ = \"25/09/2020\"\n\nfrom . import PlotAction\nfrom silx.io.utils import save1D, savespec, NEXUS_HDF5_EXT\nfrom silx.io.nxdata import save_NXdata\nimport logging\nimport sys\nimport os.path\nfrom collections import OrderedDict\nimport traceback\nimport numpy\nfrom silx.utils.deprecation import deprecated\nfrom silx.gui import qt, printer\nfrom silx.gui.dialog.GroupDialog import GroupDialog\nfrom silx.third_party.EdfFile import EdfFile\nfrom silx.third_party.TiffIO import TiffIO\nfrom ...utils.image import convertArrayToQImage\nif sys.version_info[0] == 3:\n from io import BytesIO\nelse:\n import cStringIO as _StringIO\n BytesIO = _StringIO.StringIO\n\n_logger = logging.getLogger(__name__)\n\n_NEXUS_HDF5_EXT_STR = ' '.join(['*' + ext for ext in NEXUS_HDF5_EXT])\n\n\ndef selectOutputGroup(h5filename):\n \"\"\"Open a dialog to prompt the user to select a group in\n which to output data.\n\n :param str h5filename: name of an existing HDF5 file\n :rtype: str\n :return: Name of output group, or None if the dialog was cancelled\n \"\"\"\n dialog = GroupDialog()\n dialog.addFile(h5filename)\n dialog.setWindowTitle(\"Select an output group\")\n if not dialog.exec_():\n return None\n return dialog.getSelectedDataUrl().data_path()\n\n\nclass SaveAction(PlotAction):\n \"\"\"QAction for saving Plot content.\n\n It opens a Save as... 
dialog.\n\n :param plot: :class:`.PlotWidget` instance on which to operate.\n :param parent: See :class:`QAction`.\n \"\"\"\n\n SNAPSHOT_FILTER_SVG = 'Plot Snapshot as SVG (*.svg)'\n SNAPSHOT_FILTER_PNG = 'Plot Snapshot as PNG (*.png)'\n\n DEFAULT_ALL_FILTERS = (SNAPSHOT_FILTER_PNG, SNAPSHOT_FILTER_SVG)\n\n # Dict of curve filters with CSV-like format\n # Using ordered dict to guarantee filters order\n # Note: '%.18e' is numpy.savetxt default format\n CURVE_FILTERS_TXT = OrderedDict((\n ('Curve as Raw ASCII (*.txt)',\n {'fmt': '%.18e', 'delimiter': ' ', 'header': False}),\n ('Curve as \";\"-separated CSV (*.csv)',\n {'fmt': '%.18e', 'delimiter': ';', 'header': True}),\n ('Curve as \",\"-separated CSV (*.csv)',\n {'fmt': '%.18e', 'delimiter': ',', 'header': True}),\n ('Curve as tab-separated CSV (*.csv)',\n {'fmt': '%.18e', 'delimiter': '\\t', 'header': True}),\n ('Curve as OMNIC CSV (*.csv)',\n {'fmt': '%.7E', 'delimiter': ',', 'header': False}),\n ('Curve as SpecFile (*.dat)',\n {'fmt': '%.10g', 'delimiter': '', 'header': False})\n ))\n\n CURVE_FILTER_NPY = 'Curve as NumPy binary file (*.npy)'\n\n CURVE_FILTER_NXDATA = 'Curve as NXdata (%s)' % _NEXUS_HDF5_EXT_STR\n\n DEFAULT_CURVE_FILTERS = list(CURVE_FILTERS_TXT.keys()) + [\n CURVE_FILTER_NPY, CURVE_FILTER_NXDATA]\n\n DEFAULT_ALL_CURVES_FILTERS = (\"All curves as SpecFile (*.dat)\",)\n\n IMAGE_FILTER_EDF = 'Image data as EDF (*.edf)'\n IMAGE_FILTER_TIFF = 'Image data as TIFF (*.tif)'\n IMAGE_FILTER_NUMPY = 'Image data as NumPy binary file (*.npy)'\n IMAGE_FILTER_ASCII = 'Image data as ASCII (*.dat)'\n IMAGE_FILTER_CSV_COMMA = 'Image data as ,-separated CSV (*.csv)'\n IMAGE_FILTER_CSV_SEMICOLON = 'Image data as ;-separated CSV (*.csv)'\n IMAGE_FILTER_CSV_TAB = 'Image data as tab-separated CSV (*.csv)'\n IMAGE_FILTER_RGB_PNG = 'Image as PNG (*.png)'\n IMAGE_FILTER_NXDATA = 'Image as NXdata (%s)' % _NEXUS_HDF5_EXT_STR\n\n DEFAULT_IMAGE_FILTERS = (IMAGE_FILTER_EDF,\n IMAGE_FILTER_TIFF,\n IMAGE_FILTER_NUMPY,\n IMAGE_FILTER_ASCII,\n IMAGE_FILTER_CSV_COMMA,\n IMAGE_FILTER_CSV_SEMICOLON,\n IMAGE_FILTER_CSV_TAB,\n IMAGE_FILTER_RGB_PNG,\n IMAGE_FILTER_NXDATA)\n\n SCATTER_FILTER_NXDATA = 'Scatter as NXdata (%s)' % _NEXUS_HDF5_EXT_STR\n DEFAULT_SCATTER_FILTERS = (SCATTER_FILTER_NXDATA,)\n\n # filters for which we don't want an \"overwrite existing file\" warning\n DEFAULT_APPEND_FILTERS = (CURVE_FILTER_NXDATA, IMAGE_FILTER_NXDATA,\n SCATTER_FILTER_NXDATA)\n\n def __init__(self, plot, parent=None):\n self._filters = {\n 'all': OrderedDict(),\n 'curve': OrderedDict(),\n 'curves': OrderedDict(),\n 'image': OrderedDict(),\n 'scatter': OrderedDict()}\n\n self._appendFilters = list(self.DEFAULT_APPEND_FILTERS)\n\n # Initialize filters\n for nameFilter in self.DEFAULT_ALL_FILTERS:\n self.setFileFilter(\n dataKind='all', nameFilter=nameFilter, func=self._saveSnapshot)\n\n for nameFilter in self.DEFAULT_CURVE_FILTERS:\n self.setFileFilter(\n dataKind='curve', nameFilter=nameFilter, func=self._saveCurve)\n\n for nameFilter in self.DEFAULT_ALL_CURVES_FILTERS:\n self.setFileFilter(\n dataKind='curves', nameFilter=nameFilter, func=self._saveCurves)\n\n for nameFilter in self.DEFAULT_IMAGE_FILTERS:\n self.setFileFilter(\n dataKind='image', nameFilter=nameFilter, func=self._saveImage)\n\n for nameFilter in self.DEFAULT_SCATTER_FILTERS:\n self.setFileFilter(\n dataKind='scatter', nameFilter=nameFilter, func=self._saveScatter)\n\n super(SaveAction, self).__init__(\n plot, icon='document-save', text='Save as...',\n tooltip='Save curve/image/plot snapshot dialog',\n 
triggered=self._actionTriggered,\n checkable=False, parent=parent)\n self.setShortcut(qt.QKeySequence.Save)\n self.setShortcutContext(qt.Qt.WidgetShortcut)\n\n @staticmethod\n def _errorMessage(informativeText='', parent=None):\n \"\"\"Display an error message.\"\"\"\n # TODO issue with QMessageBox size fixed and too small\n msg = qt.QMessageBox(parent)\n msg.setIcon(qt.QMessageBox.Critical)\n msg.setInformativeText(informativeText + ' ' + str(sys.exc_info()[1]))\n msg.setDetailedText(traceback.format_exc())\n msg.exec_()\n\n def _saveSnapshot(self, plot, filename, nameFilter):\n \"\"\"Save a snapshot of the :class:`PlotWindow` widget.\n\n :param str filename: The name of the file to write\n :param str nameFilter: The selected name filter\n :return: False if format is not supported or save failed,\n True otherwise.\n \"\"\"\n if nameFilter == self.SNAPSHOT_FILTER_PNG:\n fileFormat = 'png'\n elif nameFilter == self.SNAPSHOT_FILTER_SVG:\n fileFormat = 'svg'\n else: # Format not supported\n _logger.error(\n 'Saving plot snapshot failed: format not supported')\n return False\n\n plot.saveGraph(filename, fileFormat=fileFormat)\n return True\n\n def _getAxesLabels(self, item):\n # If curve has no associated label, get the default from the plot\n xlabel = item.getXLabel() or self.plot.getXAxis().getLabel()\n ylabel = item.getYLabel() or self.plot.getYAxis().getLabel()\n return xlabel, ylabel\n\n def _get1dData(self, item):\n \"provide xdata, [ydata], xlabel, [ylabel] and manages error bars\"\n xlabel, ylabel = self._getAxesLabels(item)\n x_data = item.getXData(copy=False)\n y_data = item.getYData(copy=False)\n x_err = item.getXErrorData(copy=False)\n y_err = item.getYErrorData(copy=False)\n labels = [ylabel]\n data = [y_data]\n\n if x_err is not None:\n if numpy.isscalar(x_err):\n data.append(numpy.zeros_like(y_data) + x_err)\n labels.append(xlabel + \"_errors\")\n elif x_err.ndim == 1:\n data.append(x_err)\n labels.append(xlabel + \"_errors\")\n elif x_err.ndim == 2:\n data.append(x_err[0])\n labels.append(xlabel + \"_errors_below\")\n data.append(x_err[1])\n labels.append(xlabel + \"_errors_above\")\n\n if y_err is not None:\n if numpy.isscalar(y_err):\n data.append(numpy.zeros_like(y_data) + y_err)\n labels.append(ylabel + \"_errors\")\n elif y_err.ndim == 1:\n data.append(y_err)\n labels.append(ylabel + \"_errors\")\n elif y_err.ndim == 2:\n data.append(y_err[0])\n labels.append(ylabel + \"_errors_below\")\n data.append(y_err[1])\n labels.append(ylabel + \"_errors_above\")\n return x_data, data, xlabel, labels\n\n @staticmethod\n def _selectWriteableOutputGroup(filename, parent):\n if os.path.exists(filename) and os.path.isfile(filename) \\\n and os.access(filename, os.W_OK):\n entryPath = selectOutputGroup(filename)\n if entryPath is None:\n _logger.info(\"Save operation cancelled\")\n return None\n return entryPath\n elif not os.path.exists(filename):\n # create new entry in new file\n return \"/entry\"\n else:\n SaveAction._errorMessage('Save failed (file access issue)\\n', parent=parent)\n return None\n\n def _saveCurveAsNXdata(self, curve, filename):\n entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot)\n if entryPath is None:\n return False\n\n xlabel, ylabel = self._getAxesLabels(curve)\n\n return save_NXdata(\n filename,\n nxentry_name=entryPath,\n signal=curve.getYData(copy=False),\n axes=[curve.getXData(copy=False)],\n signal_name=\"y\",\n axes_names=[\"x\"],\n signal_long_name=ylabel,\n axes_long_names=[xlabel],\n 
signal_errors=curve.getYErrorData(copy=False),\n axes_errors=[curve.getXErrorData(copy=True)],\n title=self.plot.getGraphTitle())\n\n def _saveCurve(self, plot, filename, nameFilter):\n \"\"\"Save a curve from the plot.\n\n :param str filename: The name of the file to write\n :param str nameFilter: The selected name filter\n :return: False if format is not supported or save failed,\n True otherwise.\n \"\"\"\n if nameFilter not in self.DEFAULT_CURVE_FILTERS:\n return False\n\n # Check if a curve is to be saved\n curve = plot.getActiveCurve()\n # before calling _saveCurve, if there is no selected curve, we\n # make sure there is only one curve on the graph\n if curve is None:\n curves = plot.getAllCurves()\n if not curves:\n self._errorMessage(\"No curve to be saved\", parent=self.plot)\n return False\n curve = curves[0]\n\n if nameFilter in self.CURVE_FILTERS_TXT:\n filter_ = self.CURVE_FILTERS_TXT[nameFilter]\n fmt = filter_['fmt']\n csvdelim = filter_['delimiter']\n autoheader = filter_['header']\n else:\n # .npy or nxdata\n fmt, csvdelim, autoheader = (\"\", \"\", False)\n\n if nameFilter == self.CURVE_FILTER_NXDATA:\n return self._saveCurveAsNXdata(curve, filename)\n\n xdata, data, xlabel, labels = self._get1dData(curve)\n\n try:\n save1D(filename,\n xdata, data,\n xlabel, labels,\n fmt=fmt, csvdelim=csvdelim,\n autoheader=autoheader)\n except IOError:\n self._errorMessage('Save failed\\n', parent=self.plot)\n return False\n\n return True\n\n def _saveCurves(self, plot, filename, nameFilter):\n \"\"\"Save all curves from the plot.\n\n :param str filename: The name of the file to write\n :param str nameFilter: The selected name filter\n :return: False if format is not supported or save failed,\n True otherwise.\n \"\"\"\n if nameFilter not in self.DEFAULT_ALL_CURVES_FILTERS:\n return False\n\n curves = plot.getAllCurves()\n if not curves:\n self._errorMessage(\"No curves to be saved\", parent=self.plot)\n return False\n\n curve = curves[0]\n scanno = 1\n try:\n xdata, data, xlabel, labels = self._get1dData(curve)\n\n specfile = savespec(filename,\n xdata, data,\n xlabel, labels,\n fmt=\"%.7g\", scan_number=1, mode=\"w\",\n write_file_header=True,\n close_file=False)\n except IOError:\n self._errorMessage('Save failed\\n', parent=self.plot)\n return False\n\n for curve in curves[1:]:\n try:\n scanno += 1\n xdata, data, xlabel, labels = self._get1dData(curve)\n specfile = savespec(specfile,\n xdata, data,\n xlabel, labels,\n fmt=\"%.7g\", scan_number=scanno,\n write_file_header=False,\n close_file=False)\n except IOError:\n self._errorMessage('Save failed\\n', parent=self.plot)\n return False\n specfile.close()\n\n return True\n\n def _saveImage(self, plot, filename, nameFilter):\n \"\"\"Save an image from the plot.\n\n :param str filename: The name of the file to write\n :param str nameFilter: The selected name filter\n :return: False if format is not supported or save failed,\n True otherwise.\n \"\"\"\n if nameFilter not in self.DEFAULT_IMAGE_FILTERS:\n return False\n\n image = plot.getActiveImage()\n if image is None:\n qt.QMessageBox.warning(\n plot, \"No Data\", \"No image to be saved\")\n return False\n\n data = image.getData(copy=False)\n\n # TODO Use silx.io for writing files\n if nameFilter == self.IMAGE_FILTER_EDF:\n edfFile = EdfFile(filename, access=\"w+\")\n edfFile.WriteImage({}, data, Append=0)\n return True\n\n elif nameFilter == self.IMAGE_FILTER_TIFF:\n tiffFile = TiffIO(filename, mode='w')\n tiffFile.writeImage(data, software='silx')\n return True\n\n elif nameFilter 
== self.IMAGE_FILTER_NUMPY:\n try:\n numpy.save(filename, data)\n except IOError:\n self._errorMessage('Save failed\\n', parent=self.plot)\n return False\n return True\n\n elif nameFilter == self.IMAGE_FILTER_NXDATA:\n entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot)\n if entryPath is None:\n return False\n xorigin, yorigin = image.getOrigin()\n xscale, yscale = image.getScale()\n xaxis = xorigin + xscale * numpy.arange(data.shape[1])\n yaxis = yorigin + yscale * numpy.arange(data.shape[0])\n xlabel, ylabel = self._getAxesLabels(image)\n interpretation = \"image\" if len(data.shape) == 2 else \"rgba-image\"\n\n return save_NXdata(filename,\n nxentry_name=entryPath,\n signal=data,\n axes=[yaxis, xaxis],\n signal_name=\"image\",\n axes_names=[\"y\", \"x\"],\n axes_long_names=[ylabel, xlabel],\n title=plot.getGraphTitle(),\n interpretation=interpretation)\n\n elif nameFilter in (self.IMAGE_FILTER_ASCII,\n self.IMAGE_FILTER_CSV_COMMA,\n self.IMAGE_FILTER_CSV_SEMICOLON,\n self.IMAGE_FILTER_CSV_TAB):\n csvdelim, filetype = {\n self.IMAGE_FILTER_ASCII: (' ', 'txt'),\n self.IMAGE_FILTER_CSV_COMMA: (',', 'csv'),\n self.IMAGE_FILTER_CSV_SEMICOLON: (';', 'csv'),\n self.IMAGE_FILTER_CSV_TAB: ('\\t', 'csv'),\n }[nameFilter]\n\n height, width = data.shape\n rows, cols = numpy.mgrid[0:height, 0:width]\n try:\n save1D(filename, rows.ravel(), (cols.ravel(), data.ravel()),\n filetype=filetype,\n xlabel='row',\n ylabels=['column', 'value'],\n csvdelim=csvdelim,\n autoheader=True)\n\n except IOError:\n self._errorMessage('Save failed\\n', parent=self.plot)\n return False\n return True\n\n elif nameFilter == self.IMAGE_FILTER_RGB_PNG:\n # Get displayed image\n rgbaImage = image.getRgbaImageData(copy=False)\n # Convert RGB QImage\n qimage = convertArrayToQImage(rgbaImage[:, :, :3])\n\n if qimage.save(filename, 'PNG'):\n return True\n else:\n _logger.error('Failed to save image as %s', filename)\n qt.QMessageBox.critical(\n self.parent(),\n 'Save image as',\n 'Failed to save image')\n\n return False\n\n def _saveScatter(self, plot, filename, nameFilter):\n \"\"\"Save an image from the plot.\n\n :param str filename: The name of the file to write\n :param str nameFilter: The selected name filter\n :return: False if format is not supported or save failed,\n True otherwise.\n \"\"\"\n if nameFilter not in self.DEFAULT_SCATTER_FILTERS:\n return False\n\n if nameFilter == self.SCATTER_FILTER_NXDATA:\n entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot)\n if entryPath is None:\n return False\n scatter = plot.getScatter()\n\n x = scatter.getXData(copy=False)\n y = scatter.getYData(copy=False)\n z = scatter.getValueData(copy=False)\n\n xerror = scatter.getXErrorData(copy=False)\n if isinstance(xerror, float):\n xerror = xerror * numpy.ones(x.shape, dtype=numpy.float32)\n\n yerror = scatter.getYErrorData(copy=False)\n if isinstance(yerror, float):\n yerror = yerror * numpy.ones(x.shape, dtype=numpy.float32)\n\n xlabel = plot.getGraphXLabel()\n ylabel = plot.getGraphYLabel()\n\n return save_NXdata(\n filename,\n nxentry_name=entryPath,\n signal=z,\n axes=[x, y],\n signal_name=\"values\",\n axes_names=[\"x\", \"y\"],\n axes_long_names=[xlabel, ylabel],\n axes_errors=[xerror, yerror],\n title=plot.getGraphTitle())\n\n def setFileFilter(self, dataKind, nameFilter, func, index=None, appendToFile=False):\n \"\"\"Set a name filter to add/replace a file format support\n\n :param str dataKind:\n The kind of data for which the provided filter is valid.\n One of: 'all', 'curve', 'curves', 
'image', 'scatter'\n        :param str nameFilter: The name filter in the QFileDialog.\n            See :meth:`QFileDialog.setNameFilters`.\n        :param callable func: The function to call to perform saving.\n            Expected signature is:\n            bool func(PlotWidget plot, str filename, str nameFilter)\n        :param bool appendToFile: True to append the data into the selected\n            file.\n        :param integer index: Index of the filter in the final list (or None)\n        \"\"\"\n        assert dataKind in ('all', 'curve', 'curves', 'image', 'scatter')\n\n        if appendToFile:\n            self._appendFilters.append(nameFilter)\n\n        # first append or replace the new filter to prevent collisions\n        self._filters[dataKind][nameFilter] = func\n        if index is None:\n            # we are already done\n            return\n\n        # get the current ordered list of keys\n        keyList = list(self._filters[dataKind].keys())\n\n        # deal with negative indices\n        if index < 0:\n            index = len(keyList) + index\n            if index < 0:\n                index = 0\n\n        if index >= len(keyList):\n            # nothing to be done, already at the end\n            txt = 'Requested index %d impossible, already at the end' % index\n            _logger.info(txt)\n            return\n\n        # get the new ordered list\n        oldIndex = keyList.index(nameFilter)\n        del keyList[oldIndex]\n        keyList.insert(index, nameFilter)\n\n        # build the new filters\n        newFilters = OrderedDict()\n        for key in keyList:\n            newFilters[key] = self._filters[dataKind][key]\n\n        # and update the filters\n        self._filters[dataKind] = newFilters\n        return\n\n    def getFileFilters(self, dataKind):\n        \"\"\"Returns the nameFilter and associated function for a kind of data.\n\n        :param str dataKind:\n            The kind of data for which the provided filter is valid.\n            One of: 'all', 'curve', 'curves', 'image', 'scatter'\n        :return: {nameFilter: function} associations.\n        :rtype: collections.OrderedDict\n        \"\"\"\n        assert dataKind in ('all', 'curve', 'curves', 'image', 'scatter')\n\n        return self._filters[dataKind].copy()\n\n    def _actionTriggered(self, checked=False):\n        \"\"\"Handle save action.\"\"\"\n        # Set-up filters\n        filters = OrderedDict()\n\n        # Add image filters if there is an active image\n        if self.plot.getActiveImage() is not None:\n            filters.update(self._filters['image'].items())\n\n        # Add curve filters if there is a curve to save\n        if (self.plot.getActiveCurve() is not None or\n                len(self.plot.getAllCurves()) == 1):\n            filters.update(self._filters['curve'].items())\n        if len(self.plot.getAllCurves()) >= 1:\n            filters.update(self._filters['curves'].items())\n\n        # Add scatter filters if there is a scatter\n        # todo: CSV\n        if self.plot.getScatter() is not None:\n            filters.update(self._filters['scatter'].items())\n\n        filters.update(self._filters['all'].items())\n\n        # Create and run File dialog\n        dialog = qt.QFileDialog(self.plot)\n        dialog.setOption(dialog.DontUseNativeDialog)\n        dialog.setWindowTitle(\"Output File Selection\")\n        dialog.setModal(1)\n        dialog.setNameFilters(list(filters.keys()))\n\n        dialog.setFileMode(dialog.AnyFile)\n        dialog.setAcceptMode(dialog.AcceptSave)\n\n        def onFilterSelection(filt_):\n            # disable overwrite confirmation for NXdata types,\n            # because we append the data to existing files\n            if filt_ in self._appendFilters:\n                dialog.setOption(dialog.DontConfirmOverwrite)\n            else:\n                dialog.setOption(dialog.DontConfirmOverwrite, False)\n\n        dialog.filterSelected.connect(onFilterSelection)\n\n        if not dialog.exec_():\n            return False\n\n        nameFilter = dialog.selectedNameFilter()\n        filename = dialog.selectedFiles()[0]\n        dialog.close()\n\n        if '(' in nameFilter and ')' == nameFilter.strip()[-1]:\n            # Check for correct file extension\n            # Extract file 
extensions as .something\n extensions = [ext[ext.find('.'):] for ext in\n nameFilter[nameFilter.find('(') + 1:-1].split()]\n for ext in extensions:\n if (len(filename) > len(ext) and\n filename[-len(ext):].lower() == ext.lower()):\n break\n else: # filename has no extension supported in nameFilter, add one\n if len(extensions) >= 1:\n filename += extensions[0]\n\n # Handle save\n func = filters.get(nameFilter, None)\n if func is not None:\n return func(self.plot, filename, nameFilter)\n else:\n _logger.error('Unsupported file filter: %s', nameFilter)\n return False\n\n\ndef _plotAsPNG(plot):\n \"\"\"Save a :class:`Plot` as PNG and return the payload.\n\n :param plot: The :class:`Plot` to save\n \"\"\"\n pngFile = BytesIO()\n plot.saveGraph(pngFile, fileFormat='png')\n pngFile.flush()\n pngFile.seek(0)\n data = pngFile.read()\n pngFile.close()\n return data\n\n\nclass PrintAction(PlotAction):\n \"\"\"QAction for printing the plot.\n\n It opens a Print dialog.\n\n Current implementation print a bitmap of the plot area and not vector\n graphics, so printing quality is not great.\n\n :param plot: :class:`.PlotWidget` instance on which to operate.\n :param parent: See :class:`QAction`.\n \"\"\"\n\n def __init__(self, plot, parent=None):\n super(PrintAction, self).__init__(\n plot, icon='document-print', text='Print...',\n tooltip='Open print dialog',\n triggered=self.printPlot,\n checkable=False, parent=parent)\n self.setShortcut(qt.QKeySequence.Print)\n self.setShortcutContext(qt.Qt.WidgetShortcut)\n\n def getPrinter(self):\n \"\"\"The QPrinter instance used by the PrintAction.\n\n :rtype: QPrinter\n \"\"\"\n return printer.getDefaultPrinter()\n\n @property\n @deprecated(replacement=\"getPrinter()\", since_version=\"0.8.0\")\n def printer(self):\n return self.getPrinter()\n\n def printPlotAsWidget(self):\n \"\"\"Open the print dialog and print the plot.\n\n Use :meth:`QWidget.render` to print the plot\n\n :return: True if successful\n \"\"\"\n dialog = qt.QPrintDialog(self.getPrinter(), self.plot)\n dialog.setWindowTitle('Print Plot')\n if not dialog.exec_():\n return False\n\n # Print a snapshot of the plot widget at the top of the page\n widget = self.plot.centralWidget()\n\n painter = qt.QPainter()\n if not painter.begin(self.getPrinter()):\n return False\n\n pageRect = self.getPrinter().pageRect()\n xScale = pageRect.width() / widget.width()\n yScale = pageRect.height() / widget.height()\n scale = min(xScale, yScale)\n\n painter.translate(pageRect.width() / 2., 0.)\n painter.scale(scale, scale)\n painter.translate(-widget.width() / 2., 0.)\n widget.render(painter)\n painter.end()\n\n return True\n\n def printPlot(self):\n \"\"\"Open the print dialog and print the plot.\n\n Use :meth:`Plot.saveGraph` to print the plot.\n\n :return: True if successful\n \"\"\"\n # Init printer and start printer dialog\n dialog = qt.QPrintDialog(self.getPrinter(), self.plot)\n dialog.setWindowTitle('Print Plot')\n if not dialog.exec_():\n return False\n\n # Save Plot as PNG and make a pixmap from it with default dpi\n pngData = _plotAsPNG(self.plot)\n\n pixmap = qt.QPixmap()\n pixmap.loadFromData(pngData, 'png')\n\n xScale = self.getPrinter().pageRect().width() / pixmap.width()\n yScale = self.getPrinter().pageRect().height() / pixmap.height()\n scale = min(xScale, yScale)\n\n # Draw pixmap with painter\n painter = qt.QPainter()\n if not painter.begin(self.getPrinter()):\n return False\n\n painter.drawPixmap(0, 0,\n pixmap.width() * scale,\n pixmap.height() * scale,\n pixmap)\n painter.end()\n\n return 
True\n\n\nclass CopyAction(PlotAction):\n \"\"\"QAction to copy :class:`.PlotWidget` content to clipboard.\n\n :param plot: :class:`.PlotWidget` instance on which to operate\n :param parent: See :class:`QAction`\n \"\"\"\n\n def __init__(self, plot, parent=None):\n super(CopyAction, self).__init__(\n plot, icon='edit-copy', text='Copy plot',\n tooltip='Copy a snapshot of the plot into the clipboard',\n triggered=self.copyPlot,\n checkable=False, parent=parent)\n self.setShortcut(qt.QKeySequence.Copy)\n self.setShortcutContext(qt.Qt.WidgetShortcut)\n\n def copyPlot(self):\n \"\"\"Copy plot content to the clipboard as a bitmap.\"\"\"\n # Save Plot as PNG and make a QImage from it with default dpi\n pngData = _plotAsPNG(self.plot)\n image = qt.QImage.fromData(pngData, 'png')\n qt.QApplication.clipboard().setImage(image)\n" ]
[ [ "numpy.ones", "numpy.save", "numpy.zeros_like", "numpy.arange", "numpy.isscalar" ] ]
Nijta/project-NN-Pytorch-scripts
[ "06a50ab072613fb60b8b8e1cea85c4aa8e75549d" ]
[ "project/03-asvspoof-mega/03_fuse_score_evaluate.py" ]
[ "#!/usr/bin/python\n\"\"\" \nWrapper to fuse score and compute EER and min tDCF\nSimple score averaging.\n\nUsage:\npython 03_fuse_score_evaluate.py log_output_testset_1 log_output_testset_2 ...\n\nThe log_output_testset is produced by the pytorch code, for\nexample, ./lfcc-lcnn-lstmsum-am/01/__pretrained/log_output_testset\n\nIt has information like:\n...\nGenerating 71230,LA_E_9999427,0,43237,0, time: 0.005s\nOutput, LA_E_9999487, 0, 0.172325\n...\n(See README for the format of this log)\n\nThis script will extract the line starts with \"Output, ...\"\n\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nfrom sandbox import eval_asvspoof\n\ndef parse_txt(file_path):\n bonafide = []\n bonafide_file_name = []\n spoofed = []\n spoofed_file_name = []\n with open(file_path, 'r') as file_ptr:\n for line in file_ptr:\n if line.startswith('Output,'):\n #Output, LA_E_9999487, 0, 0.172325\n temp = line.split(',')\n flag = int(temp[2])\n name = temp[1]\n if flag:\n bonafide_file_name.append(name)\n bonafide.append(float(temp[-1]))\n else:\n spoofed.append(float(temp[-1]))\n spoofed_file_name.append(name)\n bonafide = np.array(bonafide)\n spoofed = np.array(spoofed)\n return bonafide, spoofed, bonafide_file_name, spoofed_file_name\n\n\ndef fuse_score(file_path_lists):\n bonafide_score = {}\n spoofed_score = {}\n for data_path in file_path_lists:\n bonafide, spoofed, bona_name, spoof_name = parse_txt(data_path)\n for score, name in zip(bonafide, bona_name):\n if name in bonafide_score:\n bonafide_score[name].append(score)\n else:\n bonafide_score[name] = [score]\n for score, name in zip(spoofed, spoof_name):\n if name in spoofed_score:\n spoofed_score[name].append(score)\n else:\n spoofed_score[name] = [score]\n fused_bonafide = np.array([np.mean(y) for x, y in bonafide_score.items()])\n fused_spoofed = np.array([np.mean(y) for x, y in spoofed_score.items()])\n return fused_bonafide, fused_spoofed\n \n\nif __name__ == \"__main__\":\n \n data_paths = sys.argv[1:]\n bonafide, spoofed = fuse_score(data_paths)\n mintDCF, eer, threshold = eval_asvspoof.tDCF_wrapper(bonafide, spoofed)\n print(\"Score file: {:s}\".format(str(data_paths)))\n print(\"mintDCF: {:1.4f}\".format(mintDCF))\n print(\"EER: {:2.3f}%\".format(eer * 100))\n print(\"Threshold: {:f}\".format(threshold))\n" ]
[ [ "numpy.array", "numpy.mean" ] ]
onsabbatical/PoET-BiN
[ "5226cf7e8e34316a3ced73ce30528ac49730ecf4" ]
[ "mnist/storage.py" ]
[ "import torch \nimport numpy as np\n\ndef store_value(main_array,cu_fl,i,name):\n\n\tcu_uint8 = cu_fl.type(torch.ByteTensor)\n\tmain_array = torch.cat((main_array,cu_uint8),0)\n\t#print(i)\n\n\tif (i + 1)%100 == 0:\n\t\tmain_array_np = main_array.cpu().numpy()\n\t\tnp.save(name + str(int(i/100)) + '.npy',main_array[1:,:,:,:])\n\t\tmain_array = torch.ByteTensor(1,np.shape(main_array)[1],np.shape(main_array)[2],np.shape(main_array)[3])\n\treturn main_array\n\n\ndef store_value_3d(main_array,cu_fl,i,name):\n\n\tcu_uint8 = cu_fl.type(torch.ByteTensor)\n\tcu_uint8 = torch.reshape(cu_uint8,(cu_fl.size()[0],cu_fl.size()[2],cu_fl.size()[3]))\n\tmain_array = torch.cat((main_array,cu_uint8),0)\n\t#print(i)\n\n\tif (i + 1)%100 == 0:\n\t\tmain_array_np = main_array.cpu().numpy()\n\t\tnp.save(name + str(int(i/100)) + '.npy',main_array[1:,:,:])\n\t\tmain_array = torch.ByteTensor(1,np.shape(main_array)[1],np.shape(main_array)[2])\n\treturn main_array\n\ndef store_value_2d(main_array,cu_fl,i,name):\n\n\tcu_uint8 = cu_fl.type(torch.ByteTensor)\n\tmain_array = torch.cat((main_array,cu_uint8),0)\n\t#print(i)\n\n\tif (i + 1)%100 == 0:\n\t\tmain_array_np = main_array.cpu().numpy()\n\t\tnp.save(name + str(int(i/100)) + '.npy',main_array[1:,:])\n\t\tmain_array = torch.ByteTensor(1,np.shape(main_array)[1])\n\treturn main_array\n\ndef store_value2(main_array,cu_fl,i,name):\n\n\tcu_uint8 = cu_fl.type(torch.ByteTensor)\n\tmain_array = torch.cat((main_array,cu_uint8),0)\n\t#print(i)\n\n\tif (i + 1)%100 == 0:\n\t\tmain_array_np = main_array.cpu().numpy()\n\t\tnp.save(name + str(int(i/100)) + '.npy',main_array[1:])\n\t\tmain_array = torch.ByteTensor(1)\n\treturn main_array\n\ndef store_all_weights(dict_wb):\n\tweight_matrix = torch.Tensor(1,8).type(torch.cuda.FloatTensor)\n\tbias_matrix = torch.Tensor(1).type(torch.cuda.FloatTensor)\n\n\tfor items in dict_wb:\n\t\tprint(weight_matrix.size())\n\t\tif 'weight' in items:\n\t\t\tprint(dict_wb[items].size())\n\t\t\tweight_matrix = torch.cat((weight_matrix,dict_wb[items]),0)\n\n\t\tif 'bias' in items:\n\t\t\tbias_matrix = torch.cat((bias_matrix,dict_wb[items]),0)\n\tnp.save('weight_matrix.npy',weight_matrix[1:,:].cpu().numpy())\n\tnp.save('bias_matrix.npy',bias_matrix[1:].cpu().numpy())" ]
[ [ "torch.ByteTensor", "torch.cat", "numpy.shape", "torch.Tensor" ] ]
keadwen/CFU-Playground
[ "74c79158e85e1365170ececd1d91ea3fa48faba0" ]
[ "third_party/tflite-micro/tensorflow/lite/micro/tools/metrics/create_size_log.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Script to build the required binaries, profile their size and generate log.\n\"\"\"\n\nimport argparse\nimport datetime\nimport os\nimport pandas as pd\nimport subprocess\n\n\ndef _build_a_binary(root_dir, binary_name, makefile_options):\n os.chdir(root_dir)\n\n params_list = [\n \"make\", \"-f\", \"tensorflow/lite/micro/tools/make/Makefile\", binary_name\n ] + [\"%s=%s\" % (key, value) for (key, value) in makefile_options.items()]\n\n process = subprocess.Popen(params_list,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n if process.returncode != 0:\n raise RuntimeError(\"Building %s failed with \\n\\n %s\" %\n (\" \".join(params_list), stderr.decode()))\n\n\ndef _profile_a_binary(root_dir, binary_name, makefile_options, build_info):\n target_dir = \"%s_%s_%s\" % (makefile_options[\"TARGET\"],\n makefile_options[\"TARGET_ARCH\"],\n makefile_options[\"BUILD_TYPE\"])\n binary_path = os.path.join(root_dir, 'tensorflow/lite/micro/tools/make/gen/',\n target_dir, 'bin', binary_name)\n csv_path = os.path.join(root_dir, 'data/continuous_builds/size_profiling',\n target_dir, \"%s.csv\" % binary_name)\n\n # Run size command and extract the output\n process = subprocess.Popen([\"size\", binary_path],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n if process.returncode != 0:\n raise RuntimeError(\"size %s failed with \\n\\n %s\" %\n (binary_name, stderr.decode()))\n\n output_str = stdout.decode()\n df = pd.DataFrame([line.split() for line in output_str.split('\\n')[1:]],\n columns=list(output_str.split('\\n')[0].split()))\n\n # Append the output from the size to the CSV file\n report = _create_or_read_csv(csv_path)\n report.loc[len(report.index)] = [\n build_info[\"date\"], build_info['sha'], df['text'][0], df['data'][0],\n df['bss'][0], df['dec'][0]\n ]\n\n report.to_csv(csv_path, index=False, header=False, mode='a')\n\n\ndef _create_or_read_csv(csv_file_name):\n if os.path.exists(csv_file_name) is not True:\n csv_df = pd.DataFrame(\n columns=['date', 'sha', 'text', 'data', 'bss', 'total'])\n csv_df.to_csv(csv_file_name, index=False, mode='w')\n\n csv_head = pd.read_csv(csv_file_name, index_col=False, nrows=0)\n return csv_head\n\n\ndef _get_build_info(root_dir):\n os.chdir(root_dir)\n\n current_time = str(datetime.datetime.now())\n\n git_process = subprocess.Popen([\"git\", \"rev-parse\", \"HEAD\"],\n stdout=subprocess.PIPE,\n cwd=root_dir)\n sha, err = git_process.communicate()\n if git_process.returncode != 0:\n raise RuntimeError(\"Git failed with %s\" % err.decode())\n\n return {'date': current_time, 'sha': sha.decode().strip('\\n')}\n\n\ndef _build_and_profile(root_dir, makefile_options, binary_names):\n build_info = _get_build_info(root_dir)\n\n for binary_name in binary_names:\n 
_build_a_binary(root_dir, binary_name, makefile_options)\n _profile_a_binary(root_dir, binary_name, makefile_options, build_info)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n default_binary_list_string = 'keyword_benchmark,baseline_memory_footprint,interpreter_memory_footprint'\n parser.add_argument(\n '--binary_list',\n nargs='?',\n const=default_binary_list_string,\n default=default_binary_list_string,\n help=\n 'binary list separated by comma (e.g. keyword_benchmark,baseline_memory_footprint)'\n )\n parser.add_argument('--build_type',\n nargs='?',\n const='release',\n default='release',\n help='build type (e.g. release)')\n parser.add_argument('--target',\n nargs='?',\n const='linux',\n default='linux',\n help='host target (e.g. linux)')\n parser.add_argument('--target_arch',\n nargs='?',\n const='x86_64',\n default='x86_64',\n help='target architecture (e.g x86_64)')\n args = parser.parse_args()\n\n makefile_options = {\n \"BUILD_TYPE\": args.build_type,\n \"TARGET\": args.target,\n \"TARGET_ARCH\": args.target_arch\n }\n binary_names = args.binary_list.split(',')\n\n script_path = os.path.dirname(os.path.realpath(__file__))\n root_dir = os.path.join(script_path, '../../../../..')\n\n _build_and_profile(root_dir, makefile_options, binary_names)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
chandar-lab/IIRC
[ "ae6ffcfc0a42274bcda66e2288e09118604620e4" ]
[ "experiments/utils.py" ]
[ "import numpy as np\nimport torch.nn as nn\nimport json\n\n\ndef log(epoch, task_id, log_dict, logbook):\n log_dict[\"message\"] = f\"task_{task_id}_metrics\"\n log_dict[\"task_id\"] = task_id\n log_dict[\"task_epoch\"] = epoch\n log_dict[\"step\"] = epoch\n logbook.write_metric(log_dict)\n\n\ndef log_task(task_id, log_dict, logbook):\n log_dict[\"message\"] = f\"incremental_metrics\"\n log_dict[\"task_id\"] = task_id\n log_dict[\"step\"] = task_id\n logbook.write_metric(log_dict)\n\n\ndef pad_random_crop(tensor_img, per_direction_padding=0):\n pad_left = pad_right = pad_top = pad_bottom = per_direction_padding\n tensor_width = tensor_img.shape[-1]\n tensor_height = tensor_img.shape[-2]\n tensor_img = nn.functional.pad(tensor_img,\n [pad_left, pad_right, pad_top, pad_bottom])\n\n start_index_width = np.random.randint(0, pad_left + pad_right)\n start_index_height = np.random.randint(0, pad_top + pad_bottom)\n end_index_width = start_index_width + tensor_width\n end_index_height = start_index_height + tensor_height\n\n return tensor_img[..., start_index_height:end_index_height, start_index_width:end_index_width]\n\n\ndef random_horizontal_flip(tensor_img, flip_prop=0.5):\n do_flip = np.random.random() >= (1 - flip_prop)\n if do_flip:\n return tensor_img.flip((-1))\n else:\n return tensor_img\n\n\ndef remove_extra_logs(cur_task_id, cur_epoch, file):\n logs_to_keep = []\n remove_task_summary = False\n with open(file, 'r') as logs_file:\n for line in logs_file:\n json_line = json.loads(line)\n if not (json_line['logbook_type'] == \"metric\"):\n logs_to_keep.append(json_line)\n elif json_line[\"task_id\"] < cur_task_id:\n logs_to_keep.append(json_line)\n elif json_line[\"task_id\"] == cur_task_id:\n if \"task_epoch\" in json_line.keys() and json_line[\"task_epoch\"] < cur_epoch:\n logs_to_keep.append(json_line)\n elif \"task_epoch\" in json_line.keys() and json_line[\"task_epoch\"] >= cur_epoch:\n remove_task_summary = True\n elif not remove_task_summary:\n logs_to_keep.append(json_line)\n with open(file, 'w') as logs_file:\n for json_line in logs_to_keep:\n logs_file.write(json.dumps(json_line))\n logs_file.write(\"\\n\")\n\n\ndef extend_list(input_, output_length):\n if isinstance(input_, int):\n output = [input_ for _ in range(output_length)]\n elif hasattr(input_, '__iter__'):\n if len(input_) < output_length:\n output = input_\n output.extend([input_[-1] for _ in range(output_length - len(input_))])\n elif len(input_) > output_length:\n output = input_[:output_length]\n else:\n output = input_\n else:\n raise TypeError(\"Neither an integer nor an iterable was provided\")\n return output" ]
[ [ "numpy.random.random", "torch.nn.functional.pad", "numpy.random.randint" ] ]
carlo-/RNNet
[ "995fcce1da58ac2c840afd865bde88d11d81006f" ]
[ "experiments.py" ]
[ "#\n# KTH Royal Institute of Technology\n# DD2424: Deep Learning in Data Science\n# Assignment 4\n#\n# Carlo Rapisarda ([email protected])\n#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport dataset as dt\nfrom os.path import exists\nfrom model import RNNet\nfrom utilities import compute_grads_numerical, compare_grads, unpickle, pickle, eprint, simple_smooth_1d\n\nGOBLET_RESULTS_PATH = '../goblet_results.pkl'\n\n\ndef check_gradients():\n\n book = dt.load_goblet_of_fire()\n seq_len = 25\n m = 5\n\n X, Y, _ = book.get_labeled_data(0, seq_len)\n h0 = np.zeros((m, 1))\n\n np.random.seed(42)\n net = RNNet(m=m, K=book.K)\n\n print('===> Computing numerical gradients...')\n num_grads = compute_grads_numerical(X, Y, h0, net)\n\n print('===> Computing analytical gradients...')\n grads = net._backward(X, Y, h0, *net._forward(X, h0))\n\n errors = compare_grads(num_grads, grads, m, book.K)\n errors_v = vars(errors)\n for k in errors_v:\n v = errors_v[k]\n print(f'MSEs for {k} -> max: {v.max()},\\t avg: {v.mean()},\\t std: {v.std()}')\n\n\ndef train_with_goblet_of_fire(results_path=None):\n\n book = dt.load_goblet_of_fire()\n\n np.random.seed(42)\n net = RNNet(m=100, K=book.K)\n\n # optimizer = RNNet.AdaGrad(net, eta=0.1)\n optimizer = RNNet.RMSProp(net, eta=0.001, gamma=0.9)\n\n config = {\n 'epochs': 10,\n 'output_folder': '../out',\n 'optimizer': optimizer,\n 'sequence_length': 25,\n 'record_interval': 1_000,\n 'test_length': 200\n }\n\n res = net.train(book, config)\n\n if results_path is not None:\n pickle(res, results_path)\n\n return res\n\n\ndef plot_results(res, fig_path=None):\n\n interval = res['interval']\n smooth_losses_by_interval = res['smooth_losses_by_interval']\n smooth_losses_by_epoch = res['smooth_losses_by_epoch']\n\n epochs = len(smooth_losses_by_epoch)\n iters_per_epoch = 1.0 * len(smooth_losses_by_interval) * interval / epochs\n\n smoother = np.array(smooth_losses_by_interval)\n smoother = simple_smooth_1d(smoother, 0.95)\n\n fig = plt.figure(figsize=(9, 4))\n\n ax1 = fig.add_subplot(111)\n ax1.plot(np.arange(len(smooth_losses_by_interval)) * interval, smooth_losses_by_interval)\n ax1.plot(np.arange(smoother.size) * interval, smoother)\n ax1.set_xlabel('step')\n ax1.set_ylabel('loss')\n\n ax2 = ax1.twiny()\n ax2.set_xlabel('epoch')\n ax2.set_xlim(ax1.get_xlim())\n ax2.set_xticks(np.arange(1,epochs+1) * iters_per_epoch)\n ax2.set_xticklabels(np.arange(1,epochs+1))\n\n ax2.grid()\n ax1.grid(axis='y')\n\n fig.tight_layout()\n fig.legend(['training loss', 'smoothed'], bbox_to_anchor=(0.98, 0.86), bbox_transform=fig.transFigure)\n\n if fig_path is not None:\n fig.savefig(fig_path, bbox_inches='tight')\n\n fig.show()\n\n\ndef print_evolution(res, interval, limit=None):\n smooth_losses = res['smooth_losses_by_interval']\n synth_samples = res['synthesized_text_by_interval']\n res_interval = res['interval']\n assert interval % res_interval == 0, 'Print interval must be a multiple of the recorded interval'\n selected_indexes = [x for x in range(0, len(synth_samples), interval // res_interval)]\n if limit is not None:\n selected_indexes = selected_indexes[:limit]\n # last_step = selected_indexes[-1] * res_interval\n # print(f'\\nModel evolution from step 1 to {last_step}:\\n')\n print('\\n')\n for i in selected_indexes:\n step = max(i * res_interval, 1)\n text = synth_samples[i]\n smooth_loss = smooth_losses[i]\n print(f'===> Step: {step}, smooth_loss: {round(smooth_loss, 4)}, synthesized:\\n{text}\\n\\n')\n\n\ndef synthesize_with_best_model():\n model_path = 
'../trained_models/2018-06-12-2205-e10.pkl'\n    if exists(model_path):\n        book = dt.load_goblet_of_fire()\n        net = RNNet.import_model(model_path)\n        np.random.seed(50)\n        print(net.synthesize(1000, book.char_to_one_hot, book.index_to_char))\n    else:\n        eprint('Best trained model not found!')\n\n\ndef main():\n\n    check_gradients()\n\n    if not exists(GOBLET_RESULTS_PATH):\n        train_with_goblet_of_fire(GOBLET_RESULTS_PATH)\n\n    results = unpickle(GOBLET_RESULTS_PATH)\n\n    plot_results(results, '../Report/Figs/training_goblet.eps')\n\n    print_evolution(results, 10_000, 11)\n\n    print(f'===> Passage from the final model (smooth_loss: {results[\"smooth_losses_by_epoch\"][-1]}):')\n    synthesize_with_best_model()\n\n\nif __name__ == '__main__':\n    main()\n" ]
[ [ "numpy.zeros", "matplotlib.pyplot.figure", "numpy.random.seed", "numpy.arange", "numpy.array" ] ]
mpekalski/Y8M
[ "24b61107a0f482fdb36ab8b15b768cea24e5808a" ]
[ "video_level_code/xp_frame_level_models.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains a collection of models which operate on variable-length sequences.\n\"\"\"\nimport math\n\nimport models\nimport video_level_models\nimport tensorflow as tf\nimport model_utils as utils\n\nimport tensorflow.contrib.slim as slim\nfrom tensorflow import flags\nfrom tensorflow import logging\n\nFLAGS = flags.FLAGS\n\n\nclass RangeLogisticModel(models.BaseModel):\n\n def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n \"\"\"Creates a model which uses a logistic classifier over the average of the\n frame-level features.\n\n This class is intended to be an example for implementors of frame level\n models. If you want to train a model over averaged features it is more\n efficient to average them beforehand rather than on the fly.\n\n Args:\n model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of\n input features.\n vocab_size: The number of classes in the dataset.\n num_frames: A vector of length 'batch' which indicates the number of\n frames for each video (before padding).\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n 'batch_size' x 'num_classes'.\n \"\"\"\n# num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n# feature_size = model_input.get_shape().as_list()[2]\n\n# denominators = tf.reshape(\n# tf.tile(num_frames, [1, feature_size]), [-1, feature_size])\n# avg_pooled = tf.reduce_sum(model_input,\n# axis=[1]) / denominators\n range_pooled = tf.reduce_max(model_input, axis=[1]) - \\\n tf.reduce_min(model_input, axis=[1])\n output = slim.fully_connected(\n range_pooled, vocab_size, activation_fn=tf.nn.sigmoid,\n weights_regularizer=slim.l2_regularizer(1e-4))\n return {\"predictions\": output}\n\nclass FNN_mvt_Model(models.BaseModel):\n\n def create_model(self, model_input, vocab_size, num_frames,\n l2_penalty=1e-4, is_training=True, **unused_params):\n \"\"\"Creates a model which uses a logistic classifier over the average of the\n frame-level features.\n\n This class is intended to be an example for implementors of frame level\n models. If you want to train a model over averaged features it is more\n efficient to average them beforehand rather than on the fly.\n\n Args:\n model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of\n input features.\n vocab_size: The number of classes in the dataset.\n num_frames: A vector of length 'batch' which indicates the number of\n frames for each video (before padding).\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. 
The dimensions of the tensor are\n 'batch_size' x 'num_classes'.\n \"\"\"\n \n inter_f_mean, inter_f_var = tf.nn.moments(model_input, [1])\n inter_f_std = tf.sqrt(inter_f_var)\n \n kk = 3\n xt = tf.transpose(model_input, perm=[0,2,1])\n tk = tf.nn.top_k(xt, kk).values \n\n logging.info( 'xt: {}'.format(xt.get_shape().as_list() ))\n logging.info( 'tk: {}'.format(tk.get_shape().as_list() )) \n \n topk = tf.reshape(tk, [-1, kk * tk.get_shape().as_list()[1]])\n logging.info( 'topk: {}'.format(topk.get_shape().as_list() )) \n \n# inter_f_feats = tf.concat([inter_f_mean, inter_f_std], 1)\n inter_f_feats = tf.concat([inter_f_mean, inter_f_std, topk], 1)\n \n logging.info('inter_f_mean: {}'.format(inter_f_mean.get_shape().as_list()))\n logging.info( 'feats: {}'.format(inter_f_feats.get_shape().as_list() )) \n \n tf.summary.histogram(\"inter_f_mean\", inter_f_mean)\n tf.summary.histogram(\"inter_f_std\", inter_f_std)\n \n with tf.name_scope('FNN_mvt_Model'):\n A0 = slim.batch_norm(\n inter_f_feats,\n center=True,\n scale=True,\n is_training=is_training,\n scope=\"BN\")\n \n h1Units = 3600\n A1 = slim.fully_connected(\n A0, h1Units, activation_fn=tf.nn.relu,\n weights_regularizer=slim.l2_regularizer(l2_penalty),\n scope='FC_H1')\n output = slim.fully_connected(\n A1, vocab_size, activation_fn=tf.nn.sigmoid,\n weights_regularizer=slim.l2_regularizer(l2_penalty),\n scope='FC_P')\n return {\"predictions\": output}\n\nclass DbofModel2(models.BaseModel):\n \"\"\"Creates a Deep Bag of Frames model.\n\n The model projects the features for each frame into a higher dimensional\n 'clustering' space, pools across frames in that space, and then\n uses a configurable video-level model to classify the now aggregated features.\n\n The model will randomly sample either frames or sequences of frames during\n training to speed up convergence.\n\n Args:\n model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of\n input features.\n vocab_size: The number of classes in the dataset.\n num_frames: A vector of length 'batch' which indicates the number of\n frames for each video (before padding).\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. 
The dimensions of the tensor are\n    'batch_size' x 'num_classes'.\n  \"\"\"\n\n  def create_model(self,\n                   model_input,\n                   vocab_size,\n                   num_frames,\n                   iterations=None,\n                   add_batch_norm=None,\n                   sample_random_frames=None,\n                   cluster_size=None,\n                   hidden_size=None,\n                   is_training=True,\n                   **unused_params):\n    iterations = iterations or FLAGS.iterations\n    add_batch_norm = add_batch_norm or FLAGS.dbof_add_batch_norm\n    random_frames = sample_random_frames or FLAGS.sample_random_frames\n    cluster_size = cluster_size or FLAGS.dbof_cluster_size\n    hidden1_size = hidden_size or FLAGS.dbof_hidden_size\n\n    num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n    if random_frames:\n      model_input = utils.SampleRandomFrames(model_input, num_frames,\n                                             iterations)\n    else:\n      model_input = utils.SampleRandomSequence(model_input, num_frames,\n                                               iterations)\n    max_frames = model_input.get_shape().as_list()[1]\n    feature_size = model_input.get_shape().as_list()[2]\n    reshaped_input = tf.reshape(model_input, [-1, feature_size])\n    tf.summary.histogram(\"input_hist\", reshaped_input)\n\n    if add_batch_norm:\n      reshaped_input = slim.batch_norm(\n          reshaped_input,\n          center=True,\n          scale=True,\n          is_training=is_training,\n          scope=\"input_bn\")\n\n    cluster_weights = tf.get_variable(\"cluster_weights\",\n      [feature_size, cluster_size],\n      initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))\n    tf.summary.histogram(\"cluster_weights\", cluster_weights)\n    activation = tf.matmul(reshaped_input, cluster_weights)\n    if add_batch_norm:\n      activation = slim.batch_norm(\n          activation,\n          center=True,\n          scale=True,\n          is_training=is_training,\n          scope=\"cluster_bn\")\n    else:\n      cluster_biases = tf.get_variable(\"cluster_biases\",\n        [cluster_size],\n        initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))\n      tf.summary.histogram(\"cluster_biases\", cluster_biases)\n      activation += cluster_biases\n    activation = tf.nn.relu6(activation)\n    tf.summary.histogram(\"cluster_output\", activation)\n\n    activation = tf.reshape(activation, [-1, max_frames, cluster_size])\n    activation = utils.FramePooling(activation, FLAGS.dbof_pooling_method)\n\n    hidden1_weights = tf.get_variable(\"hidden1_weights\",\n      [cluster_size, hidden1_size],\n      initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(cluster_size)))\n    tf.summary.histogram(\"hidden1_weights\", hidden1_weights)\n    activation = tf.matmul(activation, hidden1_weights)\n    if add_batch_norm:\n      activation = slim.batch_norm(\n          activation,\n          center=True,\n          scale=True,\n          is_training=is_training,\n          scope=\"hidden1_bn\")\n    else:\n      hidden1_biases = tf.get_variable(\"hidden1_biases\",\n        [hidden1_size],\n        initializer = tf.random_normal_initializer(stddev=0.01))\n      tf.summary.histogram(\"hidden1_biases\", hidden1_biases)\n      activation += hidden1_biases\n    activation = tf.nn.relu6(activation)\n    tf.summary.histogram(\"hidden1_output\", activation)\n\n    aggregated_model = getattr(video_level_models,\n                               FLAGS.video_level_classifier_model)\n    return aggregated_model().create_model(\n        model_input=activation,\n        vocab_size=vocab_size,\n        **unused_params)\n\nclass LstmModel2(models.BaseModel):\n\n  def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n    \"\"\"Creates a model which uses a stack of LSTMs to represent the video.\n\n    Args:\n      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of\n                   input features.\n      vocab_size: The number of classes in the dataset.\n      num_frames: A vector of length 'batch' which indicates the number of\n      frames for each video (before 
padding).\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n 'batch_size' x 'num_classes'.\n \"\"\"\n lstm_size = FLAGS.lstm_cells\n number_of_layers = FLAGS.lstm_layers\n\n ## Batch normalize the input\n stacked_lstm = tf.contrib.rnn.MultiRNNCell(\n [\n tf.contrib.rnn.BasicLSTMCell(\n lstm_size, forget_bias=1.0, state_is_tuple=False)\n for _ in range(number_of_layers)\n ], state_is_tuple=False)\n\n #loss = 0.0\n with tf.variable_scope(\"RNN\"):\n outputs, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,\n sequence_length=num_frames,\n dtype=tf.float32)\n\n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n return aggregated_model().create_model(\n model_input=state,\n vocab_size=vocab_size,\n num_mixtures=2,\n **unused_params)\n\nclass FMoeModel1(models.BaseModel):\n\n def create_model(self, model_input, vocab_size, num_frames,\n l2_penalty=1e-4, is_training=True, **unused_params):\n \"\"\"Creates a model which uses a logistic classifier over the average of the\n frame-level features.\n\n This class is intended to be an example for implementors of frame level\n models. If you want to train a model over averaged features it is more\n efficient to average them beforehand rather than on the fly.\n\n Args:\n model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of\n input features.\n vocab_size: The number of classes in the dataset.\n num_frames: A vector of length 'batch' which indicates the number of\n frames for each video (before padding).\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n 'batch_size' x 'num_classes'.\n \"\"\"\n\n \n inter_f_mean, inter_f_var = tf.nn.moments(model_input, [1])\n inter_f_std = tf.sqrt(inter_f_var)\n \n kk = 5\n xt = tf.transpose(model_input, perm=[0,2,1])\n tk = tf.nn.top_k(xt, kk).values \n\n logging.info( 'xt: {}'.format(xt.get_shape().as_list() ))\n logging.info( 'tk: {}'.format(tk.get_shape().as_list() )) \n \n topk = tf.reshape(tk, [-1, kk * tk.get_shape().as_list()[1]])\n logging.info( 'topk: {}'.format(topk.get_shape().as_list() )) \n \n# inter_f_feats = tf.concat([inter_f_mean, inter_f_std], 1)\n inter_f_feats = tf.concat([inter_f_mean, inter_f_std, topk], 1)\n \n logging.info('inter_f_mean: {}'.format(inter_f_mean.get_shape().as_list()))\n logging.info( 'feats: {}'.format(inter_f_feats.get_shape().as_list() )) \n \n tf.summary.histogram(\"inter_f_mean\", inter_f_mean)\n tf.summary.histogram(\"inter_f_std\", inter_f_std)\n \n A0 = slim.batch_norm(\n inter_f_feats,\n center=True,\n scale=True,\n is_training=is_training,\n scope=\"BN\")\n \n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n return aggregated_model().create_model(\n model_input=A0,\n vocab_size=vocab_size,\n num_mixtures=2,\n **unused_params)\n \nclass FMoeModel2(models.BaseModel):\n\n def create_model(self, model_input, vocab_size, num_frames,\n l2_penalty=1e-4, **unused_params):\n \"\"\"Creates a model which uses a logistic classifier over the average of the\n frame-level features.\n\n This class is intended to be an example for implementors of frame level\n models. 
If you want to train a model over averaged features it is more\n efficient to average them beforehand rather than on the fly.\n\n Args:\n model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of\n input features.\n vocab_size: The number of classes in the dataset.\n num_frames: A vector of length 'batch' which indicates the number of\n frames for each video (before padding).\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n 'batch_size' x 'num_classes'.\n \"\"\"\n# num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n# feature_size = model_input.get_shape().as_list()[2]\n# \n# logging.info('model_input shape: {}'.format(\n# model_input.get_shape().as_list()))\n#\n# denominators = tf.reshape(\n# tf.tile(num_frames, [1, feature_size]), [-1, feature_size])\n# avg_pooled = tf.reduce_sum(model_input, axis=[1]) / denominators\n \n avg_pooled = utils.FramePooling(model_input, 'average')\n \n logging.info( 'avg_pooled shape: {}'.format(\n avg_pooled.get_shape().as_list() )) \n \n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n return aggregated_model().create_model(\n model_input=avg_pooled,\n vocab_size=vocab_size,\n num_mixtures=2,\n **unused_params)\n" ]
[ [ "tensorflow.reduce_max", "tensorflow.reshape", "tensorflow.contrib.slim.l2_regularizer", "tensorflow.nn.top_k", "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.nn.dynamic_rnn", "tensorflow.nn.relu6", "tensorflow.name_scope", "tensorflow.concat", "tensorflow.summary.histogram", "tensorflow.random_normal_initializer", "tensorflow.reduce_min", "tensorflow.transpose", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.expand_dims", "tensorflow.nn.moments", "tensorflow.contrib.slim.batch_norm", "tensorflow.sqrt" ] ]
MaZhanyu007/MSDGAN
[ "037ad33025c29869dbc9cb233a45b8762d31179d" ]
[ "decoder.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n# In[2]:\n\n\nclass Decoder(nn.Module):\n def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout_rate, attention):\n super().__init__()\n \n self.output_dim = output_dim\n self.emb_dim = emb_dim\n self.enc_hid_dim = enc_hid_dim\n self.dec_hid_dim = dec_hid_dim\n self.dropout_rate = dropout_rate\n self.attention = attention\n \n self.embedding = nn.Embedding(output_dim, emb_dim)\n self.gru = nn.GRU(enc_hid_dim + emb_dim, dec_hid_dim, batch_first=True)\n self.fc = nn.Linear(enc_hid_dim + dec_hid_dim + emb_dim, output_dim)\n self.dropout = nn.Dropout(dropout_rate)\n \n def forward(self, input, hidden, encoder_outputs):\n # input = [batch_size]\n # hidden = [batch_size, dec_hid_dim]\n # encoder_outputs = [batch_size, seq_len, enc_hid_dim * 2]\n\n input = input.unsqueeze(1)\n # input = [batch_size, 1]\n \n embedded = self.dropout(self.embedding(input))\n # embedded = [batch_size, 1, emb_dim]\n \n a = self.attention(hidden, encoder_outputs)\n # a = [batch_size, seq_len]\n a = a.unsqueeze(1)\n # a = [batch_size, 1, seq_len]\n \n context = torch.bmm(a, encoder_outputs)\n # context = [batch_size, 1, enc_hid_dim * 2]\n \n gru_input = torch.cat((embedded, context), dim=2)\n # gru_input = [batch_size, 1, (enc hid dim * 2) + emb dim]\n \n output, hidden = self.gru(gru_input, hidden.unsqueeze(0))\n # output = [batch_size, seq_len, dec hid dim * n directions]\n # hidden = [n layers * n directions, batch size, dec hid dim]\n \n #seq_len, n layers and n directions will always be 1 in this decoder, therefore:\n #output = [batch_size, 1, dec_hid_dim]\n #hidden = [1, batch_size, dec_hid_dim]\n #this also means that output == hidden\n \n #assert (output == hidden).all()\n \n embedded = embedded.squeeze(1) #[batch_size, emb_dim]\n output = output.squeeze(1) #[batch_size, dec_hid_dim * n directions]??????????\n context = context.squeeze(1) #[batch_size, enc_hid_dim * 2]\n \n output = self.fc(torch.cat((output, context, embedded), dim=1))\n # output = [batch_size, output_dim]\n \n return output, hidden.squeeze(0)" ]
[ [ "torch.nn.Linear", "torch.nn.Embedding", "torch.nn.GRU", "torch.cat", "torch.bmm", "torch.nn.Dropout" ] ]
opesci/seigen
[ "7d12eab05ed5a857601babe2933aa804c853de66" ]
[ "tests/tiling/explosive_source.py" ]
[ "\"\"\"\nThis is an explicit DG method: we invert the mass matrix and perform\na matrix-vector multiplication to get the solution in a time step\n\"\"\"\n\nfrom math import *\nimport mpi4py\nimport numpy as np\nfrom time import time\nimport sys\nimport os\nimport cProfile\n\nfrom firedrake import *\nfrom firedrake.petsc import PETSc\n\nfrom pyop2.utils import cached_property\nfrom pyop2.profiling import timed_region\nfrom pyop2.base import _trace, Dat, DataSet\nfrom pyop2.fusion.interface import loop_chain\nfrom pyop2.logger import info, set_log_level, INFO\n\nimport coffee.base as ast\n\nfrom utils import parser, output_time, calculate_sdepth, FusionSchemes\n\n\nclass ElasticLF4(object):\n r\"\"\"An elastic wave equation solver, using the finite element method\n for spatial discretisation, and a fourth-order leap-frog time-stepping scheme.\"\"\"\n\n loop_chain_length = 28\n num_solves = 8\n\n def __init__(self, mesh, family, degree, dimension, output=1, params=None):\n r\"\"\" Initialise a new elastic wave simulation.\n\n :param mesh: The underlying computational mesh of vertices and edges.\n :param str family: Specify whether CG or DG should be used.\n :param int degree: Use polynomial basis functions of this degree.\n :param int dimension: The spatial dimension of the problem (1, 2 or 3).\n :param int output: period, in timesteps, to write solution fields to a file.\n :param dict params: simulation and optimisation parameters\n :returns: None\n \"\"\"\n self.degree = degree\n self.mesh = mesh\n self.dimension = dimension\n self.output = output\n\n self.tofile = params['tofile']\n\n self.S = TensorFunctionSpace(mesh, family, degree, name='S')\n self.U = VectorFunctionSpace(mesh, family, degree, name='U')\n # Assumes that the S and U function spaces are the same.\n self.S_tot_dofs = op2.MPI.COMM_WORLD.allreduce(self.S.dof_count, op=mpi4py.MPI.SUM)\n self.U_tot_dofs = op2.MPI.COMM_WORLD.allreduce(self.U.dof_count, op=mpi4py.MPI.SUM)\n info(\"Number of degrees of freedom (Velocity): %d\" % self.U_tot_dofs)\n info(\"Number of degrees of freedom (Stress): %d\" % self.S_tot_dofs)\n\n self.s = TrialFunction(self.S)\n self.v = TestFunction(self.S)\n self.u = TrialFunction(self.U)\n self.w = TestFunction(self.U)\n\n self.s0 = Function(self.S, name=\"StressOld\")\n self.sh1 = Function(self.S, name=\"StressHalf1\")\n self.stemp = Function(self.S, name=\"StressTemp\")\n self.sh2 = Function(self.S, name=\"StressHalf2\")\n self.s1 = Function(self.S, name=\"StressNew\")\n\n self.u0 = Function(self.U, name=\"VelocityOld\")\n self.uh1 = Function(self.U, name=\"VelocityHalf1\")\n self.utemp = Function(self.U, name=\"VelocityTemp\")\n self.uh2 = Function(self.U, name=\"VelocityHalf2\")\n self.u1 = Function(self.U, name=\"VelocityNew\")\n\n self.absorption_function = None\n self.source_function = None\n self.source_expression = None\n self._dt = None\n self._density = None\n self._mu = None\n self._l = None\n\n self.n = FacetNormal(self.mesh)\n self.I = Identity(self.dimension)\n\n # Tiling options\n self.tiling_size = params['tile_size']\n self.tiling_uf = params['num_unroll']\n self.tiling_mode = params['mode']\n self.tiling_halo = params['extra_halo']\n self.tiling_explicit = params['explicit_mode']\n self.tiling_explicit_id = params['explicit_mode_id']\n self.tiling_log = params['log']\n self.tiling_sdepth = params['s_depth']\n self.tiling_part = params['partitioning']\n self.tiling_coloring = params['coloring']\n self.tiling_glb_maps = params['use_glb_maps']\n self.tiling_prefetch = 
params['use_prefetch']\n\n # Mat-vec AST cache\n self.asts = {}\n\n if self.tofile:\n # File output streams\n platform = os.environ.get('NODENAME', 'unknown')\n tmpdir = os.environ['TMPDIR']\n base = os.path.join(tmpdir, 'output', platform,\n 'p%d' % self.degree, 'uf%d' % self.tiling_uf)\n if op2.MPI.COMM_WORLD.rank == 0:\n if not os.path.exists(base):\n os.makedirs(base)\n sub_dirs = [d for d in os.listdir(base)\n if os.path.isdir(os.path.join(base, d))]\n sub_dir = \"%d_em%d_part%s_tile%s\" % (len(sub_dirs),\n self.tiling_explicit_id,\n self.tiling_size if self.tiling_uf else 0,\n self.tiling_part if self.tiling_uf else 'None')\n base = os.path.join(base, sub_dir)\n os.makedirs(base)\n op2.MPI.COMM_WORLD.barrier()\n base = op2.MPI.COMM_WORLD.bcast(base, root=0)\n self.u_stream = File(os.path.join(base, 'velocity.pvd'))\n self.s_stream = File(os.path.join(base, 'stress.pvd'))\n\n @property\n def absorption(self):\n r\"\"\" The absorption coefficient :math:`\\sigma` for the absorption term\n\n .. math:: \\sigma\\mathbf{u}\n\n where :math:`\\mathbf{u}` is the velocity field.\n \"\"\"\n return self.absorption_function\n\n @absorption.setter\n def absorption(self, expression):\n r\"\"\" Setter function for the absorption field.\n :param firedrake.Expression expression: The expression to interpolate onto the absorption field.\n \"\"\"\n self.absorption_function.interpolate(expression)\n\n # Source term\n @property\n def source(self):\n r\"\"\" The source term on the RHS of the velocity (or stress) equation. \"\"\"\n return self.source_function\n\n @source.setter\n def source(self, expression):\n r\"\"\" Setter function for the source field.\n :param firedrake.Expression expression: The expression to interpolate onto the source field.\n \"\"\"\n self.source_function.interpolate(expression)\n\n def assemble_inverse_mass(self):\n r\"\"\" Compute the inverse of the consistent mass matrix for the velocity and stress equations.\n :returns: None\n \"\"\"\n # Inverse of the (consistent) mass matrix for the velocity equation.\n self.inverse_mass_velocity = assemble(inner(self.w, self.u)*dx, inverse=True)\n self.inverse_mass_velocity.assemble()\n self.imass_velocity = self.inverse_mass_velocity.M\n # Inverse of the (consistent) mass matrix for the stress equation.\n self.inverse_mass_stress = assemble(inner(self.v, self.s)*dx, inverse=True)\n self.inverse_mass_stress.assemble()\n self.imass_stress = self.inverse_mass_stress.M\n\n def copy_massmatrix_into_dat(self):\n\n # Copy the velocity mass matrix into a Dat\n vmat = self.imass_velocity.handle\n dofs_per_entity = self.U.fiat_element.entity_dofs()\n dofs_per_entity = sum(self.mesh.make_dofs_per_plex_entity(dofs_per_entity))\n arity = dofs_per_entity*self.U.topological.dim\n self.velocity_mass_asdat = Dat(DataSet(self.mesh.cell_set, arity*arity), dtype='double')\n istart, iend = vmat.getOwnershipRange()\n idxs = [PETSc.IS().createGeneral(np.arange(i, i+arity, dtype=np.int32),\n comm=PETSc.COMM_SELF)\n for i in range(istart, iend, arity)]\n submats = vmat.getSubMatrices(idxs, idxs)\n for i, m in enumerate(submats):\n self.velocity_mass_asdat.data[i] = m[:, :].flatten()\n info(\"Computed velocity mass matrix\")\n\n # Copy the stress mass matrix into a Dat\n smat = self.imass_stress.handle\n dofs_per_entity = self.S.fiat_element.entity_dofs()\n dofs_per_entity = sum(self.mesh.make_dofs_per_plex_entity(dofs_per_entity))\n arity = dofs_per_entity*self.S.topological.dim\n self.stress_mass_asdat = Dat(DataSet(self.mesh.cell_set, arity*arity), 
dtype='double')\n istart, iend = smat.getOwnershipRange()\n idxs = [PETSc.IS().createGeneral(np.arange(i, i+arity, dtype=np.int32),\n comm=PETSc.COMM_SELF)\n for i in range(istart, iend, arity)]\n submats = smat.getSubMatrices(idxs, idxs)\n for i, m in enumerate(submats):\n self.stress_mass_asdat.data[i] = m[:, :].flatten()\n info(\"Computed stress mass matrix\")\n\n @property\n def form_uh1(self):\n \"\"\" UFL for uh1 equation. \"\"\"\n F = inner(self.w, self.u)*dx - self.f(self.w, self.s0, self.u0, self.n, self.absorption)\n return F\n\n @cached_property\n def rhs_uh1(self):\n \"\"\" RHS for uh1 equation. \"\"\"\n return rhs(self.form_uh1)\n\n @property\n def form_stemp(self):\n \"\"\" UFL for stemp equation. \"\"\"\n F = inner(self.v, self.s)*dx - self.g(self.v, self.uh1, self.I, self.n, self.l, self.mu, self.source)\n return F\n\n @cached_property\n def rhs_stemp(self):\n \"\"\" RHS for stemp equation. \"\"\"\n return rhs(self.form_stemp)\n\n @property\n def form_uh2(self):\n \"\"\" UFL for uh2 equation. \"\"\"\n F = inner(self.w, self.u)*dx - self.f(self.w, self.stemp, self.u0, self.n, self.absorption)\n return F\n\n @cached_property\n def rhs_uh2(self):\n \"\"\" RHS for uh2 equation. \"\"\"\n return rhs(self.form_uh2)\n\n @property\n def form_u1(self):\n \"\"\" UFL for u1 equation. \"\"\"\n # Note that we have multiplied through by dt here.\n F = self.density*inner(self.w, self.u)*dx - self.density*inner(self.w, self.u0)*dx - self.dt*inner(self.w, self.uh1)*dx - ((self.dt**3)/24.0)*inner(self.w, self.uh2)*dx\n return F\n\n @cached_property\n def rhs_u1(self):\n \"\"\" RHS for u1 equation. \"\"\"\n return rhs(self.form_u1)\n\n @property\n def form_sh1(self):\n \"\"\" UFL for sh1 equation. \"\"\"\n F = inner(self.v, self.s)*dx - self.g(self.v, self.u1, self.I, self.n, self.l, self.mu, self.source)\n return F\n\n @cached_property\n def rhs_sh1(self):\n \"\"\" RHS for sh1 equation. \"\"\"\n return rhs(self.form_sh1)\n\n @property\n def form_utemp(self):\n \"\"\" UFL for utemp equation. \"\"\"\n F = inner(self.w, self.u)*dx - self.f(self.w, self.sh1, self.u1, self.n, self.absorption)\n return F\n\n @cached_property\n def rhs_utemp(self):\n \"\"\" RHS for utemp equation. \"\"\"\n return rhs(self.form_utemp)\n\n @property\n def form_sh2(self):\n \"\"\" UFL for sh2 equation. \"\"\"\n F = inner(self.v, self.s)*dx - self.g(self.v, self.utemp, self.I, self.n, self.l, self.mu, self.source)\n return F\n\n @cached_property\n def rhs_sh2(self):\n \"\"\" RHS for sh2 equation. \"\"\"\n return rhs(self.form_sh2)\n\n @property\n def form_s1(self):\n \"\"\" UFL for s1 equation. \"\"\"\n # Note that we have multiplied through by dt here.\n F = inner(self.v, self.s)*dx - inner(self.v, self.s0)*dx - self.dt*inner(self.v, self.sh1)*dx - ((self.dt**3)/24.0)*inner(self.v, self.sh2)*dx\n return F\n\n @cached_property\n def rhs_s1(self):\n \"\"\" RHS for s1 equation. \"\"\"\n return rhs(self.form_s1)\n\n def f(self, w, s0, u0, n, absorption=None):\n \"\"\" The RHS of the velocity equation. \"\"\"\n f = -inner(grad(w), s0)*dx + inner(avg(s0)*n('+'), w('+'))*dS + inner(avg(s0)*n('-'), w('-'))*dS\n if(absorption):\n f += -inner(w, absorption*u0)*dx\n return f\n\n def g(self, v, u1, I, n, l, mu, source=None):\n \"\"\" The RHS of the stress equation. 
\"\"\"\n g = - l*(v[i, j]*I[i, j]).dx(k)*u1[k]*dx + l*(jump(v[i, j], n[k])*I[i, j]*avg(u1[k]))*dS + l*(v[i, j]*I[i, j]*u1[k]*n[k])*ds - mu*inner(div(v), u1)*dx + mu*inner(avg(u1), jump(v, n))*dS - mu*inner(div(v.T), u1)*dx + mu*inner(avg(u1), jump(v.T, n))*dS + mu*inner(u1, dot(v, n))*ds + mu*inner(u1, dot(v.T, n))*ds\n if(source):\n g += inner(v, source)*dx\n return g\n\n def ast_matmul(self, F_a, implementation='optimized'):\n \"\"\"Generate an AST for a PyOP2 kernel performing a matrix-vector multiplication.\"\"\"\n\n # The number of dofs on each element is /ndofs*cdim/\n F_a_fs = F_a.function_space()\n ndofs = F_a_fs.fiat_element.entity_dofs()\n ndofs = sum(self.mesh.make_dofs_per_plex_entity(ndofs))\n cdim = F_a_fs.dim\n name = 'mat_vec_mul_kernel_%s' % F_a_fs.name\n\n identifier = (ndofs, cdim, name, implementation)\n if identifier in self.asts:\n return self.asts[identifier]\n\n from coffee import isa, options\n if cdim and cdim % isa['dp_reg'] == 0:\n simd_pragma = '#pragma simd reduction(+:sum)'\n else:\n simd_pragma = ''\n\n # Craft the AST\n if implementation == 'optimized' and cdim >= 4:\n body = ast.Incr(ast.Symbol('sum'),\n ast.Prod(ast.Symbol('A', ('i',), ((ndofs*cdim, 'j*%d + k' % cdim),)),\n ast.Symbol('B', ('j', 'k'))))\n body = ast.c_for('k', cdim, body, simd_pragma).children[0]\n body = [ast.Decl('const int', ast.Symbol('index'), init=ast.Symbol('i%%%d' % cdim)),\n ast.Decl('double', ast.Symbol('sum'), init=ast.Symbol('0.0')),\n ast.c_for('j', ndofs, body).children[0],\n ast.Assign(ast.Symbol('C', ('i/%d' % cdim, 'index')), 'sum')]\n body = ast.Block([ast.c_for('i', ndofs*cdim, body).children[0]])\n funargs = [ast.Decl('double* restrict', 'A'),\n ast.Decl('double *restrict *restrict', 'B'),\n ast.Decl('double *restrict *', 'C')]\n fundecl = ast.FunDecl('void', name, funargs, body, ['static', 'inline'])\n else:\n body = ast.Incr(ast.Symbol('C', ('i/%d' % cdim, 'index')),\n ast.Prod(ast.Symbol('A', ('i',), ((ndofs*cdim, 'j*%d + k' % cdim),)),\n ast.Symbol('B', ('j', 'k'))))\n body = ast.c_for('k', cdim, body).children[0]\n body = [ast.Decl('const int', ast.Symbol('index'), init=ast.Symbol('i%%%d' % cdim)),\n ast.Assign(ast.Symbol('C', ('i/%d' % cdim, 'index' % cdim)), '0.0'),\n ast.c_for('j', ndofs, body).children[0]]\n body = ast.Block([ast.c_for('i', ndofs*cdim, body).children[0]])\n funargs = [ast.Decl('double* restrict', 'A'),\n ast.Decl('double *restrict *restrict', 'B'),\n ast.Decl('double *restrict *', 'C')]\n fundecl = ast.FunDecl('void', name, funargs, body, ['static', 'inline'])\n\n # Track the AST for later fast retrieval\n self.asts[identifier] = fundecl\n\n return fundecl\n\n def solve(self, rhs, matrix_asdat, result):\n F_a = assemble(rhs)\n ast_matmul = self.ast_matmul(F_a)\n\n # Create the par loop (automatically added to the trace of loops to be executed)\n kernel = op2.Kernel(ast_matmul, ast_matmul.name)\n op2.par_loop(kernel, self.mesh.cell_set,\n matrix_asdat(op2.READ),\n F_a.dat(op2.READ, F_a.cell_node_map()),\n result.dat(op2.WRITE, result.cell_node_map()))\n\n def write(self, u=None, s=None, output=True):\n r\"\"\" Write the velocity and/or stress fields to file.\n :param firedrake.Function u: The velocity field.\n :param firedrake.Function s: The stress field.\n :returns: None\n \"\"\"\n _trace.evaluate_all()\n if output:\n with timed_region('i/o'):\n if(u):\n self.u_stream.write(u)\n if(s):\n # FIXME: Cannot currently write tensor valued fields to a VTU file.\n # See https://github.com/firedrakeproject/firedrake/issues/538\n #self.s_stream << s\n 
pass\n\n def run(self, T, TS=0):\n \"\"\" Run the elastic wave simulation until t = T or ntimesteps = TS.\n :param float T: The finish time of the simulation.\n :param float TS: The maximum number of timesteps performed; ignored if = 0.\n :returns: The final solution fields for velocity and stress.\n \"\"\"\n\n # Write out the initial condition.\n self.write(self.u1, self.s1, self.tofile)\n\n info(\"Generating inverse mass matrix\")\n # Pre-assemble the inverse mass matrices, which should stay\n # constant throughout the simulation (assuming no mesh adaptivity).\n start = time()\n self.assemble_inverse_mass()\n end = time()\n info(\"DONE! (Elapsed: %f s)\" % round(end - start, 3))\n op2.MPI.COMM_WORLD.barrier()\n info(\"Copying inverse mass matrix into a dat...\")\n start = time()\n self.copy_massmatrix_into_dat()\n end = time()\n info(\"DONE! (Elapsed: %f s)\" % round(end - start, 3))\n op2.MPI.COMM_WORLD.barrier()\n\n start = time()\n t = self.dt\n timestep = 0\n ntimesteps = sys.maxint if TS == 0 else TS\n\n while t <= T + 1e-12 and timestep < ntimesteps:\n if op2.MPI.COMM_WORLD.rank == 0 and timestep % self.output == 0:\n info(\"t = %f, (timestep = %d)\" % (t, timestep))\n with loop_chain(\"main1\",\n tile_size=self.tiling_size,\n num_unroll=self.tiling_uf,\n mode=self.tiling_mode,\n extra_halo=self.tiling_halo,\n explicit=self.tiling_explicit,\n use_glb_maps=self.tiling_glb_maps,\n use_prefetch=self.tiling_prefetch,\n coloring=self.tiling_coloring,\n ignore_war=True,\n log=self.tiling_log):\n # In case the source is time-dependent, update the time 't' here.\n if(self.source):\n with timed_region('source term update'):\n self.source_expression.t = t\n self.source = self.source_expression\n\n # Solve for the velocity vector field.\n self.solve(self.rhs_uh1, self.velocity_mass_asdat, self.uh1)\n self.solve(self.rhs_stemp, self.stress_mass_asdat, self.stemp)\n self.solve(self.rhs_uh2, self.velocity_mass_asdat, self.uh2)\n self.solve(self.rhs_u1, self.velocity_mass_asdat, self.u1)\n\n # Solve for the stress tensor field.\n self.solve(self.rhs_sh1, self.stress_mass_asdat, self.sh1)\n self.solve(self.rhs_utemp, self.velocity_mass_asdat, self.utemp)\n self.solve(self.rhs_sh2, self.stress_mass_asdat, self.sh2)\n self.solve(self.rhs_s1, self.stress_mass_asdat, self.s1)\n\n self.u0.assign(self.u1)\n self.s0.assign(self.s1)\n\n # Write out the new fields\n self.write(self.u1, self.s1, self.tofile and timestep % self.output == 0)\n\n # Move onto next timestep\n t += self.dt\n timestep += 1\n\n # Write out the final state of the fields\n self.write(self.u1, self.s1, self.tofile)\n\n end = time()\n\n return start, end, timestep, self.u1, self.s1\n\n\n# Helper stuff\n\ndef Vp(mu, l, density):\n r\"\"\" Calculate the P-wave velocity, given by\n\n .. math:: \\sqrt{\\frac{(\\lambda + 2\\mu)}{\\rho}}\n\n where :math:`\\rho` is the density, and :math:`\\lambda` and :math:`\\mu` are\n the first and second Lame parameters, respectively.\n\n :param mu: The second Lame parameter.\n :param l: The first Lame parameter.\n :param density: The density.\n :returns: The P-wave velocity.\n :rtype: float\n \"\"\"\n return sqrt((l + 2*mu)/density)\n\n\ndef Vs(mu, density):\n r\"\"\" Calculate the S-wave velocity, given by\n\n .. 
math:: \\sqrt{\\frac{\\mu}{\\rho}}\n\n where :math:`\\rho` is the density, and :math:`\\mu` is the second Lame parameter.\n\n :param mu: The second Lame parameter.\n :param density: The density.\n :returns: The P-wave velocity.\n :rtype: float\n \"\"\"\n return sqrt(mu/density)\n\n\ndef cfl_dt(dx, Vp, courant_number):\n r\"\"\" Computes the maximum permitted value for the timestep math:`\\delta t`.\n :param float dx: The characteristic element length.\n :param float Vp: The P-wave velocity.\n :param float courant_number: The desired Courant number\n :returns: The maximum permitted timestep, math:`\\delta t`.\n :rtype: float\n \"\"\"\n return (courant_number*dx)/Vp\n\n\nclass ExplosiveSourceLF4(object):\n\n def explosive_source_lf4(self, T=2.5, TS=0, Lx=300.0, Ly=150.0, h=2.5, cn=0.05,\n mesh_file=None, output=1, poly_order=2, params=None):\n\n tile_size = params['tile_size']\n num_unroll = params['num_unroll']\n extra_halo = params['extra_halo']\n part_mode = params['partitioning']\n explicit_mode = params['explicit_mode']\n\n if explicit_mode:\n fusion_scheme = FusionSchemes.get(explicit_mode, part_mode, tile_size)\n num_solves, params['explicit_mode'] = fusion_scheme\n else:\n num_solves = ElasticLF4.num_solves\n\n if mesh_file:\n mesh = Mesh(mesh_file)\n else:\n mesh = RectangleMesh(int(Lx/h), int(Ly/h), Lx, Ly)\n\n set_log_level(INFO)\n\n kwargs = {}\n if params['mode'] in ['tile', 'only_tile']:\n s_depth = calculate_sdepth(num_solves, num_unroll, extra_halo)\n if part_mode == 'metis':\n kwargs['reorder'] = ('metis-rcm', mesh.num_cells() / tile_size)\n else:\n s_depth = 1\n # FIXME: need s_depth in firedrake to be able to use this\n # kwargs['s_depth'] = s_depth\n params['s_depth'] = s_depth\n\n mesh.topology.init(**kwargs)\n slope(mesh, debug=True)\n\n # Instantiate the model\n self.elastic = ElasticLF4(mesh, \"DG\", poly_order, 2, output, params)\n\n info(\"S-depth used: %d\" % s_depth)\n info(\"Polynomial order: %d\" % poly_order)\n\n # Constants\n self.elastic.density = 1.0\n self.elastic.mu = 3600.0\n self.elastic.l = 3599.3664\n\n self.Vp = Vp(self.elastic.mu, self.elastic.l, self.elastic.density)\n self.Vs = Vs(self.elastic.mu, self.elastic.density)\n info(\"P-wave velocity: %f\" % self.Vp)\n info(\"S-wave velocity: %f\" % self.Vs)\n\n self.dx = h\n self.courant_number = cn\n self.elastic.dt = cfl_dt(self.dx, self.Vp, self.courant_number)\n info(\"Using a timestep of %f\" % self.elastic.dt)\n\n # Source\n exp_area = (44.5, 45.5, Ly - 1.5, Ly - 0.5)\n if poly_order == 1:\n # Adjust explosion area\n exp_area = (149.5, 150.5, Ly - 1.5, Ly - 0.5)\n a = 159.42\n self.elastic.source_expression = Expression(((\"x[0] >= %f && x[0] <= %f && x[1] >= %f && x[1] <= %f ? (-1.0 + 2*a*pow(t - 0.3, 2))*exp(-a*pow(t - 0.3, 2)) : 0.0\" % exp_area, \"0.0\"),\n (\"0.0\", \"x[0] >= %f && x[0] <= %f && x[1] >= %f && x[1] <= %f ? (-1.0 + 2*a*pow(t - 0.3, 2))*exp(-a*pow(t - 0.3, 2)) : 0.0\" % exp_area)), a=a, t=0)\n self.elastic.source_function = Function(self.elastic.S)\n self.elastic.source = self.elastic.source_expression\n\n # Absorption\n F = FunctionSpace(mesh, \"DG\", poly_order, name='F')\n self.elastic.absorption_function = Function(F)\n self.elastic.absorption = Expression(\"x[0] <= 20 || x[0] >= %f || x[1] <= 20.0 ? 
1000 : 0\" % (Lx - 20,))\n\n # Initial conditions\n uic = Expression(('0.0', '0.0'))\n self.elastic.u0.assign(Function(self.elastic.U).interpolate(uic))\n sic = Expression((('0', '0'), ('0', '0')))\n self.elastic.s0.assign(Function(self.elastic.S).interpolate(sic))\n\n # Run the simulation\n start, end, ntimesteps, u1, s1 = self.elastic.run(T, TS=TS)\n\n # Print runtime summary\n output_time(start, end,\n tofile=params['tofile'],\n verbose=params['verbose'],\n meshid=(\"h%s\" % h).replace('.', ''),\n ntimesteps=ntimesteps,\n nloops=ElasticLF4.loop_chain_length*num_unroll,\n partitioning=part_mode,\n tile_size=tile_size,\n extra_halo=extra_halo,\n explicit_mode=explicit_mode,\n glb_maps=params['use_glb_maps'],\n prefetch=params['use_prefetch'],\n coloring=params['coloring'],\n poly_order=poly_order,\n domain=os.path.splitext(os.path.basename(mesh.name))[0],\n function_spaces=[self.elastic.S, self.elastic.U])\n\n return u1, s1\n\n\nif __name__ == '__main__':\n set_log_level(INFO)\n\n # Parse the input\n args = parser()\n params = {\n 'num_unroll': args.num_unroll,\n 'tile_size': args.tile_size,\n 'mode': args.fusion_mode,\n 'partitioning': args.part_mode,\n 'coloring': args.coloring,\n 'extra_halo': args.extra_halo,\n 'explicit_mode': args.explicit_mode,\n 'explicit_mode_id': args.explicit_mode,\n 'use_glb_maps': args.glb_maps,\n 'use_prefetch': args.prefetch,\n 'log': args.log,\n 'tofile': args.tofile,\n 'verbose': args.verbose\n }\n\n # Set the kernel optimizaation level (default: O2)\n parameters['coffee']['optlevel'] = args.coffee_opt\n\n # Is it just a run to check correctness?\n if args.check:\n Lx, Ly, h, time_max, tolerance = 20, 20, 2.5, 0.01, 1e-10\n info(\"Checking correctness of original and tiled versions, with:\")\n info(\" (Lx, Ly, T, tolerance)=%s\" % str((Lx, Ly, time_max, tolerance)))\n info(\" %s\" % params)\n # Run the tiled variant\n u1, s1 = ExplosiveSourceLF4().explosive_source_lf4(time_max, Lx, Ly, h,\n sys.maxint, params)\n # Run the original code\n original = {'num_unroll': 0, 'tile_size': 0, 'mode': None,\n 'partitioning': 'chunk', 'extra_halo': 0}\n u1_orig, s1_orig = ExplosiveSourceLF4().explosive_source_lf4(time_max, Lx, Ly, h,\n sys.maxint, original)\n # Check output\n info(\"Checking output...\")\n assert np.allclose(u1.dat.data, u1_orig.dat.data, rtol=1e-10)\n assert np.allclose(s1.dat.data, s1_orig.dat.data, rtol=1e-10)\n info(\"Results OK!\")\n sys.exit(0)\n\n # Set the input mesh\n if args.mesh_file:\n info(\"Using the unstructured mesh %s\" % args.mesh_file)\n kwargs = {'T': args.time_max, 'TS': args.timesteps_max, 'mesh_file': args.mesh_file,\n 'h': args.ms, 'cn': args.cn, 'output': args.output, 'poly_order': args.poly_order,\n 'params': params}\n else:\n Lx, Ly = eval(args.mesh_size)\n info(\"Using the structured mesh with values (Lx,Ly,h)=%s\" % str((Lx, Ly, args.ms)))\n kwargs = {'T': args.time_max, 'TS': args.timesteps_max, 'Lx': Lx, 'Ly': Ly, 'h': args.ms,\n 'output': args.output, 'poly_order': args.poly_order, 'params': params}\n\n info(\"h=%f, courant number=%f\" % (args.ms, args.cn))\n\n if args.profile:\n cProfile.run('ExplosiveSourceLF4().explosive_source_lf4(**kwargs)',\n 'log_rank%d.cprofile' % op2.MPI.COMM_WORLD.rank)\n else:\n u1, s1 = ExplosiveSourceLF4().explosive_source_lf4(**kwargs)\n" ]
[ [ "numpy.arange", "numpy.allclose" ] ]
isabella232/gps_building_blocks
[ "86ef8be60a42cd12e27696007589388b7b053f4f" ]
[ "py/gps_building_blocks/analysis/exp_design/ab_testing_design_test.py" ]
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for gps_building_blocks.analysis.exp_design.ab_testing_design.\"\"\"\n\nfrom absl.testing import absltest\nimport numpy as np\nfrom gps_building_blocks.analysis.exp_design import ab_testing_design\n\nBASELINE_CONVERSION_RATE_PERCENTAGE = 5\nEXPECTED_UPLIFT_PERCENTAGE = 10\nLABELS = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\nPREDICTIONS = np.array([\n 0.7, 0.63, 0.4, 0.77, 0.45, 0.8, 0.41, 0.82, 0.7, 0.6, 0.5, 0.45, 0.74,\n 0.11, 0.21, 0.05, 0.67, 0.79, 0.60, 0.10\n])\n\n\nclass ABTestingExperimentalDesignTest(absltest.TestCase):\n\n def test_calc_chisquared_sample_size_returns_correct_values(self):\n result_sample_size = ab_testing_design.calc_chisquared_sample_size(\n baseline_conversion_rate_percentage=BASELINE_CONVERSION_RATE_PERCENTAGE,\n expected_uplift_percentage=EXPECTED_UPLIFT_PERCENTAGE)\n\n self.assertEqual(result_sample_size, 14913.0)\n\n def test_calc_chisquared_sample_size_change_power_and_confidence(self):\n result_sample_size = ab_testing_design.calc_chisquared_sample_size(\n baseline_conversion_rate_percentage=BASELINE_CONVERSION_RATE_PERCENTAGE,\n expected_uplift_percentage=EXPECTED_UPLIFT_PERCENTAGE,\n power_percentage=90,\n confidence_level_percentage=99)\n\n self.assertEqual(result_sample_size, 28271.0)\n\n def test_calc_chisquared_sample_sizes_for_bins_returns_correct_values(self):\n results = ab_testing_design.calc_chisquared_sample_sizes_for_bins(\n labels=LABELS, probability_predictions=PREDICTIONS, number_bins=3)\n\n self.assertEqual(results.shape, (24, 7))\n self.assertListEqual(\n list(results.columns), [\n 'bin_number', 'bin_size', 'conv_rate_percentage',\n 'uplift_percentage', 'power_percentage',\n 'confidence_level_percentage', 'sample_size'\n ])\n self.assertListEqual(\n list(results['sample_size']), [\n 248.0, 314.0, 343.0, 421.0, 62.0, 79.0, 86.0, 106.0, 928.0, 1178.0,\n 1285.0, 1577.0, 232.0, 295.0, 322.0, 395.0, 1031.0, 1309.0, 1428.0,\n 1752.0, 258.0, 328.0, 357.0, 438.0\n ])\n\n def test_resulted_bin_metrics_does_not_contain_nas(self):\n results = ab_testing_design.calc_chisquared_sample_sizes_for_bins(\n labels=LABELS, probability_predictions=PREDICTIONS, number_bins=3)\n\n self.assertFalse(results.isna().values.any())\n\n def test_calc_chisquared_sample_sizes_for_cumulative_bins_returns_right_vals(\n self):\n results = ab_testing_design.calc_chisquared_sample_sizes_for_cumulative_bins(\n labels=LABELS, probability_predictions=PREDICTIONS, number_bins=5)\n\n self.assertEqual(results.shape, (40, 8))\n self.assertListEqual(\n list(results.columns), [\n 'cumulative_bin_number', 'bin_size', 'bin_size_percentage',\n 'conv_rate_percentage', 'uplift_percentage', 'power_percentage',\n 'confidence_level_percentage', 'sample_size'\n ])\n self.assertListEqual(\n list(results['sample_size']), [\n 207.0, 262.0, 286.0, 351.0, 52.0, 66.0, 72.0, 88.0, 371.0, 471.0,\n 514.0, 631.0, 93.0, 118.0, 129.0, 158.0, 442.0, 561.0, 612.0, 751.0,\n 111.0, 
141.0, 153.0, 188.0, 371.0, 471.0, 514.0, 631.0, 93.0, 118.0,\n 129.0, 158.0, 619.0, 785.0, 857.0, 1051.0, 155.0, 197.0, 215.0,\n 263.0\n ])\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.array" ] ]
ashishpatel26/mealpy
[ "69e8dc727e15527e31ac5ace1debe92a0bc7d828" ]
[ "mealpy/fake/RHO.py" ]
[ "#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 14:53, 17/03/2020 %\n# %\n# Email: [email protected] %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieu1995 %\n#-------------------------------------------------------------------------------------------------------%\n\nfrom numpy.random import uniform, normal\nfrom numpy.linalg import norm\nfrom numpy import exp, power, pi, zeros, array, mean, ones, dot\nfrom math import gamma\nfrom copy import deepcopy\nfrom mealpy.root import Root\n\n\nclass OriginalRHO(Root):\n \"\"\"\n The original version of: Rhino Herd Optimization (RHO)\n (A Novel Metaheuristic Algorithm inspired by Rhino Herd Behavior)\n Link:\n https://doi.org/10.3384/ecp171421026\n \"\"\"\n\n def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True,\n epoch=750, pop_size=100, c=0.53, a=2831, r=0.04, A=1):\n Root.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose)\n self.epoch = epoch\n self.pop_size = pop_size\n self.c = c # shape parameter - default = 0.53 > 0\n self.a = a # scale parameter - default = 2831 > 0\n self.r = r # default = 0.04\n self.A = A # the area of each grid cell - default = 1\n\n def train(self):\n pop = [self.create_solution() for _ in range(self.pop_size)]\n g_best = self.get_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)\n\n # Epoch loop\n for epoch in range(self.epoch):\n\n pos_list = array([item[self.ID_POS] for item in pop])\n fit_list = array([item[self.ID_FIT] for item in pop])\n fx_list = deepcopy(fit_list)\n pos_center = mean(pos_list, axis=0)\n\n ## Each individual loop\n for i in range(0, self.pop_size):\n # Eq. 1\n exp_component = -1 * power(norm(pop[i][self.ID_POS] - pos_center) / self.a, 2.0 / self.c)\n fx = 2 * exp(exp_component) / (self.c ** 2 * pi * self.a ** 2 * gamma(self.c))\n fx_list[i] = fx\n\n # Eq. 7\n s_component = ones(self.problem_size)\n for j in range(0, self.problem_size):\n sum_temp = 0\n for i in range(0, self.pop_size):\n sum_temp += fx_list[i] * (1 + pop[i][self.ID_POS][j] / (self.EPSILON + pop[i][self.ID_FIT]))\n s_component[j] = self.A * sum_temp\n\n for i in range(0, self.pop_size):\n x_new = pop[i][self.ID_POS]\n for j in range(0, self.problem_size):\n # Eq. 7\n s_x = fx_list[i] * (1 + pop[i][self.ID_FIT] * pop[i][self.ID_POS][j]) / s_component[j]\n\n # Eq. 
9\n if uniform() <= 0.5:\n x_new[j] = pop[i][self.ID_POS][j] - uniform() * s_x * pop[i][self.ID_POS][j]\n else:\n x_new[j] = pop[i][self.ID_POS][j] + uniform() * s_x * pop[i][self.ID_POS][j]\n x_new = self.amend_position_faster(x_new)\n fit = self.get_fitness_position(x_new)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [x_new, fit]\n\n g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n self.loss_train.append(g_best[self.ID_FIT])\n if self.verbose:\n print(\"> Epoch: {}, Best fit: {}\".format(epoch + 1, g_best[self.ID_FIT]))\n self.solution = g_best\n return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train\n\n\nclass BaseRHO(Root):\n \"\"\"\n My version of: Rhino Herd Optimization (RHO)\n (A Novel Metaheuristic Algorithm inspired by Rhino Herd Behavior)\n Notes:\n + Remove third loop\n \"\"\"\n\n def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True,\n epoch=750, pop_size=100, c=0.53, a=2831, r=0.04, A=1):\n Root.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose)\n self.epoch = epoch\n self.pop_size = pop_size\n self.c = c # shape parameter - default = 0.53 > 0\n self.a = a # scale parameter - default = 2831 > 0\n self.r = r # default = 0.04\n self.A = A # the area of each grid cell - default = 1\n\n def train(self):\n pop = [self.create_solution() for _ in range(self.pop_size)]\n g_best = self.get_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)\n pop_size = self.pop_size\n\n # Epoch loop\n for epoch in range(self.epoch):\n pop_new = deepcopy(pop)\n\n pos_list = array([item[self.ID_POS] for item in pop])\n fit_list = array([item[self.ID_FIT] for item in pop])\n fx_list = deepcopy(fit_list)\n pos_center = mean(pos_list, axis=0)\n\n ## Calculate the fx for each individual\n for i in range(0, pop_size):\n # Eq. 1\n exp_component = -1 * power(norm(pop[i][self.ID_POS] - pos_center) / self.a , 2.0/self.c )\n fx = 2 * exp(exp_component) / (self.c ** 2 * pi * self.a ** 2 * gamma(self.c))\n fx_list[i] = fx\n\n # print(fx_list)\n\n # Eq. 
7\n sum_temp = zeros(self.problem_size)\n for i in range(0, pop_size):\n sum_temp += fx_list[i] * (1 + pop[i][self.ID_POS] * pop[i][self.ID_FIT])\n sum_temp = self.A * sum_temp\n\n for i in range(0, pop_size):\n s_x = fx_list[i] * (1 + pop[i][self.ID_POS]/pop[i][self.ID_FIT]) / sum_temp\n if uniform() <= 0.5:\n x_new = pop[i][self.ID_POS] - uniform() * dot(s_x, pop[i][self.ID_POS])\n else:\n x_new = pop[i][self.ID_POS] + uniform() * dot(s_x, pop[i][self.ID_POS])\n x_new = self.amend_position_faster(x_new)\n fit = self.get_fitness_position(x_new)\n if fit < pop[i][self.ID_FIT]:\n pop_new[i] = [x_new, fit]\n\n if epoch % 100 == 0:\n pop_size = self.pop_size\n pop_new = sorted(pop_new, key=lambda item: item[self.ID_FIT])\n pop = deepcopy(pop_new[:pop_size])\n else:\n pop_size = pop_size + int(self.r * pop_size)\n n_new = pop_size - len(pop)\n for i in range(0, n_new):\n pop_new.extend([self.create_solution()])\n pop = deepcopy(pop_new)\n\n g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n self.loss_train.append(g_best[self.ID_FIT])\n if self.verbose:\n print(\"> Epoch: {}, Best fit: {}\".format(epoch + 1, g_best[self.ID_FIT]))\n self.solution = g_best\n return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train\n\n
class LevyRHO(BaseRHO):\n \"\"\"\n My modified version of: Rhino Herd Optimization (RHO)\n (A Novel Metaheuristic Algorithm inspired by Rhino Herd Behavior)\n Notes:\n + Change the flow of algorithm\n + Uses normal in equation instead of uniform\n + Uses levy-flight instead of uniform-equation\n \"\"\"\n\n def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True,\n epoch=750, pop_size=100, c=0.53, a=2831, r=0.04, A=1):\n BaseRHO.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size, c, a, r, A)\n\n\n def train(self):\n pop = [self.create_solution(minmax=0) for _ in range(self.pop_size)]\n g_best = self.get_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)\n pop_size = self.pop_size\n\n # Epoch loop\n for epoch in range(self.epoch):\n pop_new = deepcopy(pop)\n\n pos_list = array([item[self.ID_POS] for item in pop])\n pos_center = mean(pos_list, axis=0)\n fx_list = zeros(pop_size)\n\n ## Calculate the fx for each individual\n for i in range(0, pop_size):\n # Eq. 1\n exp_component = -1 * power( norm(pop[i][self.ID_POS] - pos_center) / self.a , 2.0/self.c )\n fx = 2 * exp(exp_component) / (self.c ** 2 * pi * self.a ** 2 * gamma(self.c))\n fx_list[i] = fx\n #print(fx_list)\n # Eq. 7\n sum_temp = zeros(self.problem_size)\n for i in range(0, self.pop_size):\n sum_temp += fx_list[i] * (1 + pop[i][self.ID_POS] / (pop[i][self.ID_FIT] + self.EPSILON))\n sum_temp = self.A * sum_temp\n\n
for i in range(0, pop_size):\n s_x = fx_list[i] * (1 + pop[i][self.ID_FIT] * pop[i][self.ID_POS]) / sum_temp\n if uniform() < 0.5:\n x_new = pop[i][self.ID_POS] - normal() * dot(s_x, pop[i][self.ID_POS])\n else:\n x_new = self.levy_flight(epoch+1, pop[i][self.ID_POS], g_best[self.ID_POS])\n x_new = self.amend_position_faster(x_new)\n fit = self.get_fitness_position(x_new)\n if fit < pop[i][self.ID_FIT]:\n pop_new[i] = [x_new, fit]\n\n if epoch % 100 == 0:\n pop_size = self.pop_size\n pop_new = sorted(pop_new, key=lambda item: item[self.ID_FIT])\n pop = deepcopy(pop_new[:pop_size])\n else:\n pop_size = pop_size + int(self.r * pop_size)\n n_new = pop_size - len(pop)\n for i in range(0, n_new):\n pop_new.extend([self.create_solution()])\n pop = deepcopy(pop_new)\n\n ## Make sure the population does not have duplicates.\n new_set = set()\n for idx, obj in enumerate(pop):\n if tuple(obj[self.ID_POS].tolist()) in new_set:\n pop[idx] = self.create_solution()\n else:\n new_set.add(tuple(obj[self.ID_POS].tolist()))\n\n g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n self.loss_train.append(g_best[self.ID_FIT])\n if self.verbose:\n print(\"> Epoch: {}, Pop Size: {}, Best Fit: {}\".format(epoch+1, pop_size, g_best[self.ID_FIT]))\n self.solution = g_best\n return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train\n\n" ]
[ [ "numpy.random.uniform", "numpy.ones", "numpy.zeros", "numpy.random.normal", "numpy.exp", "numpy.array", "numpy.dot", "numpy.linalg.norm", "numpy.mean" ] ]
intel-isl/MetaLearningTradeoffs
[ "bb1b849742a959310f3b9b630bb76ae3509a5d4a" ]
[ "maml_zoo/baselines/zero_baseline.py" ]
[ "from maml_zoo.baselines.base import Baseline\nimport numpy as np\n\n\nclass ZeroBaseline(Baseline):\n \"\"\"\n Dummy baseline\n \"\"\"\n\n def __init__(self):\n super(ZeroBaseline, self).__init__()\n\n def get_param_values(self, **kwargs):\n \"\"\"\n Returns the parameter values of the baseline object\n\n Returns:\n (None): coefficients of the baseline\n\n \"\"\"\n return None\n\n def set_param_values(self, value, **kwargs):\n \"\"\"\n Sets the parameter values of the baseline object\n\n Args:\n value (None): coefficients of the baseline\n\n \"\"\"\n pass\n\n def fit(self, paths, **kwargs):\n \"\"\"\n Improves the quality of zeroes output by baseline\n\n Args:\n paths: list of paths\n\n \"\"\"\n pass\n\n def predict(self, path):\n \"\"\"\n Produces some zeroes\n\n Args:\n path (dict): dict of lists/numpy array containing trajectory / path information\n such as \"observations\", \"rewards\", ...\n\n Returns:\n (np.ndarray): numpy array of the same length as paths[\"observations\"] specifying the reward baseline\n \n \"\"\"\n return np.zeros_like(path[\"rewards\"])" ]
[ [ "numpy.zeros_like" ] ]
ouyangyike/Inference-Algorithm
[ "ac3470e2fbc4415174b32ecc2e2f3f101da1ca38" ]
[ "logistic regression/logistic_adam/adam_train_loss .py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom logistic_adam import *\n\n\n#learing rate = 1,batch_size = 500, epoch=15, lamda = 0.01\nlogging = runLogistic(1,500,15,0.01)\n#print(logging)\nplt.plot(logging[:,0],marker='+',label='learning rate = 1')\n\n#learing rate = 0.1,batch_size = 500, epoch=15, lamda = 0.01\nlogging = runLogistic(0.1,500,15,0.01)\n#print(logging)\nplt.plot(logging[:,0],marker='*',label='learning rate = 0.1')\n\n#learing rate = 0.01,batch_size = 500, epoch=15, lamda = 0.01\nlogging = runLogistic(0.01,500,15,0.01)\n#print(logging)\nplt.plot(logging[:,0],marker='h',label='learning rate = 0.01')\n\n#learing rate = 0.001,batch_size = 500, epoch=15, lamda = 0.01\nlogging = runLogistic(0.001,500,15,0.01)\n#print(logging)\nplt.plot(logging[:,0],marker='d',label='learning rate = 0.001')\n\n\nplt.legend(loc='upper right')\nplt.title('Plot of Train_CrossEntropy vs. Iterations with batch_size=500')\nplt.xlabel('Iterations')\nplt.ylabel('Train_CrossEntropy')\nplt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
NinaTian98369/HypoGen
[ "14f192ecc1ef0c6fc5864f0816ef61885dc9e864" ]
[ "Code/HypoBertClas/pybert/test/predicter.py" ]
[ "#encoding:utf-8\nimport torch\nimport numpy as np\nfrom ..utils.utils import model_device,load_bert\n\nclass Predicter(object):\n def __init__(self,\n model,\n logger,\n n_gpu,\n model_path\n ):\n self.model = model\n self.logger = logger\n self.width = 30\n self.model, self.device = model_device(n_gpu= n_gpu, model=self.model, logger=self.logger)\n loads = load_bert(model_path=model_path,model = self.model)\n self.model = loads[0]\n\n def show_info(self,batch_id,n_batch):\n recv_per = int(100 * (batch_id + 1) / n_batch)\n if recv_per >= 100:\n recv_per = 100\n # show bar\n show_bar = f\"\\r[predict]{batch_id+1}/{n_batch}[{int(self.width * recv_per / 100) * '>':<{self.width}s}]{recv_per}%\"\n print(show_bar,end='')\n\n def predict(self,data):\n all_logits = None\n self.model.eval()\n n_batch = len(data)\n with torch.no_grad():\n for step, (input_ids, input_mask, segment_ids, label_ids) in enumerate(data):\n input_ids = input_ids.to(self.device)\n input_mask = input_mask.to(self.device)\n segment_ids = segment_ids.to(self.device)\n logits = self.model(input_ids, segment_ids, input_mask)\n logits = logits.sigmoid()\n self.show_info(step,n_batch)\n if all_logits is None:\n all_logits = logits.detach().cpu().numpy()\n else:\n all_logits = np.concatenate([all_logits,logits.detach().cpu().numpy()],axis = 0)\n return all_logits\n\n\n\n\n\n\n" ]
[ [ "torch.no_grad" ] ]
StarWang/detext
[ "66f071ec2cebf5e54e7d1de40936b5f281c2a69b" ]
[ "src/smart_compose/train/data_fn.py" ]
[ "import tensorflow as tf\nfrom functools import partial\n\nfrom smart_compose.utils.parsing_utils import get_input_files, InputFtrType, iterate_items_with_list_val\n\n\ndef _read_specified_features(inputs, feature_type2name):\n \"\"\"Only reads in features specified in the DeText arguments\"\"\"\n required_inputs = {}\n for _, ftr_name_list in iterate_items_with_list_val(feature_type2name):\n for ftr_name in ftr_name_list:\n required_inputs[ftr_name] = inputs[ftr_name]\n return required_inputs\n\n\n_FTR_TYPE_TO_SCHEMA = {\n InputFtrType.TARGET_COLUMN_NAME: tf.io.FixedLenFeature(shape=[], dtype=tf.string)\n}\n\n\ndef _get_tfrecord_feature_parsing_schema(feature_type_2_name: dict):\n \"\"\"Returns parsing schema for input TFRecord\n\n :param feature_type_2_name: Features mapping from feature types to feature names\n \"\"\"\n ftr_name_2_schema = dict()\n for ftr_type, ftr_name_lst in iterate_items_with_list_val(feature_type_2_name):\n for ftr_name in ftr_name_lst:\n ftr_name_2_schema[ftr_name] = _FTR_TYPE_TO_SCHEMA[ftr_type]\n\n return ftr_name_2_schema\n\n\ndef _cast_features_to_smaller_dtype(example, feature_type_2_names: dict):\n \"\"\"Casts tensor to smaller storage dtype. int64 -> int32, float64 -> float32\"\"\"\n\n def _cast_to_dtype_of_smaller_size(t):\n if t.dtype == tf.int64:\n return tf.cast(t, dtype=tf.int32)\n elif t.dtype == tf.float64:\n return tf.cast(t, dtype=tf.float32)\n else:\n return t\n\n for ftr_type, ftr_name_lst in iterate_items_with_list_val(feature_type_2_names):\n for ftr_name in ftr_name_lst:\n example[ftr_name] = _cast_to_dtype_of_smaller_size(example[ftr_name])\n return example\n\n\n_FTR_TYPE_TO_DENSE_DEFAULT_VAL = {\n InputFtrType.TARGET_COLUMN_NAME: '',\n}\n\n\ndef input_fn_tfrecord(input_pattern,\n batch_size,\n mode,\n feature_type_2_name: dict,\n block_length=100,\n prefetch_size=tf.data.experimental.AUTOTUNE,\n num_parallel_calls=tf.data.experimental.AUTOTUNE,\n input_pipeline_context=None):\n \"\"\"\n Data input function for training given TFRecord\n \"\"\"\n output_buffer_size = 1000\n\n input_files = get_input_files(input_pattern)\n feature_type_2_name = feature_type_2_name.copy()\n if len(input_files) > 1: # Multiple input files\n # Preprocess files concurrently, and interleave blocks of block_length records from each file\n dataset = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n # Shard input when using distributed training strategy\n if mode == tf.estimator.ModeKeys.TRAIN and input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:\n dataset = dataset.shard(input_pipeline_context.num_input_pipelines,\n input_pipeline_context.input_pipeline_id)\n\n dataset = dataset.shuffle(buffer_size=len(input_files))\n\n dataset = dataset.interleave(tf.data.TFRecordDataset, block_length=block_length,\n num_parallel_calls=num_parallel_calls)\n else:\n dataset = tf.data.TFRecordDataset(input_files[0])\n\n # Parse and preprocess data\n dataset = tfrecord_transform_fn(dataset,\n batch_size,\n mode,\n feature_type_2_name,\n output_buffer_size,\n prefetch_size)\n return dataset\n\n\ndef _split_features_and_labels(example, feature_type_2_name: dict):\n \"\"\"Split inputs into two parts: features and label\"\"\"\n target_ftr_name = feature_type_2_name[InputFtrType.TARGET_COLUMN_NAME]\n labels = {\n target_ftr_name: example.pop(target_ftr_name)\n }\n\n return example, labels\n\n\ndef tfrecord_transform_fn(dataset,\n batch_size,\n mode,\n feature_type_2_name,\n output_buffer_size,\n prefetch_size=tf.data.experimental.AUTOTUNE,\n 
num_parallel_calls=tf.data.experimental.AUTOTUNE):\n \"\"\" Preprocesses datasets including\n 1. dataset shuffling\n 2. record parsing\n 3. padding and batching\n \"\"\"\n if mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(output_buffer_size)\n dataset = dataset.repeat()\n\n def _process_data(record, features_schema):\n example = tf.io.parse_single_example(serialized=record, features=features_schema)\n example = _cast_features_to_smaller_dtype(example, feature_type_2_name)\n features, labels = _split_features_and_labels(example, feature_type_2_name)\n return features, labels\n\n features_schema = _get_tfrecord_feature_parsing_schema(feature_type_2_name)\n dataset = dataset.map(partial(_process_data, features_schema=features_schema),\n num_parallel_calls=num_parallel_calls)\n\n dataset = (dataset\n .batch(batch_size, drop_remainder=True)\n .prefetch(prefetch_size))\n return dataset\n" ]
[ [ "tensorflow.data.TFRecordDataset", "tensorflow.io.parse_single_example", "tensorflow.cast", "tensorflow.io.FixedLenFeature", "tensorflow.constant" ] ]
wensun/baselines
[ "81b7b988918de2c1c2f5fa9f38b7716608efc125" ]
[ "baselines/ddpg/main.py" ]
[ "import argparse\nimport time\nimport os\nimport logging\nfrom baselines import logger, bench\nfrom baselines.common.misc_util import (\n set_global_seeds,\n boolean_flag,\n)\n#import baselines.ddpg.training as training\nimport training as training\nfrom baselines.ddpg.models import Actor, Critic\nfrom baselines.ddpg.memory import Memory\nfrom baselines.ddpg.noise import *\n\nimport gym\nimport tensorflow as tf\nfrom mpi4py import MPI\n\ndef run(env_id, seed, noise_type, layer_norm, evaluation, **kwargs):\n # Configure things.\n rank = MPI.COMM_WORLD.Get_rank()\n if rank != 0:\n logger.set_level(logger.DISABLED)\n\n # Create envs.\n env = gym.make(env_id)\n env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))\n\n if evaluation and rank==0:\n eval_env = gym.make(env_id)\n eval_env = bench.Monitor(eval_env, os.path.join(logger.get_dir(), 'gym_eval'))\n #env = bench.Monitor(env, None)\n else:\n eval_env = None\n\n # Parse noise_type\n action_noise = None\n param_noise = None\n nb_actions = env.action_space.shape[-1]\n for current_noise_type in noise_type.split(','):\n current_noise_type = current_noise_type.strip()\n if current_noise_type == 'none':\n pass\n elif 'adaptive-param' in current_noise_type:\n _, stddev = current_noise_type.split('_')\n param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))\n elif 'normal' in current_noise_type:\n _, stddev = current_noise_type.split('_')\n action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))\n elif 'ou' in current_noise_type:\n _, stddev = current_noise_type.split('_')\n action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))\n else:\n raise RuntimeError('unknown noise type \"{}\"'.format(current_noise_type))\n\n # Configure components.\n memory = Memory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape)\n critic = Critic(layer_norm=layer_norm)\n actor = Actor(nb_actions, layer_norm=layer_norm)\n\n # Seed everything to make things reproducible.\n seed = seed + 1000000 * rank\n logger.info('rank {}: seed={}, logdir={}'.format(rank, seed, logger.get_dir()))\n tf.reset_default_graph()\n set_global_seeds(seed)\n env.seed(seed)\n if eval_env is not None:\n eval_env.seed(seed)\n\n # Disable logging for rank != 0 to avoid noise.\n if rank == 0:\n start_time = time.time()\n training.train(env=env, eval_env=eval_env, param_noise=param_noise,\n action_noise=action_noise, actor=actor, critic=critic, memory=memory, **kwargs)\n env.close()\n if eval_env is not None:\n eval_env.close()\n if rank == 0:\n logger.info('total runtime: {}s'.format(time.time() - start_time))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--env-id', type=str, default='HalfCheetah-v2')\n boolean_flag(parser, 'render-eval', default=False)\n boolean_flag(parser, 'layer-norm', default=True)\n boolean_flag(parser, 'render', default=False)\n boolean_flag(parser, 'normalize-returns', default=False)\n boolean_flag(parser, 'normalize-observations', default=True)\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--critic-l2-reg', type=float, default=1e-2)\n parser.add_argument('--batch-size', type=int, default=64) # per MPI worker\n parser.add_argument('--actor-lr', type=float, default=1e-4)\n 
parser.add_argument('--critic-lr', type=float, default=1e-3)\n boolean_flag(parser, 'popart', default=False)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--reward-scale', type=float, default=1.)\n parser.add_argument('--clip-norm', type=float, default=None)\n parser.add_argument('--nb-epochs', type=int, default=500) # with default settings, perform 1M steps total, was 500\n parser.add_argument('--nb-epoch-cycles', type=int, default=20)\n parser.add_argument('--nb-train-steps', type=int, default=50) # per epoch cycle and MPI worker\n parser.add_argument('--nb-eval-steps', type=int, default=1000) # per epoch cycle and MPI worker\n parser.add_argument('--nb-rollout-steps', type=int, default=100) # per epoch cycle and MPI worker\n parser.add_argument('--noise-type', type=str, default='adaptive-param_0.2') # choices are adaptive-param_xx, ou_xx, normal_xx, none\n parser.add_argument('--num-timesteps', type=int, default=None)\n parser.add_argument('--alg', type=str, default='DDPG') # DDPG or DDPGRM\n #boolean_flag(parser, 'evaluation', default=False)\n \n boolean_flag(parser, 'evaluation', default=True) #turn evaluation on \n args = parser.parse_args()\n # we don't directly specify timesteps for this script, so make sure that if we do specify them\n # they agree with the other parameters. default: 1M total steps\n \n\n eval_steps_per_epoch = args.nb_epoch_cycles*args.nb_eval_steps #default: 1000*20 = 10K (~ 20 episodes)\n print(args)\n if args.num_timesteps is not None:\n assert(args.num_timesteps == args.nb_epochs * args.nb_epoch_cycles * args.nb_rollout_steps)\n dict_args = vars(args)\n del dict_args['num_timesteps']\n return dict_args\n\n\nif __name__ == '__main__':\n args = parse_args()\n if MPI.COMM_WORLD.Get_rank() == 0:\n logger.configure()\n # Run actual script.\n run(**args)\n" ]
[ [ "tensorflow.reset_default_graph" ] ]
Ziaeemehr/brian2
[ "0d28f61881a033f877fb333b5e93c56e5c479b4b" ]
[ "brian2/tests/test_codegen.py" ]
[ "\nfrom collections import namedtuple\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom brian2 import prefs, clear_cache, _cache_dirs_and_extensions\nfrom brian2.codegen.cpp_prefs import compiler_supports_c99\nfrom brian2.codegen.optimisation import optimise_statements\nfrom brian2.codegen.translation import (analyse_identifiers,\n get_identifiers_recursively,\n parse_statement,\n make_statements,\n )\nfrom brian2.codegen.statements import Statement\nfrom brian2.codegen.codeobject import CodeObject\nfrom brian2.parsing.sympytools import str_to_sympy, sympy_to_str\nfrom brian2.core.variables import Subexpression, Variable, Constant, ArrayVariable\nfrom brian2.core.functions import Function, DEFAULT_FUNCTIONS, DEFAULT_CONSTANTS\nfrom brian2.devices.device import auto_target, device\nfrom brian2.units.fundamentalunits import Unit\nfrom brian2.units import second, ms\n\nFakeGroup = namedtuple('FakeGroup', ['variables'])\n\[email protected]_independent\ndef test_auto_target():\n # very basic test that the \"auto\" codegen target is useable\n assert issubclass(auto_target(), CodeObject)\n\n\[email protected]_independent\ndef test_analyse_identifiers():\n '''\n Test that the analyse_identifiers function works on a simple clear example.\n '''\n code = '''\n a = b+c\n d = e+f\n '''\n known = {'b': Variable(name='b'),\n 'c': Variable(name='c'),\n 'd': Variable(name='d'),\n 'g': Variable(name='g')}\n \n defined, used_known, dependent = analyse_identifiers(code, known)\n assert 'a' in defined # There might be an additional constant added by the\n # loop-invariant optimisation\n assert used_known == {'b', 'c', 'd'}\n assert dependent == {'e', 'f'}\n\n\[email protected]_independent\ndef test_get_identifiers_recursively():\n '''\n Test finding identifiers including subexpressions.\n '''\n variables = {'sub1': Subexpression(name='sub1',\n dtype=np.float32, expr='sub2 * z',\n owner=FakeGroup(variables={}),\n device=None),\n 'sub2': Subexpression(name='sub2',\n dtype=np.float32, expr='5 + y',\n owner=FakeGroup(variables={}),\n device=None),\n 'x': Variable(name='x')}\n identifiers = get_identifiers_recursively(['_x = sub1 + x'],\n variables)\n assert identifiers == {'x', '_x', 'y', 'z', 'sub1', 'sub2'}\n\n\[email protected]_independent\ndef test_write_to_subexpression():\n variables = {\n 'a': Subexpression(name='a', dtype=np.float32,\n owner=FakeGroup(variables={}), device=None,\n expr='2*z'),\n 'z': Variable(name='z')\n }\n\n # Writing to a subexpression is not allowed\n code = 'a = z'\n with pytest.raises(SyntaxError):\n make_statements(code, variables, np.float32)\n\n\[email protected]_independent\ndef test_repeated_subexpressions():\n variables = {\n 'a': Subexpression(name='a', dtype=np.float32,\n owner=FakeGroup(variables={}), device=None,\n expr='2*z'),\n 'x': Variable(name='x'),\n 'y': Variable(name='y'),\n 'z': Variable(name='z')\n }\n # subexpression a (referring to z) is used twice, but can be reused the\n # second time (no change to z)\n code = '''\n x = a\n y = a\n '''\n scalar_stmts, vector_stmts = make_statements(code, variables, np.float32)\n assert len(scalar_stmts) == 0\n assert [stmt.var for stmt in vector_stmts] == ['a', 'x', 'y']\n assert vector_stmts[0].constant\n\n code = '''\n x = a\n z *= 2\n '''\n scalar_stmts, vector_stmts = make_statements(code, variables, np.float32)\n assert len(scalar_stmts) == 0\n assert [stmt.var for stmt in vector_stmts] == ['a', 'x', 'z']\n # Note that we currently do not mark the subexpression as constant in this\n # case, because its use after 
the \"z *=2\" line would actually redefine it.\n # Our algorithm is currently not smart enough to detect that it is actually\n # not used afterwards\n\n # a refers to z, therefore we have to redefine a after z changed, and a\n # cannot be constant\n code = '''\n x = a\n z *= 2\n y = a\n '''\n scalar_stmts, vector_stmts = make_statements(code, variables, np.float32)\n assert len(scalar_stmts) == 0\n assert [stmt.var for stmt in vector_stmts] == ['a', 'x', 'z', 'a', 'y']\n assert not any(stmt.constant for stmt in vector_stmts)\n\n\[email protected]_independent\ndef test_nested_subexpressions():\n '''\n This test checks that code translation works with nested subexpressions.\n '''\n code = '''\n x = a + b + c\n c = 1\n x = a + b + c\n d = 1\n x = a + b + c\n '''\n variables = {\n 'a': Subexpression(name='a', dtype=np.float32, owner=FakeGroup(variables={}), device=None,\n expr='b*b+d'),\n 'b': Subexpression(name='b', dtype=np.float32, owner=FakeGroup(variables={}), device=None,\n expr='c*c*c'),\n 'c': Variable(name='c'),\n 'd': Variable(name='d'),\n }\n scalar_stmts, vector_stmts = make_statements(code, variables, np.float32)\n assert len(scalar_stmts) == 0\n evalorder = ''.join(stmt.var for stmt in vector_stmts)\n # This is the order that variables ought to be evaluated in (note that\n # previously this test did not expect the last \"b\" evaluation, because its\n # value did not change (c was not changed). We have since removed this\n # subexpression caching, because it did not seem to apply in practical\n # use cases)\n assert evalorder == 'baxcbaxdbax'\n\[email protected]_independent\ndef test_apply_loop_invariant_optimisation():\n variables = {'v': Variable('v', scalar=False),\n 'w': Variable('w', scalar=False),\n 'dt': Constant('dt', dimensions=second.dim, value=0.1*ms),\n 'tau': Constant('tau', dimensions=second.dim, value=10*ms),\n 'exp': DEFAULT_FUNCTIONS['exp']}\n statements = [Statement('v', '=', 'dt*w*exp(-dt/tau)/tau + v*exp(-dt/tau)', '', np.float32),\n Statement('w', '=', 'w*exp(-dt/tau)', '', np.float32)]\n scalar, vector = optimise_statements([], statements, variables)\n # The optimisation should pull out at least exp(-dt / tau)\n assert len(scalar) >= 1\n assert np.issubdtype(scalar[0].dtype, np.floating)\n assert scalar[0].var == '_lio_1'\n assert len(vector) == 2\n assert all('_lio_' in stmt.expr for stmt in vector)\n\[email protected]_independent\ndef test_apply_loop_invariant_optimisation_integer():\n variables = {'v': Variable('v', scalar=False),\n 'N': Constant('N', 10),\n 'b': Variable('b', scalar=True, dtype=int),\n 'c': Variable('c', scalar=True, dtype=int),\n 'd': Variable('d', scalar=True, dtype=int),\n 'y': Variable('y', scalar=True, dtype=float),\n 'z': Variable('z', scalar=True, dtype=float),\n 'w': Variable('w', scalar=True, dtype=float),\n }\n statements = [Statement('v', '=', 'v % (2*3*N)', '', np.float32),\n # integer version doesn't get rewritten but float version does\n Statement('a', ':=', 'b//(c//d)', '', int),\n Statement('x', ':=', 'y/(z/w)', '', float),\n ]\n scalar, vector = optimise_statements([], statements, variables)\n assert len(scalar) == 3\n assert np.issubdtype(scalar[0].dtype, np.signedinteger)\n assert scalar[0].var == '_lio_1'\n expr = scalar[0].expr.replace(' ', '')\n assert expr=='6*N' or expr=='N*6'\n assert np.issubdtype(scalar[1].dtype, np.signedinteger)\n assert scalar[1].var == '_lio_2'\n expr = scalar[1].expr.replace(' ', '')\n assert expr=='b//(c//d)'\n assert np.issubdtype(scalar[2].dtype, np.floating)\n assert scalar[2].var == 
'_lio_3'\n expr = scalar[2].expr.replace(' ', '')\n assert expr=='(y*w)/z' or expr=='(w*y)/z'\n\[email protected]_independent\ndef test_apply_loop_invariant_optimisation_boolean():\n variables = {'v1': Variable('v1', scalar=False),\n 'v2': Variable('v2', scalar=False),\n 'N': Constant('N', 10),\n 'b': Variable('b', scalar=True, dtype=bool),\n 'c': Variable('c', scalar=True, dtype=bool),\n 'int': DEFAULT_FUNCTIONS['int'],\n 'foo': Function(lambda x: None,\n arg_units=[Unit(1)], return_unit=Unit(1),\n arg_types=['boolean'], return_type='float',\n stateless=False)\n }\n # The calls for \"foo\" cannot be pulled out, since foo is marked as stateful\n statements = [Statement('v1', '=', '1.0*int(b and c)', '', np.float32),\n Statement('v1', '=', '1.0*foo(b and c)', '', np.float32),\n Statement('v2', '=', 'int(not b and True)', '', np.float32),\n Statement('v2', '=', 'foo(not b and True)', '', np.float32)\n ]\n scalar, vector = optimise_statements([], statements, variables)\n assert len(scalar) == 4\n assert scalar[0].expr == '1.0 * int(b and c)'\n assert scalar[1].expr == 'b and c'\n assert scalar[2].expr == 'int((not b) and True)'\n assert scalar[3].expr == '(not b) and True'\n assert len(vector) == 4\n assert vector[0].expr == '_lio_1'\n assert vector[1].expr == 'foo(_lio_2)'\n assert vector[2].expr == '_lio_3'\n assert vector[3].expr == 'foo(_lio_4)'\n\[email protected]_independent\ndef test_apply_loop_invariant_optimisation_no_optimisation():\n variables = {'v1': Variable('v1', scalar=False),\n 'v2': Variable('v2', scalar=False),\n 'N': Constant('N', 10),\n 's1': Variable('s1', scalar=True, dtype=float),\n 's2': Variable('s2', scalar=True, dtype=float),\n 'rand': DEFAULT_FUNCTIONS['rand']\n }\n statements = [\n # This hould not be simplified to 0!\n Statement('v1', '=', 'rand() - rand()', '', np.float),\n Statement('v1', '=', '3*rand() - 3*rand()', '', np.float),\n Statement('v1', '=', '3*rand() - ((1+2)*rand())', '', np.float),\n # This should not pull out rand()*N\n Statement('v1', '=', 's1*rand()*N', '', np.float),\n Statement('v1', '=', 's2*rand()*N', '', np.float),\n # This is not important mathematically, but it would change the numbers\n # that are generated\n Statement('v1', '=', '0*rand()*N', '', np.float),\n Statement('v1', '=', '0/rand()*N', '', np.float)\n ]\n scalar, vector = optimise_statements([], statements, variables)\n for vs in vector[:3]:\n assert vs.expr.count('rand()') == 2, 'Expression should still contain two rand() calls, but got ' + str(vs)\n for vs in vector[3:]:\n assert vs.expr.count('rand()') == 1, 'Expression should still contain a rand() call, but got ' + str(vs)\n\[email protected]_independent\ndef test_apply_loop_invariant_optimisation_simplification():\n variables = {'v1': Variable('v1', scalar=False),\n 'v2': Variable('v2', scalar=False),\n 'i1': Variable('i1', scalar=False, dtype=int),\n 'N': Constant('N', 10)\n }\n statements = [\n # Should be simplified to 0.0\n Statement('v1', '=', 'v1 - v1', '', np.float),\n Statement('v1', '=', 'N*v1 - N*v1', '', np.float),\n Statement('v1', '=', 'v1*N * 0', '', np.float),\n Statement('v1', '=', 'v1 * 0', '', np.float),\n Statement('v1', '=', 'v1 * 0.0', '', np.float),\n Statement('v1', '=', '0.0 / (v1*N)', '', np.float),\n # Should be simplified to 0\n Statement('i1', '=', 'i1*N * 0', '', np.int),\n Statement('i1', '=', '0 * i1', '', np.int),\n Statement('i1', '=', '0 * i1*N', '', np.int),\n Statement('i1', '=', 'i1 * 0', '', np.int),\n # Should be simplified to v1*N\n Statement('v2', '=', '0 + v1*N', '', 
np.float),\n Statement('v2', '=', 'v1*N + 0.0', '', np.float),\n Statement('v2', '=', 'v1*N - 0', '', np.float),\n Statement('v2', '=', 'v1*N - 0.0', '', np.float),\n Statement('v2', '=', '1 * v1*N', '', np.float),\n Statement('v2', '=', '1.0 * v1*N', '', np.float),\n Statement('v2', '=', 'v1*N / 1.0', '', np.float),\n Statement('v2', '=', 'v1*N / 1', '', np.float),\n # Should be simplified to i1\n Statement('i1', '=', 'i1*1', '', int),\n Statement('i1', '=', 'i1//1', '', int),\n Statement('i1', '=', 'i1+0', '', int),\n Statement('i1', '=', '0+i1', '', int),\n Statement('i1', '=', 'i1-0', '', int),\n # Should *not* be simplified (because it would change the type,\n # important for integer division, for example)\n Statement('v1', '=', 'i1*1.0', '', float),\n Statement('v1', '=', '1.0*i1', '', float),\n Statement('v1', '=', 'i1/1.0', '', float),\n Statement('v1', '=', 'i1/1', '', float),\n Statement('v1', '=', 'i1+0.0', '', float),\n Statement('v1', '=', '0.0+i1', '', float),\n Statement('v1', '=', 'i1-0.0', '', float),\n ## Should *not* be simplified, flooring division by 1 changes the value\n Statement('v1', '=', 'v2//1.0', '', float),\n Statement('i1', '=', 'i1//1.0', '', float) # changes type\n ]\n scalar, vector = optimise_statements([], statements, variables)\n assert len(scalar) == 0\n for s in vector[:6]:\n assert s.expr == '0.0'\n for s in vector[6:10]:\n assert s.expr == '0', s.expr # integer\n for s in vector[10:18]:\n expr = s.expr.replace(' ', '')\n assert expr == 'v1*N' or expr == 'N*v1'\n for s in vector[18:23]:\n expr = s.expr.replace(' ', '')\n assert expr == 'i1'\n for s in vector[23:27]:\n expr = s.expr.replace(' ', '')\n assert expr == '1.0*i1' or expr == 'i1*1.0' or expr == 'i1/1.0'\n for s in vector[27:30]:\n expr = s.expr.replace(' ', '')\n assert expr == '0.0+i1' or expr == 'i1+0.0'\n for s in vector[30:31]:\n expr = s.expr.replace(' ', '')\n assert expr == 'v2//1.0' or expr == 'v2//1'\n for s in vector[31:]:\n expr = s.expr.replace(' ', '')\n assert expr == 'i1//1.0'\n\n\[email protected]_independent\ndef test_apply_loop_invariant_optimisation_constant_evaluation():\n variables = {'v1': Variable('v1', scalar=False),\n 'v2': Variable('v2', scalar=False),\n 'i1': Variable('i1', scalar=False, dtype=int),\n 'N': Constant('N', 10),\n 's1': Variable('s1', scalar=True, dtype=float),\n 's2': Variable('s2', scalar=True, dtype=float),\n 'exp': DEFAULT_FUNCTIONS['exp']\n }\n statements = [\n Statement('v1', '=', 'v1 * (1 + 2 + 3)', '', np.float),\n Statement('v1', '=', 'exp(N)*v1', '', np.float),\n Statement('v1', '=', 'exp(0)*v1', '', np.float),\n ]\n scalar, vector = optimise_statements([], statements, variables)\n # exp(N) should be pulled out of the vector statements, the rest should be\n # evaluated in place\n assert len(scalar) == 1\n assert scalar[0].expr == 'exp(N)'\n assert len(vector) == 3\n expr = vector[0].expr.replace(' ', '')\n assert expr == '_lio_1*v1' or 'v1*_lio_1'\n expr = vector[1].expr.replace(' ', '')\n assert expr == '6.0*v1' or 'v1*6.0'\n assert vector[2].expr == 'v1'\n\n\[email protected]_independent\ndef test_automatic_augmented_assignments():\n # We test that statements that could be rewritten as augmented assignments\n # are correctly rewritten (using sympy to test for symbolic equality)\n variables = {\n 'x': ArrayVariable('x', owner=None, size=10,\n device=device),\n 'y': ArrayVariable('y', owner=None, size=10,\n device=device),\n 'z': ArrayVariable('y', owner=None, size=10,\n device=device),\n 'b': ArrayVariable('b', owner=None, size=10,\n 
dtype=np.bool, device=device),\n 'clip': DEFAULT_FUNCTIONS['clip'],\n 'inf': DEFAULT_CONSTANTS['inf']\n }\n statements = [\n # examples that should be rewritten\n # Note that using our approach, we will never get -= or /= but always\n # the equivalent += or *= statements\n ('x = x + 1.0', 'x += 1.0'),\n ('x = 2.0 * x', 'x *= 2.0'),\n ('x = x - 3.0', 'x += -3.0'),\n ('x = x/2.0', 'x *= 0.5'),\n ('x = y + (x + 1.0)', 'x += y + 1.0'),\n ('x = x + x', 'x *= 2.0'),\n ('x = x + y + z', 'x += y + z'),\n ('x = x + y + z', 'x += y + z'),\n # examples that should not be rewritten\n ('x = 1.0/x', 'x = 1.0/x'),\n ('x = 1.0', 'x = 1.0'),\n ('x = 2.0*(x + 1.0)', 'x = 2.0*(x + 1.0)'),\n ('x = clip(x + y, 0.0, inf)', 'x = clip(x + y, 0.0, inf)'),\n ('b = b or False', 'b = b or False')\n ]\n for orig, rewritten in statements:\n scalar, vector = make_statements(orig, variables, np.float32)\n try: # we augment the assertion error with the original statement\n assert len(scalar) == 0, 'Did not expect any scalar statements but got ' + str(scalar)\n assert len(vector) == 1, 'Did expect a single statement but got ' + str(vector)\n statement = vector[0]\n expected_var, expected_op, expected_expr, _ = parse_statement(rewritten)\n assert expected_var == statement.var, 'expected write to variable %s, not to %s' % (expected_var, statement.var)\n assert expected_op == statement.op, 'expected operation %s, not %s' % (expected_op, statement.op)\n # Compare the two expressions using sympy to allow for different order etc.\n sympy_expected = str_to_sympy(expected_expr)\n sympy_actual = str_to_sympy(statement.expr)\n assert sympy_expected == sympy_actual, ('RHS expressions \"%s\" and \"%s\" are not identical' % (sympy_to_str(sympy_expected),\n sympy_to_str(sympy_actual)))\n except AssertionError as ex:\n raise AssertionError('Transformation for statement \"%s\" gave an unexpected result: %s' % (orig, str(ex)))\n\n\ndef test_clear_cache():\n target = prefs.codegen.target\n if target == 'numpy':\n assert 'numpy' not in _cache_dirs_and_extensions\n with pytest.raises(ValueError):\n clear_cache('numpy')\n else:\n assert target in _cache_dirs_and_extensions\n cache_dir, _ = _cache_dirs_and_extensions[target]\n # Create a file that should not be there\n fname = os.path.join(cache_dir, 'some_file.py')\n open(fname, 'w').close()\n # clear_cache should refuse to clear the directory\n with pytest.raises(IOError):\n clear_cache(target)\n\n os.remove(fname)\n\n\ndef test_compiler_c99():\n # On a user's computer, we do not know whether the compiler actually\n # has C99 support, so we just check whether the test does not raise an\n # error\n c99_support = compiler_supports_c99()\n # On our Azure test server we know that the compilers support C99\n if os.environ.get('AGENT_OS', ''):\n assert c99_support\n\n\nif __name__ == '__main__':\n test_auto_target()\n test_analyse_identifiers()\n test_get_identifiers_recursively()\n test_write_to_subexpression()\n test_repeated_subexpressions()\n test_nested_subexpressions()\n test_apply_loop_invariant_optimisation()\n test_apply_loop_invariant_optimisation_integer()\n test_apply_loop_invariant_optimisation_boolean()\n test_apply_loop_invariant_optimisation_no_optimisation()\n test_apply_loop_invariant_optimisation_simplification()\n test_apply_loop_invariant_optimisation_constant_evaluation()\n test_automatic_augmented_assignments()\n test_clear_cache()\n\n" ]
[ [ "numpy.issubdtype" ] ]
Kronemeyer/project-athena
[ "0e79cba1c4d30146326ce7bd311f69f2ee845e80" ]
[ "src/attacks/attack.py" ]
[ "\"\"\"\nImplement white-box attacks on top of IBM ART.\n@author: Ying Meng (y(dot)meng201011(at)gmail(dot)com)\n\"\"\"\n\nimport numpy as np\nimport torch\n\n# from art.attacks.evasion.fast_gradient import FastGradientMethod\n# from art.attacks.evasion.projected_gradient_descent import ProjectedGradientDescent\nfrom art.attacks.evasion.carlini import CarliniL2Method, CarliniLInfMethod\nfrom art.attacks.evasion.deepfool import DeepFool\nfrom art.attacks.evasion.saliency_map import SaliencyMapMethod\nfrom art.attacks.evasion.iterative_method import BasicIterativeMethod\nfrom art.attacks.evasion.spatial_transformation import SpatialTransformation\nfrom art.attacks.evasion.hop_skip_jump import HopSkipJump\nfrom art.attacks.evasion.zoo import ZooAttack\n\nfrom attacks.fast_gradient import FastGradientMethod\nfrom attacks.pgd import ProjectedGradientDescent\nfrom attacks.utils import WHITEBOX_ATTACK as ATTACK\n\n\ndef generate(model, data_loader, attack_args, device=None):\n \"\"\"\n Generate adversarial examples.\n :param model: an instances of art.classifiers.classifier. The targeted model.\n :param data_loader: a tuple of benign samples and corresponding true labels.\n :param attack_args: dictionary. adversarial configurations.\n :param device: string. cuda (for gpu) or cpu.\n :return:\n \"\"\"\n attack = attack_args.get('attack').lower()\n eot = attack_args.get('eot')\n\n if eot and attack not in [ATTACK.FGSM.value, ATTACK.PGD.value]:\n raise NotImplementedError(\"`EOT` is not supported for {} attack yet.\".format(attack))\n\n print(\">>> Generating {}(EOT:{}) examples.\".format(attack_args.get('description'),\n \"ON\" if eot else \"OFF\"))\n\n if device is None:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n images, labels = data_loader\n\n if attack == ATTACK.FGSM.value:\n return _fgsm(model, images, labels, attack_args)\n elif attack == ATTACK.CW.value:\n return _cw(model, images, labels, attack_args)\n elif attack == ATTACK.PGD.value:\n return _pgd(model, images, labels, attack_args)\n elif attack == ATTACK.BIM.value:\n return _bim(model, images, labels, attack_args)\n elif attack == ATTACK.JSMA.value:\n return _jsma(model, images, labels, attack_args)\n elif attack == ATTACK.DF.value:\n return _df(model, images, labels, attack_args)\n elif attack == ATTACK.MIM.value:\n return _mim(model, images, labels, attack_args)\n elif attack == ATTACK.OP.value:\n return _op(model, images, labels, attack_args)\n elif attack == ATTACK.HOP_SKIP_JUMP.value:\n raise _hop_skip_jump(model, images, labels, attack_args)\n elif attack == ATTACK.SPATIAL_TRANS.value:\n return _spatial(model, images, labels, attack_args)\n elif attack == ATTACK.ZOO.value:\n return _zoo(model, images, labels, attack_args)\n else:\n raise ValueError('{} is not supported.'.format(attack))\n\n\ndef _fgsm(model, data, labels, attack_args):\n \"\"\"\n Fast Gradient Sign Method\n Explaining and Harnessing Adversarial Examples\n by Ian J. Goodfellow, Jonathon Shlens, Christian Szegedy\n ``https://arxiv.org/abs/1412.6572``\n :param model:\n :param data:\n :param labels:\n :param attack_args:\n :param distribution: dictionary. 
the configurations of distribution (for EOT)\n :return:\n \"\"\"\n eps = attack_args.get('eps', 0.3)\n\n targeted = attack_args.get('targeted', False)\n num_random_init = attack_args.get('num_random_init', 0)\n minimal = attack_args.get('minimal', False)\n\n if attack_args.get(\"eot\"):\n distribution = attack_args.get('distribution', None)\n else:\n distribution = None\n\n attacker = FastGradientMethod(model, eps=eps, eps_step=eps, targeted=targeted,\n num_random_init=num_random_init, minimal=minimal,\n distribution=distribution)\n\n return attacker.generate(data, labels)\n\n\ndef _cw(model, data, labels, attack_args):\n \"\"\"\n Carlini & Wanger\n Towards Evaluating the Robustness of Neural Networks\n by Nicholas Carlini, David Wagner\n ``https://arxiv.org/abs/1608.04644``\n :param model:\n :param data:\n :param labels:\n :param attack_args:\n :return:\n \"\"\"\n norm = attack_args.get('norm').lower()\n\n lr = attack_args.get('lr')\n max_iter = attack_args.get('max_iter', 10)\n\n # use default values for the following arguments\n confidence = attack_args.get('confidence', 0.0)\n targeted = attack_args.get('targeted', False)\n init_const = attack_args.get('init_const', 0.01)\n max_halving = attack_args.get('max_halving', 5)\n max_doubling = attack_args.get('max_doubling', 5)\n\n if norm == 'l2':\n print('>>> Generating CW_l2 examples.')\n binary_search_steps = attack_args.get('binary_search_steps', 10)\n\n attacker = CarliniL2Method(classifier=model, confidence=confidence, targeted=targeted, learning_rate=lr,\n binary_search_steps=binary_search_steps, max_iter=max_iter,\n initial_const=init_const, max_halving=max_halving,\n max_doubling=max_doubling)\n\n elif norm == 'linf':\n print('>>> Generating CW_linf examples.')\n eps = attack_args.get('eps', 0.3)\n attacker = CarliniLInfMethod(classifier=model, confidence=confidence, targeted=targeted, learning_rate=lr,\n max_iter=max_iter, max_halving=max_halving, max_doubling=max_doubling, eps=eps)\n else:\n raise ValueError('Support `l2` and `linf` norms. But found {}'.format(norm))\n\n return attacker.generate(data, labels)\n\n\ndef _pgd(model, data, labels, attack_args):\n \"\"\"\n Projected Gradient Descent\n Towards deep learning models resistant to adversarial attacks\n by Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu.\n ``https://arxiv.org/abs/1706.06083``\n :param model:\n :param data:\n :param labels:\n :param attack_args:\n :return:\n \"\"\"\n eps = attack_args.get('eps', 0.3)\n eps_step = attack_args.get('eps_step', eps/10.)\n max_iter = attack_args.get('max_iter', 10)\n\n norm = _get_norm_value(attack_args.get('norm', 'linf'))\n targeted = attack_args.get('targeted', False)\n num_random_init = attack_args.get('num_random_init', 0)\n random_eps = attack_args.get('random_eps', False)\n\n if attack_args.get(\"eot\"):\n distribution = attack_args.get('distribution', None)\n else:\n distribution = None\n\n attacker = ProjectedGradientDescent(classifier=model, norm=norm, eps=eps, eps_step=eps_step,\n max_iter=max_iter, targeted=targeted,\n num_random_init=num_random_init, random_eps=random_eps,\n distribution=distribution)\n return attacker.generate(data, labels)\n\n\ndef _bim(model, data, labels, attack_args):\n \"\"\"\n Basic Iteractive Method\n ADVERSARIAL EXAMPLES IN THE PHYSICAL WORLD\n Alexey Kurakin, Ian J. 
Goodfellow, Samy Bengio\n ``https://arxiv.org/pdf/1607.02533.pdf``\n :param model:\n :param data:\n :param labels:\n :param attack_args:\n :return:\n \"\"\"\n eps = attack_args.get('eps', 0.3)\n eps_step = attack_args.get('eps_step', eps/10.)\n max_iter = attack_args.get('max_iter', 100)\n\n targeted = attack_args.get('targeted', False)\n attacker = BasicIterativeMethod(classifier=model, eps=eps, eps_step=eps_step,\n max_iter=max_iter, targeted=targeted)\n return attacker.generate(data, labels)\n\n\ndef _jsma(model, data, labels, attack_args):\n theta = attack_args.get('theta', 0.15)\n gamma = attack_args.get('gamma', 0.5)\n\n batch_size = attack_args.get('batch_size', 1)\n\n attacker = SaliencyMapMethod(classifier=model, theta=theta, gamma=gamma, batch_size=batch_size)\n return attacker.generate(data, labels)\n\n\ndef _df(model, data, labels, attack_args):\n max_iter = attack_args.get('max_iter', 100)\n eps = attack_args.get('eps', 0.01)\n nb_grads = attack_args.get('nb_grads', 10)\n\n attacker = DeepFool(classifier=model, max_iter=max_iter, epsilon=eps, nb_grads=nb_grads)\n return attacker.generate(data, labels)\n\n\ndef _mim(model, data, labels, attack_args):\n raise NotImplementedError\n\n\ndef _op(model, data, labels, attack_args):\n raise NotImplementedError\n\n\ndef _spatial(model, data, labels, attack_args):\n max_translation = attack_args.get('max_translation', 0.2)\n num_translations = attack_args.get('num_translations', 1)\n max_rotation = attack_args.get('max_rotation', 10)\n num_rotations = attack_args.get('num_rotations', 1)\n\n attacker = SpatialTransformation(classifier=model,\n max_translation=max_translation, num_translations=num_translations,\n max_rotation=max_rotation, num_rotations=num_rotations)\n return attacker.generate(data, labels)\n\n\ndef _hop_skip_jump(model, data, labels, attack_args):\n norm = _get_norm_value(attack_args.get('norm', 'l2'))\n max_iter = attack_args.get('max_iter', 50)\n max_eval = attack_args.get('max_eval', 10000)\n init_eval = attack_args.get('init_eval', 100)\n init_size = attack_args.get('init_size', 100)\n\n targeted = attack_args.get('targeted', False)\n attacker = HopSkipJump(classifier=model, targeted=targeted, norm=norm,\n max_iter=max_iter, max_eval=max_eval,\n init_eval=init_eval, init_size=init_size)\n\n return attacker.generate(data, labels)\n\n\ndef _zoo(model, data, labels, attack_args):\n lr = attack_args.get('learning_rate', 0.01)\n max_iter = attack_args.get('max_iter', 10)\n binary_search_steps = attack_args.get('binary_search_steps', 1)\n\n confidence = attack_args.get('confidence', 0.0)\n targeted = attack_args.get('targeted', False)\n init_const = attack_args.get('init_const', 1e-3)\n abort_early = attack_args.get('abort_early', True)\n use_resize = attack_args.get('use_resize', True)\n use_importance = attack_args.get('use_importance', True)\n nb_parallel = attack_args.get('nb_parallel', 128)\n variable_h = attack_args.get('variable_h', 1e-4)\n\n attacker = ZooAttack(classifier=model, confidence=confidence, targeted=targeted,\n learning_rate=lr, max_iter=max_iter, binary_search_steps=binary_search_steps,\n initial_const=init_const, abort_early=abort_early, use_resize=use_resize,\n use_importance=use_importance, nb_parallel=nb_parallel, variable_h=variable_h)\n\n return attacker.generate(data, labels)\n\n\ndef _get_norm_value(norm):\n \"\"\"\n Convert a string norm to a numeric value.\n :param norm:\n :return:\n \"\"\"\n norm = norm.lower()\n if norm == 'linf':\n value = np.inf\n elif norm == 'l2':\n value = 2\n else:\n 
raise ValueError('Support `l2` and `linf` norms. But found {}.'.format(norm))\n\n return value\n" ]
[ [ "torch.cuda.is_available" ] ]
visr/neuralhydrology
[ "77f6c9214945c8e857e3b9545afe8470da751cab" ]
[ "neuralhydrology/datasetzoo/camelsus.py" ]
[ "from pathlib import Path\nfrom typing import Dict, List, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport xarray\n\nfrom neuralhydrology.datasetzoo.basedataset import BaseDataset\nfrom neuralhydrology.utils.config import Config\n\n\nclass CamelsUS(BaseDataset):\n \"\"\"Data set class for the CAMELS US data set by [#]_ and [#]_.\n \n Parameters\n ----------\n cfg : Config\n The run configuration.\n is_train : bool \n Defines if the dataset is used for training or evaluating. If True (training), means/stds for each feature\n are computed and stored to the run directory. If one-hot encoding is used, the mapping for the one-hot encoding \n is created and also stored to disk. If False, a `scaler` input is expected and similarly the `id_to_int` input\n if one-hot encoding is used. \n period : {'train', 'validation', 'test'}\n Defines the period for which the data will be loaded\n basin : str, optional\n If passed, the data for only this basin will be loaded. Otherwise the basin(s) are read from the appropriate\n basin file, corresponding to the `period`.\n additional_features : List[Dict[str, pd.DataFrame]], optional\n List of dictionaries, mapping from a basin id to a pandas DataFrame. This DataFrame will be added to the data\n loaded from the dataset and all columns are available as 'dynamic_inputs', 'static_inputs' and \n 'target_variables'\n id_to_int : Dict[str, int], optional\n If the config argument 'use_basin_id_encoding' is True in the config and period is either 'validation' or \n 'test', this input is required. It is a dictionary, mapping from basin id to an integer (the one-hot encoding).\n scaler : Dict[str, Union[pd.Series, xarray.DataArray]], optional\n If period is either 'validation' or 'test', this input is required. It contains the means and standard \n deviations for each feature and is stored to the run directory during training (train_data/train_data_scaler.p)\n \n References\n ----------\n .. [#] A. J. Newman, M. P. Clark, K. Sampson, A. Wood, L. E. Hay, A. Bock, R. J. Viger, D. Blodgett, \n L. Brekke, J. R. Arnold, T. Hopson, and Q. Duan: Development of a large-sample watershed-scale \n hydrometeorological dataset for the contiguous USA: dataset characteristics and assessment of regional \n variability in hydrologic model performance. Hydrol. Earth Syst. Sci., 19, 209-223, \n doi:10.5194/hess-19-209-2015, 2015\n .. [#] Addor, N., Newman, A. J., Mizukami, N. and Clark, M. P.: The CAMELS data set: catchment attributes and \n meteorology for large-sample studies, Hydrol. Earth Syst. 
Sci., 21, 5293-5313, doi:10.5194/hess-21-5293-2017,\n 2017.\n \"\"\"\n\n def __init__(self,\n cfg: Config,\n is_train: bool,\n period: str,\n basin: str = None,\n additional_features: List[Dict[str, pd.DataFrame]] = [],\n id_to_int: Dict[str, int] = {},\n scaler: Dict[str, Union[pd.Series, xarray.DataArray]] = {}):\n super(CamelsUS, self).__init__(cfg=cfg,\n is_train=is_train,\n period=period,\n basin=basin,\n additional_features=additional_features,\n id_to_int=id_to_int,\n scaler=scaler)\n\n def _load_basin_data(self, basin: str) -> pd.DataFrame:\n \"\"\"Load input and output data from text files.\"\"\"\n # get forcings\n dfs = []\n for forcing in self.cfg.forcings:\n df, area = load_camels_us_forcings(self.cfg.data_dir, basin, forcing)\n\n # rename columns\n if len(self.cfg.forcings) > 1:\n df = df.rename(columns={col: f\"{col}_{forcing}\" for col in df.columns})\n dfs.append(df)\n df = pd.concat(dfs, axis=1)\n\n # add discharge\n df['QObs(mm/d)'] = load_camels_us_discharge(self.cfg.data_dir, basin, area)\n\n # replace invalid discharge values by NaNs\n qobs_cols = [col for col in df.columns if \"qobs\" in col.lower()]\n for col in qobs_cols:\n df.loc[df[col] < 0, col] = np.nan\n\n return df\n\n def _load_attributes(self) -> pd.DataFrame:\n if self.cfg.camels_attributes:\n\n df = load_camels_us_attributes(self.cfg.data_dir, basins=self.basins)\n\n # remove all attributes not defined in the config\n drop_cols = [c for c in df.columns if c not in self.cfg.camels_attributes]\n df = df.drop(drop_cols, axis=1)\n\n return df\n\n\ndef load_camels_us_attributes(data_dir: Path, basins: List[str] = []) -> pd.DataFrame:\n \"\"\"Load CAMELS US attributes from the dataset provided by [#]_\n\n Parameters\n ----------\n data_dir : Path\n Path to the CAMELS US directory. This folder must contain a 'camels_attributes_v2.0' folder (the original \n data set) containing the corresponding txt files for each attribute group.\n basins : List[str], optional\n If passed, return only attributes for the basins specified in this list. Otherwise, the attributes of all basins\n are returned.\n\n Returns\n -------\n pandas.DataFrame\n Basin-indexed DataFrame, containing the attributes as columns.\n\n References\n ----------\n .. [#] Addor, N., Newman, A. J., Mizukami, N. and Clark, M. P.: The CAMELS data set: catchment attributes and \n meteorology for large-sample studies, Hydrol. Earth Syst. Sci., 21, 5293-5313, doi:10.5194/hess-21-5293-2017,\n 2017.\n \"\"\"\n attributes_path = Path(data_dir) / 'camels_attributes_v2.0'\n\n if not attributes_path.exists():\n raise RuntimeError(f\"Attribute folder not found at {attributes_path}\")\n\n txt_files = attributes_path.glob('camels_*.txt')\n\n # Read-in attributes into one big dataframe\n dfs = []\n for txt_file in txt_files:\n df_temp = pd.read_csv(txt_file, sep=';', header=0, dtype={'gauge_id': str})\n df_temp = df_temp.set_index('gauge_id')\n\n dfs.append(df_temp)\n\n df = pd.concat(dfs, axis=1)\n # convert huc column to double digit strings\n df['huc'] = df['huc_02'].apply(lambda x: str(x).zfill(2))\n df = df.drop('huc_02', axis=1)\n\n if basins:\n # drop rows of basins not contained in the passed list\n drop_basins = [b for b in df.index if b not in basins]\n df = df.drop(drop_basins, axis=0)\n\n return df\n\n\ndef load_camels_us_forcings(data_dir: Path, basin: str, forcings: str) -> Tuple[pd.DataFrame, int]:\n \"\"\"Load the forcing data for a basin of the CAMELS US data set.\n\n Parameters\n ----------\n data_dir : Path\n Path to the CAMELS US directory. 
This folder must contain a 'basin_mean_forcing' folder containing one \n subdirectory for each forcing. The forcing directories have to contain 18 subdirectories (for the 18 HUCS) as in\n the original CAMELS data set. In each HUC folder are the forcing files (.txt), starting with the 8-digit basin \n id.\n basin : str\n 8-digit USGS identifier of the basin.\n forcings : str\n Can be e.g. 'daymet' or 'nldas', etc. Must match the folder names in the 'basin_mean_forcing' directory. \n\n Returns\n -------\n pd.DataFrame\n Time-indexed DataFrame, containing the forcing data.\n int\n Catchment area (m2), specified in the header of the forcing file.\n \"\"\"\n forcing_path = data_dir / 'basin_mean_forcing' / forcings\n if not forcing_path.is_dir():\n raise OSError(f\"{forcing_path} does not exist\")\n\n files = list(forcing_path.glob('**/*_forcing_leap.txt'))\n file_path = [f for f in files if f.name[:8] == basin]\n if file_path:\n file_path = file_path[0]\n else:\n raise FileNotFoundError(f'No file for Basin {basin} at {file_path}')\n\n df = pd.read_csv(file_path, sep='\\s+', header=3)\n df[\"date\"] = pd.to_datetime(df.Year.map(str) + \"/\" + df.Mnth.map(str) + \"/\" + df.Day.map(str), format=\"%Y/%m/%d\")\n df = df.set_index(\"date\")\n\n # load area from header\n with open(file_path, 'r') as fp:\n content = fp.readlines()\n area = int(content[2])\n\n return df, area\n\n\ndef load_camels_us_discharge(data_dir: Path, basin: str, area: int) -> pd.Series:\n \"\"\"Load the discharge data for a basin of the CAMELS US data set.\n\n Parameters\n ----------\n data_dir : Path\n Path to the CAMELS US directory. This folder must contain a 'usgs_streamflow' folder with 18\n subdirectories (for the 18 HUCS) as in the original CAMELS data set. In each HUC folder are the discharge files \n (.txt), starting with the 8-digit basin id.\n basin : str\n 8-digit USGS identifier of the basin.\n area : int\n Catchment area (m2), used to normalize the discharge.\n\n Returns\n -------\n pd.Series\n Time-indexed pandas.Series of the discharge values (mm/day)\n \"\"\"\n\n discharge_path = data_dir / 'usgs_streamflow'\n files = list(discharge_path.glob('**/*_streamflow_qc.txt'))\n file_path = [f for f in files if f.name[:8] == basin]\n if file_path:\n file_path = file_path[0]\n else:\n raise FileNotFoundError(f'No file for Basin {basin} at {file_path}')\n\n col_names = ['basin', 'Year', 'Mnth', 'Day', 'QObs', 'flag']\n df = pd.read_csv(file_path, sep='\\s+', header=None, names=col_names)\n df[\"date\"] = pd.to_datetime(df.Year.map(str) + \"/\" + df.Mnth.map(str) + \"/\" + df.Day.map(str), format=\"%Y/%m/%d\")\n df = df.set_index(\"date\")\n\n # normalize discharge from cubic feet per second to mm per day\n df.QObs = 28316846.592 * df.QObs * 86400 / (area * 10**6)\n\n return df.QObs\n" ]
[ [ "pandas.read_csv", "pandas.concat" ] ]
biomac-lab/covid19_forecast
[ "6613064f8a6d8023ecbdaddbc2e7525b6ad0796f" ]
[ "functions/plot_utils.py" ]
[ "from matplotlib.dates import date2num, num2date\nfrom matplotlib.colors import ListedColormap\nfrom matplotlib import dates as mdates\nfrom matplotlib.patches import Patch\nfrom matplotlib import pyplot as plt\nfrom matplotlib import ticker\n\nimport os\n\ndef plot_fit(df_fit, df_data, y_label='Deaths', y_lim_up = 200, color='blue', col_data='smoothed_death', col_up='high_95', col_down='low_95', col_point='median', ax=None, sharey=True, forecast=True, path_to_save=None):\n \"\"\" df_fit with columns:\n 'mean', 'median', 'std', 'low_95', 'high_95', 'low_80', 'high_80', 'low_50', 'high_50', 'type'\n type in ['estimate', 'forecast']\n\n df_data with columns:\n 'confirmed', 'death', 'smoothed_confirmed', 'smoothed_death', 'type'\n type in ['fitted', 'preliminary']\n \"\"\"\n\n df_estimate = df_fit.copy(); df_estimate = df_estimate[ df_estimate.type=='estimate' ]\n df_forecast = df_fit.copy(); df_forecast = df_forecast[ df_forecast.type=='forecast' ]\n\n df_data_fitted = df_data.copy(); df_data_fitted = df_data_fitted[df_data_fitted.type=='fitted']\n df_data_preliminary = df_data.copy(); df_data_preliminary = df_data_preliminary[df_data_preliminary.type=='preliminary']\n\n fig, axes = plt.subplots(1, 2, figsize=(20, 7), sharey=sharey)\n axes[0].fill_between(df_estimate.index.values, df_estimate[col_down], df_estimate[col_up], color='gray', alpha=0.4, label='95 CI - Nowcast')\n axes[0].plot(df_estimate.index.values, df_estimate[col_point], color='black', alpha=0.4, label='Median - Nowcast')\n\n axes[0].scatter(df_data_fitted.index.values, df_data_fitted[col_data], facecolor='black', alpha=0.6, edgecolor='black', s=30)\n (y1_l, y2_l) = axes[0].get_ylim()\n\n axes[0].fill_between(df_forecast.index.values, df_forecast[col_down], df_forecast[col_up], color=color, alpha=0.6, label='95% CI')\n axes[0].fill_between(df_forecast.index.values, df_forecast['low_80'], df_forecast['high_80'], color=color, alpha=0.4, label='80% CI')\n axes[0].fill_between(df_forecast.index.values, df_forecast['low_50'], df_forecast['high_50'], color=color, alpha=0.4, label='50% CI')\n\n axes[0].plot(df_forecast.index.values, df_forecast[col_point], color=color, alpha=0.4, label='Forecast - Median')\n axes[0].scatter(df_forecast.index.values, df_forecast[col_point], edgecolor='k', facecolor='white', s=10)\n axes[0].tick_params(axis='both', labelsize=15)\n\n axes[1].fill_between(df_estimate.iloc[-10:].index.values, df_estimate.iloc[-10:][col_up], df_estimate.iloc[-10:][col_down], color='gray', alpha=0.4)\n axes[1].plot(df_estimate.iloc[-10:].index.values, df_estimate.iloc[-10:][col_point], color='black', alpha=0.4)\n axes[1].fill_between(df_forecast.index.values, df_forecast[col_down], df_forecast[col_up], color=color, alpha=0.2, label='90% CI')\n axes[1].fill_between(df_forecast.index.values, df_forecast['low_80'], df_forecast['high_80'], color=color, alpha=0.4, label='80% CI')\n axes[1].fill_between(df_forecast.index.values, df_forecast['low_50'], df_forecast['high_50'], color=color, alpha=0.6, label='50% CI')\n\n axes[1].plot(df_forecast.index.values, df_forecast[col_point], color='black', alpha=0.4)\n axes[1].scatter(df_estimate.iloc[-10:].index.values, df_data_fitted.iloc[-10:][col_data], facecolor='black', alpha=0.6, edgecolor='black', s=50)\n axes[1].scatter(df_data_preliminary.index.values, df_data_preliminary[col_data], edgecolor='k', facecolor='red', s=50, label='Preliminary data')\n\n\n for ax in axes:\n ax.xaxis.set_major_locator(mdates.MonthLocator())\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n 
ax.xaxis.set_minor_locator(mdates.DayLocator())\n ax.xaxis.set_major_locator(mdates.WeekdayLocator())\n ax.xaxis.set_major_locator(mdates.MonthLocator())\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.grid(which='major', axis='y', c='k', alpha=.1, zorder=-2)\n ax.yaxis.set_major_formatter(ticker.StrMethodFormatter(\"{x:.0f}\"))\n ax.set_ylabel(y_label, size=15)\n ax.set_ylim( (y1_l, y_lim_up) )\n ax.legend(loc='upper left')\n\n axes[1].xaxis.set_major_locator(mdates.MonthLocator())\n axes[1].xaxis.set_major_formatter(mdates.DateFormatter('%d-%b'))\n axes[1].xaxis.set_minor_locator(mdates.DayLocator())\n axes[1].xaxis.set_minor_formatter(mdates.DateFormatter('%d'))\n\n axes[1].xaxis.set_major_locator(mdates.WeekdayLocator())\n axes[1].xaxis.set_major_locator(mdates.MonthLocator())\n axes[1].tick_params(which='both', axis='both', labelrotation=90, labelsize=15)\n\n axes[1].grid(which='both', axis='x', c='k', alpha=.1, zorder=-2)\n axes[0].grid(which='major', axis='x', c='k', alpha=.1, zorder=-2)\n plt.tight_layout()\n\n if path_to_save:\n fig.savefig(path_to_save, dpi=300, bbox_inches='tight', transparent=False)\n plt.close()" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.subplots", "matplotlib.dates.WeekdayLocator", "matplotlib.dates.MonthLocator", "matplotlib.dates.DayLocator", "matplotlib.pyplot.close", "matplotlib.ticker.StrMethodFormatter" ] ]
rekhabiswal/sage
[ "e8633b09919542a65e7e990c8369fee30c7edefd" ]
[ "src/sage/plot/arrow.py" ]
[ "\"\"\"\nArrows\n\"\"\"\n#*****************************************************************************\n# Copyright (C) 2006 Alex Clemesha <[email protected]>,\n# William Stein <[email protected]>,\n# 2008 Mike Hansen <[email protected]>,\n# 2009 Emily Kirkman\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# http://www.gnu.org/licenses/\n#*****************************************************************************\nfrom sage.plot.primitive import GraphicPrimitive\nfrom sage.misc.decorators import options, rename_keyword\nfrom sage.plot.colors import to_mpl_color\n\n\nclass CurveArrow(GraphicPrimitive):\n def __init__(self, path, options):\n \"\"\"\n Returns an arrow graphics primitive along the provided path (bezier curve).\n\n EXAMPLES::\n\n sage: from sage.plot.arrow import CurveArrow\n sage: b = CurveArrow(path=[[(0,0),(.5,.5),(1,0)],[(.5,1),(0,0)]],\n ....: options={})\n sage: b\n CurveArrow from (0, 0) to (0, 0)\n \"\"\"\n import numpy as np\n self.path = path\n codes = [1] + (len(self.path[0])-1)*[len(self.path[0])]\n vertices = self.path[0]\n for curve in self.path[1:]:\n vertices += curve\n codes += (len(curve))*[len(curve)+1]\n self.codes = codes\n self.vertices = np.array(vertices, np.float)\n GraphicPrimitive.__init__(self, options)\n\n def get_minmax_data(self):\n \"\"\"\n Returns a dictionary with the bounding box data.\n\n EXAMPLES::\n\n sage: from sage.plot.arrow import CurveArrow\n sage: b = CurveArrow(path=[[(0,0),(.5,.5),(1,0)],[(.5,1),(0,0)]],\n ....: options={})\n sage: d = b.get_minmax_data()\n sage: d['xmin']\n 0.0\n sage: d['xmax']\n 1.0\n \"\"\"\n return {'xmin': self.vertices[:,0].min(),\n 'xmax': self.vertices[:,0].max(),\n 'ymin': self.vertices[:,1].min(),\n 'ymax': self.vertices[:,1].max()}\n\n def _allowed_options(self):\n \"\"\"\n Return the dictionary of allowed options for the curve arrow graphics\n primitive.\n\n EXAMPLES::\n\n sage: from sage.plot.arrow import CurveArrow\n sage: list(sorted(CurveArrow(path=[[(0,0),(2,3)]],options={})._allowed_options().items()))\n [('arrowsize', 'The size of the arrowhead'),\n ('arrowstyle', 'todo'),\n ('head', '2-d only: Which end of the path to draw the head (one of 0 (start), 1 (end) or 2 (both)'),\n ('hue', 'The color given as a hue.'),\n ('legend_color', 'The color of the legend text.'),\n ('legend_label', 'The label for this item in the legend.'),\n ('linestyle', \"2d only: The style of the line, which is one of\n 'dashed', 'dotted', 'solid', 'dashdot', or '--', ':', '-', '-.',\n respectively.\"),\n ('rgbcolor', 'The color as an RGB tuple.'),\n ('thickness', 'The thickness of the arrow.'),\n ('width', 'The width of the shaft of the arrow, in points.'),\n ('zorder', '2-d only: The layer level in which to draw')]\n \"\"\"\n return {'width': 'The width of the shaft of the arrow, in points.',\n 'rgbcolor': 'The color as an RGB tuple.',\n 'hue': 'The color given as a hue.',\n 'legend_label': 'The label for this item in the legend.',\n 'legend_color': 'The color of the legend text.',\n 'arrowstyle': 'todo',\n 'arrowsize': 'The size of the arrowhead',\n 'thickness': 'The thickness of the arrow.',\n 'zorder': '2-d only: The layer level in which to draw',\n 'head': '2-d only: Which end of the path to 
draw the head (one of 0 (start), 1 (end) or 2 (both)',\n 'linestyle': \"2d only: The style of the line, which is one of \"\n \"'dashed', 'dotted', 'solid', 'dashdot', or '--', ':', '-', '-.', \"\n \"respectively.\"}\n\n def _repr_(self):\n \"\"\"\n Text representation of an arrow graphics primitive.\n\n EXAMPLES::\n\n sage: from sage.plot.arrow import CurveArrow\n sage: CurveArrow(path=[[(0,0),(1,4),(2,3)]],options={})._repr_()\n 'CurveArrow from (0, 0) to (2, 3)'\n \"\"\"\n return \"CurveArrow from %s to %s\" % (self.path[0][0], self.path[-1][-1])\n\n def _render_on_subplot(self, subplot):\n \"\"\"\n Render this arrow in a subplot. This is the key function that\n defines how this arrow graphics primitive is rendered in\n matplotlib's library.\n\n EXAMPLES::\n\n This function implicitly ends up rendering this arrow on a matplotlib\n subplot:\n sage: arrow(path=[[(0,1), (2,-1), (4,5)]])\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n from sage.plot.misc import get_matplotlib_linestyle\n\n options = self.options()\n width = float(options['width'])\n head = options.pop('head')\n if head == 0: style = '<|-'\n elif head == 1: style = '-|>'\n elif head == 2: style = '<|-|>'\n else: raise KeyError('head parameter must be one of 0 (start), 1 (end) or 2 (both).')\n arrowsize = float(options.get('arrowsize', 5))\n head_width = arrowsize\n head_length = arrowsize * 2.0\n color = to_mpl_color(options['rgbcolor'])\n from matplotlib.patches import FancyArrowPatch\n from matplotlib.path import Path\n bpath = Path(self.vertices, self.codes)\n p = FancyArrowPatch(path=bpath,\n lw=width, arrowstyle='%s,head_width=%s,head_length=%s' % (style, head_width, head_length),\n fc=color, ec=color, \n linestyle=get_matplotlib_linestyle(options['linestyle'], return_type='long'))\n p.set_zorder(options['zorder'])\n p.set_label(options['legend_label'])\n subplot.add_patch(p)\n return p\n\n\nclass Arrow(GraphicPrimitive):\n \"\"\"\n Primitive class that initializes the (line) arrow graphics type\n\n EXAMPLES:\n\n We create an arrow graphics object, then take the 0th entry\n in it to get the actual Arrow graphics primitive::\n\n sage: P = arrow((0,1), (2,3))[0]\n sage: type(P)\n <class 'sage.plot.arrow.Arrow'>\n sage: P\n Arrow from (0.0,1.0) to (2.0,3.0)\n \"\"\"\n def __init__(self, xtail, ytail, xhead, yhead, options):\n \"\"\"\n Create an arrow graphics primitive.\n\n EXAMPLES::\n\n sage: from sage.plot.arrow import Arrow\n sage: Arrow(0,0,2,3,{})\n Arrow from (0.0,0.0) to (2.0,3.0)\n \"\"\"\n self.xtail = float(xtail)\n self.xhead = float(xhead)\n self.ytail = float(ytail)\n self.yhead = float(yhead)\n GraphicPrimitive.__init__(self, options)\n\n def get_minmax_data(self):\n \"\"\"\n Returns a bounding box for this arrow.\n\n EXAMPLES::\n\n sage: d = arrow((1,1), (5,5)).get_minmax_data()\n sage: d['xmin']\n 1.0\n sage: d['xmax']\n 5.0\n \"\"\"\n return {'xmin': min(self.xtail, self.xhead),\n 'xmax': max(self.xtail, self.xhead),\n 'ymin': min(self.ytail, self.yhead),\n 'ymax': max(self.ytail, self.yhead)}\n\n def _allowed_options(self):\n \"\"\"\n Return the dictionary of allowed options for the line arrow graphics\n primitive.\n\n EXAMPLES::\n\n sage: from sage.plot.arrow import Arrow\n sage: list(sorted(Arrow(0,0,2,3,{})._allowed_options().items()))\n [('arrowshorten', 'The length in points to shorten the arrow.'),\n ('arrowsize', 'The size of the arrowhead'),\n ('head',\n '2-d only: Which end of the path to draw the head (one of 0 (start), 1 (end) or 2 (both)'),\n ('hue', 'The color given as a 
hue.'),\n ('legend_color', 'The color of the legend text.'),\n ('legend_label', 'The label for this item in the legend.'),\n ('linestyle',\n \"2d only: The style of the line, which is one of 'dashed',\n 'dotted', 'solid', 'dashdot', or '--', ':', '-', '-.',\n respectively.\"),\n ('rgbcolor', 'The color as an RGB tuple.'),\n ('thickness', 'The thickness of the arrow.'),\n ('width', 'The width of the shaft of the arrow, in points.'),\n ('zorder', '2-d only: The layer level in which to draw')]\n \"\"\"\n return {'width': 'The width of the shaft of the arrow, in points.',\n 'rgbcolor': 'The color as an RGB tuple.',\n 'hue': 'The color given as a hue.',\n 'arrowshorten': 'The length in points to shorten the arrow.',\n 'arrowsize': 'The size of the arrowhead',\n 'thickness': 'The thickness of the arrow.',\n 'legend_label': 'The label for this item in the legend.',\n 'legend_color': 'The color of the legend text.',\n 'zorder': '2-d only: The layer level in which to draw',\n 'head': '2-d only: Which end of the path to draw the head (one of 0 (start), 1 (end) or 2 (both)',\n 'linestyle': \"2d only: The style of the line, which is one of \"\n \"'dashed', 'dotted', 'solid', 'dashdot', or '--', ':', '-', '-.', \"\n \"respectively.\"}\n\n def _plot3d_options(self, options=None):\n \"\"\"\n Translate 2D plot options into 3D plot options.\n\n EXAMPLES::\n\n sage: P = arrow((0,1), (2,3), width=5)\n sage: p=P[0]; p\n Arrow from (0.0,1.0) to (2.0,3.0)\n sage: q=p.plot3d()\n sage: q.thickness\n 5\n \"\"\"\n if options is None:\n options = self.options()\n options = dict(self.options())\n options_3d = {}\n if 'width' in options:\n options_3d['thickness'] = options['width']\n del options['width']\n # ignore zorder and head in 3d plotting\n if 'zorder' in options:\n del options['zorder']\n if 'head' in options:\n del options['head']\n if 'linestyle' in options:\n del options['linestyle']\n options_3d.update(GraphicPrimitive._plot3d_options(self, options))\n return options_3d\n\n def plot3d(self, ztail=0, zhead=0, **kwds):\n \"\"\"\n Takes 2D plot and places it in 3D.\n\n EXAMPLES::\n\n sage: A = arrow((0,0),(1,1))[0].plot3d()\n sage: A.jmol_repr(A.testing_render_params())[0]\n 'draw line_1 diameter 2 arrow {0.0 0.0 0.0} {1.0 1.0 0.0} '\n\n Note that we had to index the arrow to get the Arrow graphics\n primitive. We can also change the height via the :meth:`Graphics.plot3d`\n method, but only as a whole::\n\n sage: A = arrow((0,0),(1,1)).plot3d(3)\n sage: A.jmol_repr(A.testing_render_params())[0][0]\n 'draw line_1 diameter 2 arrow {0.0 0.0 3.0} {1.0 1.0 3.0} '\n\n Optional arguments place both the head and tail outside the\n `xy`-plane, but at different heights. This must be done on\n the graphics primitive obtained by indexing::\n\n sage: A=arrow((0,0),(1,1))[0].plot3d(3,4)\n sage: A.jmol_repr(A.testing_render_params())[0]\n 'draw line_1 diameter 2 arrow {0.0 0.0 3.0} {1.0 1.0 4.0} '\n \"\"\"\n from sage.plot.plot3d.shapes2 import line3d\n options = self._plot3d_options()\n options.update(kwds)\n return line3d([(self.xtail, self.ytail, ztail), (self.xhead, self.yhead, zhead)], arrow_head=True, **options)\n\n def _repr_(self):\n \"\"\"\n Text representation of an arrow graphics primitive.\n\n EXAMPLES::\n\n sage: from sage.plot.arrow import Arrow\n sage: Arrow(0,0,2,3,{})._repr_()\n 'Arrow from (0.0,0.0) to (2.0,3.0)'\n \"\"\"\n return \"Arrow from (%s,%s) to (%s,%s)\" % (self.xtail, self.ytail, self.xhead, self.yhead)\n\n def _render_on_subplot(self, subplot):\n r\"\"\"\n Render this arrow in a subplot. 
This is the key function that\n defines how this arrow graphics primitive is rendered in\n matplotlib's library.\n\n EXAMPLES:\n\n This function implicitly ends up rendering this arrow on\n a matplotlib subplot::\n\n sage: arrow((0,1), (2,-1))\n Graphics object consisting of 1 graphics primitive\n\n TESTS:\n\n The length of the ends (shrinkA and shrinkB) should not depend\n on the width of the arrow, because Matplotlib already takes\n this into account. See :trac:`12836`::\n\n sage: fig = Graphics().matplotlib()\n sage: sp = fig.add_subplot(1,1,1, label='axis1')\n sage: a = arrow((0,0), (1,1))\n sage: b = arrow((0,0), (1,1), width=20)\n sage: p1 = a[0]._render_on_subplot(sp)\n sage: p2 = b[0]._render_on_subplot(sp)\n sage: p1.shrinkA == p2.shrinkA\n True\n sage: p1.shrinkB == p2.shrinkB\n True\n\n Dashed arrows should have solid arrowheads,\n :trac:`12852`. This test saves the plot of a dashed arrow to\n an EPS file. Within the EPS file, ``stroke`` will be called\n twice: once to draw the line, and again to draw the\n arrowhead. We check that both calls do not occur while the\n dashed line style is enabled::\n\n sage: a = arrow((0,0), (1,1), linestyle='dashed')\n sage: filename = tmp_filename(ext='.eps')\n sage: a.save(filename=filename)\n sage: with open(filename, 'r') as f:\n ....: contents = f.read().replace('\\n', ' ')\n sage: two_stroke_pattern = r'setdash.*stroke.*stroke.*setdash.*setdash'\n sage: import re\n sage: two_stroke_re = re.compile(two_stroke_pattern)\n sage: two_stroke_re.search(contents) is None\n True\n \"\"\"\n from sage.plot.misc import get_matplotlib_linestyle\n\n options = self.options()\n head = options.pop('head')\n if head == 0: style = '<|-'\n elif head == 1: style = '-|>'\n elif head == 2: style = '<|-|>'\n else: raise KeyError('head parameter must be one of 0 (start), 1 (end) or 2 (both).')\n width = float(options['width'])\n arrowshorten_end = float(options.get('arrowshorten', 0)) / 2.0\n arrowsize = float(options.get('arrowsize', 5))\n head_width = arrowsize\n head_length = arrowsize * 2.0\n color = to_mpl_color(options['rgbcolor'])\n from matplotlib.patches import FancyArrowPatch\n p = FancyArrowPatch((self.xtail, self.ytail), (self.xhead, self.yhead),\n lw=width,\n arrowstyle='%s,head_width=%s,head_length=%s' % (style, head_width, head_length),\n shrinkA=arrowshorten_end, shrinkB=arrowshorten_end,\n fc=color, ec=color,\n linestyle=get_matplotlib_linestyle(options['linestyle'], return_type='long'))\n p.set_zorder(options['zorder'])\n p.set_label(options['legend_label'])\n\n if options['linestyle'] != 'solid':\n # The next few lines work around a design issue in matplotlib.\n # Currently, the specified linestyle is used to draw both the path\n # and the arrowhead. If linestyle is 'dashed', this looks really\n # odd. 
This code is from Jae-Joon Lee in response to a post to the\n # matplotlib mailing list.\n # See http://sourceforge.net/mailarchive/forum.php?thread_name=CAG%3DuJ%2Bnw2dE05P9TOXTz_zp-mGP3cY801vMH7yt6vgP9_WzU8w%40mail.gmail.com&forum_name=matplotlib-users\n\n import matplotlib.patheffects as pe\n\n class CheckNthSubPath(object):\n def __init__(self, patch, n):\n \"\"\"\n creates an callable object that returns True if the\n provided path is the n-th path from the patch.\n \"\"\"\n self._patch = patch\n self._n = n\n\n def get_paths(self, renderer):\n self._patch.set_dpi_cor(renderer.points_to_pixels(1.))\n paths, fillables = self._patch.get_path_in_displaycoord()\n return paths\n\n def __call__(self, renderer, gc, tpath, affine, rgbFace):\n path = self.get_paths(renderer)[self._n]\n vert1, code1 = path.vertices, path.codes\n import numpy as np\n\n return np.array_equal(vert1, tpath.vertices) and np.array_equal(code1, tpath.codes)\n\n class ConditionalStroke(pe.RendererBase):\n\n def __init__(self, condition_func, pe_list):\n \"\"\"\n path effect that is only applied when the condition_func\n returns True.\n \"\"\"\n super(ConditionalStroke, self).__init__()\n self._pe_list = pe_list\n self._condition_func = condition_func\n\n def draw_path(self, renderer, gc, tpath, affine, rgbFace):\n\n if self._condition_func(renderer, gc, tpath, affine, rgbFace):\n for pe1 in self._pe_list:\n pe1.draw_path(renderer, gc, tpath, affine, rgbFace)\n\n pe1 = ConditionalStroke(CheckNthSubPath(p, 0), [pe.Stroke()])\n pe2 = ConditionalStroke(CheckNthSubPath(p, 1), [pe.Stroke(dashes={'dash_offset': 0, 'dash_list': None})])\n p.set_path_effects([pe1, pe2])\n\n subplot.add_patch(p)\n return p\n\n\ndef arrow(tailpoint=None, headpoint=None, **kwds):\n \"\"\"\n Returns either a 2-dimensional or 3-dimensional arrow depending\n on value of points.\n\n For information regarding additional arguments, see either arrow2d?\n or arrow3d?.\n\n EXAMPLES::\n\n sage: arrow((0,0), (1,1))\n Graphics object consisting of 1 graphics primitive\n\n .. PLOT::\n\n sphinx_plot(arrow((0,0), (1,1)))\n\n ::\n\n sage: arrow((0,0,1), (1,1,1))\n Graphics3d Object\n\n .. PLOT::\n\n sphinx_plot(arrow((0,0,1), (1,1,1)))\n\n \"\"\"\n try:\n return arrow2d(tailpoint, headpoint, **kwds)\n except ValueError:\n from sage.plot.plot3d.shapes import arrow3d\n return arrow3d(tailpoint, headpoint, **kwds)\n\n\n@rename_keyword(color='rgbcolor')\n@options(width=2, rgbcolor=(0,0,1), zorder=2, head=1, linestyle='solid', legend_label=None)\ndef arrow2d(tailpoint=None, headpoint=None, path=None, **options):\n \"\"\"\n If ``tailpoint`` and ``headpoint`` are provided, returns an arrow from\n (xtail, ytail) to (xhead, yhead). If ``tailpoint`` or ``headpoint`` is None and\n ``path`` is not None, returns an arrow along the path. 
(See further info on\n paths in :class:`bezier_path`).\n\n INPUT:\n\n - ``tailpoint`` - the starting point of the arrow\n\n - ``headpoint`` - where the arrow is pointing to\n\n - ``path`` - the list of points and control points (see bezier_path for\n detail) that the arrow will follow from source to destination\n\n - ``head`` - 0, 1 or 2, whether to draw the head at the start (0), end (1)\n or both (2) of the path (using 0 will swap headpoint and tailpoint).\n This is ignored in 3D plotting.\n\n - ``linestyle`` - (default: ``'solid'``) The style of the line, which is\n one of ``'dashed'``, ``'dotted'``, ``'solid'``, ``'dashdot'``,\n or ``'--'``, ``':'``, ``'-'``, ``'-.'``, respectively.\n\n - ``width`` - (default: 2) the width of the arrow shaft, in points\n\n - ``color`` - (default: (0,0,1)) the color of the arrow (as an RGB tuple or\n a string)\n\n - ``hue`` - the color of the arrow (as a number)\n\n - ``arrowsize`` - the size of the arrowhead\n\n - ``arrowshorten`` - the length in points to shorten the arrow (ignored if\n using path parameter)\n\n - ``legend_label`` - the label for this item in the legend\n\n - ``legend_color`` - the color for the legend label\n\n - ``zorder`` - the layer level to draw the arrow-- note that this is\n ignored in 3D plotting.\n\n EXAMPLES:\n\n A straight, blue arrow::\n\n sage: arrow2d((1,1), (3,3))\n Graphics object consisting of 1 graphics primitive\n\n .. PLOT::\n\n sphinx_plot(arrow2d((1,1), (3,3)))\n\n Make a red arrow::\n\n sage: arrow2d((-1,-1), (2,3), color=(1,0,0))\n Graphics object consisting of 1 graphics primitive\n\n .. PLOT::\n\n sphinx_plot(arrow2d((-1,-1), (2,3), color=(1,0,0)))\n\n ::\n\n sage: arrow2d((-1,-1), (2,3), color='red')\n Graphics object consisting of 1 graphics primitive\n\n .. PLOT::\n\n sphinx_plot(arrow2d((-1,-1), (2,3), color='red'))\n\n You can change the width of an arrow::\n\n sage: arrow2d((1,1), (3,3), width=5, arrowsize=15)\n Graphics object consisting of 1 graphics primitive\n\n .. PLOT::\n\n P = arrow2d((1,1), (3,3), width=5, arrowsize=15)\n sphinx_plot(P)\n\n Use a dashed line instead of a solid one for the arrow::\n\n sage: arrow2d((1,1), (3,3), linestyle='dashed')\n Graphics object consisting of 1 graphics primitive\n sage: arrow2d((1,1), (3,3), linestyle='--')\n Graphics object consisting of 1 graphics primitive\n\n .. PLOT::\n\n P = arrow2d((1,1), (3,3), linestyle='--')\n sphinx_plot(P)\n\n A pretty circle of arrows::\n\n sage: sum([arrow2d((0,0), (cos(x),sin(x)), hue=x/(2*pi)) for x in [0..2*pi,step=0.1]])\n Graphics object consisting of 63 graphics primitives\n\n .. PLOT::\n\n P = sum([arrow2d((0,0), (cos(x*0.1),sin(x*0.1)), hue=x/(20*pi)) for x in range(floor(20*pi)+1)])\n sphinx_plot(P)\n\n If we want to draw the arrow between objects, for example, the\n boundaries of two lines, we can use the ``arrowshorten`` option\n to make the arrow shorter by a certain number of points::\n\n sage: L1 = line([(0,0), (1,0)], thickness=10)\n sage: L2 = line([(0,1), (1,1)], thickness=10)\n sage: A = arrow2d((0.5,0), (0.5,1), arrowshorten=10, rgbcolor=(1,0,0))\n sage: L1 + L2 + A\n Graphics object consisting of 3 graphics primitives\n\n .. 
PLOT::\n\n L1 = line([(0,0), (1,0)],thickness=10)\n L2 = line([(0,1), (1,1)], thickness=10)\n A = arrow2d((0.5,0), (0.5,1), arrowshorten=10, rgbcolor=(1,0,0))\n sphinx_plot(L1 + L2 + A)\n\n If BOTH ``headpoint`` and ``tailpoint`` are None, then an empty plot is\n returned::\n\n sage: arrow2d(headpoint=None, tailpoint=None)\n Graphics object consisting of 0 graphics primitives\n\n We can also draw an arrow with a legend::\n\n sage: arrow((0,0), (0,2), legend_label='up', legend_color='purple')\n Graphics object consisting of 1 graphics primitive\n\n .. PLOT::\n\n P = arrow((0,0), (0,2), legend_label='up', legend_color='purple')\n sphinx_plot(P)\n\n Extra options will get passed on to :meth:`Graphics.show()`, as long as they are valid::\n\n sage: arrow2d((-2,2), (7,1), frame=True)\n Graphics object consisting of 1 graphics primitive\n\n .. PLOT::\n\n sphinx_plot(arrow2d((-2,2), (7,1), frame=True))\n\n ::\n\n sage: arrow2d((-2,2), (7,1)).show(frame=True)\n \"\"\"\n from sage.plot.all import Graphics\n g = Graphics()\n g._set_extra_kwds(Graphics._extract_kwds_for_show(options))\n\n if headpoint is not None and tailpoint is not None:\n xtail, ytail = tailpoint\n xhead, yhead = headpoint\n g.add_primitive(Arrow(xtail, ytail, xhead, yhead, options=options))\n elif path is not None:\n g.add_primitive(CurveArrow(path, options=options))\n elif tailpoint is None and headpoint is None:\n return g\n else:\n raise TypeError('Arrow requires either both headpoint and tailpoint or a path parameter.')\n if options['legend_label']:\n g.legend(True)\n g._legend_colors = [options['legend_color']]\n return g\n" ]
[ [ "numpy.array", "numpy.array_equal", "matplotlib.path.Path", "matplotlib.patheffects.Stroke" ] ]
Nirmal1313/Regression-Methods
[ "b1f885dc798ca4aae47661e0a27fe0e21e4ee4e0" ]
[ "Linear_Ridge_Regression .py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd # for working with data in Python\nimport numpy as np\nimport matplotlib.pyplot as plt # for visualization\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn import linear_model\n\n# use Pandas to read in csv files. The pd.read_csv() method creates a DataFrame from a csv file\ntrain = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\n\nprint(\"1 \\n\")\n\n# check out the size of the data\nprint(\"Train data shape:\", train.shape)\nprint(\"Test data shape:\", test.shape)\n\n\n\nprint(\"2 \\n\")\n\n# look at a few rows using the DataFrame.head() method\n# train.head()\nprint(train.head())\n\n\n# In[3]:\n\n\nplt.style.use(style='ggplot')\nplt.rcParams['figure.figsize'] = (10, 6)\n\n\n#######################################################\n# 2. Explore the data and engineer Features ###\n#######################################################\n\nprint(\"3 \\n\")\n\n\n# In[4]:\n\n\n# to get more information like count, mean, std, min, max etc\n# train.SalePrice.describe()\nprint (train.SalePrice.describe())\n\nprint(\"4 \\n\")\n\n# to plot a histogram of SalePrice\nprint (\"Skew is:\", train.SalePrice.skew())\nplt.hist(train.SalePrice, color='blue')\nplt.show()\n\nprint(\"5 \\n\")\n\n\n# In[5]:\n\n\n# use np.log() to transform train.SalePric and calculate the skewness a second time, as well as re-plot the data\ntarget = np.log(train.SalePrice)\nprint (\"\\n Skew is:\", target.skew())\nplt.hist(target, color='blue')\nplt.show()\n\n\n# In[6]:\n\n\n# return a subset of columns matching the specified data types\nnumeric_features = train.select_dtypes(include=[np.number])\n# numeric_features.dtypes\nprint(numeric_features.dtypes)\n\n\n# In[7]:\n\n\ncorr = numeric_features.corr()\n\n# The first five features are the most positively correlated with SalePrice, while the next five are the most negatively correlated.\nprint (corr['SalePrice'].sort_values(ascending=False)[:5], '\\n')\nprint (corr['SalePrice'].sort_values(ascending=False)[-5:])\n\n\n# In[8]:\n\n\nprint(train.OverallQual.unique())\n\"\"\"\nprint(\"9 \\n\")\n\"\"\"\n#investigate the relationship between OverallQual and SalePrice.\n#We set index='OverallQual' and values='SalePrice'. 
We chose to look at the median here.\nquality_pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc=np.median)\nprint(quality_pivot)\n\n\n# In[11]:\n\n\n#visualize this pivot table more easily, we can create a bar plot\n#Notice that the median sales price strictly increases as Overall Quality increases.\nquality_pivot.plot(kind='bar', color='blue')\nplt.xlabel('Overall Quality')\nplt.ylabel('Median Sale Price')\nplt.xticks(rotation=0)\nplt.show()\n\n\n# In[12]:\n\n\nprint(\"11 \\n\")\n\"\"\"\n#to generate some scatter plots and visualize the relationship between the Ground Living Area(GrLivArea) and SalePrice\nplt.scatter(x=train['GrLivArea'], y=target)\nplt.ylabel('Sale Price')\nplt.xlabel('Above grade (ground) living area square feet')\nplt.show()\n\"\"\"\nprint(\"12 \\n\")\n\n# do the same for GarageArea.\nplt.scatter(x=train['GarageArea'], y=target)\nplt.ylabel('Sale Price')\nplt.xlabel('Garage Area')\nplt.show()\n\n\n# In[13]:\n\n\n# create a new dataframe with some outliers removed\ntrain = train[train['GarageArea'] < 1200]\n\n# display the previous graph again without outliers\nplt.scatter(x=train['GarageArea'], y=np.log(train.SalePrice))\nplt.xlim(-200,1600) # This forces the same scale as before\nplt.ylabel('Sale Price')\nplt.xlabel('Garage Area')\nplt.show()\n\n\n# In[14]:\n\n\n# create a DataFrame to view the top null columns and return the counts of the null values in each column\nnulls = pd.DataFrame(train.isnull().sum().sort_values(ascending=False)[:25])\nnulls.columns = ['Null Count']\nnulls.index.name = 'Feature'\n#nulls\nprint(nulls)\n\n\n# In[15]:\n\n\nprint(\"15 \\n\")\n\"\"\"\n#to return a list of the unique values\nprint (\"Unique values are:\", train.MiscFeature.unique())\n\"\"\"\n\n######################################################\n# Wrangling the non-numeric Features ##\n######################################################\n\nprint(\"16 \\n\")\n\n# consider the non-numeric features and display details of columns\ncategoricals = train.select_dtypes(exclude=[np.number])\n#categoricals.describe()\nprint(categoricals.describe())\n\n\n# In[16]:\n\n\n#####################################################\n# Transforming and engineering features ##\n######################################################\n\nprint(\"17 \\n\")\n\n# When transforming features, it's important to remember that any transformations that you've applied to the training data before\n# fitting the model must be applied to the test data.\n\n#Eg:\nprint (\"Original: \\n\")\nprint (train.Street.value_counts(), \"\\n\")\n\n\n# In[17]:\n\n\nprint(\"18 \\n\")\n\n# our model needs numerical data, so we will use one-hot encoding to transform the data into a Boolean column.\n# create a new column called enc_street. 
The pd.get_dummies() method will handle this for us\ntrain['enc_street'] = pd.get_dummies(train.Street, drop_first=True)\ntest['enc_street'] = pd.get_dummies(test.Street, drop_first=True)\n\nprint ('Encoded: \\n')\nprint (train.enc_street.value_counts()) # Pave and Grvl values converted into 1 and 0\n\nprint(\"19 \\n\")\n\n# look at SaleCondition by constructing and plotting a pivot table, as we did above for OverallQual\ncondition_pivot = train.pivot_table(index='SaleCondition', values='SalePrice', aggfunc=np.median)\ncondition_pivot.plot(kind='bar', color='blue')\nplt.xlabel('Sale Condition')\nplt.ylabel('Median Sale Price')\nplt.xticks(rotation=0)\nplt.show()\n\n\n# In[18]:\n\n\n# encode this SaleCondition as a new feature by using a similar method that we used for Street above\ndef encode(x): return 1 if x == 'Partial' else 0\ntrain['enc_condition'] = train.SaleCondition.apply(encode)\ntest['enc_condition'] = test.SaleCondition.apply(encode)\n\nprint(\"20 \\n\")\n\n# explore this newly modified feature as a plot.\ncondition_pivot = train.pivot_table(index='enc_condition', values='SalePrice', aggfunc=np.median)\ncondition_pivot.plot(kind='bar', color='blue')\nplt.xlabel('Encoded Sale Condition')\nplt.ylabel('Median Sale Price')\nplt.xticks(rotation=0)\nplt.show()\n\n\n# In[19]:\n\n\n######################################################################################################\n# Dealing with missing values #\n# We'll fill the missing values with an average value and then assign the results to data #\n# This is a method of interpolation #\n######################################################################################################\ndata = train.select_dtypes(include=[np.number]).interpolate().dropna()\n\nprint(\"21 \\n\")\n# Check if the all of the columns have 0 null values.\n# sum(data.isnull().sum() != 0)\nprint(sum(data.isnull().sum() != 0))\n\nprint(\"22 \\n\")\n\n\n# In[20]:\n\n\n######################################################\n# 3. 
Build a linear model ##\n######################################################\n\n# separate the features and the target variable for modeling.\n# We will assign the features to X and the target variable (SalePrice) to y.\n\ny = np.log(train.SalePrice)\nX = data.drop(['SalePrice', 'Id'], axis=1)\n# exclude ID from features since Id is just an index with no relationship to SalePrice.\n\n#======= partition the data ===================================================================================================#\n# Partitioning the data in this way allows us to evaluate how our model might perform on data that it has never seen before.\n# If we train the model on all of the test data, it will be difficult to tell if overfitting has taken place.\n#==============================================================================================================================#\n# also state what percentage of the train data set we want to hold out as the test data set\n# In this example, about 33% of the data is devoted to the hold-out set.\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=.33)\n\n\n# In[21]:\n\n\n#========= Begin modelling =========================#\n# Linear Regression Model #\n#===================================================#\n\n# ---- first create a Linear Regression model.\n# First, we instantiate the model.\nlr = linear_model.LinearRegression()\n\n# ---- fit the model / Model fitting\n# lr.fit() method will fit the linear regression on the features and target variable that we pass.\nmodel = lr.fit(X_train, y_train)\n\nprint(\"23 \\n\")\n\n\n# In[22]:\n\n\n\n# ---- Evaluate the performance and visualize results\n# r-squared value is a measure of how close the data are to the fitted regression line\n# a higher r-squared value means a better fit (very close to 1)\nprint(\"R^2 is: \\n\", model.score(X_test, y_test))\n\n# use the model we have built to make predictions on the test data set.\npredictions = model.predict(X_test)\n\nprint(\"24 \\n\")\n\n\n# In[23]:\n\n\nprint('RMSE is: \\n', mean_squared_error(y_test, predictions))\n\nprint(\"25 \\n\")\n# view this relationship between predictions and actual_values graphically with a scatter plot.\nactual_values = y_test\nplt.scatter(predictions, actual_values, alpha=.75,\n color='b') # alpha helps to show overlapping data\nplt.xlabel('Predicted Price')\nplt.ylabel('Actual Price')\nplt.title('Linear Regression Model')\nplt.show()\n\n\n# In[24]:\n\n\n#====== improve the model ================================================================#\n# try using Ridge Regularization to decrease the influence of less important features #\n#=========================================================================================#\n\nprint(\"26 \\n\")\n# experiment by looping through a few different values of alpha, and see how this changes our results.\n\nfor i in range(-2, 3):\n alpha = 10**i\n rm = linear_model.Ridge(alpha=alpha)\n ridge_model = rm.fit(X_train, y_train)\n preds_ridge = ridge_model.predict(X_test)\n\n plt.scatter(preds_ridge, actual_values, alpha=.75, color='b')\n plt.xlabel('Predicted Price')\n plt.ylabel('Actual Price')\n plt.title('Ridge Regularization with alpha = {}'.format(alpha))\n overlay = 'R^2 is: {}\\nRMSE is: {}'.format(\n ridge_model.score(X_test, y_test),\n mean_squared_error(y_test, preds_ridge))\n plt.annotate(overlay, xy=(12.1, 10.6), size='x-large')\n plt.show()\n\n# if you examine the plots you can see these models perform almost identically to the first model.\n# In our case, adjusting the alpha did not substantially improve our model.\n\nprint(\"27 \\n\")\nprint(\"R^2 is: \\n\", model.score(X_test, y_test))\n\n\n# In[25]:\n\n\n######################################################\n# 4. Make a submission ##\n######################################################\n\n# create a csv that contains the predicted SalePrice for each observation in the test.csv dataset.\nsubmission = pd.DataFrame()\n# The first column must contain the ID from the test data.\nsubmission['Id'] = test.Id\n\n# select the features from the test data for the model as we did above.\nfeats = test.select_dtypes(\n include=[np.number]).drop(['Id'], axis=1).interpolate()\n\n# generate predictions\npredictions = model.predict(feats)\n\n# transform the predictions to the correct form\n# apply np.exp() to our predictions because we have taken the logarithm (np.log()) previously.\nfinal_predictions = np.exp(predictions)\n\nprint(\"28 \\n\")\n\n# check the difference\nprint(\"Original predictions are: \\n\", predictions[:10], \"\\n\")\nprint(\"Final predictions are: \\n\", final_predictions[:10])\n\nprint(\"29 \\n\")\n# assign these predictions and check\nsubmission['SalePrice'] = final_predictions\n# submission.head()\nprint(submission.head())\n\n# export to a .csv file as Kaggle expects.\n# pass index=False because Pandas otherwise would create a new index for us.\nsubmission.to_csv('submission1.csv', index=False)\n\n\nprint(\"\\n Finish\")\n\n" ]
[ [ "sklearn.linear_model.Ridge", "numpy.log", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.style.use", "matplotlib.pyplot.xticks", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.annotate", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "matplotlib.pyplot.hist", "matplotlib.pyplot.scatter", "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.metrics.mean_squared_error", "pandas.DataFrame", "numpy.exp", "matplotlib.pyplot.show", "matplotlib.pyplot.xlabel", "pandas.get_dummies" ] ]
Nintorac/survae_experiments
[ "d68cc25e2604aab08b53617c1f3ffe4716f166c4" ]
[ "survae/transforms/bijections/conv1x1.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom survae.transforms.bijections import Bijection\n\n\nclass Conv1x1(Bijection):\n \"\"\"\n Invertible 1x1 Convolution [1].\n The weight matrix is initialized as a random rotation matrix\n as described in Section 3.2 of [1].\n\n Args:\n num_channels (int): Number of channels in the input and output.\n orthogonal_init: bool, if True initialize weights to be a random orthogonal matrix (default=True).\n\n References:\n [1] Glow: Generative Flow with Invertible 1×1 Convolutions,\n Kingma & Dhariwal, 2018, https://arxiv.org/abs/1807.03039\n \"\"\"\n def __init__(self, num_channels, orthogonal_init=True):\n super(Conv1x1, self).__init__()\n self.num_channels = num_channels\n self.weight = nn.Parameter(torch.Tensor(num_channels, num_channels))\n self.reset_parameters(orthogonal_init)\n\n def reset_parameters(self, orthogonal_init):\n self.orthogonal_init = orthogonal_init\n\n if self.orthogonal_init:\n nn.init.orthogonal_(self.weight)\n else:\n bound = 1.0 / np.sqrt(self.num_channels)\n nn.init.uniform_(self.weight, -bound, bound)\n\n def _conv(self, weight, v):\n return F.conv2d(v, weight.unsqueeze(-1).unsqueeze(-1))\n\n def _logdet(self, x_shape):\n b, c, h, w = x_shape\n _, ldj_per_pixel = torch.slogdet(self.weight)\n ldj = ldj_per_pixel * h * w\n return ldj.expand([b])\n\n def forward(self, x):\n z = self._conv(self.weight, x)\n ldj = self._logdet(x.shape)\n return z, ldj\n\n def inverse(self, z):\n weight_inv = torch.inverse(self.weight)\n x = self._conv(weight_inv, z)\n return x\n\n\nclass Conv1x11d(Bijection):\n \"\"\"\n Invertible 1x1 Convolution [1].\n The weight matrix is initialized as a random rotation matrix\n as described in Section 3.2 of [1].\n\n Args:\n num_channels (int): Number of channels in the input and output.\n orthogonal_init: bool, if True initialize weights to be a random orthogonal matrix (default=True).\n\n References:\n [1] Glow: Generative Flow with Invertible 1×1 Convolutions,\n Kingma & Dhariwal, 2018, https://arxiv.org/abs/1807.03039\n \"\"\"\n def __init__(self, num_channels, orthogonal_init=True):\n super(Conv1x11d, self).__init__()\n self.num_channels = num_channels\n self.weight = nn.Parameter(torch.Tensor(num_channels, num_channels))\n self.reset_parameters(orthogonal_init)\n\n def reset_parameters(self, orthogonal_init):\n self.orthogonal_init = orthogonal_init\n\n if self.orthogonal_init:\n nn.init.orthogonal_(self.weight)\n else:\n bound = 1.0 / np.sqrt(self.num_channels)\n nn.init.uniform_(self.weight, -bound, bound)\n\n def _conv(self, weight, v):\n return F.conv1d(v, weight.unsqueeze(-1))\n\n def _logdet(self, x_shape):\n b, c, l = x_shape\n _, ldj_per_pixel = torch.slogdet(self.weight)\n ldj = ldj_per_pixel * l\n return ldj.expand([b])\n\n def forward(self, x):\n z = self._conv(self.weight, x)\n ldj = self._logdet(x.shape)\n return z, ldj\n\n def inverse(self, z):\n weight_inv = torch.inverse(self.weight)\n x = self._conv(weight_inv, z)\n return x\n" ]
[ [ "torch.inverse", "torch.nn.init.uniform_", "torch.slogdet", "numpy.sqrt", "torch.nn.init.orthogonal_", "torch.Tensor" ] ]
WarrenWeckesser/numtypes
[ "4e46ac4a338ab46eec11cbacf9165827841ea4ff" ]
[ "numtypes/tests/test_nint32.py" ]
[ "\nimport pytest\nimport math\nimport numpy as np\nfrom numpy.testing import assert_equal\nfrom numtypes import nint32\n\n\ndef test_basic():\n x = nint32(3)\n assert x == 3\n assert int(x) == 3\n\n\[email protected]('typ', [np.int8, np.uint8, np.int16, np.uint16,\n np.int32, np.uint32, np.int64, np.uint64])\ndef test_init_np_types(typ):\n x = nint32(typ(123))\n assert x == 123\n\n\ndef test_init_str_type():\n x = nint32(\"123\")\n assert x == 123\n\n\ndef test_comparison():\n x = nint32(100)\n y = nint32(-500)\n assert x > 0\n assert x < 200\n assert x < 123.4\n assert x <= 200\n assert 200 >= x\n assert x == 100\n assert x > y\n assert x >= y\n assert y < x\n assert y <= x\n assert x != y\n\n\ndef test_true_division():\n x = nint32(20)\n y = nint32(10)\n z = x / y\n assert isinstance(z, float)\n assert z == 2.0\n\n\[email protected]('nanstr', ['nan', '\\t+NAN ', '-nAn'])\ndef test_nan_str(nanstr):\n z = nint32(nanstr)\n assert math.isnan(float(z))\n assert math.isnan(z + 1.5)\n\n\ndef test_nan():\n z = nint32(math.nan)\n assert math.isnan(float(z))\n assert z != z\n\n\ndef test_bool():\n assert bool(nint32(123))\n assert bool(nint32('nan'))\n assert not bool(nint32(0))\n\n\ndef test_other():\n z = 1.0 + 2.0j\n a = nint32(2)\n w = z / a\n assert w == z/2\n\n\[email protected]('value', [2**31, -2**31, 2**65])\ndef test_init_arg_too_big(value):\n with pytest.raises(OverflowError, match='int too big to convert'):\n nint32(value)\n\n\[email protected]('arg', [2.5, None, 'abc'])\ndef test_init_bad_arg(arg):\n with pytest.raises(TypeError, match='argument must be'):\n nint32(arg)\n\n\[email protected]('extreme_func, expected',\n [(np.maximum, [20, 10, 18]),\n (np.minimum, [10, -2, 10])])\ndef test_extreme_func(extreme_func, expected):\n a = np.array([10, -2, 18], dtype=np.int32).astype(nint32)\n b = np.array([20, 10, 10], dtype=np.int32).astype(nint32)\n m = extreme_func(a, b)\n assert m.dtype == nint32\n assert_equal(m, expected)\n\n\[email protected]('methodname, expected', [('min', -2), ('max', 18)])\ndef test_extreme_method(methodname, expected):\n a = np.array([10, -2, 18], dtype=nint32)\n m = getattr(a, methodname)()\n assert m.dtype == nint32\n assert m == expected\n\n\[email protected]('methodname', ['min', 'max'])\ndef test_extreme_method_with_nan(methodname):\n a = np.array([10, np.nan, -2, 18], dtype=nint32)\n m = getattr(a, methodname)()\n assert m.dtype == nint32\n assert np.isnan(m)\n" ]
[ [ "numpy.testing.assert_equal", "numpy.array", "numpy.isnan" ] ]
PApostol/pandas
[ "578e918777f6f512f85a917dc34910df87f63e90" ]
[ "pandas/tests/util/test_show_versions.py" ]
[ "import json\nimport os\nimport re\n\nimport pytest\n\nfrom pandas.compat import (\n IS64,\n is_ci_environment,\n)\nfrom pandas.util._print_versions import (\n _get_dependency_info,\n _get_sys_info,\n)\n\nimport pandas as pd\n\n\[email protected](\n # openpyxl\n \"ignore:defusedxml.lxml is no longer supported:DeprecationWarning\"\n)\[email protected](\n # html5lib\n \"ignore:Using or importing the ABCs from:DeprecationWarning\"\n)\[email protected](\n # fastparquet\n \"ignore:pandas.core.index is deprecated:FutureWarning\"\n)\[email protected](\n # pandas_datareader\n \"ignore:pandas.util.testing is deprecated:FutureWarning\"\n)\[email protected](\n # https://github.com/pandas-dev/pandas/issues/35252\n \"ignore:Distutils:UserWarning\"\n)\[email protected](\"ignore:Setuptools is replacing distutils:UserWarning\")\ndef test_show_versions(tmpdir):\n # GH39701\n as_json = os.path.join(tmpdir, \"test_output.json\")\n\n pd.show_versions(as_json=as_json)\n\n with open(as_json) as fd:\n # check if file output is valid JSON, will raise an exception if not\n result = json.load(fd)\n\n # Basic check that each version element is found in output\n expected = {\n \"system\": _get_sys_info(),\n \"dependencies\": _get_dependency_info(),\n }\n\n assert result == expected\n\n\ndef test_show_versions_console_json(capsys):\n # GH39701\n pd.show_versions(as_json=True)\n stdout = capsys.readouterr().out\n\n # check valid json is printed to the console if as_json is True\n result = json.loads(stdout)\n\n # Basic check that each version element is found in output\n expected = {\n \"system\": _get_sys_info(),\n \"dependencies\": _get_dependency_info(),\n }\n\n assert result == expected\n\n\[email protected](\n is_ci_environment() and not IS64, reason=\"Failing on 32 bit Python CI job\"\n)\ndef test_show_versions_console(capsys):\n # gh-32041\n # gh-32041\n pd.show_versions(as_json=False)\n result = capsys.readouterr().out\n\n # check header\n assert \"INSTALLED VERSIONS\" in result\n\n # check full commit hash\n assert re.search(r\"commit\\s*:\\s[0-9a-f]{40}\\n\", result)\n\n # check required dependency\n # 2020-12-09 npdev has \"dirty\" in the tag\n # 2022-05-25 npdev released with RC wo/ \"dirty\".\n # Just ensure we match [0-9]+\\..* since npdev version is variable\n assert re.search(r\"numpy\\s*:\\s[0-9]+\\..*\\n\", result)\n\n # check optional dependency\n assert re.search(r\"pyarrow\\s*:\\s([0-9\\.]+|None)\\n\", result)\n\n\ndef test_json_output_match(capsys, tmpdir):\n # GH39701\n pd.show_versions(as_json=True)\n result_console = capsys.readouterr().out\n\n out_path = os.path.join(tmpdir, \"test_json.json\")\n pd.show_versions(as_json=out_path)\n with open(out_path) as out_fd:\n result_file = out_fd.read()\n\n assert result_console == result_file\n" ]
[ [ "pandas.util._print_versions._get_sys_info", "pandas.show_versions", "pandas.compat.is_ci_environment", "pandas.util._print_versions._get_dependency_info" ] ]
riokt/video-paragraph
[ "2da3298819e73809af495457db2cf1dfffad712f" ]
[ "metrics/evaluation.py" ]
[ "from cap_eval.bleu.bleu import Bleu\nfrom cap_eval.cider.cider import Cider\nfrom cap_eval.meteor.meteor import Meteor\n\nimport json\nimport numpy as np\n\n# initialize the caption evaluators\nmeteor_scorer = Meteor()\ncider_scorer = Cider()\nbleu_scorer = Bleu(4)\n\n\ndef bleu_eval(refs, cands):\n print (\"calculating bleu_4 score...\")\n bleu, _ = bleu_scorer.compute_score(refs, cands)\n return bleu\n\n\ndef cider_eval(refs, cands):\n print (\"calculating cider score...\")\n cider, _ = cider_scorer.compute_score(refs, cands)\n return cider\n\n\ndef meteor_eval(refs, cands):\n print (\"calculating meteor score...\")\n meteor, _ = meteor_scorer.compute_score(refs, cands)\n return meteor\n\n\ndef getNgrams(words_pred, unigrams, bigrams, trigrams, fourgrams):\n # N=1\n for w in words_pred:\n if w not in unigrams:\n unigrams[w] = 0\n unigrams[w] += 1\n # N=2\n for i, w in enumerate(words_pred):\n if i<len(words_pred)-1:\n w_next = words_pred[i+1]\n bigram = '%s_%s' % (w, w_next)\n if bigram not in bigrams:\n bigrams[bigram] = 0\n bigrams[bigram] += 1\n # N=3\n for i, w in enumerate(words_pred):\n if i<len(words_pred)-2:\n w_next = words_pred[i + 1]\n w_next_ = words_pred[i + 2]\n tri = '%s_%s_%s' % (w, w_next, w_next_)\n if tri not in trigrams:\n trigrams[tri] = 0\n trigrams[tri] += 1\n # N=4\n for i, w in enumerate(words_pred):\n if i<len(words_pred)-3:\n w_next = words_pred[i + 1]\n w_next_ = words_pred[i + 2]\n w_next__ = words_pred[i + 3]\n four = '%s_%s_%s_%s' % (w, w_next, w_next_, w_next__)\n if four not in fourgrams:\n fourgrams[four] = 0\n fourgrams[four] += 1\n return unigrams, bigrams, trigrams, fourgrams\n\n\ndef diversity(data_pred):\n div1, div2, re4 = [], [], []\n try:\n for i in range(len(data_pred)):\n unigrams, bigrams, trigrams, fourgrams = {}, {}, {}, {}\n if data_pred[i][-1] == '.':\n para = data_pred[i].split('.')[:-1]\n else:\n para = data_pred[i].split('.')\n for j, pred_sentence in enumerate(para):\n if pred_sentence[-1] == '.':\n pred_sentence = pred_sentence[:-1]\n while len(pred_sentence) > 0 and pred_sentence[-1] == ' ':\n pred_sentence = pred_sentence[:-1]\n while len(pred_sentence) > 0 and pred_sentence[0] == ' ':\n pred_sentence = pred_sentence[1:]\n pred_sentence = pred_sentence.replace(',', ' ')\n while ' ' in pred_sentence:\n pred_sentence = pred_sentence.replace(' ', ' ')\n\n words_pred = pred_sentence.split(' ')\n unigrams, bigrams, trigrams, fourgrams = getNgrams(words_pred, unigrams, bigrams, trigrams, fourgrams)\n\n sum_unigrams = sum([unigrams[un] for un in unigrams])\n vid_div1 = float(len(unigrams)) / (float(sum_unigrams) + 1e-28)\n vid_div2 = float(len(bigrams)) / (float(sum_unigrams) + 1e-28)\n vid_re4 = float(sum([max(fourgrams[f]-1,0) for f in fourgrams])) / (float(sum([fourgrams[f] for f in fourgrams])) + 1e-28)\n\n div1.append(vid_div1)\n div2.append(vid_div2)\n re4.append(vid_re4)\n except Exception as e:\n print(e)\n pass\n return np.mean(div1), np.mean(div2), np.mean(re4)\n\n\ndef compute(preds, names, refs):\n refcaps = {}\n candcaps = {}\n for i in range(len(preds)):\n candcaps[i] = [preds[i]]\n refcaps[i] = refs[names[i]]\n bleu = bleu_eval(refcaps, candcaps)\n cider = cider_eval(refcaps, candcaps)\n meteor = meteor_eval(refcaps, candcaps)\n div1, div2, re4 = diversity(preds)\n scores = {'bleu_4':bleu[3], 'bleu_3':bleu[2], 'bleu_2':bleu[1], 'bleu_1':bleu[0],\n 'cider':cider, 'meteor':meteor,\n 'div1':div1, 'div2':div2, 're4':re4}\n return scores\n\n if guess in word:\n print(\"\\nYes!\", guess, \"is in the word!\")\n\n # Create a new 
variable (so_far) to contain the guess\n new = \"\"\n i = 0\n for i in range(len(word)):\n if guess == word[i]:\n new += guess\n else:\n new += so_far[i]\n so_far = new\n" ]
[ [ "numpy.mean" ] ]
Noahs-ARK/PaLM
[ "fe943bb0516d80b09f2b56de60dac9c54dc196e6" ]
[ "eval.py" ]
[ "import math\nimport numpy as np\nimport torch\nimport data\nfrom torch.autograd import Variable\nfrom utils import batchify, get_batch, repackage_hidden\nimport argparser\nargs = argparser.args()\nfrom utils import Input\n\n# Set the random seed manually for reproducibility.\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n else:\n torch.cuda.manual_seed(args.seed)\n\ndef model_load(fn):\n global model, criterion, optimizer\n with open(fn, 'rb') as f:\n model, criterion, optimizer = torch.load(f)\n\nimport os\nimport hashlib\n\nfn = 'corpus'\nif os.path.exists(fn):\n print('Loading cached dataset...')\n corpus = torch.load(fn)\nelse:\n print('Producing dataset...')\n corpus = data.Corpus(args.data)\n torch.save(corpus, fn)\neval_batch_size = 10\ntest_batch_size = 1\ntrain_data, train_rps = batchify(corpus.train, corpus.train_rps, args.batch_size, args)\nval_data, val_rps = batchify(corpus.valid, corpus.valid_rps, eval_batch_size, args)\ntest_data, test_rps = batchify(corpus.test, corpus.test_rps, test_batch_size, args)\nprint('Args:', args)\n\ndef evaluate(data_source, rps, batch_size=10):\n # Turn on evaluation mode which disables dropout.\n \n criterion = torch.nn.CrossEntropyLoss()\n ntokens = len(corpus.dictionary)\n model.eval()\n if args.model == 'QRNN': model.reset()\n total_loss = 0\n hidden = model.init_hidden(batch_size)\n with torch.no_grad():\n for i in range(0, data_source.shape[1] - 1, args.bptt):\n data, rp, targets = get_batch(\n data_source, rps, i, batch_size=batch_size, args=args, evaluation=True)\n input = Input(x=data, rp=rp)\n output, hidden = model(input, hidden)\n # total_loss += data.size(1) * criterion(model.decoder.weight, model.decoder.bias, output, targets).data\n output = torch.nn.functional.linear(output, model.decoder.weight, bias=model.decoder.bias)\n output = torch.nn.functional.log_softmax(output, dim=-1)\n output_flat = output.view(-1, ntokens)\n total_loss += data.size(1) * criterion(output_flat, targets).data\n hidden = repackage_hidden(hidden)\n return total_loss.item() / data_source.shape[1]\nmodel_load(args.save)\n\ntest_loss = evaluate(val_data, val_rps, 10)\nprint('=' * 89)\nprint('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(\n val_loss, math.exp(test_loss), val_loss / math.log(2)))\nprint('=' * 89)\ndsa\n\ntest_loss = evaluate(test_data, test_rps, 1)\nprint('=' * 89)\nprint('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(\n test_loss, math.exp(test_loss), test_loss / math.log(2)))\nprint('=' * 89)\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.load", "torch.nn.functional.linear", "torch.cuda.manual_seed", "torch.manual_seed", "torch.save", "numpy.random.seed", "torch.no_grad", "torch.nn.CrossEntropyLoss", "torch.cuda.is_available" ] ]
javiergodoy/pandas-profiling
[ "0bed133520b9982263ed8cbc1af6b8f5a511bf0d" ]
[ "tests/unit/test_url.py" ]
[ "import pandas as pd\nimport numpy as np\n\nimport pandas_profiling\n\n\ndef test_urls(get_data_file):\n file_name = get_data_file(\n \"whitelist_urls.csv\",\n \"https://raw.githubusercontent.com/openeventdata/scraper/master/whitelist_urls.csv\",\n )\n\n df = pd.read_csv(\n file_name, header=None, names=[\"source\", \"url\", \"reach\", \"language\"]\n )\n\n # Add ~10% missing values\n df = df.mask(np.random.random(df.shape) < 0.1)\n\n profile = df.profile_report(\n title=\"DataFrame with URL column\", samples={\"head\": 0, \"tail\": 0}\n )\n\n assert \"<small>URL</small>\" in profile.to_html(), \"URL not detected\"\n assert \"<th>URL</th>\" in profile.to_html(), \"URL not detected\"\n" ]
[ [ "pandas.read_csv", "numpy.random.random" ] ]
Lguiller/machinelearning-az
[ "7c062302944b91131783fe663e1cff21e5956ca2" ]
[ "datasets/Part 2 - Regression/Section 6 - Polynomial Regression/polinomial_regression.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 5 12:45:44 2019\n\n@author: juangabriel\n\"\"\"\n\n# Regresión polinómica\n\n# Cómo importar las librerías\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importar el data set\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n\n# Dividir el data set en conjunto de entrenamiento y conjunto de testing\n\"\"\"\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\"\"\"\n\n# Escalado de variables\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\"\"\"\n\n# Ajustar la regresión lineal con el dataset\nfrom sklearn.linear_model import LinearRegression\nlin_reg = LinearRegression()\nlin_reg.fit(X, y)\n\n# Ajustar la regresión polinómica con el dataset\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_reg = PolynomialFeatures(degree = 4)\nX_poly = poly_reg.fit_transform(X)\nlin_reg_2 = LinearRegression()\nlin_reg_2.fit(X_poly, y)\n\n# Visualización de los resultados del Modelo Lineal\nplt.scatter(X, y, color = \"red\")\nplt.plot(X, lin_reg.predict(X), color = \"blue\")\nplt.title(\"Modelo de Regresión Lineal\")\nplt.xlabel(\"Posición del empleado\")\nplt.ylabel(\"Sueldo (en $)\")\nplt.show()\n\n# Visualización de los resultados del Modelo Polinómico\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape(len(X_grid), 1)\nplt.scatter(X, y, color = \"red\")\nplt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color = \"blue\")\nplt.title(\"Modelo de Regresión Polinómica\")\nplt.xlabel(\"Posición del empleado\")\nplt.ylabel(\"Sueldo (en $)\")\nplt.show()\n\n# Predicción de nuestros modelos\nlin_reg.predict(6.5)\nlin_reg_2.predict(poly_reg.fit_transform(6.5))\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "sklearn.preprocessing.PolynomialFeatures", "matplotlib.pyplot.scatter" ] ]
sighingnow/mars
[ "c7897fbd144d230fff5edabc1494fb3ff44aa0d2" ]
[ "mars/tensor/reduction/nanargmin.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...serialize import Int64Field, TupleField\nfrom .core import TensorReduction, TensorArgReductionMixin, TensorArgMapMixin, TensorArgCombineMixin\n\n\nclass TensorNanArgminMap(TensorReduction, TensorArgMapMixin):\n _op_type_ = OperandDef.NANARGMIN_CHUNK\n\n _offset = Int64Field('offset')\n _total_shape = TupleField('total_shape')\n\n _func_name = 'nanargmin'\n _agg_func_name = 'nanmin'\n\n def __init__(self, axis=None, dtype=np.dtype(int), combine_size=None,\n offset=None, total_shape=None,**kw):\n super(TensorNanArgminMap, self).__init__(_axis=axis, _dtype=dtype, _combine_size=combine_size,\n _offset=offset, _total_shape=total_shape, **kw)\n\n @property\n def offset(self):\n return getattr(self, '_offset', None)\n\n @property\n def total_shape(self):\n return getattr(self, '_total_shape', None)\n\n\nclass TensorNanArgminCombine(TensorReduction, TensorArgCombineMixin):\n _op_type_ = OperandDef.NANARGMIN_COMBINE\n _func_name = 'nanargmin'\n\n def __init__(self, axis=None, dtype=np.dtype(int), combine_size=None, **kw):\n super(TensorNanArgminCombine, self).__init__(_axis=axis, _dtype=dtype,\n _combine_size=combine_size, **kw)\n\n\nclass TensorNanArgmin(TensorReduction, TensorArgReductionMixin):\n _op_type_ = OperandDef.NANARGMIN\n _func_name = 'nanargmin'\n\n def __init__(self, axis=None, dtype=np.dtype(int), combine_size=None, **kw):\n super(TensorNanArgmin, self).__init__(_axis=axis, _dtype=dtype, _combine_size=combine_size, **kw)\n\n @staticmethod\n def _get_op_types():\n return TensorNanArgminMap, TensorNanArgmin, TensorNanArgminCombine\n\n\ndef nanargmin(a, axis=None, out=None, combine_size=None):\n \"\"\"\n Return the indices of the minimum values in the specified axis ignoring\n NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results\n cannot be trusted if a slice contains only NaNs and Infs.\n\n Parameters\n ----------\n a : array_like\n Input data.\n axis : int, optional\n Axis along which to operate. By default flattened input is used.\n combine_size: int, optional\n The number of chunks to combine.\n\n Returns\n -------\n index_array : Tensor\n A tensor of indices or a single index value.\n\n See Also\n --------\n argmin, nanargmax\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> a = mt.array([[mt.nan, 4], [2, 3]])\n >>> mt.argmin(a).execute()\n 0\n >>> mt.nanargmin(a).execute()\n 2\n >>> mt.nanargmin(a, axis=0).execute()\n array([1, 1])\n >>> mt.nanargmin(a, axis=1).execute()\n array([1, 0])\n\n \"\"\"\n op = TensorNanArgmin(axis=axis, dtype=np.dtype(int), combine_size=combine_size)\n return op(a, out=out)\n" ]
[ [ "numpy.dtype" ] ]
nabobalis/glue
[ "1c718378b5527e64d85cc6a6f9a0330652e5cf4b" ]
[ "glue/viewers/image/composite_array.py" ]
[ "# This artist can be used to deal with the sampling of the data as well as any\n# RGB blending.\n\nimport numpy as np\n\nfrom matplotlib.colors import ColorConverter, Colormap\nfrom astropy.visualization import (LinearStretch, SqrtStretch, AsinhStretch,\n LogStretch, ManualInterval, ContrastBiasStretch)\n\n\n__all__ = ['CompositeArray']\n\nCOLOR_CONVERTER = ColorConverter()\n\nSTRETCHES = {\n 'linear': LinearStretch,\n 'sqrt': SqrtStretch,\n 'arcsinh': AsinhStretch,\n 'log': LogStretch\n}\n\n\nclass CompositeArray(object):\n\n def __init__(self, **kwargs):\n\n # We keep a dictionary of layers. The key should be the UUID of the\n # layer artist, and the values should be dictionaries that contain\n # 'zorder', 'visible', 'array', 'color', and 'alpha'.\n self.layers = {}\n\n self._first = True\n\n def allocate(self, uuid):\n self.layers[uuid] = {'zorder': 0,\n 'visible': True,\n 'array': None,\n 'shape': None,\n 'color': '0.5',\n 'alpha': 1,\n 'clim': (0, 1),\n 'contrast': 1,\n 'bias': 0.5,\n 'stretch': 'linear'}\n\n def deallocate(self, uuid):\n self.layers.pop(uuid)\n\n def set(self, uuid, **kwargs):\n for key, value in kwargs.items():\n if key not in self.layers[uuid]:\n raise KeyError(\"Unknown key: {0}\".format(key))\n else:\n self.layers[uuid][key] = value\n\n @property\n def shape(self):\n for layer in self.layers.values():\n if callable(layer['shape']):\n shape = layer['shape']()\n elif layer['shape'] is not None:\n shape = layer['shape']\n elif callable(layer['array']):\n array = layer['array']()\n if array is None:\n return None\n else:\n shape = array.shape\n else:\n shape = layer['array'].shape\n if shape is not None:\n return shape\n return None\n\n def __getitem__(self, item):\n return self()[item]\n\n def __call__(self, bounds=None):\n\n img = None\n visible_layers = 0\n\n for uuid in sorted(self.layers, key=lambda x: self.layers[x]['zorder']):\n\n layer = self.layers[uuid]\n\n if not layer['visible']:\n continue\n\n interval = ManualInterval(*layer['clim'])\n contrast_bias = ContrastBiasStretch(layer['contrast'], layer['bias'])\n\n if callable(layer['array']):\n array = layer['array'](bounds=bounds)\n else:\n array = layer['array']\n\n if array is None:\n continue\n\n if np.isscalar(array):\n scalar = True\n array = np.atleast_2d(array)\n else:\n scalar = False\n\n data = STRETCHES[layer['stretch']]()(contrast_bias(interval(array)))\n data[np.isnan(data)] = 0\n\n if isinstance(layer['color'], Colormap):\n\n if img is None:\n img = np.ones(data.shape + (4,))\n\n # Compute colormapped image\n plane = layer['color'](data)\n\n alpha_plane = layer['alpha'] * plane[:, :, 3]\n\n # Use traditional alpha compositing\n plane[:, :, 0] = plane[:, :, 0] * alpha_plane\n plane[:, :, 1] = plane[:, :, 1] * alpha_plane\n plane[:, :, 2] = plane[:, :, 2] * alpha_plane\n\n img[:, :, 0] *= (1 - alpha_plane)\n img[:, :, 1] *= (1 - alpha_plane)\n img[:, :, 2] *= (1 - alpha_plane)\n img[:, :, 3] = 1\n\n else:\n\n if img is None:\n img = np.zeros(data.shape + (4,))\n\n # Get color and pre-multiply by alpha values\n color = COLOR_CONVERTER.to_rgba_array(layer['color'])[0]\n color *= layer['alpha']\n\n # We should treat NaN values as zero (post-stretch), which means\n # that those pixels don't contribute towards the final image.\n reset = np.isnan(data)\n if np.any(reset):\n data[reset] = 0.\n\n plane = data[:, :, np.newaxis] * color\n plane[:, :, 3] = 1\n\n visible_layers += 1\n\n if scalar:\n plane = plane[0, 0]\n\n img += plane\n\n if img is None:\n return None\n else:\n img = np.clip(img, 0, 
1)\n\n return img\n\n @property\n def dtype(self):\n return np.dtype(float)\n\n @property\n def ndim(self):\n return 2\n\n @property\n def size(self):\n return np.product(self.shape)\n\n def __contains__(self, item):\n return item in self.layers\n" ]
[ [ "matplotlib.colors.ColorConverter", "numpy.ones", "numpy.atleast_2d", "numpy.zeros", "numpy.dtype", "numpy.any", "numpy.clip", "numpy.product", "numpy.isnan", "numpy.isscalar" ] ]
SalAlba/matplotlib
[ "f73ff4e77074152fb9abc400d66f56111e656687" ]
[ "tutorial/basic/ex3.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nfrom sal_timer import timer\n\n\n\ndef plot_1():\n # ...\n data = {\n 'a': np.arange(50),\n 'c': np.random.randint(0, 50, 50),\n 'd': np.random.randn(50)\n }\n data['b'] = data['a'] + 10 * np.random.randn(50)\n data['d'] = np.abs(data['d']) * 100\n\n # ...\n # x : x\n # y : y\n # c : color\n # s : size\n plt.scatter(x='a', y='b', c='c', s='d', data=data)\n\n # ...\n plt.xlabel('entry a')\n plt.ylabel('entry b')\n plt.show()\n\n\n\n@timer\ndef main():\n plot_1()\n\n\n\nif __name__ == '__main__':\n print('========================================== START ==========================================')\n #...\n main()\n print('========================================== END ============================================')" ]
[ [ "numpy.random.randn", "numpy.abs", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.random.randint", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
eherr/vis_utils
[ "b757b01f42e6da02ad62130c3b0e61e9eaa3886f" ]
[ "vis_utils/graphics/geometry/splines.py" ]
[ "#!/usr/bin/env python\n#\n# Copyright 2019 DFKI GmbH.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the\n# following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n# USE OR OTHER DEALINGS IN THE SOFTWARE.\n# -*- coding: utf-8 -*-\n\n\nimport numpy as np\nimport scipy.interpolate as si\nimport math\nfrom .utils import closestLowerValueBinarySearch\n \nB_SPLINE_DEGREE=3\n\n\nclass BSplineWrapper(object):\n def __init__(self, points, degree=B_SPLINE_DEGREE, domain=None):\n self.points = np.array(points)\n if isinstance(points[0], (int, float, complex)):\n self.dimensions = 1\n else:\n self.dimensions = len(points[0])\n self.degree = degree\n if domain is not None:\n self.domain = domain\n else:\n self.domain = (0.0, 1.0)\n \n self.initiated = True\n self.spline_def = []\n points_t = np.array(points).T\n t_func = np.linspace(self.domain[0], self.domain[1], len(points)).tolist()\n for d in range(len(points_t)):\n #print d, self.dimensions\n self.spline_def.append(si.splrep(t_func, points_t[d], w=None, k=3))\n\n def _initiate_control_points(self):\n return\n\n def clear(self):\n return\n\n def queryPoint(self, u):\n \"\"\"\n\n \"\"\"\n point = []\n for d in range(self.dimensions):\n point.append(si.splev(u, self.spline_def[d]))\n return np.array(point)\n\n def get_last_control_point(self):\n return self.points[-1]\n\nclass BSpline(object):\n \"\"\"\n http://demonstrations.wolfram.com/GeneratingABSplineCurveByTheCoxDeBoorAlgorithm/\n http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-basis.html\n \"\"\"\n def __init__(self, points, degree=3, domain=None):\n self.points = np.array(points)\n if isinstance(points[0], (int, float, complex)):\n self.dimensions = 1\n else:\n self.dimensions = len(points[0])\n self.degree = degree\n if domain is not None:\n self.domain = domain\n else:\n self.domain = (0.0, 1.0)\n self.knots = None\n self.initiated = False\n self._create_knots()\n\n def _initiate_control_points(self):\n return\n\n def clear(self):\n return\n\n def get_last_control_point(self):\n return self.points[-1]\n\n def _create_knots(self):\n \"\"\"\n http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-curve.html\n #To change the shape of a B-spline curve, one can modify one or more of \n #these control parameters: \n #the positions of control points, the positions of knots, and the degree of the curve.\n # given n+1 control points and m+1 knots the following property must be true\n #m = n + p + 1. // p+1 = m-n\n # for a campled curve the last knot must be of multiplicity p+1\n \n If you have n+1 control points (n=9) and p = 3. 
\n Then, m must be 13 so that the knot vector has 14 knots\n The remaining 14 - (4 + 4) = 6 knots can be anywhere in the domain. \n U = { 0, 0, 0, 0, 0.14, 0.28, 0.42, 0.57, 0.71, 0.85, 1, 1, 1, 1 }. \n how do find the knot points C(ui).\n \"\"\"\n outer_knots = self.degree+1\n print(\"multiplicity\", outer_knots)\n n = len(self.points) - 1 \n print(\"control points\", len(self.points))\n print(\"n\", n)\n p = self.degree\n m = n + p + 1\n n_knots = m + 1\n inner_knots = n_knots-(outer_knots*2 - 2)\n print(\"knots\", n_knots)\n print(\"free knots\", inner_knots)\n print(\"domain\", self.domain)\n #print np.linspace(0.0, 1.0, 4)\n knots = np.linspace(self.domain[0], self.domain[1], inner_knots).tolist()\n #print self.knots\n self.knots = knots[:1] * (outer_knots-1) + knots +\\\n knots[-1:] * (outer_knots-1)\n print(self.knots)\n print(len(self.knots))\n self.initiated = True\n\n def queryPoint(self, u):\n \"\"\"\n\n \"\"\"\n return self.evaluate(u, algorithm=\"deboor\")\n\n def evaluate(self, u, algorithm=\"standard\"):\n #print \"evaluate\", u\n if self.domain[0] < u < self.domain[1]:\n if algorithm == \"standard\":\n value = 0.0#np.zeros(self.dim)\n n = len(self.points)\n w_list = []\n for i in range(n):\n #i+=self.degree\n #print \"iteration\",i, self.basis(u, i, self.degree)\n #i = self.get_begin_of_knot_range(u)\n w = self.basis(u, i, self.degree)\n w_list.append(w)\n #print temp\n value += w * self.points[i]\n #print sum(w_list)\n return value\n elif algorithm == \"deboor\":\n i = self.get_begin_of_knot_range(u)\n #print u\n return self.deboor(self.degree, self.degree, u, i)\n elif u >= self.domain[1]:\n return self.points[-1]\n elif u <= self.domain[0]:\n return self.points[0]\n\n def basis(self, u, i, p):\n \"\"\"http://devosaurus.blogspot.de/2013/10/exploring-b-splines-in-python.html\n \"\"\"\n if p == 0:\n if self.knots[i] <= u < self.knots[i+1]:\n return 1.0\n else:\n return 0.0\n elif p >= 1:\n #print i+p\n #print \"knot interval\", i, i+p, self.knots[i+p]\n out = 0.0\n w_nom = (u-self.knots[i])\n w_denom = (self.knots[i+p]-self.knots[i])\n if w_denom > 0.0:\n w = w_nom / w_denom\n out += w * self.basis(u, i, p-1)\n \n w_inv_nom = (self.knots[i+p+1] - u)\n w_inv_denom = (self.knots[i+p+1] - self.knots[i+1])\n if w_inv_denom > 0.0:\n w_inv = w_inv_nom / w_inv_denom\n out += w_inv * self.basis(u, i+1, p-1)\n return out\n \n def get_begin_of_knot_range(self, u):\n begin_of_range = 0 \n for i, u_i in enumerate(self.knots):\n if u_i < u:\n begin_of_range = i\n else:\n break\n #print \"begin\", begin_of_range\n return begin_of_range\n \n def deboor(self, k, p, u, i):\n \"\"\"\n https://chi3x10.wordpress.com/2009/10/18/de-boor-algorithm-in-c/\n \"\"\"\n if k == 0:\n return self.points[i]\n elif k >= 1:\n\n denom = (self.knots[i+p+1-k] - self.knots[i])\n if denom >0:\n alpha = (u-self.knots[i])/denom\n return (1-alpha) * self.deboor(k-1, p, u, i-1) \\\n + (alpha * self.deboor(k-1, p, u, i))\n else:\n return np.zeros(self.dimensions)\n\n\nclass CatmullRomSpline():\n '''\n spline that goes through control points with arc length mapping used by motion planning\n implemented using the following resources and examples:\n #http://www.cs.cmu.edu/~462/projects/assn2/assn2/catmullRom.pdf\n #http://algorithmist.net/docs/catmullrom.pdf\n #http://www.mvps.org/directx/articles/catmull/\n #http://hawkesy.blogspot.de/2010/05/catmull-rom-spline-curve-implementation.html\n #http://pages.cpsc.ucalgary.ca/~jungle/587/pdf/5-interpolation.pdf\n '''\n def __init__(self,controlPoints, dimensions, 
granularity=100):\n        self.granularity = granularity\n        # http://algorithmist.net/docs/catmullrom.pdf\n        # base matrix to calculate one component of a point on the spline based on the influence of control points\n        self.catmullRomBaseMatrix = np.array([[-1.0, 3.0, -3.0, 1.0],\n                                              [2.0, -5.0, 4.0, -1.0],\n                                              [-1.0, 0.0, 1.0, 0.0],\n                                              [0.0, 2.0, 0.0, 0.0]])\n        self.dimensions = dimensions\n        self.fullArcLength = 0\n        self.initiated = False\n        self.controlPoints = []\n        self.numberOfSegments = 0\n        if len(controlPoints) > 0:\n            self.initiateControlPoints(controlPoints)\n            self.initiated = True\n\n    def initiateControlPoints(self, controlPoints):\n        '''\n        @param controlPoints array of class accessible by controlPoints[index][dimension]\n        '''\n        self.numberOfSegments = len(controlPoints) - 1\n        self.controlPoints = [controlPoints[0]] + controlPoints + [controlPoints[-1], controlPoints[-1]]  # as a workaround add multiple points at the end instead of one\n        print(\"length of control point list \", len(self.controlPoints))\n        print(\"number of segments \", self.numberOfSegments)\n        print(\"number of dimensions\", self.dimensions)\n\n        self.updateArcLengthMappingTable()\n\n        return\n\n    def addPoint(self, point):\n        # add point, replacing the auxiliary control points\n        if self.initiated:\n            del self.controlPoints[-2:]\n            self.numberOfSegments = len(self.controlPoints) - 1\n            self.controlPoints += [point, point, point]\n            print(self.controlPoints)\n\n            # update arc length mapping\n            self.updateArcLengthMappingTable()\n        else:\n            self.initiateControlPoints([point, ])\n            self.initiated = True\n\n    def clear(self):\n        self.controlPoints = []\n        self.initiated = False\n        self.fullArcLength = 0\n        self.numberOfSegments = 0\n        self.arcLengthMap = []\n\n    def transformByMatrix(self, matrix):\n        '''\n        matrix nxn transformation matrix where n is the number of dimensions of the catmull rom spline\n        '''\n        if self.dimensions < matrix.shape[0]:\n            for i in range(len(self.controlPoints)):\n                self.controlPoints[i] = np.dot(matrix, self.controlPoints[i])\n        else:\n            print(\"failed\", matrix.shape)\n        return\n\n    def updateArcLengthMappingTable(self):\n        '''\n        creates a table that maps from parameter space of query point to relative arc length based on the given granularity in the constructor of the catmull rom spline\n        http://pages.cpsc.ucalgary.ca/~jungle/587/pdf/5-interpolation.pdf\n        '''\n        self.fullArcLength = 0\n        granularity = self.granularity\n        u = np.arange(granularity + 1) / float(granularity)\n        lastPoint = None\n        numberOfEvaluations = 0\n        self.arcLengthMap = []\n        for i in u:\n            point = self.queryPoint(i)\n            if lastPoint is not None:\n                delta = []\n                d = 0\n                while d < self.dimensions:\n                    # accumulate squared differences so the segment length below is Euclidean\n                    delta.append((point[d] - lastPoint[d]) ** 2)\n                    d += 1\n                self.fullArcLength += math.sqrt(np.sum(delta))  # (point-lastPoint).length()\n            self.arcLengthMap.append([i, self.fullArcLength])\n            numberOfEvaluations += 1\n            lastPoint = point\n\n        # normalize values\n        if self.fullArcLength > 0:\n            for i in range(numberOfEvaluations):\n                self.arcLengthMap[i][1] /= self.fullArcLength\n\n    def getFullArcLength(self, granularity=100):\n        u = np.arange(granularity + 1) / float(granularity)\n        arcLength = 0.0\n        lastPoint = None\n        for i in u:\n            point = self.queryPoint(i)\n            if lastPoint is not None:\n                arcLength += np.linalg.norm(point - lastPoint)  # (point-lastPoint).length()\n            lastPoint = point\n        return arcLength\n\n    def getDistanceToPath(self, absoluteArcLength, position):\n        '''\n        evaluates a point with 
absoluteArcLength on self to get a point on the path\n then the distance between the given position and the point on the path is returned\n '''\n pointOnPath = self.getPointAtAbsoluteArcLength(absoluteArcLength)\n return np.linalg.norm(position-pointOnPath)\n\n def getLastControlPoint(self):\n if len(self.controlPoints)> 0:\n return self.controlPoints[-1]\n else:\n return [0,0,0]\n\n def getArcLengthForParameter(self,t):\n stepSize = 1/self.granularity\n tableIndex = int(t/stepSize)\n return self.arcLengthMap[tableIndex][1]*self.fullArcLength\n\n\n\n def getPointAtAbsoluteArcLength(self,absoluteArcLength):\n point = np.zeros((1,self.dimensions))#source of bug\n if absoluteArcLength <= self.fullArcLength:\n # parameterize curve by arc length\n relativeArcLength = absoluteArcLength/self.fullArcLength\n point = self.queryPointByRelativeArcLength(relativeArcLength)\n else:\n return None\n# else:\n# raise ValueError('%f exceeded arc length %f' % (absoluteArcLength,self.fullArcLength))\n return point\n\n def findClosestValuesInArcLengthMap(self,relativeArcLength):\n '''\n - given a relative arc length between 0 and 1 it uses closestLowerValueBinarySearch from the Generic Algorithms module to search the self.arcLengthMap for the values bounding the searched value\n - returns floor parameter, ceiling parameter, floor arc length, ceiling arc length and a bool if the exact value was found\n '''\n foundExactValue = True\n result = closestLowerValueBinarySearch(self.arcLengthMap,0,len(self.arcLengthMap)-1,relativeArcLength, getter = lambda A,i: A[i][1])#returns the index and a flag value, requires a getter for the array\n\n index = result[0]\n\n if result[1] == 0:#found exact value\n floorP, ceilP = self.arcLengthMap[index][0],self.arcLengthMap[index][0]\n floorL, ceilL = self.arcLengthMap[index][1],self.arcLengthMap[index][1]\n foundExactValue = True\n elif result[1] ==1:#found lower value\n floorP = self.arcLengthMap[index][0]\n floorL = self.arcLengthMap[index][1]\n if index <len(self.arcLengthMap):#check array bounds\n ceilP = self.arcLengthMap[index+1][0]\n ceilL = self.arcLengthMap[index+1][1]\n foundExactValue = False\n else:\n foundExactValue = True\n ceilP= floorP\n ceilL = floorL\n elif result[1] ==2:#value smaller than smallest element in the array\n ceilP = self.arcLengthMap[index][0]\n floorL = self.arcLengthMap[index][1]\n floorP = ceilP\n ceilL = floorL\n foundExactValue = True\n elif result[1] ==3:#value larger than largest element in the array\n ceilP = self.arcLengthMap[index][0]\n ceilL = self.arcLengthMap[index][1]\n floorP = ceilP\n floorL = ceilL\n foundExactValue = True\n #print relativeArcLength,floorL,ceilL,foundExactValue\n return floorP,ceilP,floorL,ceilL,foundExactValue\n\n #see slide 30 of http://pages.cpsc.ucalgary.ca/~jungle/587/pdf/5-interpolation.pdf\n #note it does a binary search so it is rather expensive to be called at every frame\n def queryPointByRelativeArcLength(self,relativeArcLength):\n\n floorP,ceilP,floorL,ceilL,foundExactValue = self.findClosestValuesInArcLengthMap(relativeArcLength)\n if not foundExactValue:\n alpha = (relativeArcLength-floorL)/(ceilL-floorL)#can be reused a-\n #t = floorL+alpha*(ceilL-floorL)\n t = floorP+alpha*(ceilP-floorP)\n else:\n t = floorP\n #t = relativeArcLength#todo add correct mapping\n\n return self.queryPoint(t)\n\n def mapToSegment(self,t):\n\n i = min(math.floor( self.numberOfSegments *t),self.numberOfSegments)#the part of t before i\n localT =(self.numberOfSegments*t) -math.floor( self.numberOfSegments *t)#the rest, e.g. 
N = 10 and t = 0.62 => i = 6 and the rest is 0.02\n #i = min(i,self.numberOfSegments)\n return i+1,localT#increment i by 1 to ignore the first auxiliary control point\n\n\n def getControlPointVectors(self,i):\n i = int(i)\n #if i<=self.numberOfSegments-2:\n d = 0\n vectors = []\n\n while d < self.dimensions:\n v = [float(self.controlPoints[i-1][d]),float(self.controlPoints[i][d]),float(self.controlPoints[i+1][d]),float(self.controlPoints[i+2][d])]\n vectors.append(np.array(v))\n d+=1\n\n return vectors\n\n#\n def queryPoint(self, t):\n i,localT = self.mapToSegment(t)\n weightVector = np.array([localT**3,localT**2,localT,1])\n controlPointVectors = self.getControlPointVectors(i)\n point =[]\n d =0\n while d < self.dimensions:\n point.append(self.queryValue(weightVector, controlPointVectors[d]))\n d += 1\n return np.array(point)\n\n def queryValue(self, weightVector, controllPointVector):\n v = np.dot(self.catmullRomBaseMatrix, controllPointVector)\n v = np.dot(weightVector, v)\n return 0.5 * v\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.arange", "scipy.interpolate.splev", "numpy.linspace", "scipy.interpolate.splrep", "numpy.array", "numpy.dot", "numpy.linalg.norm" ] ]
truthiswill/federated
[ "d25eeac036dfc2a485120a195fd904223cfc823a" ]
[ "tensorflow_federated/python/aggregators/quantile_estimation_test.py" ]
[ "# Copyright 2020, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_privacy as tfp\n\nfrom tensorflow_federated.python.aggregators import quantile_estimation\nfrom tensorflow_federated.python.core.api import test_case\nfrom tensorflow_federated.python.core.backends.test import execution_contexts\nfrom tensorflow_federated.python.core.impl.types import computation_types\nfrom tensorflow_federated.python.core.impl.types import placements\nfrom tensorflow_federated.python.core.impl.types import type_conversions\nfrom tensorflow_federated.python.core.templates import estimation_process\nfrom tensorflow_federated.python.core.test import static_assert\n\nQEProcess = quantile_estimation.PrivateQuantileEstimationProcess\n\n\nclass PrivateQEComputationTest(test_case.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(('private', True), ('non_private', False))\n def test_process_type_signature(self, private):\n if private:\n quantile_estimator_query = tfp.QuantileEstimatorQuery(\n initial_estimate=1.0,\n target_quantile=0.5,\n learning_rate=1.0,\n below_estimate_stddev=0.5,\n expected_num_records=100,\n geometric_update=True)\n else:\n quantile_estimator_query = tfp.NoPrivacyQuantileEstimatorQuery(\n initial_estimate=1.0,\n target_quantile=0.5,\n learning_rate=1.0,\n geometric_update=True)\n\n process = QEProcess(quantile_estimator_query)\n\n query_state = quantile_estimator_query.initial_global_state()\n sum_process_state = ()\n\n server_state_type = computation_types.FederatedType(\n type_conversions.type_from_tensors((query_state, sum_process_state)),\n placements.SERVER)\n\n self.assertEqual(\n computation_types.FunctionType(\n parameter=None, result=server_state_type),\n process.initialize.type_signature)\n\n estimate_type = computation_types.FederatedType(tf.float32,\n placements.SERVER)\n\n self.assertEqual(\n computation_types.FunctionType(\n parameter=server_state_type, result=estimate_type),\n process.report.type_signature)\n\n client_value_type = computation_types.FederatedType(tf.float32,\n placements.CLIENTS)\n self.assertTrue(\n process.next.type_signature.is_equivalent_to(\n computation_types.FunctionType(\n parameter=collections.OrderedDict(\n state=server_state_type, value=client_value_type),\n result=server_state_type)))\n\n def test_bad_query(self):\n non_quantile_estimator_query = tfp.GaussianSumQuery(\n l2_norm_clip=1.0, stddev=1.0)\n\n with self.assertRaises(TypeError):\n QEProcess(non_quantile_estimator_query)\n\n def test_bad_aggregation_factory(self):\n quantile_estimator_query = tfp.NoPrivacyQuantileEstimatorQuery(\n initial_estimate=1.0,\n target_quantile=0.5,\n learning_rate=1.0,\n geometric_update=True)\n\n with self.assertRaises(TypeError):\n QEProcess(\n quantile_estimator_query=quantile_estimator_query,\n record_aggregation_factory=\"I'm not a record_aggregation_factory.\")\n\n\nclass 
PrivateQEExecutionTest(test_case.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(('arithmetic', False), ('geometric', True))\n def test_adaptation(self, geometric_update):\n initial_estimate = 3.14159\n target_quantile = 0.61803\n learning_rate = 2.71828\n\n quantile_estimator_query = tfp.NoPrivacyQuantileEstimatorQuery(\n initial_estimate=initial_estimate,\n target_quantile=target_quantile,\n learning_rate=learning_rate,\n geometric_update=geometric_update)\n\n process = QEProcess(quantile_estimator_query)\n\n state = process.initialize()\n self.assertAllClose(process.report(state), initial_estimate)\n\n # Run on two records greater than estimate.\n state = process.next(state, [initial_estimate + 1, initial_estimate + 2])\n\n if geometric_update:\n expected_estimate = (\n initial_estimate * np.exp(learning_rate * target_quantile))\n else:\n expected_estimate = initial_estimate + learning_rate * target_quantile\n\n self.assertAllClose(process.report(state), expected_estimate)\n\n def test_no_noise_cls(self):\n process = QEProcess.no_noise(\n initial_estimate=1.0, target_quantile=0.5, learning_rate=1.0)\n self.assertIsInstance(process, QEProcess)\n state = process.initialize()\n self.assertEqual(process.report(state), 1.0)\n\n def test_no_noise_affine_cls(self):\n process = QEProcess.no_noise(\n initial_estimate=1.0,\n target_quantile=0.5,\n learning_rate=1.0,\n multiplier=2.0,\n increment=1.0)\n self.assertIsInstance(process, estimation_process.EstimationProcess)\n state = process.initialize()\n self.assertEqual(process.report(state), 3.0)\n\n def test_no_noise_secure_true_false_equal_results(self):\n simple_process = QEProcess.no_noise(\n initial_estimate=1.0,\n target_quantile=0.5,\n learning_rate=1.0,\n secure_estimation=False)\n secure_process = QEProcess.no_noise(\n initial_estimate=1.0,\n target_quantile=0.5,\n learning_rate=1.0,\n secure_estimation=True)\n\n data = [0.5, 1.5, 2.5] # 2 bigger than the initial estimate 1.0, 1 smaller.\n\n simple_state = simple_process.initialize()\n secure_state = secure_process.initialize()\n for _ in range(3):\n simple_state = simple_process.next(simple_state, data)\n secure_state = secure_process.next(secure_state, data)\n self.assertAllClose(\n simple_process.report(simple_state),\n secure_process.report(secure_state))\n\n def test_secure_estimation_true_only_contains_secure_aggregation(self):\n secure_process = QEProcess.no_noise(\n initial_estimate=1.0,\n target_quantile=0.5,\n learning_rate=1.0,\n secure_estimation=True)\n try:\n static_assert.assert_not_contains_unsecure_aggregation(\n secure_process.next)\n except: # pylint: disable=bare-except\n self.fail('Computation contains non-secure aggregation.')\n\n\nif __name__ == '__main__':\n execution_contexts.set_test_execution_context()\n test_case.main()\n" ]
[ [ "numpy.exp" ] ]
s3a-spatialaudio/VISR
[ "55f6289bc5058d4898106f3520e1a60644ffb3ab" ]
[ "src/python/scripts/rsao/reverbObjectBinauralisation_flexible.py" ]
[ " # -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 14 15:59:11 2017\n\n@author: af5u13\n\"\"\"\n\n# Usage for debugging from raw Python console\n#exec(open(\"/Users/af5u13/dev/visr/src/python/scripts/rsao/reverbObjectBinauralisation.py\").read())\n\nimport visr\nimport signalflows\nimport panning\nimport pml\nimport rbbl\nimport rcl\nimport rrl\n#import objectmodel as om\n\nimport h5py\nimport numpy as np;\nimport matplotlib.pyplot as plt\nimport os\n\nclass ReverbToBinaural( visr.CompositeComponent ):\n def __init__( self, context, name, parent,\n loudspeakerConfig,\n numberOfInputs,\n rendererOutputs,\n interpolationPeriod,\n diffusionFilters,\n trackingConfiguration,\n brirRouting,\n brirFilters,\n scenePort = 4242,\n reverbConfiguration=''):\n super(ReverbToBinaural,self).__init__( context, name, parent )\n self.coreRenderer = signalflows.BaselineRenderer( ctxt, 'renderer', self,\n loudspeakerConfig=loudspeakerConfig,\n numberOfInputs=numberOfInputs,\n numberOfOutputs=rendererOutputs,\n interpolationPeriod=interpolationPeriod,\n diffusionFilters=diffusionFilters,\n reverbConfig=reverbConfiguration,\n sceneReceiverPort=scenePort,\n trackingConfiguration=trackingConfiguration\n )\n numFilters = brirFilters.numberOfRows\n firLength = brirFilters.numberOfColumns\n numRoutings = brirRouting.size\n self.convolver = rcl.FirFilterMatrix( ctxt, 'convolver', self,\n numberOfInputs=rendererOutputs,\n numberOfOutputs=2,\n maxFilters=numFilters,\n filterLength=firLength,\n maxRoutings=numRoutings,\n filters=brirFilters,\n routings=brirRouting,\n controlInputs=rcl.FirFilterMatrix.ControlPortConfig.NoInputs\n )\n self.audioIn = visr.AudioInputFloat( \"audioIn\", self, numberOfInputs )\n self.audioOut = visr.AudioOutputFloat( \"audioOut\", self, 2 )\n self.audioConnection( self.audioIn, self.coreRenderer.audioPort(\"input\"))\n self.audioConnection( self.coreRenderer.audioPort(\"output\"),\n self.convolver.audioPort(\"in\"))\n self.audioConnection( self.convolver.audioPort(\"out\"), self.audioOut )\n if len(trackingConfiguration) > 0:\n self.posIn = visr.ParameterInput( \"posIn\", self,\n pml.ListenerPosition.staticType,\n pml.DoubleBufferingProtocol.staticType,\n pml.EmptyParameterConfig() )\n self.parameterConnection( self.posIn, self.coreRenderer.parameterPort(\"trackingPositionInput\") )\n\n# Get VISR base directory from rsao subdirectory.\nvisrBaseDirectory = os.path.normpath(os.path.join( os.getcwd(), '../../../..' 
)).replace('\\\\','/')\n\nblockSize = 1024\nsamplingFrequency = 48000\nparameterUpdatePeriod = 1024\n\nnumBlocks = 8\nsignalLength = blockSize * numBlocks\nt = 1.0/samplingFrequency * np.arange(0,signalLength)\n\nnumObjects = 1;\n\nctxt = visr.SignalFlowContext( blockSize, samplingFrequency)\n\nlspConfigFile = os.path.join( visrBaseDirectory, 'config/bbc/bs2051-4+5+0.xml').replace('\\\\','/')\n# lspConfigFile = os.path.join( visrBaseDirectory, 'config/isvr/audiolab_39speakers_1subwoofer.xml' )\n\nlc = panning.LoudspeakerArray( lspConfigFile )\n\nnumOutputChannels = np.max( lc.channelIndices() + lc.subwooferChannelIndices() ) +1\nnumLoudspeakers = lc.numberOfRegularLoudspeakers\n\ndiffFilterFile = os.path.join( visrBaseDirectory, 'config/filters/random_phase_allpass_64ch_512taps.wav')\ndiffFiltersRaw = np.array(pml.MatrixParameterFloat.fromAudioFile( diffFilterFile ),\n dtype = np.float32 )\ndiffFilters = pml.MatrixParameterFloat( diffFiltersRaw[ np.array(lc.channelIndices() )-1,: ] )\n\nreverbConfigStr = '{ \"numReverbObjects\": %i, \"discreteReflectionsPerObject\": 20, \"lateReverbFilterLength\": 2.0, \"lateReverbDecorrelationFilters\": \"%s/config/filters/random_phase_allpass_64ch_1024taps.wav\" }' % (numObjects, visrBaseDirectory )\n\n## Load the BBC BRIR dataset\nbrirFile = os.path.join( os.getcwd(), 'BBC_BRIR.mat' )\nbrirMat = h5py.File( brirFile )\nbrirFull = np.array( brirMat['h_sweetspot'], dtype=np.float32 ).copy('C')\n# Scalefactor to compensate for the very low amplitudes of the BBC BRIRs\nbrirScaleFactor = 500;\nbrirFlat = brirScaleFactor * np.concatenate( (brirFull[:,0,:], brirFull[:,1,:] ) )\nbrirFilterParam = pml.MatrixParameterFloat( brirFlat, 16 )\nnumBrirSpeakers = brirFull.shape[0]\n# Define the routing for the binaural convolver such that it matches the organisation of the\n# flat BRIR matrix.\nfilterRouting = rbbl.FilterRoutingList()\nfor idx in range(0, numBrirSpeakers ):\n filterRouting.addRouting( idx, 0, idx, 1.0 )\n filterRouting.addRouting( idx, 1, idx+numBrirSpeakers, 1.0 )\n\n\nrenderer = ReverbToBinaural( ctxt, 'top', None,\n loudspeakerConfig=lc,\n numberOfInputs=numObjects,\n rendererOutputs=numOutputChannels,\n interpolationPeriod=parameterUpdatePeriod,\n diffusionFilters=diffFilters,\n trackingConfiguration='',\n brirFilters = brirFilterParam,\n brirRouting = filterRouting,\n reverbConfiguration=reverbConfigStr,\n scenePort = 4242\n )\n\nprint( 'Created renderer.' )\n\nflow = rrl.AudioSignalFlow( renderer )\n\n## Non-realtime code\n#inputSignal = np.zeros( (numObjects, signalLength ), dtype=np.float32 )\n## inputSignal[0,:] = 0.75*np.sin( 2.0*np.pi*440 * t )\n#inputSignal[ 0, 100 ] = 1\n#\n#outputSignal = np.zeros( (2, signalLength ), dtype=np.float32 )\n#\n#for blockIdx in range(0,numBlocks):\n## if blockIdx % (parameterUpdatePeriod/blockSize) == 0:\n## ov = paramInput.data()\n## ov.clear()\n## ov.set( ro.objectId, ro )\n## paramInput.swapBuffers()\n#\n# inputBlock = inputSignal[:, blockIdx*blockSize:(blockIdx+1)*blockSize]\n# outputBlock = flow.process( inputBlock )\n# outputSignal[:, blockIdx*blockSize:(blockIdx+1)*blockSize] = outputBlock\n#\n#\n#plt.figure(1)\n#plt.plot( t, outputSignal[0,:], 'bo-', t, outputSignal[1,:], 'rx-' )\n#plt.show( block = False )\n" ]
[ [ "numpy.arange", "numpy.concatenate", "numpy.array" ] ]
Woooosz/dgl
[ "729ff2ef385f302af562c8305b1006d067d2067f" ]
[ "examples/pytorch/gcmc/model.py" ]
[ "\"\"\"NN modules\"\"\"\nimport torch as th\nimport torch.nn as nn\nfrom torch.nn import init\nimport dgl.function as fn\nimport dgl.nn.pytorch as dglnn\n\nfrom utils import get_activation\n\nclass GCMCGraphConv(nn.Module):\n \"\"\"Graph convolution module used in the GCMC model.\n\n Parameters\n ----------\n in_feats : int\n Input feature size.\n out_feats : int\n Output feature size.\n weight : bool, optional\n If True, apply a linear layer. Otherwise, aggregating the messages\n without a weight matrix or with an shared weight provided by caller.\n device: str, optional\n Which device to put data in. Useful in mix_cpu_gpu training and\n multi-gpu training\n \"\"\"\n def __init__(self,\n in_feats,\n out_feats,\n weight=True,\n device=None,\n dropout_rate=0.0):\n super(GCMCGraphConv, self).__init__()\n self._in_feats = in_feats\n self._out_feats = out_feats\n self.device = device\n self.dropout = nn.Dropout(dropout_rate)\n\n if weight:\n self.weight = nn.Parameter(th.Tensor(in_feats, out_feats))\n else:\n self.register_parameter('weight', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n \"\"\"Reinitialize learnable parameters.\"\"\"\n if self.weight is not None:\n init.xavier_uniform_(self.weight)\n\n def forward(self, graph, feat, weight=None):\n \"\"\"Compute graph convolution.\n\n Normalizer constant :math:`c_{ij}` is stored as two node data \"ci\"\n and \"cj\".\n\n Parameters\n ----------\n graph : DGLGraph\n The graph.\n feat : torch.Tensor\n The input feature\n weight : torch.Tensor, optional\n Optional external weight tensor.\n dropout : torch.nn.Dropout, optional\n Optional external dropout layer.\n\n Returns\n -------\n torch.Tensor\n The output feature\n \"\"\"\n with graph.local_scope():\n if isinstance(feat, tuple):\n feat, _ = feat # dst feature not used\n cj = graph.srcdata['cj']\n ci = graph.dstdata['ci']\n if self.device is not None:\n cj = cj.to(self.device)\n ci = ci.to(self.device)\n if weight is not None:\n if self.weight is not None:\n raise DGLError('External weight is provided while at the same time the'\n ' module has defined its own weight parameter. Please'\n ' create the module with flag weight=False.')\n else:\n weight = self.weight\n\n if weight is not None:\n feat = dot_or_identity(feat, weight, self.device)\n\n feat = feat * self.dropout(cj)\n graph.srcdata['h'] = feat\n graph.update_all(fn.copy_src(src='h', out='m'),\n fn.sum(msg='m', out='h'))\n rst = graph.dstdata['h']\n rst = rst * ci\n\n return rst\n\nclass GCMCLayer(nn.Module):\n r\"\"\"GCMC layer\n\n .. math::\n z_j^{(l+1)} = \\sigma_{agg}\\left[\\mathrm{agg}\\left(\n \\sum_{j\\in\\mathcal{N}_1}\\frac{1}{c_{ij}}W_1h_j, \\ldots,\n \\sum_{j\\in\\mathcal{N}_R}\\frac{1}{c_{ij}}W_Rh_j\n \\right)\\right]\n\n After that, apply an extra output projection:\n\n .. 
math::\n h_j^{(l+1)} = \\sigma_{out}W_oz_j^{(l+1)}\n\n The equation is applied to both user nodes and movie nodes and the parameters\n are not shared unless ``share_user_item_param`` is true.\n\n Parameters\n ----------\n rating_vals : list of int or float\n Possible rating values.\n user_in_units : int\n Size of user input feature\n movie_in_units : int\n Size of movie input feature\n msg_units : int\n Size of message :math:`W_rh_j`\n out_units : int\n Size of of final output user and movie features\n dropout_rate : float, optional\n Dropout rate (Default: 0.0)\n agg : str, optional\n Function to aggregate messages of different ratings.\n Could be any of the supported cross type reducers:\n \"sum\", \"max\", \"min\", \"mean\", \"stack\".\n (Default: \"stack\")\n agg_act : callable, str, optional\n Activation function :math:`sigma_{agg}`. (Default: None)\n out_act : callable, str, optional\n Activation function :math:`sigma_{agg}`. (Default: None)\n share_user_item_param : bool, optional\n If true, user node and movie node share the same set of parameters.\n Require ``user_in_units`` and ``move_in_units`` to be the same.\n (Default: False)\n device: str, optional\n Which device to put data in. Useful in mix_cpu_gpu training and\n multi-gpu training\n \"\"\"\n def __init__(self,\n rating_vals,\n user_in_units,\n movie_in_units,\n msg_units,\n out_units,\n dropout_rate=0.0,\n agg='stack', # or 'sum'\n agg_act=None,\n out_act=None,\n share_user_item_param=False,\n device=None):\n super(GCMCLayer, self).__init__()\n self.rating_vals = rating_vals\n self.agg = agg\n self.share_user_item_param = share_user_item_param\n self.ufc = nn.Linear(msg_units, out_units)\n if share_user_item_param:\n self.ifc = self.ufc\n else:\n self.ifc = nn.Linear(msg_units, out_units)\n if agg == 'stack':\n # divide the original msg unit size by number of ratings to keep\n # the dimensionality\n assert msg_units % len(rating_vals) == 0\n msg_units = msg_units // len(rating_vals)\n self.dropout = nn.Dropout(dropout_rate)\n self.W_r = nn.ParameterDict()\n subConv = {}\n for rating in rating_vals:\n # PyTorch parameter name can't contain \".\"\n rating = str(rating).replace('.', '_')\n rev_rating = 'rev-%s' % rating\n if share_user_item_param and user_in_units == movie_in_units:\n self.W_r[rating] = nn.Parameter(th.randn(user_in_units, msg_units))\n self.W_r['rev-%s' % rating] = self.W_r[rating]\n subConv[rating] = GCMCGraphConv(user_in_units,\n msg_units,\n weight=False,\n device=device,\n dropout_rate=dropout_rate)\n subConv[rev_rating] = GCMCGraphConv(user_in_units,\n msg_units,\n weight=False,\n device=device,\n dropout_rate=dropout_rate)\n else:\n self.W_r = None\n subConv[rating] = GCMCGraphConv(user_in_units,\n msg_units,\n weight=True,\n device=device,\n dropout_rate=dropout_rate)\n subConv[rev_rating] = GCMCGraphConv(movie_in_units,\n msg_units,\n weight=True,\n device=device,\n dropout_rate=dropout_rate)\n self.conv = dglnn.HeteroGraphConv(subConv, aggregate=agg)\n self.agg_act = get_activation(agg_act)\n self.out_act = get_activation(out_act)\n self.device = device\n self.reset_parameters()\n\n def partial_to(self, device):\n \"\"\"Put parameters into device except W_r\n\n Parameters\n ----------\n device : torch device\n Which device the parameters are put in.\n \"\"\"\n assert device == self.device\n if device is not None:\n self.ufc.cuda(device)\n if self.share_user_item_param is False:\n self.ifc.cuda(device)\n self.dropout.cuda(device)\n\n def reset_parameters(self):\n for p in self.parameters():\n if 
p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, graph, ufeat=None, ifeat=None):\n \"\"\"Forward function\n\n Parameters\n ----------\n graph : DGLHeteroGraph\n User-movie rating graph. It should contain two node types: \"user\"\n and \"movie\" and many edge types each for one rating value.\n ufeat : torch.Tensor, optional\n User features. If None, using an identity matrix.\n ifeat : torch.Tensor, optional\n Movie features. If None, using an identity matrix.\n\n Returns\n -------\n new_ufeat : torch.Tensor\n New user features\n new_ifeat : torch.Tensor\n New movie features\n \"\"\"\n in_feats = {'user' : ufeat, 'movie' : ifeat}\n mod_args = {}\n for i, rating in enumerate(self.rating_vals):\n rating = str(rating).replace('.', '_')\n rev_rating = 'rev-%s' % rating\n mod_args[rating] = (self.W_r[rating] if self.W_r is not None else None,)\n mod_args[rev_rating] = (self.W_r[rev_rating] if self.W_r is not None else None,)\n out_feats = self.conv(graph, in_feats, mod_args=mod_args)\n ufeat = out_feats['user']\n ifeat = out_feats['movie']\n ufeat = ufeat.view(ufeat.shape[0], -1)\n ifeat = ifeat.view(ifeat.shape[0], -1)\n\n # fc and non-linear\n ufeat = self.agg_act(ufeat)\n ifeat = self.agg_act(ifeat)\n ufeat = self.dropout(ufeat)\n ifeat = self.dropout(ifeat)\n ufeat = self.ufc(ufeat)\n ifeat = self.ifc(ifeat)\n return self.out_act(ufeat), self.out_act(ifeat)\n\nclass BiDecoder(nn.Module):\n r\"\"\"Bi-linear decoder.\n\n Given a bipartite graph G, for each edge (i, j) ~ G, compute the likelihood\n of it being class r by:\n\n .. math::\n p(M_{ij}=r) = \\text{softmax}(u_i^TQ_rv_j)\n\n The trainable parameter :math:`Q_r` is further decomposed to a linear\n combination of basis weight matrices :math:`P_s`:\n\n .. math::\n Q_r = \\sum_{s=1}^{b} a_{rs}P_s\n\n Parameters\n ----------\n in_units : int\n Size of input user and movie features\n num_classes : int\n Number of classes.\n num_basis : int, optional\n Number of basis. (Default: 2)\n dropout_rate : float, optional\n Dropout raite (Default: 0.0)\n \"\"\"\n def __init__(self,\n in_units,\n num_classes,\n num_basis=2,\n dropout_rate=0.0):\n super(BiDecoder, self).__init__()\n self._num_basis = num_basis\n self.dropout = nn.Dropout(dropout_rate)\n self.Ps = nn.ParameterList()\n for i in range(num_basis):\n self.Ps.append(nn.Parameter(th.randn(in_units, in_units)))\n self.combine_basis = nn.Linear(self._num_basis, num_classes, bias=False)\n self.reset_parameters()\n\n def reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, graph, ufeat, ifeat):\n \"\"\"Forward function.\n\n Parameters\n ----------\n graph : DGLHeteroGraph\n \"Flattened\" user-movie graph with only one edge type.\n ufeat : th.Tensor\n User embeddings. Shape: (|V_u|, D)\n ifeat : th.Tensor\n Movie embeddings. Shape: (|V_m|, D)\n\n Returns\n -------\n th.Tensor\n Predicting scores for each user-movie edge.\n \"\"\"\n with graph.local_scope():\n ufeat = self.dropout(ufeat)\n ifeat = self.dropout(ifeat)\n graph.nodes['movie'].data['h'] = ifeat\n basis_out = []\n for i in range(self._num_basis):\n graph.nodes['user'].data['h'] = ufeat @ self.Ps[i]\n graph.apply_edges(fn.u_dot_v('h', 'h', 'sr'))\n basis_out.append(graph.edata['sr'])\n out = th.cat(basis_out, dim=1)\n out = self.combine_basis(out)\n return out\n\nclass DenseBiDecoder(BiDecoder):\n r\"\"\"Dense bi-linear decoder.\n\n Dense implementation of the bi-linear decoder used in GCMC. 
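(Hypothetical usage sketch, not from the original docstring: ``DenseBiDecoder(in_units=64, num_classes=5)(ufeat, ifeat)`` maps two ``(B, 64)`` embedding batches to ``(B, 5)`` class logits.) 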
Suitable when\n the graph can be efficiently represented by a pair of arrays (one for source\n nodes; one for destination nodes).\n\n Parameters\n ----------\n in_units : int\n Size of input user and movie features\n num_classes : int\n Number of classes.\n num_basis : int, optional\n Number of basis. (Default: 2)\n dropout_rate : float, optional\n Dropout raite (Default: 0.0)\n \"\"\"\n def __init__(self,\n in_units,\n num_classes,\n num_basis=2,\n dropout_rate=0.0):\n super(DenseBiDecoder, self).__init__(in_units,\n num_classes,\n num_basis,\n dropout_rate)\n\n def forward(self, ufeat, ifeat):\n \"\"\"Forward function.\n\n Compute logits for each pair ``(ufeat[i], ifeat[i])``.\n\n Parameters\n ----------\n ufeat : th.Tensor\n User embeddings. Shape: (B, D)\n ifeat : th.Tensor\n Movie embeddings. Shape: (B, D)\n\n Returns\n -------\n th.Tensor\n Predicting scores for each user-movie edge. Shape: (B, num_classes)\n \"\"\"\n ufeat = self.dropout(ufeat)\n ifeat = self.dropout(ifeat)\n basis_out = []\n for i in range(self._num_basis):\n ufeat_i = ufeat @ self.Ps[i]\n out = th.einsum('ab,ab->a', ufeat_i, ifeat)\n basis_out.append(out.unsqueeze(1))\n out = th.cat(basis_out, dim=1)\n out = self.combine_basis(out)\n return out\n\ndef dot_or_identity(A, B, device=None):\n # if A is None, treat as identity matrix\n if A is None:\n return B\n elif len(A.shape) == 1:\n if device is None:\n return B[A]\n else:\n return B[A].to(device)\n else:\n return A @ B\n" ]
[ [ "torch.nn.init.xavier_uniform_", "torch.nn.Linear", "torch.randn", "torch.nn.ParameterDict", "torch.nn.ParameterList", "torch.einsum", "torch.cat", "torch.nn.Dropout", "torch.Tensor" ] ]
xyt556/rsnet
[ "5f20f5308f89695e9f26ee4724d5591201d0c52d" ]
[ "rsnet/dataset/raster.py" ]
[ "import os\n\nimport rasterio\nimport numpy as np\n\nfrom ..utils import pair, bytescale\nfrom .base import BaseRasterData\n\n\nclass RasterSampleDataset(BaseRasterData):\n \"\"\"Dataset wrapper for remote sensing data.\n\n Args:\n fname:\n win_size:\n step_size:\n pad_size:\n band_index:\n \"\"\"\n def __init__(self,\n fname,\n win_size=512,\n step_size=512,\n pad_size=0,\n band_index=None,\n to_type=None,\n data_format='channel_last',\n transform=None):\n super().__init__(fname=fname)\n\n assert data_format in (\n 'channel_first',\n 'channel_last'), \"data format must be 'channel_first' or \"\n f\"'channel_last', but got type {data_format}\"\n self.data_format = data_format\n\n self.win_size = pair(win_size)\n self.step_size = pair(step_size)\n self.pad_size = pair(pad_size)\n\n total_band_index = [i + 1 for i in range(self.count)]\n if band_index is None:\n self.band_index = total_band_index\n else:\n assert set(band_index).issubset(set(total_band_index))\n self.band_index = band_index\n\n self.to_type = to_type\n self.window_ids = self.get_windows_info()\n self.transform = transform\n\n self.start = 0\n self.end = len(self)\n\n def get_windows_info(self):\n left, top = 0, 0\n width, height = self.width, self.height\n left_top_xy = [] # left-top corner coordinates (xmin, ymin)\n while left < width:\n if left + self.win_size[0] >= width:\n left = max(width - self.win_size[0], 0)\n top = 0\n while top < height:\n if top + self.win_size[1] >= height:\n top = max(height - self.win_size[1], 0)\n # right = min(left + self.win_size[0], width - 1)\n # bottom = min(top + self.win_size[1], height - 1)\n # save\n left_top_xy.append((left, top))\n if top + self.win_size[1] >= height:\n break\n else:\n top += self.step_size[1]\n\n if left + self.win_size[0] >= width:\n break\n else:\n left += self.step_size[0]\n\n return left_top_xy\n\n def sample(self, x, y):\n \"\"\"Get the values of dataset at certain positions.\n \"\"\"\n xmin, ymin = x, y\n xsize, ysize = self.win_size\n xpad, ypad = self.pad_size\n\n xmin -= xpad\n ymin -= ypad\n left, top = 0, 0\n if xmin < 0:\n xmin = 0\n xsize += xpad\n left = xpad\n elif xmin + xsize + 2 * xpad > self.width:\n xsize += xpad\n else:\n xsize += 2 * xpad\n\n if ymin < 0:\n ymin = 0\n ysize += ypad\n top = ypad\n elif ymin + ysize + 2 * ypad > self.height:\n ysize += ypad\n else:\n ysize += 2 * ypad\n\n # col_off, row_off, width, height\n window = rasterio.windows.Window(xmin, ymin, xsize, ysize)\n\n # with rasterio.open(self.image_file) as src:\n # bands = [src.read(k, window=tile_window) for k in self.band_index]\n # tile_image = np.stack(bands, axis=-1)\n bands = [self._band.read(k, window=window) for k in self.band_index]\n if self.to_type and np.dtype(self.to_type) != np.dtype(self.dtype):\n bmin, bmax = self.minmax\n msks = [\n self._band.read_masks(k, window=window)\n for k in self.band_index\n ]\n bands = [\n bytescale(b, msk, bmin[i], bmax[i], dtype=self.to_type)\n for i, (b, msk) in enumerate(zip(bands, msks))\n ]\n\n tile_image = np.stack(bands, axis=-1)\n img = np.zeros(\n (self.win_size[0] + 2 * xpad, self.win_size[0] + 2 * ypad,\n len(self.band_index)),\n dtype=tile_image.dtype)\n img[top:top + ysize, left:left + xsize] = tile_image\n\n if self.data_format == 'channel_first':\n img = img.transpose(2, 0, 1)\n\n return img\n\n def __getitem__(self, idx):\n x, y = self.window_ids[idx]\n img = self.sample(x, y)\n if self.transform is not None:\n img = self.transform(img)\n\n return img, x, y\n\n def __len__(self):\n return len(self.window_ids)\n\n 
@property\n def step(self):\n return self.step_size\n\n @property\n def pad(self):\n return self.pad_size\n" ]
[ [ "numpy.stack", "numpy.dtype" ] ]
KainRasleafar/sedfitter
[ "4f0e9e46f7903a853166835bb74857cc15eef219" ]
[ "sedfitter/sed/sed.py" ]
[ "from __future__ import print_function, division\n\nimport os\n\nimport numpy as np\nfrom astropy import log\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom scipy.interpolate import interp1d\nfrom astropy import units as u\n\nfrom ..utils.validator import validate_array\n\nfrom .helpers import parse_unit_safe, assert_allclose_quantity, convert_flux\n\n__all__ = ['SED']\n\n\nclass SED(object):\n\n def __init__(self):\n\n # Metadata\n self.name = None\n self.distance = None\n\n # Spectral info\n self.wav = None\n self.nu = None\n\n # Apertures\n self.apertures = None\n\n # Fluxes\n self.flux = None\n self.error = None\n\n def __eq__(self, other):\n\n try:\n\n assert self.name == other.name\n\n assert_allclose_quantity(self.distance, other.distance)\n\n assert_allclose_quantity(self.wav, other.wav)\n assert_allclose_quantity(self.nu, other.nu)\n\n assert_allclose_quantity(self.apertures, other.apertures)\n\n assert_allclose_quantity(self.flux, other.flux)\n assert_allclose_quantity(self.error, other.error)\n\n except AssertionError:\n raise\n return False\n else:\n return True\n\n def copy(self):\n from copy import deepcopy\n return deepcopy(self)\n\n def scale_to_distance(self, distance):\n \"\"\"\n Returns the SED scaled to distance `distance`\n\n Parameters\n ----------\n distance : float\n The distance in cm\n\n Returns\n -------\n sed : SED\n The SED, scaled to the new distance\n \"\"\"\n sed = self.copy()\n sed.distance = distance * u.cm\n sed.flux = sed.flux * (self.distance.to(u.cm) / sed.distance) ** 2\n sed.error = sed.error * (self.distance.to(u.cm) / sed.distance) ** 2\n return sed\n\n def scale_to_av(self, av, law):\n sed = self.copy()\n sed.flux = sed.flux * 10. ** (av * law(sed.wav))\n sed.error = sed.error * 10. ** (av * law(sed.wav))\n return sed\n\n @property\n def wav(self):\n \"\"\"\n The wavelengths at which the SED is defined\n \"\"\"\n if self._wav is None and self._nu is not None:\n return self._nu.to(u.micron, equivalencies=u.spectral())\n else:\n return self._wav\n\n @wav.setter\n def wav(self, value):\n if value is None:\n self._wav = None\n else:\n self._wav = validate_array('wav', value, domain='positive', ndim=1,\n shape=None if self.nu is None else (len(self.nu),),\n physical_type='length')\n\n @property\n def nu(self):\n \"\"\"\n The frequencies at which the SED is defined\n \"\"\"\n if self._nu is None and self._wav is not None:\n return self._wav.to(u.Hz, equivalencies=u.spectral())\n else:\n return self._nu\n\n @nu.setter\n def nu(self, value):\n if value is None:\n self._nu = None\n else:\n self._nu = validate_array('nu', value, domain='positive', ndim=1,\n shape=None if self.wav is None else (len(self.wav),),\n physical_type='frequency')\n\n @property\n def apertures(self):\n \"\"\"\n The apertures at which the SED is defined\n \"\"\"\n return self._apertures\n\n @apertures.setter\n def apertures(self, value):\n if value is None:\n self._apertures = None\n else:\n self._apertures = validate_array('apertures', value, domain='positive',\n ndim=1, physical_type='length')\n\n @property\n def flux(self):\n \"\"\"\n The SED fluxes\n \"\"\"\n return self._flux\n\n @flux.setter\n def flux(self, value):\n if value is None:\n self._flux = value\n else:\n self._flux = validate_array('flux', value, ndim=2,\n shape=(self.n_ap, self.n_wav),\n physical_type=('power', 'flux', 'spectral flux density'))\n\n @property\n def error(self):\n \"\"\"\n The convolved flux errors\n \"\"\"\n return self._error\n\n @error.setter\n def error(self, value):\n if 
value is None:\n self._error = value\n else:\n self._error = validate_array('error', value, ndim=2,\n shape=(self.n_ap, self.n_wav),\n physical_type=('power', 'flux', 'spectral flux density'))\n\n @property\n def n_ap(self):\n if self.apertures is None:\n return 1\n else:\n return len(self.apertures)\n\n @property\n def n_wav(self):\n if self.wav is None:\n return None\n else:\n return len(self.wav)\n\n @classmethod\n def read(cls, filename, unit_wav=u.micron, unit_freq=u.Hz,\n unit_flux=u.erg / u.cm ** 2 / u.s, order='nu'):\n \"\"\"\n Read an SED from a FITS file.\n\n Parameters\n ----------\n filename: str\n The name of the file to read the SED from.\n unit_wav: `~astropy.units.Unit`, optional\n The units to convert the wavelengths to.\n unit_freq: `~astropy.units.Unit`, optional\n The units to convert the frequency to.\n unit_flux: `~astropy.units.Unit`, optional\n The units to convert the flux to.\n order: str, optional\n Whether to sort the SED by increasing wavelength (`wav`) or\n frequency ('nu').\n \"\"\"\n\n # Instantiate SED class\n sed = cls()\n\n # Assume that the filename may be missing the .gz extension\n if not os.path.exists(filename) and os.path.exists(filename + '.gz'):\n filename += \".gz\"\n\n # Open FILE file\n hdulist = fits.open(filename, memmap=False)\n\n # Extract model name\n sed.name = hdulist[0].header['MODEL']\n\n # Check if distance is specified in header, otherwise assume 1kpc\n if 'DISTANCE' in hdulist[0].header:\n sed.distance = hdulist[0].header['DISTANCE'] * u.cm\n else:\n log.debug(\"No distance found in SED file, assuming 1kpc\")\n sed.distance = 1. * u.kpc\n\n # Extract SED values\n wav = hdulist[1].data.field('WAVELENGTH') * parse_unit_safe(hdulist[1].columns[0].unit)\n nu = hdulist[1].data.field('FREQUENCY') * parse_unit_safe(hdulist[1].columns[1].unit)\n ap = hdulist[2].data.field('APERTURE') * parse_unit_safe(hdulist[2].columns[0].unit)\n flux = hdulist[3].data.field('TOTAL_FLUX') * parse_unit_safe(hdulist[3].columns[0].unit)\n error = hdulist[3].data.field('TOTAL_FLUX_ERR') * parse_unit_safe(hdulist[3].columns[1].unit)\n\n # Set SED attributes\n sed.apertures = ap\n\n # Convert wavelength and frequencies to requested units\n sed.wav = wav.to(unit_wav)\n sed.nu = nu.to(unit_freq)\n\n # Set fluxes\n sed.flux = convert_flux(nu, flux, unit_flux, distance=sed.distance)\n sed.error = convert_flux(nu, error, unit_flux, distance=sed.distance)\n\n # Sort SED\n\n if order not in ('nu', 'wav'):\n raise ValueError('order should be nu or wav')\n\n if (order == 'nu' and sed.nu[0] > sed.nu[-1]) or \\\n (order == 'wav' and sed.wav[0] > sed.wav[-1]):\n sed.wav = sed.wav[::-1]\n sed.nu = sed.nu[::-1]\n sed.flux = sed.flux[..., ::-1]\n sed.error = sed.error[..., ::-1]\n\n return sed\n\n def write(self, filename, overwrite=False):\n \"\"\"\n Write an SED to a FITS file.\n\n Parameters\n ----------\n filename: str\n The name of the file to write the SED to.\n \"\"\"\n\n # Create first HDU with meta-data\n hdu0 = fits.PrimaryHDU()\n\n if self.name is None:\n raise ValueError(\"Model name is not set\")\n else:\n hdu0.header['MODEL'] = self.name\n\n if self.distance is None:\n raise ValueError(\"Model distance is not set\")\n else:\n hdu0.header['DISTANCE'] = self.distance.to(u.cm).value\n\n hdu0.header['NAP'] = self.n_ap\n hdu0.header['NWAV'] = self.n_wav\n\n # Create wavelength table\n twav = Table()\n if self.wav is None:\n raise ValueError(\"Wavelengths are not set\")\n else:\n twav['WAVELENGTH'] = self.wav\n if self.nu is None:\n raise ValueError(\"Frequencies are 
not set\")\n else:\n twav['FREQUENCY'] = self.nu\n twav.sort('FREQUENCY')\n\n # TODO: here sorting needs to be applied to fluxes too?\n\n hdu1 = fits.BinTableHDU(np.array(twav))\n hdu1.columns[0].unit = self.wav.unit.to_string(format='fits')\n hdu1.columns[1].unit = self.nu.unit.to_string(format='fits')\n hdu1.header['EXTNAME'] = \"WAVELENGTHS\"\n\n # Create aperture table\n tap = Table()\n if self.apertures is None:\n tap['APERTURE'] = [1.e-30]\n else:\n tap['APERTURE'] = self.apertures\n hdu2 = fits.BinTableHDU(np.array(tap))\n if self.apertures is None:\n hdu2.columns[0].unit = 'cm'\n else:\n hdu2.columns[0].unit = self.apertures.unit.to_string(format='fits')\n hdu2.header['EXTNAME'] = \"APERTURES\"\n\n # Create flux table\n tflux = Table()\n if self.flux is None:\n raise ValueError(\"Fluxes are not set\")\n else:\n tflux['TOTAL_FLUX'] = self.flux\n if self.error is None:\n raise ValueError(\"Errors are not set\")\n else:\n tflux['TOTAL_FLUX_ERR'] = self.error\n hdu3 = fits.BinTableHDU(np.array(tflux))\n hdu3.columns[0].unit = self.flux.unit.to_string(format='fits')\n hdu3.columns[1].unit = self.error.unit.to_string(format='fits')\n hdu3.header['EXTNAME'] = \"SEDS\"\n\n hdus = [hdu0, hdu1, hdu2, hdu3]\n\n # Create overall FITS file\n hdulist = fits.HDUList(hdus)\n hdulist.writeto(filename, overwrite=overwrite)\n\n def interpolate(self, apertures):\n \"\"\"\n Interpolate the SED to different apertures\n \"\"\"\n\n # If there is only one aperture, we can't interpolate, we can only repeat\n if self.n_ap == 1:\n return np.repeat(self.flux[0, :], len(apertures)).reshape(self.n_wav, len(apertures))\n\n # Create interpolating function\n flux_interp = interp1d(self.apertures, self.flux.swapaxes(0, 1))\n\n # If any apertures are larger than the defined max, reset to max\n apertures[apertures > self.apertures.max()] = self.apertures.max()\n\n # If any apertures are smaller than the defined min, raise Exception\n if np.any(apertures < self.apertures.min()):\n raise Exception(\"Aperture(s) requested too small\")\n\n return flux_interp(apertures)\n\n def interpolate_variable(self, wavelengths, apertures):\n \"\"\"\n Interpolate the SED to a variable aperture as a function of\n wavelength. This method should be called with an interpolating\n function for aperture as a function of wavelength, in log10 space.\n \"\"\"\n\n if self.n_ap == 1:\n return self.flux[0, :]\n\n sed_apertures = self.apertures.to(u.au).value\n sed_wav = self.wav.to(u.micron).value\n\n # If any apertures are larger than the defined max, reset to max\n apertures[apertures > sed_apertures.max()] = sed_apertures.max() * 0.999\n\n # If any apertures are smaller than the defined min, raise Exception\n if np.any(apertures < sed_apertures.min()):\n raise Exception(\"Aperture(s) requested too small\")\n\n # Find wavelength order\n order = np.argsort(wavelengths)\n\n # Interpolate apertures vs wavelength\n log10_ap_interp = interp1d(np.log10(wavelengths[order]), np.log10(apertures[order]), bounds_error=False, fill_value=np.nan)\n\n # Create interpolating function\n flux_interp = interp1d(sed_apertures, self.flux.swapaxes(0, 1))\n\n # Interpolate the apertures\n apertures = 10. ** log10_ap_interp(np.log10(sed_wav))\n\n # Extrapolate on either side\n apertures[np.log10(sed_wav) < log10_ap_interp.x[0]] = 10. ** log10_ap_interp.y[0]\n apertures[np.log10(sed_wav) > log10_ap_interp.x[-1]] = 10. 
** log10_ap_interp.y[-1]\n\n # Interpolate and return only diagonal elements\n return flux_interp(apertures).diagonal()\n" ]
[ [ "numpy.array", "numpy.log10", "numpy.argsort" ] ]
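A minimal usage sketch for the SED class in the sedfitter/sed/sed.py entry above; the FITS file name is a hypothetical placeholder, and this assumes sedfitter and astropy are installed:

```python
# Hypothetical usage of the SED class above; 'model_0001.fits' is an
# assumed file name, not one shipped with the package.
from astropy import units as u
from sedfitter.sed import SED

sed = SED.read('model_0001.fits', unit_wav=u.micron,
               unit_flux=u.erg / u.cm ** 2 / u.s, order='wav')

# scale_to_distance expects the new distance as a float in cm
sed_far = sed.scale_to_distance((2. * u.kpc).to(u.cm).value)
print(sed_far.distance, sed_far.flux.shape)  # flux has shape (n_ap, n_wav)
```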
Rick0514/VPR_SMCN
[ "7a00dc8e4de0c21438474c05a4a7be18d05367fa" ]
[ "main/MCN.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport main.utils as utils\nimport time\n\n# ---------------------------- Notes ----------------------------------\n# Python re-implementation of MCN\n# ---------------------------- Notes ----------------------------------\n\n\nclass MCNParams:\n \"\"\"\n a struct defining the input params the MCN class uses\n \"\"\"\n def __init__(self, probAddCon, nCellPerCol, nConPerCol,\n minColActivity, nColPerPattern, kActiveCol):\n self.probAddCon = probAddCon\n self.nCellPerCol = nCellPerCol\n self.nConPerCol = nConPerCol\n self.minColActivity = minColActivity\n self.nColPerPattern = nColPerPattern\n self.kActiveCol = kActiveCol\n\n\nclass MCN:\n\n def __init__(self, params):\n\n # an MCNParams instance defines the params\n self.params = params\n\n self.nCols = 0\n self.winnerCells = []\n self.prevWinnerCells = []\n\n self.FF = np.empty((self.params.nConPerCol, self.nCols), dtype=int)\n self.P = np.empty((self.params.nCellPerCol, self.nCols), dtype=bool)\n self.prevP = np.empty_like(self.P, dtype=bool)\n\n self.burstedCol = np.empty((self.nCols, ), dtype=bool)\n self.predicitionConnections = []\n\n def prepareNewIteration(self):\n\n # winnerCells and P need to reset each time\n self.prevWinnerCells = self.winnerCells\n self.prevP = self.P\n\n self.winnerCells = []\n if self.nCols > 0:\n self.P = np.zeros_like(self.P)\n self.burstedCol = np.zeros_like(self.burstedCol)\n\n\n def resetPredP(self):\n self.prevP = np.empty((self.params.nCellPerCol, self.nCols), dtype=bool)\n\n def createNewColumn(self, inputSDR, nNewColumn):\n\n nonZeroIdx = np.where(inputSDR > 0)[0]\n\n start_id = self.nCols\n for i in range(nNewColumn):\n self.nCols += 1\n\n sampleIdx = np.random.randint(0, len(nonZeroIdx), self.params.nConPerCol)\n tmp = nonZeroIdx[sampleIdx].reshape((-1, 1))\n self.FF = np.concatenate((self.FF, tmp), axis=1)\n\n newPcol = np.zeros((self.params.nCellPerCol, 1), dtype=bool)\n self.P = np.concatenate((self.P, newPcol), axis=1)\n self.prevP = np.concatenate((self.prevP, newPcol), axis=1)\n self.burstedCol = np.concatenate((self.burstedCol, np.array([0], dtype=bool)))\n for k in range(nNewColumn * self.params.nCellPerCol):\n self.predicitionConnections.append([])\n\n return np.arange(start_id, self.nCols)\n\n\n def compute(self, inputSDR, supressLearningFlag):\n \"\"\"\n compute sequence descriptor\n :param inputSDR:\n :param supressLearningFlag: in case of inference, not learning\n :return:\n \"\"\"\n\n self.prepareNewIteration()\n\n # compare SDR with minicolumn\n simScore = np.sum(inputSDR[self.FF], axis=0) / self.params.nConPerCol\n sort_idx = np.argsort(simScore)\n topk_sort_idx = sort_idx[-self.params.kActiveCol:]\n topk_sort_score = simScore[topk_sort_idx]\n if not supressLearningFlag:\n # if all activities below threshold, then create a new\n # activity and make it active\n # otherwise select the top k most active ones\n if len(simScore):\n activeCols = topk_sort_idx[topk_sort_score > self.params.minColActivity]\n # activeCols = np.array(self.getActiveCols(simScore, supressLearningFlag), dtype=int)\n else:\n activeCols = np.empty((0, ), dtype=int)\n\n activeCols = np.concatenate((activeCols, self.createNewColumn(inputSDR, max(0, self.params.nColPerPattern - len(activeCols)))))\n\n else:\n # in non-learning mode, take the k most active columns\n # activeCols = np.array(self.getActiveCols(simScore, supressLearningFlag), dtype=int)\n activeCols = topk_sort_idx\n # if len(activeCols) == 0:\n # sort_idx = np.argsort(simScore)\n # activeCols = 
sort_idx[-self.params.nColPerPattern:]\n\n for eachActiveCol in activeCols:\n predictedIdx = np.where(self.prevP[:, eachActiveCol] > 0)[0]\n\n if len(predictedIdx):\n for each_predictedIdx in predictedIdx:\n self.activatePredictions(eachActiveCol, each_predictedIdx)\n self.winnerCells.append(eachActiveCol * self.params.nCellPerCol + each_predictedIdx)\n else:\n winnerCell = self.burst(eachActiveCol, supressLearningFlag)\n for each in winnerCell:\n self.winnerCells.append(eachActiveCol * self.params.nCellPerCol + each)\n\n if not supressLearningFlag:\n self.learnPreditions()\n # predict newly learned preditions, i think it's useless\n for colIdx in range(self.nCols):\n if self.burstedCol[colIdx]:\n for i in range(self.params.nCellPerCol):\n self.activatePredictions(colIdx, i)\n\n return self.winnerCells\n\n def activatePredictions(self, colIdx, cellIdx):\n predIdx = self.predicitionConnections[colIdx * self.params.nCellPerCol + cellIdx]\n for each in predIdx:\n c = each // self.params.nCellPerCol\n r = each % self.params.nCellPerCol\n self.P[r, c] = True\n\n def burst(self, colIdx, supressLearningFlag):\n\n self.burstedCol[colIdx] = True\n for i in range(self.params.nCellPerCol):\n self.activatePredictions(colIdx, i)\n\n # winnerCell is the cells with fewest connections with other cells\n st = colIdx * self.params.nCellPerCol\n nCon = []\n for i in range(self.params.nCellPerCol):\n nCon.append(len(self.predicitionConnections[st + i]))\n\n if not supressLearningFlag:\n # inhibit winning cells from the last iteration\n for i in self.prevWinnerCells:\n col = i // self.params.nCellPerCol\n if col == colIdx:\n nCon[i % self.params.nCellPerCol] += self.params.nCellPerCol\n\n # find the fewest ones\n candidateIdx = [0]\n minV = nCon[0]\n for i in range(1, len(nCon)):\n if nCon[i] < minV:\n candidateIdx = [i]\n minV = nCon[i]\n elif nCon[i] == minV:\n candidateIdx.append(i)\n\n nCan = len(candidateIdx)\n\n if nCan == 1:\n return [candidateIdx[0]]\n else:\n chosenIdx = np.random.randint(0, nCan, 1)\n return [candidateIdx[chosenIdx[0]]]\n\n else:\n # in case of inference, return all used winner cells\n winnerIdx = np.where(np.array(nCon) > 0)[0]\n if len(winnerIdx):\n return winnerIdx\n\n return [np.random.randint(0, self.params.nCellPerCol, 1)[0]]\n\n\n def learnPreditions(self):\n\n for prevIdx in self.prevWinnerCells:\n prevIdxCol = prevIdx // self.params.nCellPerCol\n for curIdx in self.winnerCells:\n curIdxCol = curIdx // self.params.nCellPerCol\n if prevIdxCol == curIdxCol:\n continue\n\n existingPredConFlag = self.checkExistingPredCon(prevIdxCol, curIdx)\n if not existingPredConFlag or np.random.rand() <= self.params.probAddCon:\n if curIdx not in self.predicitionConnections[prevIdx]:\n self.predicitionConnections[prevIdx].append(curIdx)\n\n\n def checkExistingPredCon(self, prevColIdx, curCellIdx):\n st = prevColIdx * self.params.nCellPerCol\n for i in range(self.params.nCellPerCol):\n if curCellIdx in self.predicitionConnections[st + i]:\n return True\n\n return False\n\n\n def visualizeCon(self, displayCol=10):\n\n plt.figure()\n dis = 5\n dCol = displayCol\n plt.title('Prediction Connections')\n plt.xlim(0, dCol * dis)\n plt.ylim(0, self.params.nCellPerCol * dis)\n\n for k, con in enumerate(self.predicitionConnections):\n x = k // self.params.nCellPerCol * dis\n if x >= dCol * dis:\n break\n y = k % self.params.nCellPerCol\n y = (self.params.nCellPerCol - 1 - y) * dis\n plt.plot(x, y, 'o', color='blue')\n if len(con):\n for each in con:\n cx = each // self.params.nCellPerCol * dis\n cy = 
each % self.params.nCellPerCol\n cy = (self.params.nCellPerCol - 1 - cy) * dis\n plt.plot([x, cx], [y, cy], '-', color='red')\n\n\n\ndef getSim(w1, w2):\n \"\"\"\n\n :param w1: winner cell which should be a list\n :param w2:\n :return: simularity score\n \"\"\"\n w1 = set(w1)\n w2 = set(w2)\n return len(w1 & w2) / len(w1 | w2)\n\n\ndef runMCN(params, dbFeat, qFeat, gt):\n\n # st = time.time()\n _, old_dims = dbFeat.shape\n new_dims = 8192\n P = np.random.rand(old_dims, new_dims // 2)\n P /= np.linalg.norm(P, axis=1, keepdims=True)\n\n D1_slsbh = utils.getLSBH(dbFeat, P, 0.25)\n D2_slsbh = utils.getLSBH(qFeat, P, 0.25)\n\n mcn = MCN(params)\n train_winnerCells = []\n for i in range(D1_slsbh.shape[0]):\n train_winnerCells.append(mcn.compute(D1_slsbh[i, :], False))\n\n valid_winnerCells = []\n mcn.resetPredP()\n for i in range(D2_slsbh.shape[0]):\n valid_winnerCells.append(mcn.compute(D2_slsbh[i, :], True))\n\n # print('Done! cost : %.3f' % (time.time() - st))\n # get similarity matrix\n S_mcn = np.zeros((dbFeat.shape[0], qFeat.shape[0]))\n for k1, each_v in enumerate(valid_winnerCells):\n for k2, each_t in enumerate(train_winnerCells):\n S_mcn[k2, k1] = getSim(each_v, each_t)\n # time_cost = time.time() - st\n # P, R = utils.drawPR(S_mcn, gt)\n # ap = utils.calAvgPred(P, R)\n del train_winnerCells, valid_winnerCells, mcn\n\n return S_mcn\n\ndef runMCN_SDR(params, dbFeat, qFeat, gt):\n\n mcn = MCN(params)\n train_winnerCells = []\n for i in range(dbFeat.shape[0]):\n train_winnerCells.append(mcn.compute(dbFeat[i, :], False))\n\n valid_winnerCells = []\n mcn.resetPredP()\n for i in range(qFeat.shape[0]):\n valid_winnerCells.append(mcn.compute(qFeat[i, :], True))\n\n # print('Done! cost : %.3f' % (time.time() - st))\n # get similarity matrix\n S_mcn = np.zeros((dbFeat.shape[0], qFeat.shape[0]))\n for k1, each_v in enumerate(valid_winnerCells):\n for k2, each_t in enumerate(train_winnerCells):\n S_mcn[k2, k1] = getSim(each_v, each_t)\n # time_cost = time.time() - st\n # P, R = utils.drawPR(S_mcn, gt)\n # ap = utils.calAvgPred(P, R)\n del train_winnerCells, valid_winnerCells, mcn\n\n return S_mcn\n" ]
[ [ "numpy.zeros_like", "numpy.sum", "numpy.empty", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.concatenate", "numpy.argsort", "matplotlib.pyplot.xlim", "numpy.empty_like", "numpy.arange", "matplotlib.pyplot.title", "numpy.random.rand", "matplotlib.pyplot.ylim", "numpy.array", "matplotlib.pyplot.plot", "numpy.where", "numpy.random.randint", "numpy.linalg.norm" ] ]
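A hedged smoke test for the MCN entry above; the parameter values and SDR density are illustrative assumptions, not settings prescribed by the repository, and it presumes the repository's main package is importable:

```python
# Illustrative parameters only; none of these values come from the repo.
import numpy as np
from main.MCN import MCN, MCNParams, getSim

params = MCNParams(probAddCon=0.5, nCellPerCol=32, nConPerCol=20,
                   minColActivity=0.75, nColPerPattern=5, kActiveCol=10)
mcn = MCN(params)

sdr = (np.random.rand(8192) > 0.975).astype(np.uint8)  # sparse binary input
winners_learn = mcn.compute(sdr, supressLearningFlag=False)  # learning pass
winners_infer = mcn.compute(sdr, supressLearningFlag=True)   # inference pass
print(getSim(winners_learn, winners_infer))  # Jaccard overlap of winner cells
```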
FynnBe/pytorch-3dunet
[ "34918e82c3afeff02360b03964de973eac3a4f75" ]
[ "pytorch3dunet/augment/transforms.py" ]
[ "import importlib\n\nimport numpy as np\nimport torch\nfrom scipy.ndimage import rotate, map_coordinates, gaussian_filter\nfrom scipy.ndimage.filters import convolve\nfrom skimage.filters import gaussian\nfrom skimage.segmentation import find_boundaries\nfrom torchvision.transforms import Compose\n\n# WARN: use fixed random state for reproducibility; if you want to randomize on each run seed with `time.time()` e.g.\nGLOBAL_RANDOM_STATE = np.random.RandomState(47)\n\n\nclass RandomFlip:\n \"\"\"\n Randomly flips the image across the given axes. Image can be either 3D (DxHxW) or 4D (CxDxHxW).\n\n When creating make sure that the provided RandomStates are consistent between raw and labeled datasets,\n otherwise the models won't converge.\n \"\"\"\n\n def __init__(self, random_state, axis_prob=0.5, **kwargs):\n assert random_state is not None, 'RandomState cannot be None'\n self.random_state = random_state\n self.axes = (0, 1, 2)\n self.axis_prob = axis_prob\n\n def __call__(self, m):\n assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'\n\n for axis in self.axes:\n if self.random_state.uniform() > self.axis_prob:\n if m.ndim == 3:\n m = np.flip(m, axis)\n else:\n channels = [np.flip(m[c], axis) for c in range(m.shape[0])]\n m = np.stack(channels, axis=0)\n\n return m\n\n\nclass RandomRotate90:\n \"\"\"\n Rotate an array by 90 degrees around a randomly chosen plane. Image can be either 3D (DxHxW) or 4D (CxDxHxW).\n\n When creating make sure that the provided RandomStates are consistent between raw and labeled datasets,\n otherwise the models won't converge.\n\n IMPORTANT: assumes DHW axis order (that's why rotation is performed across (1,2) axis)\n \"\"\"\n\n def __init__(self, random_state, **kwargs):\n self.random_state = random_state\n # always rotate around z-axis\n self.axis = (1, 2)\n\n def __call__(self, m):\n assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'\n\n # pick number of rotations at random\n k = self.random_state.randint(0, 4)\n # rotate k times around a given plane\n if m.ndim == 3:\n m = np.rot90(m, k, self.axis)\n else:\n channels = [np.rot90(m[c], k, self.axis) for c in range(m.shape[0])]\n m = np.stack(channels, axis=0)\n\n return m\n\n\nclass RandomRotate:\n \"\"\"\n Rotate an array by a random degrees from taken from (-angle_spectrum, angle_spectrum) interval.\n Rotation axis is picked at random from the list of provided axes.\n \"\"\"\n\n def __init__(self, random_state, angle_spectrum=30, axes=None, mode='reflect', order=0, **kwargs):\n if axes is None:\n axes = [(1, 0), (2, 1), (2, 0)]\n else:\n assert isinstance(axes, list) and len(axes) > 0\n\n self.random_state = random_state\n self.angle_spectrum = angle_spectrum\n self.axes = axes\n self.mode = mode\n self.order = order\n\n def __call__(self, m):\n axis = self.axes[self.random_state.randint(len(self.axes))]\n angle = self.random_state.randint(-self.angle_spectrum, self.angle_spectrum)\n\n if m.ndim == 3:\n m = rotate(m, angle, axes=axis, reshape=False, order=self.order, mode=self.mode, cval=-1)\n else:\n channels = [rotate(m[c], angle, axes=axis, reshape=False, order=self.order, mode=self.mode, cval=-1) for c\n in range(m.shape[0])]\n m = np.stack(channels, axis=0)\n\n return m\n\n\nclass RandomContrast:\n \"\"\"\n Adjust contrast by scaling each voxel to `mean + alpha * (v - mean)`.\n \"\"\"\n\n def __init__(self, random_state, alpha=(0.5, 1.5), mean=0.0, execution_probability=0.1, **kwargs):\n self.random_state = random_state\n assert len(alpha) == 2\n 
self.alpha = alpha\n self.mean = mean\n self.execution_probability = execution_probability\n\n def __call__(self, m):\n if self.random_state.uniform() < self.execution_probability:\n alpha = self.random_state.uniform(self.alpha[0], self.alpha[1])\n result = self.mean + alpha * (m - self.mean)\n return np.clip(result, -1, 1)\n\n return m\n\n\n# it's relatively slow, i.e. ~1s per patch of size 64x200x200, so use multiple workers in the DataLoader\n# remember to use spline_order=0 when transforming the labels\nclass ElasticDeformation:\n \"\"\"\n Apply elasitc deformations of 3D patches on a per-voxel mesh. Assumes ZYX axis order (or CZYX if the data is 4D).\n Based on: https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62\n \"\"\"\n\n def __init__(self, random_state, spline_order, alpha=2000, sigma=50, execution_probability=0.1, apply_3d=True,\n **kwargs):\n \"\"\"\n :param spline_order: the order of spline interpolation (use 0 for labeled images)\n :param alpha: scaling factor for deformations\n :param sigma: smoothing factor for Gaussian filter\n :param execution_probability: probability of executing this transform\n :param apply_3d: if True apply deformations in each axis\n \"\"\"\n self.random_state = random_state\n self.spline_order = spline_order\n self.alpha = alpha\n self.sigma = sigma\n self.execution_probability = execution_probability\n self.apply_3d = apply_3d\n\n def __call__(self, m):\n if self.random_state.uniform() < self.execution_probability:\n assert m.ndim in [3, 4]\n\n if m.ndim == 3:\n volume_shape = m.shape\n else:\n volume_shape = m[0].shape\n\n if self.apply_3d:\n dz = gaussian_filter(self.random_state.randn(*volume_shape), self.sigma, mode=\"reflect\") * self.alpha\n else:\n dz = np.zeros_like(m)\n\n dy, dx = [\n gaussian_filter(\n self.random_state.randn(*volume_shape),\n self.sigma, mode=\"reflect\"\n ) * self.alpha for _ in range(2)\n ]\n\n z_dim, y_dim, x_dim = volume_shape\n z, y, x = np.meshgrid(np.arange(z_dim), np.arange(y_dim), np.arange(x_dim), indexing='ij')\n indices = z + dz, y + dy, x + dx\n\n if m.ndim == 3:\n return map_coordinates(m, indices, order=self.spline_order, mode='reflect')\n else:\n channels = [map_coordinates(c, indices, order=self.spline_order, mode='reflect') for c in m]\n return np.stack(channels, axis=0)\n\n return m\n\n\ndef blur_boundary(boundary, sigma):\n boundary = gaussian(boundary, sigma=sigma)\n boundary[boundary >= 0.5] = 1\n boundary[boundary < 0.5] = 0\n return boundary\n\n\nclass CropToFixed:\n def __init__(self, random_state, size=(256, 256), centered=False, **kwargs):\n self.random_state = random_state\n self.crop_y, self.crop_x = size\n self.centered = centered\n\n def __call__(self, m):\n def _padding(pad_total):\n half_total = pad_total // 2\n return (half_total, pad_total - half_total)\n\n def _rand_range_and_pad(crop_size, max_size):\n \"\"\"\n Returns a tuple:\n max_value (int) for the corner dimension. 
The corner dimension is chosen as `self.random_state(max_value)`\n pad (int): padding in both directions; if crop_size is lt max_size the pad is 0\n \"\"\"\n if crop_size < max_size:\n return max_size - crop_size, (0, 0)\n else:\n return 1, _padding(crop_size - max_size)\n\n def _start_and_pad(crop_size, max_size):\n if crop_size < max_size:\n return (max_size - crop_size) // 2, (0, 0)\n else:\n return 0, _padding(crop_size - max_size)\n\n _, y, x = m.shape\n\n if not self.centered:\n y_range, y_pad = _rand_range_and_pad(self.crop_y, y)\n x_range, x_pad = _rand_range_and_pad(self.crop_x, x)\n\n y_start = self.random_state.randint(y_range)\n x_start = self.random_state.randint(x_range)\n\n else:\n y_start, y_pad = _start_and_pad(self.crop_y, y)\n x_start, x_pad = _start_and_pad(self.crop_x, x)\n\n result = m[:, y_start:y_start + self.crop_y, x_start:x_start + self.crop_x]\n return np.pad(result, pad_width=((0, 0), y_pad, x_pad), mode='reflect')\n\n\nclass AbstractLabelToBoundary:\n AXES_TRANSPOSE = [\n (0, 1, 2), # X\n (0, 2, 1), # Y\n (2, 0, 1) # Z\n ]\n\n def __init__(self, ignore_index=None, aggregate_affinities=False, append_label=False, **kwargs):\n \"\"\"\n :param ignore_index: label to be ignored in the output, i.e. after computing the boundary the label ignore_index\n will be restored where is was in the patch originally\n :param aggregate_affinities: aggregate affinities with the same offset across Z,Y,X axes\n :param append_label: if True append the orignal ground truth labels to the last channel\n :param blur: Gaussian blur the boundaries\n :param sigma: standard deviation for Gaussian kernel\n \"\"\"\n self.ignore_index = ignore_index\n self.aggregate_affinities = aggregate_affinities\n self.append_label = append_label\n\n def __call__(self, m):\n \"\"\"\n Extract boundaries from a given 3D label tensor.\n :param m: input 3D tensor\n :return: binary mask, with 1-label corresponding to the boundary and 0-label corresponding to the background\n \"\"\"\n assert m.ndim == 3\n\n kernels = self.get_kernels()\n boundary_arr = [np.where(np.abs(convolve(m, kernel)) > 0, 1, 0) for kernel in kernels]\n channels = np.stack(boundary_arr)\n results = []\n if self.aggregate_affinities:\n assert len(kernels) % 3 == 0, \"Number of kernels must be divided by 3 (one kernel per offset per Z,Y,X axes\"\n # aggregate affinities with the same offset\n for i in range(0, len(kernels), 3):\n # merge across X,Y,Z axes (logical OR)\n xyz_aggregated_affinities = np.logical_or.reduce(channels[i:i + 3, ...]).astype(np.int)\n # recover ignore index\n xyz_aggregated_affinities = _recover_ignore_index(xyz_aggregated_affinities, m, self.ignore_index)\n results.append(xyz_aggregated_affinities)\n else:\n results = [_recover_ignore_index(channels[i], m, self.ignore_index) for i in range(channels.shape[0])]\n\n if self.append_label:\n # append original input data\n results.append(m)\n\n # stack across channel dim\n return np.stack(results, axis=0)\n\n @staticmethod\n def create_kernel(axis, offset):\n # create conv kernel\n k_size = offset + 1\n k = np.zeros((1, 1, k_size), dtype=np.int)\n k[0, 0, 0] = 1\n k[0, 0, offset] = -1\n return np.transpose(k, axis)\n\n def get_kernels(self):\n raise NotImplementedError\n\n\nclass StandardLabelToBoundary:\n def __init__(self, ignore_index=None, append_label=False, blur=False, sigma=1, mode='thick', blobs=False, **kwargs):\n self.ignore_index = ignore_index\n self.append_label = append_label\n self.blur = blur\n self.sigma = sigma\n self.mode = mode\n self.blobs = blobs\n\n 
def __call__(self, m):\n assert m.ndim == 3\n\n boundaries = find_boundaries(m, connectivity=2, mode=self.mode)\n if self.blur:\n boundaries = blur_boundary(boundaries, self.sigma)\n\n results = []\n if self.blobs:\n blobs = (m > 0).astype('uint8')\n results.append(_recover_ignore_index(blobs, m, self.ignore_index))\n\n results.append(_recover_ignore_index(boundaries, m, self.ignore_index))\n\n if self.append_label:\n # append original input data\n results.append(m)\n\n return np.stack(results, axis=0)\n\n\nclass BlobsWithBoundary:\n def __init__(self, mode=None, append_label=False, blur=False, sigma=1, **kwargs):\n if mode is None:\n mode = ['thick', 'inner', 'outer']\n self.mode = mode\n self.append_label = append_label\n self.blur = blur\n self.sigma = sigma\n\n def __call__(self, m):\n assert m.ndim == 3\n\n # get the segmentation mask\n results = [(m > 0).astype('uint8')]\n\n for bm in self.mode:\n boundary = find_boundaries(m, connectivity=2, mode=bm)\n if self.blur:\n boundary = blur_boundary(boundary, self.sigma)\n results.append(boundary)\n\n if self.append_label:\n results.append(m)\n\n return np.stack(results, axis=0)\n\n\nclass BlobsToMask:\n \"\"\"\n Returns binary mask from labeled image, i.e. every label greater than 0 is treated as foreground.\n\n \"\"\"\n\n def __init__(self, append_label=False, boundary=False, cross_entropy=False, **kwargs):\n self.cross_entropy = cross_entropy\n self.boundary = boundary\n self.append_label = append_label\n\n def __call__(self, m):\n assert m.ndim == 3\n\n # get the segmentation mask\n mask = (m > 0).astype('uint8')\n results = [mask]\n\n if self.boundary:\n outer = find_boundaries(m, connectivity=2, mode='outer')\n if self.cross_entropy:\n # boundary is class 2\n mask[outer > 0] = 2\n results = [mask]\n else:\n results.append(outer)\n\n if self.append_label:\n results.append(m)\n\n return np.stack(results, axis=0)\n\n\nclass RandomLabelToAffinities(AbstractLabelToBoundary):\n \"\"\"\n Converts a given volumetric label array to binary mask corresponding to borders between labels.\n One specify the max_offset (thickness) of the border. Then the offset is picked at random every time you call\n the transformer (offset is picked form the range 1:max_offset) for each axis and the boundary computed.\n One may use this scheme in order to make the network more robust against various thickness of borders in the ground\n truth (think of it as a boundary denoising scheme).\n \"\"\"\n\n def __init__(self, random_state, max_offset=10, ignore_index=None, append_label=False, z_offset_scale=2, **kwargs):\n super().__init__(ignore_index=ignore_index, append_label=append_label, aggregate_affinities=False)\n self.random_state = random_state\n self.offsets = tuple(range(1, max_offset + 1))\n self.z_offset_scale = z_offset_scale\n\n def get_kernels(self):\n rand_offset = self.random_state.choice(self.offsets)\n axis_ind = self.random_state.randint(3)\n # scale down z-affinities due to anisotropy\n if axis_ind == 2:\n rand_offset = max(1, rand_offset // self.z_offset_scale)\n\n rand_axis = self.AXES_TRANSPOSE[axis_ind]\n # return a single kernel\n return [self.create_kernel(rand_axis, rand_offset)]\n\n\nclass LabelToAffinities(AbstractLabelToBoundary):\n \"\"\"\n Converts a given volumetric label array to binary mask corresponding to borders between labels (which can be seen\n as an affinity graph: https://arxiv.org/pdf/1706.00120.pdf)\n One specify the offsets (thickness) of the border. 
The boundary will be computed via the convolution operator.\n \"\"\"\n\n def __init__(self, offsets, ignore_index=None, append_label=False, aggregate_affinities=False, z_offsets=None,\n **kwargs):\n super().__init__(ignore_index=ignore_index, append_label=append_label,\n aggregate_affinities=aggregate_affinities)\n\n assert isinstance(offsets, list) or isinstance(offsets, tuple), 'offsets must be a list or a tuple'\n assert all(a > 0 for a in offsets), \"'offsets must be positive\"\n assert len(set(offsets)) == len(offsets), \"'offsets' must be unique\"\n if z_offsets is not None:\n assert len(offsets) == len(z_offsets), 'z_offsets length must be the same as the length of offsets'\n else:\n # if z_offsets is None just use the offsets for z-affinities\n z_offsets = list(offsets)\n self.z_offsets = z_offsets\n\n self.kernels = []\n # create kernel for every axis-offset pair\n for xy_offset, z_offset in zip(offsets, z_offsets):\n for axis_ind, axis in enumerate(self.AXES_TRANSPOSE):\n final_offset = xy_offset\n if axis_ind == 2:\n final_offset = z_offset\n # create kernels for a given offset in every direction\n self.kernels.append(self.create_kernel(axis, final_offset))\n\n def get_kernels(self):\n return self.kernels\n\n\nclass LabelToZAffinities(AbstractLabelToBoundary):\n \"\"\"\n Converts a given volumetric label array to binary mask corresponding to borders between labels (which can be seen\n as an affinity graph: https://arxiv.org/pdf/1706.00120.pdf)\n One specify the offsets (thickness) of the border. The boundary will be computed via the convolution operator.\n \"\"\"\n\n def __init__(self, offsets, ignore_index=None, append_label=False, **kwargs):\n super().__init__(ignore_index=ignore_index, append_label=append_label)\n\n assert isinstance(offsets, list) or isinstance(offsets, tuple), 'offsets must be a list or a tuple'\n assert all(a > 0 for a in offsets), \"'offsets must be positive\"\n assert len(set(offsets)) == len(offsets), \"'offsets' must be unique\"\n\n self.kernels = []\n z_axis = self.AXES_TRANSPOSE[2]\n # create kernels\n for z_offset in offsets:\n self.kernels.append(self.create_kernel(z_axis, z_offset))\n\n def get_kernels(self):\n return self.kernels\n\n\nclass LabelToBoundaryAndAffinities:\n \"\"\"\n Combines the StandardLabelToBoundary and LabelToAffinities in the hope\n that that training the network to predict both would improve the main task: boundary prediction.\n \"\"\"\n\n def __init__(self, xy_offsets, z_offsets, append_label=False, blur=False, sigma=1, ignore_index=None, mode='thick',\n blobs=False, **kwargs):\n # blur only StandardLabelToBoundary results; we don't want to blur the affinities\n self.l2b = StandardLabelToBoundary(blur=blur, sigma=sigma, ignore_index=ignore_index, mode=mode, blobs=blobs)\n self.l2a = LabelToAffinities(offsets=xy_offsets, z_offsets=z_offsets, append_label=append_label,\n ignore_index=ignore_index)\n\n def __call__(self, m):\n boundary = self.l2b(m)\n affinities = self.l2a(m)\n return np.concatenate((boundary, affinities), axis=0)\n\n\nclass FlyWingBoundary:\n \"\"\"\n Use if the volume contains a single pixel boundaries between labels. 
Gives the single pixel boundary in the 1st\n channel and the 'thick' boundary in the 2nd channel and optional z-affinities\n \"\"\"\n\n def __init__(self, append_label=False, thick_boundary=True, ignore_index=None, z_offsets=None, **kwargs):\n self.append_label = append_label\n self.thick_boundary = thick_boundary\n self.ignore_index = ignore_index\n self.lta = None\n if z_offsets is not None:\n self.lta = LabelToZAffinities(z_offsets, ignore_index=ignore_index)\n\n def __call__(self, m):\n boundary = (m == 0).astype('uint8')\n results = [boundary]\n\n if self.thick_boundary:\n t_boundary = find_boundaries(m, connectivity=1, mode='outer', background=0)\n results.append(t_boundary)\n\n if self.lta is not None:\n z_affs = self.lta(m)\n for z_aff in z_affs:\n results.append(z_aff)\n\n if self.ignore_index is not None:\n for b in results:\n b[m == self.ignore_index] = self.ignore_index\n\n if self.append_label:\n # append original input data\n results.append(m)\n\n return np.stack(results, axis=0)\n\n\nclass LabelToMaskAndAffinities:\n def __init__(self, xy_offsets, z_offsets, append_label=False, background=0, ignore_index=None, **kwargs):\n self.background = background\n self.l2a = LabelToAffinities(offsets=xy_offsets, z_offsets=z_offsets, append_label=append_label,\n ignore_index=ignore_index)\n\n def __call__(self, m):\n mask = m > self.background\n mask = np.expand_dims(mask.astype(np.uint8), axis=0)\n affinities = self.l2a(m)\n return np.concatenate((mask, affinities), axis=0)\n\n\nclass Standardize:\n \"\"\"\n Apply Z-score normalization to a given input tensor, i.e. re-scaling the values to be 0-mean and 1-std.\n Mean and std parameter have to be provided explicitly.\n \"\"\"\n\n def __init__(self, mean, std, eps=1e-6, **kwargs):\n self.mean = mean\n self.std = std\n self.eps = eps\n\n def __call__(self, m):\n return (m - self.mean) / np.clip(self.std, a_min=self.eps, a_max=None)\n\n\nclass Normalize:\n \"\"\"\n Apply simple min-max scaling to a given input tensor, i.e. shrinks the range of the data in a fixed range of [-1, 1].\n \"\"\"\n\n def __init__(self, min_value, max_value, **kwargs):\n assert max_value > min_value\n self.min_value = min_value\n self.value_range = max_value - min_value\n\n def __call__(self, m):\n norm_0_1 = (m - self.min_value) / self.value_range\n return np.clip(2 * norm_0_1 - 1, -1, 1)\n\n\nclass AdditiveGaussianNoise:\n def __init__(self, random_state, scale=(0.0, 1.0), execution_probability=0.1, **kwargs):\n self.execution_probability = execution_probability\n self.random_state = random_state\n self.scale = scale\n\n def __call__(self, m):\n if self.random_state.uniform() < self.execution_probability:\n std = self.random_state.uniform(self.scale[0], self.scale[1])\n gaussian_noise = self.random_state.normal(0, std, size=m.shape)\n return m + gaussian_noise\n return m\n\n\nclass AdditivePoissonNoise:\n def __init__(self, random_state, lam=(0.0, 1.0), execution_probability=0.1, **kwargs):\n self.execution_probability = execution_probability\n self.random_state = random_state\n self.lam = lam\n\n def __call__(self, m):\n if self.random_state.uniform() < self.execution_probability:\n lam = self.random_state.uniform(self.lam[0], self.lam[1])\n poisson_noise = self.random_state.poisson(lam, size=m.shape)\n return m + poisson_noise\n return m\n\n\nclass ToTensor:\n \"\"\"\n Converts a given input numpy.ndarray into torch.Tensor. 
Adds additional 'channel' axis when the input is 3D\n and expand_dims=True (use for raw data of the shape (D, H, W)).\n \"\"\"\n\n def __init__(self, expand_dims, dtype=np.float32, **kwargs):\n self.expand_dims = expand_dims\n self.dtype = dtype\n\n def __call__(self, m):\n assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'\n # add channel dimension\n if self.expand_dims and m.ndim == 3:\n m = np.expand_dims(m, axis=0)\n\n return torch.from_numpy(m.astype(dtype=self.dtype))\n\n\nclass Relabel:\n \"\"\"\n Relabel a numpy array of labels into a consecutive numbers, e.g.\n [10,10, 0, 6, 6] -> [2, 2, 0, 1, 1]. Useful when one has an instance segmentation volume\n at hand and would like to create a one-hot-encoding for it. Without a consecutive labeling the task would be harder.\n \"\"\"\n\n def __init__(self, **kwargs):\n pass\n\n def __call__(self, m):\n _, unique_labels = np.unique(m, return_inverse=True)\n m = unique_labels.reshape(m.shape)\n return m\n\n\nclass Identity:\n def __init__(self, **kwargs):\n pass\n\n def __call__(self, m):\n return m\n\n\ndef get_transformer(config, min_value, max_value, mean, std):\n base_config = {'min_value': min_value, 'max_value': max_value, 'mean': mean, 'std': std}\n return Transformer(config, base_config)\n\n\nclass Transformer:\n def __init__(self, phase_config, base_config):\n self.phase_config = phase_config\n self.config_base = base_config\n self.seed = GLOBAL_RANDOM_STATE.randint(10000000)\n\n def raw_transform(self):\n return self._create_transform('raw')\n\n def label_transform(self):\n return self._create_transform('label')\n\n def weight_transform(self):\n return self._create_transform('weight')\n\n @staticmethod\n def _transformer_class(class_name):\n m = importlib.import_module('pytorch3dunet.augment.transforms')\n clazz = getattr(m, class_name)\n return clazz\n\n def _create_transform(self, name):\n assert name in self.phase_config, f'Could not find {name} transform'\n return Compose([\n self._create_augmentation(c) for c in self.phase_config[name]\n ])\n\n def _create_augmentation(self, c):\n config = dict(self.config_base)\n config.update(c)\n config['random_state'] = np.random.RandomState(self.seed)\n aug_class = self._transformer_class(config['name'])\n return aug_class(**config)\n\n\ndef _recover_ignore_index(input, orig, ignore_index):\n if ignore_index is not None:\n mask = orig == ignore_index\n input[mask] = ignore_index\n\n return input\n" ]
[ [ "numpy.zeros_like", "numpy.transpose", "numpy.zeros", "numpy.concatenate", "scipy.ndimage.filters.convolve", "numpy.random.RandomState", "numpy.arange", "scipy.ndimage.map_coordinates", "numpy.clip", "numpy.rot90", "numpy.expand_dims", "numpy.flip", "numpy.logical_or.reduce", "numpy.stack", "numpy.pad", "scipy.ndimage.rotate", "numpy.unique" ] ]
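A small sketch of wiring the augmentations in the entry above into a pipeline by hand (the Transformer class builds this from a config); the patch shape and parameter values are illustrative assumptions:

```python
# Assumed patch shape and parameter values, for illustration only.
import numpy as np
from torchvision.transforms import Compose
from pytorch3dunet.augment.transforms import (
    RandomFlip, RandomRotate90, Standardize, ToTensor)

seed = 47  # raw and label pipelines must share the seed to stay aligned
raw_transform = Compose([
    Standardize(mean=0.5, std=0.1),
    RandomFlip(np.random.RandomState(seed)),
    RandomRotate90(np.random.RandomState(seed)),
    ToTensor(expand_dims=True),
])

patch = np.random.rand(32, 64, 64).astype('float32')  # (D, H, W) raw patch
print(raw_transform(patch).shape)  # torch.Size([1, 32, 64, 64])
```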
krevas/ET-BERT
[ "464ce3e7942d4450f55021e267ceb9dd48a36b1f" ]
[ "uer/layers/layer_norm.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass LayerNorm(nn.Module):\n \"\"\"\n Layer Normalization.\n https://arxiv.org/abs/1607.06450\n \"\"\"\n def __init__(self, hidden_size, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.eps = eps\n self.gamma = nn.Parameter(torch.ones(hidden_size))\n self.beta = nn.Parameter(torch.zeros(hidden_size))\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n hidden_states = self.gamma * (x - mean) / (std + self.eps)\n\n return hidden_states + self.beta\n\n\nclass T5LayerNorm(nn.Module):\n \"\"\"\n Construct a layernorm module in the T5 style: no bias and no subtraction of the mean.\n \"\"\"\n def __init__(self, hidden_size, eps=1e-6):\n\n super().__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, hidden_states):\n # layer norm should always be calculated in float32\n variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)\n hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)\n\n return self.weight * hidden_states.type_as(self.weight)\n" ]
[ [ "torch.zeros", "torch.rsqrt", "torch.ones" ] ]
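For orientation, a quick shape check of the two normalization layers in the entry above; the hidden size and batch shape are arbitrary assumptions:

```python
# Arbitrary shapes for a sanity check of the layers above.
import torch
from uer.layers.layer_norm import LayerNorm, T5LayerNorm

x = torch.randn(2, 8, 768)          # (batch, seq_len, hidden_size)
ln, t5_ln = LayerNorm(768), T5LayerNorm(768)
print(ln(x).shape, t5_ln(x).shape)  # both preserve the input shape
```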
xupsh/pp4fpgas-cn-hls
[ "d14bd0769ce7f9674f206faf93b7622c5bf905bf" ]
[ "hw/ip/mono_fm/transform.py" ]
[ "import numpy as np\ndetection_file = 'samples.npy'\ndetections = None\nif detection_file is not None:\n    detections = np.load(detection_file)\nnp.savetxt('samples.txt', detections, fmt='%0.18f')\n\nf = open('samples.txt')\nout = open('complex.txt', \"w\")\nlines = f.readlines()\nfor line in lines:\n for i in line:\n if i == \"+\":\n out.write(\" \")\n elif i == \"-\":\n out.write(\" -\")\n elif i in (\"(\", \")\", \"j\"):\n pass  # drop the complex-number punctuation\n else:\n out.write(i)\n #out.write(\"\\n\")\n\n #print(line)\n\nf.close()\nout.close()\n" ]
[ [ "numpy.savetxt", "numpy.load" ] ]
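The character loop in the entry above strips the '(re+imj)' formatting that np.savetxt gives complex samples, leaving two whitespace-separated reals per line. A roughly equivalent vectorized sketch, under the same samples.npy assumption:

```python
# Assumes samples.npy holds a 1-D complex array, as the script above does.
import numpy as np

detections = np.load('samples.npy')
cols = np.column_stack((detections.real, detections.imag))
np.savetxt('complex.txt', cols, fmt='%0.18f')  # one "re im" pair per line
```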
Emieeel/OpenFermion
[ "865d8591cad9b0681f6dd25a391a5292ed2de1d4" ]
[ "src/openfermion/utils/rdm_mapping_functions_test.py" ]
[ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for rdm_mapping_functions.py\"\"\"\nimport os\nimport unittest\n\nimport numpy\nimport h5py\nfrom openfermion.config import DATA_DIRECTORY, THIS_DIRECTORY\nfrom openfermion.chem import MolecularData\nfrom openfermion.utils.rdm_mapping_functions import (\n kronecker_delta, map_two_pdm_to_two_hole_dm, map_two_pdm_to_one_pdm,\n map_one_pdm_to_one_hole_dm, map_one_hole_dm_to_one_pdm,\n map_two_pdm_to_particle_hole_dm, map_two_hole_dm_to_two_pdm,\n map_two_hole_dm_to_one_hole_dm, map_particle_hole_dm_to_one_pdm,\n map_particle_hole_dm_to_two_pdm)\n\n\nclass RDMMappingTest(unittest.TestCase):\n\n def setUp(self):\n # load files and marginals from testing folder\n tqdm_h2_sto3g = os.path.join(THIS_DIRECTORY,\n 'testing/tqdm_H2_sto-3g_singlet_1.4.hdf5')\n with h5py.File(tqdm_h2_sto3g, 'r') as fid:\n self.tqdm_h2_sto3g = fid['tqdm'][...]\n\n phdm_h2_sto3g = os.path.join(THIS_DIRECTORY,\n 'testing/phdm_H2_sto-3g_singlet_1.4.hdf5')\n with h5py.File(phdm_h2_sto3g, 'r') as fid:\n self.phdm_h2_sto3g = fid['phdm'][...]\n\n tqdm_h2_6_31g = os.path.join(THIS_DIRECTORY,\n 'testing/tqdm_H2_6-31g_singlet_0.75.hdf5')\n with h5py.File(tqdm_h2_6_31g, 'r') as fid:\n self.tqdm_h2_6_31g = fid['tqdm'][...]\n\n phdm_h2_6_31g = os.path.join(THIS_DIRECTORY,\n 'testing/phdm_H2_6-31g_singlet_0.75.hdf5')\n with h5py.File(phdm_h2_6_31g, 'r') as fid:\n self.phdm_h2_6_31g = fid['phdm'][...]\n\n tqdm_lih_sto3g = os.path.join(\n THIS_DIRECTORY, 'testing/tqdm_H1-Li1_sto-3g_singlet_1.45.hdf5')\n with h5py.File(tqdm_lih_sto3g, 'r') as fid:\n self.tqdm_lih_sto3g = fid['tqdm'][...]\n\n phdm_lih_sto3g = os.path.join(\n THIS_DIRECTORY, 'testing/phdm_H1-Li1_sto-3g_singlet_1.45.hdf5')\n with h5py.File(phdm_lih_sto3g, 'r') as fid:\n self.phdm_lih_sto3g = fid['phdm'][...]\n\n def test_kronecker_delta_00(self):\n assert kronecker_delta(0, 0) == 1\n\n def test_kronecker_delta_01(self):\n assert kronecker_delta(0, 1) == 0\n\n def test_kronecker_delta_10(self):\n assert kronecker_delta(1, 0) == 0\n\n def test_kronecker_delta_11(self):\n assert kronecker_delta(1, 1) == 1\n\n def test_kronecker_delta_nonunit_args(self):\n assert kronecker_delta(3, 3) == 1\n\n def test_tpdm_to_opdm(self):\n # for all files in datadirectory check if this map holds\n for file in filter(lambda x: x.endswith(\".hdf5\"),\n os.listdir(DATA_DIRECTORY)):\n molecule = MolecularData(\n filename=os.path.join(DATA_DIRECTORY, file))\n if (molecule.fci_one_rdm is not None and\n molecule.fci_two_rdm is not None):\n test_opdm = map_two_pdm_to_one_pdm(molecule.fci_two_rdm,\n molecule.n_electrons)\n assert numpy.allclose(test_opdm, molecule.fci_one_rdm)\n\n def test_opdm_to_oqdm(self):\n for file in filter(lambda x: x.endswith(\".hdf5\"),\n os.listdir(DATA_DIRECTORY)):\n molecule = MolecularData(\n filename=os.path.join(DATA_DIRECTORY, file))\n if molecule.fci_one_rdm is not None:\n test_oqdm = map_one_pdm_to_one_hole_dm(molecule.fci_one_rdm)\n true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm\n assert 
numpy.allclose(test_oqdm, true_oqdm)\n\n def test_oqdm_to_opdm(self):\n for file in filter(lambda x: x.endswith(\".hdf5\"),\n os.listdir(DATA_DIRECTORY)):\n molecule = MolecularData(\n filename=os.path.join(DATA_DIRECTORY, file))\n if molecule.fci_one_rdm is not None:\n true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm\n test_opdm = map_one_hole_dm_to_one_pdm(true_oqdm)\n assert numpy.allclose(test_opdm, molecule.fci_one_rdm)\n\n def test_tqdm_conversions_h2_631g(self):\n # construct the 2-hole-RDM for LiH the slow way\n # TODO: speed up this calculation by directly contracting from the wf.\n filename = \"H2_6-31g_singlet_0.75.hdf5\"\n molecule = MolecularData(\n filename=os.path.join(DATA_DIRECTORY, filename))\n true_tqdm = self.tqdm_h2_6_31g\n\n test_tqdm = map_two_pdm_to_two_hole_dm(molecule.fci_two_rdm,\n molecule.fci_one_rdm)\n assert numpy.allclose(true_tqdm, test_tqdm)\n\n true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm\n test_oqdm = map_two_hole_dm_to_one_hole_dm(\n true_tqdm, molecule.n_qubits - molecule.n_electrons)\n assert numpy.allclose(true_oqdm, test_oqdm)\n\n test_tpdm = map_two_hole_dm_to_two_pdm(true_tqdm, molecule.fci_one_rdm)\n assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)\n\n def test_tqdm_conversions_h2_sto3g(self):\n filename = \"H2_sto-3g_singlet_1.4.hdf5\"\n molecule = MolecularData(\n filename=os.path.join(DATA_DIRECTORY, filename))\n true_tqdm = self.tqdm_h2_sto3g\n\n test_tqdm = map_two_pdm_to_two_hole_dm(molecule.fci_two_rdm,\n molecule.fci_one_rdm)\n assert numpy.allclose(true_tqdm, test_tqdm)\n\n true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm\n test_oqdm = map_two_hole_dm_to_one_hole_dm(\n true_tqdm, molecule.n_qubits - molecule.n_electrons)\n assert numpy.allclose(true_oqdm, test_oqdm)\n\n test_tpdm = map_two_hole_dm_to_two_pdm(true_tqdm, molecule.fci_one_rdm)\n assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)\n\n def test_tqdm_conversions_lih_sto3g(self):\n filename = \"H1-Li1_sto-3g_singlet_1.45.hdf5\"\n molecule = MolecularData(\n filename=os.path.join(DATA_DIRECTORY, filename))\n true_tqdm = self.tqdm_lih_sto3g\n\n test_tqdm = map_two_pdm_to_two_hole_dm(molecule.fci_two_rdm,\n molecule.fci_one_rdm)\n assert numpy.allclose(true_tqdm, test_tqdm)\n\n true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm\n test_oqdm = map_two_hole_dm_to_one_hole_dm(\n true_tqdm, molecule.n_qubits - molecule.n_electrons)\n assert numpy.allclose(true_oqdm, test_oqdm)\n\n test_tpdm = map_two_hole_dm_to_two_pdm(true_tqdm, molecule.fci_one_rdm)\n assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)\n\n def test_phdm_conversions_h2_631g(self):\n filename = \"H2_6-31g_singlet_0.75.hdf5\"\n molecule = MolecularData(\n filename=os.path.join(DATA_DIRECTORY, filename))\n true_phdm = self.phdm_h2_6_31g\n\n test_phdm = map_two_pdm_to_particle_hole_dm(molecule.fci_two_rdm,\n molecule.fci_one_rdm)\n assert numpy.allclose(test_phdm, true_phdm)\n\n test_opdm = map_particle_hole_dm_to_one_pdm(true_phdm,\n molecule.n_electrons,\n molecule.n_qubits)\n assert numpy.allclose(test_opdm, molecule.fci_one_rdm)\n\n test_tpdm = map_particle_hole_dm_to_two_pdm(true_phdm,\n molecule.fci_one_rdm)\n assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)\n\n def test_phdm_conversions_h2_sto3g(self):\n filename = \"H2_sto-3g_singlet_1.4.hdf5\"\n molecule = MolecularData(\n filename=os.path.join(DATA_DIRECTORY, filename))\n true_phdm = self.phdm_h2_sto3g\n\n test_phdm = map_two_pdm_to_particle_hole_dm(molecule.fci_two_rdm,\n 
molecule.fci_one_rdm)\n assert numpy.allclose(test_phdm, true_phdm)\n\n test_opdm = map_particle_hole_dm_to_one_pdm(true_phdm,\n molecule.n_electrons,\n molecule.n_qubits)\n assert numpy.allclose(test_opdm, molecule.fci_one_rdm)\n\n test_tpdm = map_particle_hole_dm_to_two_pdm(true_phdm,\n molecule.fci_one_rdm)\n assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)\n\n def test_phdm_conversions_lih_sto3g(self):\n filename = \"H1-Li1_sto-3g_singlet_1.45.hdf5\"\n molecule = MolecularData(\n filename=os.path.join(DATA_DIRECTORY, filename))\n true_phdm = self.phdm_lih_sto3g\n\n test_phdm = map_two_pdm_to_particle_hole_dm(molecule.fci_two_rdm,\n molecule.fci_one_rdm)\n assert numpy.allclose(test_phdm, true_phdm)\n\n test_opdm = map_particle_hole_dm_to_one_pdm(true_phdm,\n molecule.n_electrons,\n molecule.n_qubits)\n assert numpy.allclose(test_opdm, molecule.fci_one_rdm)\n\n test_tpdm = map_particle_hole_dm_to_two_pdm(true_phdm,\n molecule.fci_one_rdm)\n assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)\n" ]
[ [ "numpy.allclose", "numpy.eye" ] ]
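The identity these tests exercise can be checked directly; this sketch uses a molecule file that the test module itself references, so it should exist in OpenFermion's data directory:

```python
# Contract the 2-RDM down to the 1-RDM and compare against the FCI 1-RDM.
import os
import numpy
from openfermion.config import DATA_DIRECTORY
from openfermion.chem import MolecularData
from openfermion.utils.rdm_mapping_functions import map_two_pdm_to_one_pdm

molecule = MolecularData(
    filename=os.path.join(DATA_DIRECTORY, 'H2_sto-3g_singlet_1.4.hdf5'))
opdm = map_two_pdm_to_one_pdm(molecule.fci_two_rdm, molecule.n_electrons)
print(numpy.allclose(opdm, molecule.fci_one_rdm))  # expected: True
```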
NTR0314/botorch
[ "f0310c9a415947f3264dac7f3438744784843323" ]
[ "botorch/test_functions/multi_objective.py" ]
[ "#! /usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nr\"\"\"\nMulti-objective optimization benchmark problems.\n\nReferences\n\n.. [Deb2005dtlz]\n K. Deb, L. Thiele, M. Laumanns, E. Zitzler, A. Abraham, L. Jain, R. Goldberg.\n \"Scalable test problems for evolutionary multi-objective optimization\"\n in Evolutionary Multiobjective Optimization, London, U.K.: Springer-Verlag,\n pp. 105-145, 2005.\n\n.. [GarridoMerchan2020]\n E. C. Garrido-Merchán and D. Hernández-Lobato. Parallel Predictive Entropy\n Search for Multi-objective Bayesian Optimization with Constraints.\n arXiv e-prints, arXiv:2004.00601, Apr. 2020.\n\n.. [Gelbart2014]\n Michael A. Gelbart, Jasper Snoek, and Ryan P. Adams. 2014. Bayesian\n optimization with unknown constraints. In Proceedings of the Thirtieth\n Conference on Uncertainty in Artificial Intelligence (UAI’14).\n AUAI Press, Arlington, Virginia, USA, 250–259.\n\n.. [Oszycka1995]\n A. Osyczka, S. Kundu. 1995. A new method to solve generalized multicriteria\n optimization problems using the simple genetic algorithm. In Structural\n Optimization 10. 94–99.\n\n.. [Tanabe2020]\n Ryoji Tanabe, Hisao Ishibuchi, An easy-to-use real-world multi-objective\n optimization problem suite, Applied Soft Computing, Volume 89, 2020.\n\n.. [Yang2019a]\n K. Yang, M. Emmerich, A. Deutz, and T. Bäck. 2019.\n \"Multi-Objective Bayesian Global Optimization using expected hypervolume\n improvement gradient\" in Swarm and evolutionary computation 44, pp. 945--956,\n 2019.\n\n.. [Zitzler2000]\n E. Zitzler, K. Deb, and L. Thiele, “Comparison of multiobjective\n evolutionary algorithms: Empirical results,” Evol. Comput., vol. 8, no. 2,\n pp. 
173–195, 2000.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport math\nfrom typing import Optional\n\nimport torch\nfrom botorch.test_functions.base import (\n ConstrainedBaseTestProblem,\n MultiObjectiveTestProblem,\n)\nfrom botorch.test_functions.synthetic import Branin\nfrom botorch.utils.sampling import sample_hypersphere, sample_simplex\nfrom botorch.utils.transforms import unnormalize\nfrom scipy.special import gamma\nfrom torch import Tensor\n\n\nclass BraninCurrin(MultiObjectiveTestProblem):\n r\"\"\"Two objective problem composed of the Branin and Currin functions.\n\n Branin (rescaled):\n\n f(x) = (\n 15*x_1 - 5.1 * (15 * x_0 - 5) ** 2 / (4 * pi ** 2) + 5 * (15 * x_0 - 5)\n / pi - 5\n ) ** 2 + (10 - 10 / (8 * pi)) * cos(15 * x_0 - 5))\n\n Currin:\n\n f(x) = (1 - exp(-1 / (2 * x_1))) * (\n 2300 * x_0 ** 3 + 1900 * x_0 ** 2 + 2092 * x_0 + 60\n ) / 100 * x_0 ** 3 + 500 * x_0 ** 2 + 4 * x_0 + 20\n\n \"\"\"\n\n dim = 2\n num_objectives = 2\n _bounds = [(0.0, 1.0), (0.0, 1.0)]\n _ref_point = [18.0, 6.0]\n _max_hv = 59.36011874867746 # this is approximated using NSGA-II\n\n def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:\n r\"\"\"Constructor for Branin-Currin.\n\n Args:\n noise_std: Standard deviation of the observation noise.\n negate: If True, negate the objectives.\n \"\"\"\n super().__init__(noise_std=noise_std, negate=negate)\n self._branin = Branin()\n\n def _rescaled_branin(self, X: Tensor) -> Tensor:\n # return to Branin bounds\n x_0 = 15 * X[..., 0] - 5\n x_1 = 15 * X[..., 1]\n return self._branin(torch.stack([x_0, x_1], dim=-1))\n\n @staticmethod\n def _currin(X: Tensor) -> Tensor:\n x_0 = X[..., 0]\n x_1 = X[..., 1]\n factor1 = 1 - torch.exp(-1 / (2 * x_1))\n numer = 2300 * x_0.pow(3) + 1900 * x_0.pow(2) + 2092 * x_0 + 60\n denom = 100 * x_0.pow(3) + 500 * x_0.pow(2) + 4 * x_0 + 20\n return factor1 * numer / denom\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n # branin rescaled with inputsto [0,1]^2\n branin = self._rescaled_branin(X=X)\n currin = self._currin(X=X)\n return torch.stack([branin, currin], dim=-1)\n\n\nclass DTLZ(MultiObjectiveTestProblem):\n r\"\"\"Base class for DTLZ problems.\n\n See [Deb2005dtlz]_ for more details on DTLZ.\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n num_objectives: int = 2,\n noise_std: Optional[float] = None,\n negate: bool = False,\n ) -> None:\n if dim <= num_objectives:\n raise ValueError(\n f\"dim must be > num_objectives, but got {dim} and {num_objectives}\"\n )\n self.num_objectives = num_objectives\n self.dim = dim\n self.k = self.dim - self.num_objectives + 1\n self._bounds = [(0.0, 1.0) for _ in range(self.dim)]\n self._ref_point = [self._ref_val for _ in range(num_objectives)]\n super().__init__(noise_std=noise_std, negate=negate)\n\n\nclass DTLZ1(DTLZ):\n r\"\"\"DLTZ1 test problem.\n\n d-dimensional problem evaluated on `[0, 1]^d`:\n\n f_0(x) = 0.5 * x_0 * (1 + g(x))\n f_1(x) = 0.5 * (1 - x_0) * (1 + g(x))\n g(x) = 100 * \\sum_{i=m}^{n-1} (\n k + (x_i - 0.5)^2 - cos(20 * pi * (x_i - 0.5))\n )\n\n where k = n - m + 1.\n\n The pareto front is given by the line (or hyperplane) \\sum_i f_i(x) = 0.5.\n The goal is to minimize both objectives. 
The reference point comes from [Yang2019]_.\n \"\"\"\n\n _ref_val = 400.0\n\n @property\n def _max_hv(self) -> float:\n return self._ref_val ** self.num_objectives - 1 / 2 ** self.num_objectives\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n X_m = X[..., -self.k :]\n X_m_minus_half = X_m - 0.5\n sum_term = (\n X_m_minus_half.pow(2) - torch.cos(20 * math.pi * X_m_minus_half)\n ).sum(dim=-1)\n g_X_m = 100 * (self.k + sum_term)\n g_X_m_term = 0.5 * (1 + g_X_m)\n fs = []\n for i in range(self.num_objectives):\n idx = self.num_objectives - 1 - i\n f_i = g_X_m_term * X[..., :idx].prod(dim=-1)\n if i > 0:\n f_i *= 1 - X[..., idx]\n fs.append(f_i)\n return torch.stack(fs, dim=-1)\n\n def gen_pareto_front(self, n: int) -> Tensor:\n r\"\"\"Generate `n` pareto optimal points.\n\n The pareto points randomly sampled from the hyperplane sum_i f(x_i) = 0.5.\n \"\"\"\n f_X = 0.5 * sample_simplex(\n n=n,\n d=self.num_objectives,\n qmc=True,\n dtype=self.ref_point.dtype,\n device=self.ref_point.device,\n )\n if self.negate:\n f_X *= -1\n return f_X\n\n\nclass DTLZ2(DTLZ):\n r\"\"\"DLTZ2 test problem.\n\n d-dimensional problem evaluated on `[0, 1]^d`:\n\n f_0(x) = (1 + g(x)) * cos(x_0 * pi / 2)\n f_1(x) = (1 + g(x)) * sin(x_0 * pi / 2)\n g(x) = \\sum_{i=m}^{n-1} (x_i - 0.5)^2\n\n The pareto front is given by the unit hypersphere \\sum{i} f_i^2 = 1.\n Note: the pareto front is completely concave. The goal is to minimize\n both objectives.\n \"\"\"\n\n _ref_val = 1.1\n\n @property\n def _max_hv(self) -> float:\n # hypercube - volume of hypersphere in R^n such that all coordinates are\n # positive\n hypercube_vol = self._ref_val ** self.num_objectives\n pos_hypersphere_vol = (\n math.pi ** (self.num_objectives / 2)\n / gamma(self.num_objectives / 2 + 1)\n / 2 ** self.num_objectives\n )\n return hypercube_vol - pos_hypersphere_vol\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n X_m = X[..., -self.k :]\n g_X = (X_m - 0.5).pow(2).sum(dim=-1)\n g_X_plus1 = 1 + g_X\n fs = []\n pi_over_2 = math.pi / 2\n for i in range(self.num_objectives):\n idx = self.num_objectives - 1 - i\n f_i = g_X_plus1.clone()\n f_i *= torch.cos(X[..., :idx] * pi_over_2).prod(dim=-1)\n if i > 0:\n f_i *= torch.sin(X[..., idx] * pi_over_2)\n fs.append(f_i)\n return torch.stack(fs, dim=-1)\n\n def gen_pareto_front(self, n: int) -> Tensor:\n r\"\"\"Generate `n` pareto optimal points.\n\n The pareto points are randomly sampled from the hypersphere's\n positive section.\n \"\"\"\n f_X = sample_hypersphere(\n n=n,\n d=self.num_objectives,\n dtype=self.ref_point.dtype,\n device=self.ref_point.device,\n qmc=True,\n ).abs()\n if self.negate:\n f_X *= -1\n return f_X\n\n\nclass VehicleSafety(MultiObjectiveTestProblem):\n r\"\"\"Optimize Vehicle crash-worthiness.\n\n See [Tanabe2020]_ for details.\n\n The reference point is 1.1 * the nadir point from\n approximate front provided by [Tanabe2020]_.\n\n The maximum hypervolume is computed using the approximate\n pareto front from [Tanabe2020]_.\n \"\"\"\n\n _ref_point = [1864.72022, 11.81993945, 0.2903999384]\n _max_hv = 246.81607081187002\n _bounds = [(1.0, 3.0)] * 5\n dim = 5\n num_objectives = 3\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n X1, X2, X3, X4, X5 = torch.split(X, 1, -1)\n f1 = (\n 1640.2823\n + 2.3573285 * X1\n + 2.3220035 * X2\n + 4.5688768 * X3\n + 7.7213633 * X4\n + 4.4559504 * X5\n )\n f2 = (\n 6.5856\n + 1.15 * X1\n - 1.0427 * X2\n + 0.9738 * X3\n + 0.8364 * X4\n - 0.3695 * X1 * X4\n + 0.0861 * X1 * X5\n + 0.3628 * X2 * X4\n - 0.1106 * X1.pow(2)\n - 0.3437 * X3.pow(2)\n + 
0.1764 * X4.pow(2)\n )\n f3 = (\n -0.0551\n + 0.0181 * X1\n + 0.1024 * X2\n + 0.0421 * X3\n - 0.0073 * X1 * X2\n + 0.024 * X2 * X3\n - 0.0118 * X2 * X4\n - 0.0204 * X3 * X4\n - 0.008 * X3 * X5\n - 0.0241 * X2.pow(2)\n + 0.0109 * X4.pow(2)\n )\n f_X = torch.cat([f1, f2, f3], dim=-1)\n return f_X\n\n\nclass ZDT(MultiObjectiveTestProblem):\n r\"\"\"Base class for ZDT problems.\n\n See [Zitzler2000]_ for more details on ZDT.\n \"\"\"\n\n _ref_point = [11.0, 11.0]\n\n def __init__(\n self,\n dim: int,\n num_objectives: int = 2,\n noise_std: Optional[float] = None,\n negate: bool = False,\n ) -> None:\n if num_objectives != 2:\n raise NotImplementedError(\n f\"{type(self).__name__} currently only supports 2 objectives.\"\n )\n if dim < num_objectives:\n raise ValueError(\n f\"dim must be >= num_objectives, but got {dim} and {num_objectives}\"\n )\n self.num_objectives = num_objectives\n self.dim = dim\n self._bounds = [(0.0, 1.0) for _ in range(self.dim)]\n super().__init__(noise_std=noise_std, negate=negate)\n\n @staticmethod\n def _g(X: Tensor) -> Tensor:\n return 1 + 9 * X[..., 1:].mean(dim=-1)\n\n\nclass ZDT1(ZDT):\n r\"\"\"ZDT1 test problem.\n\n d-dimensional problem evaluated on `[0, 1]^d`:\n\n f_0(x) = x_0\n f_1(x) = g(x) * (1 - sqrt(x_0 / g(x))\n g(x) = 1 + 9 / (d - 1) * \\sum_{i=1}^{d-1} x_i\n\n The reference point comes from [Yang2019a]_.\n\n The pareto front is convex.\n \"\"\"\n\n _max_hv = 120 + 2 / 3\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n f_0 = X[..., 0]\n g = self._g(X=X)\n f_1 = g * (1 - (f_0 / g).sqrt())\n return torch.stack([f_0, f_1], dim=-1)\n\n def gen_pareto_front(self, n: int) -> Tensor:\n f_0 = torch.linspace(\n 0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device\n )\n f_1 = 1 - f_0.sqrt()\n f_X = torch.stack([f_0, f_1], dim=-1)\n if self.negate:\n f_X *= -1\n return f_X\n\n\nclass ZDT2(ZDT):\n r\"\"\"ZDT2 test problem.\n\n d-dimensional problem evaluated on `[0, 1]^d`:\n\n f_0(x) = x_0\n f_1(x) = g(x) * (1 - (x_0 / g(x))^2)\n g(x) = 1 + 9 / (d - 1) * \\sum_{i=1}^{d-1} x_i\n\n The reference point comes from [Yang2019a]_.\n\n The pareto front is concave.\n \"\"\"\n\n _max_hv = 120 + 1 / 3\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n f_0 = X[..., 0]\n g = self._g(X=X)\n f_1 = g * (1 - (f_0 / g).pow(2))\n return torch.stack([f_0, f_1], dim=-1)\n\n def gen_pareto_front(self, n: int) -> Tensor:\n f_0 = torch.linspace(\n 0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device\n )\n f_1 = 1 - f_0.pow(2)\n f_X = torch.stack([f_0, f_1], dim=-1)\n if self.negate:\n f_X *= -1\n return f_X\n\n\nclass ZDT3(ZDT):\n r\"\"\"ZDT3 test problem.\n\n d-dimensional problem evaluated on `[0, 1]^d`:\n\n f_0(x) = x_0\n f_1(x) = 1 - sqrt(x_0 / g(x)) - x_0 / g * sin(10 * pi * x_0)\n g(x) = 1 + 9 / (d - 1) * \\sum_{i=1}^{d-1} x_i\n\n The reference point comes from [Yang2019a]_.\n\n The pareto front consists of several discontinuous convex parts.\n \"\"\"\n\n _max_hv = 128.77811613069076060\n _parts = [\n # this interval includes both end points\n [0, 0.0830015349],\n # this interval includes only the right end points\n [0.1822287280, 0.2577623634],\n [0.4093136748, 0.4538821041],\n [0.6183967944, 0.6525117038],\n [0.8233317983, 0.8518328654],\n ]\n # nugget to make sure linspace returns elements within the specified range\n _eps = 1e-6\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n f_0 = X[..., 0]\n g = self._g(X=X)\n f_1 = 1 - (f_0 / g).sqrt() - f_0 / g * torch.sin(10 * math.pi * f_0)\n return torch.stack([f_0, f_1], dim=-1)\n\n def gen_pareto_front(self, n: 
int) -> Tensor:\n n_parts = len(self._parts)\n n_per_part = torch.full(\n torch.Size([n_parts]),\n n // n_parts,\n dtype=torch.long,\n device=self.bounds.device,\n )\n left_over = n % n_parts\n n_per_part[:left_over] += 1\n f_0s = []\n for i, p in enumerate(self._parts):\n left, right = p\n f_0s.append(\n torch.linspace(\n left + self._eps,\n right - self._eps,\n n_per_part[i],\n dtype=self.bounds.dtype,\n device=self.bounds.device,\n )\n )\n f_0 = torch.cat(f_0s, dim=0)\n f_1 = 1 - f_0.sqrt() - f_0 * torch.sin(10 * math.pi * f_0)\n f_X = torch.stack([f_0, f_1], dim=-1)\n if self.negate:\n f_X *= -1\n return f_X\n\n\n# ------ Constrained Multi-Objective Test Problems ----- #\n\n\nclass BNH(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):\n r\"\"\"The constrained BNH problem.\n\n See [GarridoMerchan2020]_ for more details on this problem. Note that this is a\n minimization problem.\n \"\"\"\n\n dim = 2\n num_objectives = 2\n num_constraints = 2\n _bounds = [(0.0, 5.0), (0.0, 3.0)]\n _ref_point = [0.0, 0.0] # TODO: Determine proper reference point\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n return torch.stack(\n [4.0 * (X ** 2).sum(dim=-1), ((X - 5.0) ** 2).sum(dim=-1)], dim=-1\n )\n\n def evaluate_slack_true(self, X: Tensor) -> Tensor:\n c1 = 25.0 - (X[..., 0] - 5.0) ** 2 - X[..., 1] ** 2\n c2 = (X[..., 0] - 8.0) ** 2 + (X[..., 1] + 3.0) ** 2 - 7.7\n return torch.stack([c1, c2], dim=-1)\n\n\nclass SRN(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):\n r\"\"\"The constrained SRN problem.\n\n See [GarridoMerchan2020]_ for more details on this problem. Note that this is a\n minimization problem.\n \"\"\"\n\n dim = 2\n num_objectives = 2\n num_constraints = 2\n _bounds = [(-20.0, 20.0), (-20.0, 20.0)]\n _ref_point = [0.0, 0.0] # TODO: Determine proper reference point\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n obj1 = 2.0 + ((X - 2.0) ** 2).sum(dim=-1)\n obj2 = 9.0 * X[..., 0] - (X[..., 1] - 1.0) ** 2\n return torch.stack([obj1, obj2], dim=-1)\n\n def evaluate_slack_true(self, X: Tensor) -> Tensor:\n c1 = 225.0 - ((X ** 2) ** 2).sum(dim=-1)\n c2 = -10.0 - X[..., 0] + 3 * X[..., 1]\n return torch.stack([c1, c2], dim=-1)\n\n\nclass CONSTR(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):\n r\"\"\"The constrained CONSTR problem.\n\n See [GarridoMerchan2020]_ for more details on this problem. 
Note that this is a\n minimization problem.\n \"\"\"\n\n dim = 2\n num_objectives = 2\n num_constraints = 2\n _bounds = [(0.1, 10.0), (0.0, 5.0)]\n _ref_point = [10.0, 10.0]\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n obj1 = X[..., 0]\n obj2 = (1.0 + X[..., 1]) / X[..., 0]\n return torch.stack([obj1, obj2], dim=-1)\n\n def evaluate_slack_true(self, X: Tensor) -> Tensor:\n c1 = 9.0 * X[..., 0] + X[..., 1] - 6.0\n c2 = 9.0 * X[..., 0] - X[..., 1] - 1.0\n return torch.stack([c1, c2], dim=-1)\n\n\nclass ConstrainedBraninCurrin(BraninCurrin, ConstrainedBaseTestProblem):\n r\"\"\"Constrained Branin Currin Function.\n\n This uses the disk constraint from [Gelbart2014]_.\n \"\"\"\n\n dim = 2\n num_objectives = 2\n num_constraints = 1\n _bounds = [(0.0, 1.0), (0.0, 1.0)]\n _con_bounds = [(-5.0, 10.0), (0.0, 15.0)]\n _ref_point = [80.0, 12.0]\n _max_hv = 608.4004237022673 # from NSGA-II with 90k evaluations\n\n def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:\n super().__init__(noise_std=noise_std, negate=negate)\n con_bounds = torch.tensor(self._con_bounds, dtype=torch.float).transpose(-1, -2)\n self.register_buffer(\"con_bounds\", con_bounds)\n\n def evaluate_slack_true(self, X: Tensor) -> Tensor:\n X_tf = unnormalize(X, self.con_bounds)\n return 50 - (X_tf[..., 0:1] - 2.5).pow(2) - (X_tf[..., 1:2] - 7.5).pow(2)\n\n\nclass C2DTLZ2(DTLZ2, ConstrainedBaseTestProblem):\n\n num_constraints = 1\n _r = 0.2\n # approximate from nsga-ii, TODO: replace with analytic\n _max_hv = 0.3996406303723544\n\n def evaluate_slack_true(self, X: Tensor) -> Tensor:\n if X.ndim > 2:\n raise NotImplementedError(\"Batch X is not supported.\")\n f_X = self.evaluate_true(X)\n term1 = (f_X - 1).pow(2)\n mask = ~(torch.eye(f_X.shape[-1], device=f_X.device).bool())\n indices = torch.arange(f_X.shape[1], device=f_X.device).repeat(f_X.shape[1], 1)\n indexer = indices[mask].view(f_X.shape[1], f_X.shape[-1] - 1)\n term2_inner = (\n f_X.unsqueeze(1)\n .expand(f_X.shape[0], f_X.shape[-1], f_X.shape[-1])\n .gather(dim=-1, index=indexer.repeat(f_X.shape[0], 1, 1))\n )\n term2 = (term2_inner.pow(2) - self._r ** 2).sum(dim=-1)\n min1 = (term1 + term2).min(dim=-1).values\n min2 = ((f_X - 1 / math.sqrt(f_X.shape[-1])).pow(2) - self._r ** 2).sum(dim=-1)\n return -torch.min(min1, min2).unsqueeze(-1)\n\n\nclass OSY(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):\n r\"\"\"\n The OSY test problem from [Oszycka1995]_.\n Implementation from\n https://github.com/msu-coinlab/pymoo/blob/master/pymoo/problems/multi/osy.py\n Note that this implementation assumes minimization, so please choose negate=True.\n \"\"\"\n\n dim = 6\n num_constraints = 6\n num_objectives = 2\n _bounds = [\n (0.0, 10.0),\n (0.0, 10.0),\n (1.0, 5.0),\n (0.0, 6.0),\n (1.0, 5.0),\n (0.0, 10.0),\n ]\n _ref_point = [-75.0, 75.0]\n\n def evaluate_true(self, X: Tensor) -> Tensor:\n f1 = -(\n 25 * (X[..., 0] - 2) ** 2\n + (X[..., 1] - 2) ** 2\n + (X[..., 2] - 1) ** 2\n + (X[..., 3] - 4) ** 2\n + (X[..., 4] - 1) ** 2\n )\n f2 = (X ** 2).sum(-1)\n return torch.stack([f1, f2], dim=-1)\n\n def evaluate_slack_true(self, X: Tensor) -> Tensor:\n g1 = X[..., 0] + X[..., 1] - 2.0\n g2 = 6.0 - X[..., 0] - X[..., 1]\n g3 = 2.0 - X[..., 1] + X[..., 0]\n g4 = 2.0 - X[..., 0] + 3.0 * X[..., 1]\n g5 = 4.0 - (X[..., 2] - 3.0) ** 2 - X[..., 3]\n g6 = (X[..., 4] - 3.0) ** 2 + X[..., 5] - 4.0\n return torch.stack([g1, g2, g3, g4, g5, g6], dim=-1)\n" ]
[ [ "torch.min", "torch.stack", "torch.Size", "torch.split", "torch.cos", "torch.linspace", "torch.tensor", "torch.exp", "scipy.special.gamma", "torch.sin", "torch.arange", "torch.eye", "torch.cat" ] ]
mmoussallam/bird
[ "6a362de7d3a52dfcddaed13e8c736d039b03fbb4" ]
[ "bird/tests/test_mdct_tools.py" ]
[ "# Authors: Alexandre Gramfort <[email protected]>\n# Manuel Moussallam <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom bird.mdct_tools import mdct, imdct\n\n\ndef test_mdct():\n \"Test mdct and imdct tight frame property\"\n sfreq = 1000. # Hz\n f = 7. # Hz\n x1 = np.sin(2. * np.pi * f * np.arange(128, dtype=float) / sfreq)\n x2 = np.sin(2. * np.pi * f * np.arange(512, dtype=float) / sfreq)\n\n rng = np.random.RandomState(42)\n x3 = rng.standard_normal(x1.shape)\n\n wsize = 32\n\n for x in [x1, x2, x3]:\n X = mdct(x, wsize)\n xp = imdct(X, wsize)\n\n assert_array_almost_equal(x, xp, decimal=12)\n" ]
[ [ "numpy.random.RandomState", "numpy.testing.assert_array_almost_equal", "numpy.arange" ] ]
anicokatz/PyMultiNestPlus
[ "d223ac90bef7c1b61e337b70c2bdb41ed46cb2b7" ]
[ "example_workspace/inverted_hierarchy/model.py" ]
[ "# INVERTED HIERARCHY\nimport prior_handler as phandle\nimport math\nimport numpy as np\nimport os\ncwd = os.path.dirname(os.path.realpath(__file__))\nprint(cwd)\n\nprior_handler = phandle.PriorHandler(cwd)\ncon = prior_handler.c\nn_pars = prior_handler.n_pars\n\ndef prior(cube, n_dims, n_pars):\n return prior_handler.scale(cube)\n\ndef observables(pars):\n # get the nuisances from the par-based seed\n nui = prior_handler.get_nui(pars)\n \n # get mt value\n c13 = math.cos(pars[4])\n \n a1 = abs(math.cos(pars[3]*c13))**2\n a2 = abs(math.sin(pars[3]*c13))**2\n a3 = abs(math.sin(pars[4]))**2\n \n dm2 = pars[5]\n Dm2 = pars[6]\n \n m3 = pars[0]\n m2 = math.sqrt(max([0, m3**2 + Dm2 + dm2/2]))\n m1 = math.sqrt(max([0, m3**2 + Dm2 - dm2/2]))\n \n # with pars, nui, con, start calculation:\n return [abs(a1*m1*np.exp(-1j*pars[1]) + a2*m2*np.exp(-1j*pars[2]) + a3*m3 )]\n\ndef loglikelihood(pars, n_dims, n_pars):\n mval = observables(pars)\n mval = mval[0]\n loglikelihood = (-((mval-con[0])**2)/(2*(con[1]**2)))\n return loglikelihood" ]
[ [ "numpy.exp" ] ]
elke0011/OpenFlightSim
[ "1e28c54864ffd188f27425c8a71cce8b70a4bd7f" ]
[ "Utilities/JSBSimWriteXml.py" ]
[ "\"\"\"\nUniversity of Minnesota\nAerospace Engineering and Mechanics - UAV Lab\nCopyright 2019 Regents of the University of Minnesota.\nSee: LICENSE.md for complete license details.\n\nAuthor: Louis Mueller, Chris Regan\n\"\"\"\n\nimport os.path\nfrom xml.etree import ElementTree as ET\n\nimport numpy as np\n\n\nft2m = 0.3048\npsf2pa = 47.88026\n\n#%% Save the XML in pretty-ish print\ndef SaveXml(elem, saveFile):\n from xml.dom import minidom\n\n uglyXml = ET.tostring(elem, 'utf-8')\n prettyXml = minidom.parseString(uglyXml).toprettyxml(indent=' ', newl = '\\r\\n')\n\n os.makedirs(os.path.dirname(saveFile), exist_ok=True)\n with open(saveFile, 'w') as saveXML:\n saveXML.write(prettyXml)\n\n saveXML.close()\n\n#%% Function\n\ndef Aircraft(oFdm, convertFdm2Jsb, saveJsbPath, aircraftName):\n\n\n # Start JSB-ML with etree\n elemAircraft = ET.Element('fdm_config', version = '2.0', release = 'Alpha')\n\n\n # Create the Pilot input as a seperate XML file, direct the Aircraft definition to use\n fcsFile = 'FlightControl.xml'\n ET.SubElement(elemAircraft, 'flight_control', file = fcsFile)\n\n SaveXml(FlightControl(oFdm), os.path.join(saveJsbPath, fcsFile))\n\n\n # Effectors as a seperate XML file, direct the Aircraft definition to use\n effFile = 'Effectors.xml'\n ET.SubElement(elemAircraft, 'system', file = effFile)\n\n SaveXml(Effectors(oFdm), os.path.join(saveJsbPath, effFile))\n\n\n # Create the Mass Properties input as a seperate XML file, direct the Aircraft definition to use\n massFile = 'Mass.xml'\n ET.SubElement(elemAircraft, 'mass_balance', file = massFile)\n\n SaveXml(MassBalance(oFdm), os.path.join(saveJsbPath, massFile))\n\n\n # Create the Gear input as a seperate XML file, direct the Aircraft definition to use\n gearFile = 'Gear.xml'\n ET.SubElement(elemAircraft, 'ground_reactions', file = gearFile)\n\n SaveXml(GroundReactions(oFdm), os.path.join(saveJsbPath, gearFile))\n\n\n # Create the Propulsion input as a seperate XML file, direct the Aircraft definition to use\n propFile = 'Propulsion.xml'\n ET.SubElement(elemAircraft, 'propulsion', file = propFile)\n\n SaveXml(Propulsion(oFdm), os.path.join(saveJsbPath, propFile))\n\n\n # Metrics and Aerodynamics as a seperate XML file, direct the Aircraft definition to use\n # Group the Metrics and Aero by similar naming; the dimensionalization inherent to Aero is provided by the Metrics\n metricsFile = 'Metrics.xml'\n ET.SubElement(elemAircraft, 'metrics', file = metricsFile)\n\n SaveXml(Metrics(oFdm), os.path.join(saveJsbPath, metricsFile))\n\n\n aeroFile = 'Aero.xml'\n ET.SubElement(elemAircraft, 'aerodynamics', file = aeroFile)\n\n SaveXml(Aerodynamics(oFdm, convertFdm2Jsb), os.path.join(saveJsbPath, aeroFile))\n\n\n # Launcher as a seperate XML file, direct the Aircraft definition to use\n if 'Winch' in oFdm.keys() :\n winchFile = 'Winch.xml'\n ET.SubElement(elemAircraft, 'external_reactions', file = winchFile)\n\n SaveXml(Winch(oFdm), os.path.join(saveJsbPath, winchFile))\n\n\n # Imu as a seperate XML file, direct the Aircraft definition to use\n if 'Imu' in oFdm['Sensor'].keys() :\n imuFile = 'SensorImu.xml'\n ET.SubElement(elemAircraft, 'system', file = imuFile)\n\n SaveXml(SensorImu(oFdm), os.path.join(saveJsbPath, imuFile))\n\n\n # Gps as a seperate XML file, direct the Aircraft definition to use\n if 'Gps' in oFdm['Sensor'].keys() :\n gpsFile = 'SensorGps.xml'\n ET.SubElement(elemAircraft, 'system', file = gpsFile)\n\n SaveXml(SensorGps(oFdm), os.path.join(saveJsbPath, gpsFile))\n\n\n # Pitot as a seperate XML file, direct the 
Aircraft definition to use\n if 'Pitot' in oFdm['Sensor'].keys() :\n pitotFile = 'SensorPitot.xml'\n ET.SubElement(elemAircraft, 'system', file = pitotFile)\n\n SaveXml(SensorPitot(oFdm), os.path.join(saveJsbPath, pitotFile))\n\n\n # 5Hole as a seperate XML file, direct the Aircraft definition to use\n if '5Hole' in oFdm['Sensor'].keys() :\n fiveHoleFile = 'Sensor5Hole.xml'\n ET.SubElement(elemAircraft, 'system', file = fiveHoleFile)\n\n SaveXml(Sensor5Hole(oFdm), os.path.join(saveJsbPath, fiveHoleFile))\n\n # Write the Aircraft XML file\n saveFile = os.path.join(saveJsbPath, aircraftName + '.xml')\n SaveXml(elemAircraft, saveFile)\n\n\n return(elemAircraft)\n\n\n\n\n#%% Table Generator, Wrapper\ndef TableGen(elemParent, tableArray, tableSignals, tableBreakPts):\n\n s = tableArray.shape\n iAxisRemList = []\n for iAxis in range(0, len(s)):\n if s[iAxis] == 1:\n iAxisRemList.append(iAxis)\n\n # for iRem in iAxisRemList: # XXX\n # tableArray = tableArray.squeeze(axis=iRem)\n # del tableSignals[iRem]\n # del tableBreakPts[iRem]\n\n if len(tableArray.shape)==3:\n table = TableGen3D(elemParent, tableArray, tableSignals, tableBreakPts)\n elif len(tableArray.shape)==2:\n table = TableGen2D(elemParent, tableArray, tableSignals, tableBreakPts)\n elif (len(tableArray.shape)==1) & (tableArray.size > 1):\n table = TableGen1D(elemParent, tableArray, tableSignals, tableBreakPts)\n else:\n table = ET.SubElement(elemParent, 'value').text = str(tableArray)\n\n\n return table\n\n#%% Table Generator, 3D\ndef TableGen3D(elemParent, tableArray, tableSignals, tableBreakPts):\n table = ET.SubElement(elemParent, 'table')\n #table = ET.Element('table')\n\n ET.SubElement(table, 'independentVar', lookup = 'row').text = tableSignals[0]\n ET.SubElement(table, 'independentVar', lookup = 'column').text = tableSignals[1]\n ET.SubElement(table, 'independentVar', lookup = 'table').text = tableSignals[2]\n\n indentSpace = ' '*4\n indentLvl = 4\n\n numRows, numColumns, numTables = np.shape(tableArray)\n\n columnHeader = indentSpace*(indentLvl)\n for columnVal in tableBreakPts[1]:\n columnHeader += ' '*6 + str(columnVal)\n\n\n for iTable in range(0, numTables):\n tableStr = ['\\n' + columnHeader]\n for iRow in range(0, numRows):\n rowStr = str(tableArray[iRow, :, iTable]).replace('[','').replace(']','').replace('\\n', '')\n tableStr.append(indentLvl*indentSpace + str(tableBreakPts[0][iRow]) + indentSpace + rowStr)\n\n tableStr = '\\n'.join(tableStr) + '\\n' + indentLvl*indentSpace # Replace list lines with '/n' strings\n ET.SubElement(table, 'tableData', breakPoint = str(tableBreakPts[2][iTable])).text = tableStr\n\n\n return table\n\n\n#%% Table Generator, 2D\ndef TableGen2D(elemParent, tableArray, tableSignals, tableBreakPts):\n table = ET.SubElement(elemParent, 'table')\n\n ET.SubElement(table, 'independentVar', lookup = 'row').text = tableSignals[0]\n ET.SubElement(table, 'independentVar', lookup = 'column').text = tableSignals[1]\n indentSpace = ' '*4\n indentLvl = 4\n\n tableArray = tableArray.transpose()\n numRows, numColumns = np.shape(tableArray)\n\n columnHeader = indentSpace*(indentLvl)\n for columnVal in tableBreakPts[1]:\n columnHeader += ' '*6 + str(columnVal)\n\n tableStr = ['\\n' + columnHeader]\n for iRow in range(0, numRows):\n rowStr = str(tableArray[iRow]).replace('[','').replace(']','').replace('\\n', '')\n tableStr.append(indentLvl*indentSpace + str(tableBreakPts[0][iRow]) + indentSpace + rowStr)\n\n tableStr = '\\n'.join(tableStr) + '\\n' + indentLvl*indentSpace # Replace list lines with '/n' strings\n 
ET.SubElement(table, 'tableData').text = tableStr\n\n return table\n\n\n#%% Table Generator, 1D\ndef TableGen1D(elemParent, tableArray, tableSignals, tableBreakPts):\n table = ET.SubElement(elemParent, 'table')\n\n ET.SubElement(table, 'independentVar', lookup = 'row').text = tableSignals\n indentSpace = ' '*4\n indentLvl = 4\n\n numRows = np.shape(tableArray)[0]\n\n tableStr = ['\\n']\n for iRow in range(0, numRows):\n rowStr = str(tableArray[iRow]).replace('[','').replace(']','').replace('\\n', '')\n tableStr.append(indentLvl*indentSpace + str(tableBreakPts[iRow]) + indentSpace + rowStr)\n\n tableStr = '\\n'.join(tableStr) + '\\n' + indentLvl*indentSpace # Replace list lines with '/n' strings\n ET.SubElement(table, 'tableData').text = tableStr\n\n return table\n\n\n#%%\ndef MassBalance(oFdm):\n mass_balance = ET.Element('mass_balance')\n\n # Mass\n ET.SubElement(mass_balance, 'emptywt', unit = 'KG').text = str(oFdm['MassProp']['mass_kg'])\n\n # CG\n location = ET.SubElement(mass_balance, 'location', name = 'CG', unit = 'M')\n ET.SubElement(location, 'x').text = str(oFdm['MassProp']['rCG_S_m'][0])\n ET.SubElement(location, 'y').text = str(oFdm['MassProp']['rCG_S_m'][1])\n ET.SubElement(location, 'z').text = str(oFdm['MassProp']['rCG_S_m'][2])\n\n # Inertia\n ET.SubElement(mass_balance, 'ixx', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][0,0])\n ET.SubElement(mass_balance, 'iyy', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][1,1])\n ET.SubElement(mass_balance, 'izz', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][2,2])\n ET.SubElement(mass_balance, 'ixy', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][0,1])\n ET.SubElement(mass_balance, 'ixz', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][0,2])\n ET.SubElement(mass_balance, 'iyz', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][1,2])\n\n return(mass_balance)\n\n#%%\ndef GroundReactions(oFdm):\n ground_reactions = ET.Element('ground_reactions')\n\n # Loop Each Gear\n for gear in oFdm['Gear'].keys():\n contact = ET.SubElement(ground_reactions, 'contact', type = 'BOGEY', name = gear)\n\n location = ET.SubElement(contact, 'location', unit = 'M')\n ET.SubElement(location, 'x').text = str(oFdm['Gear'][gear]['rGear_S_m'][0])\n ET.SubElement(location, 'y').text = str(oFdm['Gear'][gear]['rGear_S_m'][1])\n ET.SubElement(location, 'z').text = str(oFdm['Gear'][gear]['rGear_S_m'][2])\n\n ET.SubElement(contact, 'static_friction').text = str(oFdm['Gear'][gear]['FricStatic'])\n ET.SubElement(contact, 'dynamic_friction').text = str(oFdm['Gear'][gear]['FricDynamic'])\n ET.SubElement(contact, 'rolling_friction').text = str(oFdm['Gear'][gear]['FricRoll'])\n ET.SubElement(contact, 'spring_coeff', unit = 'N/M').text = str(oFdm['Gear'][gear]['kSpring_Npm'])\n ET.SubElement(contact, 'damping_coeff', unit = 'N/M/SEC').text = str(oFdm['Gear'][gear]['dampSpring_Nspm'])\n\n ET.SubElement(contact, 'max_steer', unit = 'DEG').text = '0.0'\n\n return(ground_reactions)\n\n\n#%%\ndef Metrics(oFdm):\n metrics = ET.Element('metrics')\n\n # Dimensions\n ET.SubElement(metrics, 'wingarea', unit = 'M2').text = str(oFdm['Aero']['Ref']['S_m2'])\n ET.SubElement(metrics, 'wingspan', unit = 'M').text = str(oFdm['Aero']['Ref']['b_m'])\n ET.SubElement(metrics, 'chord', unit = 'M').text = str(oFdm['Aero']['Ref']['cBar_m'])\n\n location = ET.SubElement(metrics, 'location', name = 'AERORP', unit = 'M')\n ET.SubElement(location, 'x').text = str(oFdm['Aero']['Ref']['rAero_S_m'][0])\n ET.SubElement(location, 
'y').text = str(oFdm['Aero']['Ref']['rAero_S_m'][1])\n ET.SubElement(location, 'z').text = str(oFdm['Aero']['Ref']['rAero_S_m'][2])\n\n location = ET.SubElement(metrics, 'location', name = 'EYEPOINT', unit = 'M')\n ET.SubElement(location, 'x').text = str(oFdm['Aero']['Ref']['rAero_S_m'][0])\n ET.SubElement(location, 'y').text = str(oFdm['Aero']['Ref']['rAero_S_m'][1])\n ET.SubElement(location, 'z').text = str(oFdm['Aero']['Ref']['rAero_S_m'][2])\n\n location = ET.SubElement(metrics, 'location', name = 'VRP', unit = 'M')\n ET.SubElement(location, 'x').text = '0.0'\n ET.SubElement(location, 'y').text = '0.0'\n ET.SubElement(location, 'z').text = '0.0'\n\n return(metrics)\n\n\n#%%\ndef Aerodynamics(oFdm, convertFdm2Jsb):\n\n import copy\n\n # Aero Coef definitions\n coefNamesFdm = convertFdm2Jsb['Coef']['oFdm']\n\n # Aero Deriv dependencies definitions\n depNamesFdm = convertFdm2Jsb['Dep']['oFdm']\n depNamesJsb = convertFdm2Jsb['Dep']['jsb']\n depScale = convertFdm2Jsb['Dep']['scale']\n\n coefNamesFdm = convertFdm2Jsb['Coef']['oFdm']\n\n # Aero Breakpoint Table defintions\n indVarTable = convertFdm2Jsb['TableDef']['jsb']\n breakPtsTable = convertFdm2Jsb['TableDef']['brkPts']\n\n # Aero Table data to use\n aeroTable = oFdm['Aero']['Coef']\n\n # Define the conversion from oFdm to JSB-ML # FIXIT - switch to a CDo+CDi drag computation\n coefTable = {'CL': {'axis': 'LIFT', 'scale': None, 'type': 'force', 'deriv': 'dCL'}, \\\n 'CD': {'axis': 'DRAG', 'scale': None, 'type': 'force', 'deriv': 'dCD'}, \\\n 'CY': {'axis': 'SIDE', 'scale': None, 'type': 'force', 'deriv': 'dCY'}, \\\n 'CMl': {'axis': 'ROLL', 'scale': 'metrics/bw-ft', 'type': 'moment', 'deriv': 'dCMl'}, \\\n 'CMm': {'axis': 'PITCH', 'scale': 'metrics/cbarw-ft', 'type': 'moment', 'deriv': 'dCMm'}, \\\n 'CMn': {'axis': 'YAW', 'scale': 'metrics/bw-ft', 'type': 'moment', 'deriv': 'dCMn'}}\n\n\n aerodynamics = ET.Element('aerodynamics')\n\n #\n # Create each coefficient individually, just the table look-up\n coefNames = coefTable.keys()\n for iCoef, coef in enumerate(coefNames):\n convertCoef = coefTable[coef]\n\n # For each coefficient: create just the table look-up, then the Multiplication, then the summation\n for iDep, dep in enumerate(coefNamesFdm):\n function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef + '__' + dep))\n ET.SubElement(function, 'description').text = str(coef + '__' + dep)\n\n # Use the Table Generator to create the properly formated Table for JSB-ML\n tableArray = aeroTable[coef][dep]\n tableSignals = indVarTable\n tableBreakPts = breakPtsTable\n\n table = TableGen(function, copy.deepcopy(tableArray), copy.deepcopy(tableSignals), copy.deepcopy(tableBreakPts))\n\n # For each derivative: create just the table look-up, then the Multiplication, then the summation\n deriv = convertCoef['deriv']\n\n for iDep, dep in enumerate(depNamesFdm):\n function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + deriv + '__' + dep))\n ET.SubElement(function, 'description').text = str(deriv + '__' + dep)\n\n # Use the Table Generator to create the properly formated Table for JSB-ML\n tableArray = aeroTable[deriv][dep]\n tableSignals = indVarTable\n tableBreakPts = breakPtsTable\n\n table = TableGen(function, copy.deepcopy(tableArray), copy.deepcopy(tableSignals), copy.deepcopy(tableBreakPts))\n\n # Multiply each derivative by it's dependent variable\n function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef + '__' + dep))\n ET.SubElement(function, 
'description').text = str(coef + '__' + dep + ' = ' + deriv + '__' + dep + ' * ' + dep)\n\n #print(coef + '__' + dep + ' = ' + deriv + '__' + dep + ' * ' + dep)\n\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'aero/coefficient/' + deriv + '__' + dep\n\n #print(deriv + '__' + dep)\n\n depSignal = depNamesJsb[iDep]\n #print(depSignal)\n if depSignal != None:\n ET.SubElement(product, 'property').text = depSignal # Dependent Variable/Signal\n\n scale = depScale[iDep]\n if scale != None:\n if isinstance(scale, str):\n ET.SubElement(product, 'property').text = str(scale) # Dependent Variable Scaling\n else:\n ET.SubElement(product, 'value').text = str(scale) # Dependent Variable Scaling\n\n\n\n # Sum the Coeficients\n function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef))\n ET.SubElement(function, 'description').text = str(coef + ' summation')\n #print(coef + ' summation')\n\n summation = ET.SubElement(function, 'sum')\n for iDep, dep in enumerate(coefNamesFdm):\n ET.SubElement(summation, 'property').text = 'aero/coefficient/' + coef + '__' + dep\n #print(coef + '__' + dep)\n\n for iDep, dep in enumerate(depNamesFdm):\n ET.SubElement(summation, 'property').text = 'aero/coefficient/' + coef + '__' + dep\n #print(coef + '__' + dep)\n\n #\n # Dimensionalize the Coefficients into Forces and Moments\n for iCoef, coef in enumerate(coefNames):\n convertCoef = coefTable[coef]\n\n axis = ET.SubElement(aerodynamics, 'axis', name = convertCoef['axis'])\n\n function = ET.SubElement(axis, 'function', name = str('aero/' + convertCoef['type'] + '/' + convertCoef['axis'] + '__' + coef))\n ET.SubElement(function, 'description').text = str(convertCoef['axis'] + ' from ' + coef)\n\n product = ET.SubElement(function, 'product')\n\n ET.SubElement(product, 'property').text = 'aero/qbar-area' # qBar * sRef\n\n if convertCoef['scale'] != None:\n ET.SubElement(product, 'property').text = convertCoef['scale'] # Coefficient Scaling\n\n ET.SubElement(product, 'property').text = 'aero/coefficient/' + coef\n\n\n return(aerodynamics)\n\n#%%\ndef Propulsion(oFdm):\n propulsion = ET.Element('propulsion')\n\n for key in oFdm['Prop'].keys():\n\n prop = oFdm['Prop'][key]\n\n # Motor/Engine\n engine = ET.SubElement(propulsion, 'engine', file = prop['nameMotor'])\n# location = ET.SubElement(engine, 'location', unit = 'M')\n# ET.SubElement(location, 'x').text = str(prop['rMotor_S_m'][0])\n# ET.SubElement(location, 'y').text = str(prop['rMotor_S_m'][1])\n# ET.SubElement(location, 'z').text = str(prop['rMotor_S_m'][2])\n# orient = ET.SubElement(engine, 'orient', unit = 'DEG')\n# ET.SubElement(orient, 'roll').text = str(prop['sMotor_deg'][0])\n# ET.SubElement(orient, 'pitch').text = str(prop['sMotor_deg'][1])\n# ET.SubElement(orient, 'yaw').text = str(prop['sMotor_deg'][2])\n\n # Thruster/Prop as an element of the Engine\n thruster = ET.SubElement(engine, 'thruster', file = prop['nameProp'])\n location = ET.SubElement(thruster, 'location', unit = 'M')\n ET.SubElement(location, 'x').text = str(prop['rProp_S_m'][0])\n ET.SubElement(location, 'y').text = str(prop['rProp_S_m'][1])\n ET.SubElement(location, 'z').text = str(prop['rProp_S_m'][2])\n orient = ET.SubElement(thruster, 'orient', unit = 'DEG')\n ET.SubElement(orient, 'roll').text = str(prop['sProp_deg'][0])\n ET.SubElement(orient, 'pitch').text = str(prop['sProp_deg'][1])\n ET.SubElement(orient, 'yaw').text = str(prop['sProp_deg'][2])\n\n ET.SubElement(thruster, 'sense').text = str(prop['sense']) # 1 = 
CW as viewed from cockpit, -1 = CCW\n ET.SubElement(thruster, 'p_factor').text = str(prop['p_factor'])\n\n\n return(propulsion)\n\n\n#%% FCS\ndef FlightControl(oFdm):\n\n # Define all the Pilot input definition\n # Pilot Inputs, us the FG normalized sticks\n fcsPilotDef = {}\n fcsPilotDef['summer'] = {}\n fcsPilotDef['gain'] = {}\n\n fcsPilotDef['summer']['pilotRoll_norm'] = {}\n fcsPilotDef['summer']['pilotRoll_norm']['inputList'] = ['fcs/aileron-cmd-norm', 'fcs/roll-trim-cmd-norm']\n fcsPilotDef['summer']['pilotRoll_norm']['min'] = -1.0\n fcsPilotDef['summer']['pilotRoll_norm']['max'] = 1.0\n\n fcsPilotDef['gain']['cmdRoll_rps'] = {}\n fcsPilotDef['gain']['cmdRoll_rps']['input'] = 'fcs/pilotRoll_norm'\n fcsPilotDef['gain']['cmdRoll_rps']['gain'] = oFdm['FCS']['Pilot']['kRoll']\n\n fcsPilotDef['summer']['pilotPitch_norm'] = {}\n fcsPilotDef['summer']['pilotPitch_norm']['inputList'] = ['fcs/elevator-cmd-norm', 'fcs/pitch-trim-cmd-norm']\n fcsPilotDef['summer']['pilotPitch_norm']['min'] = -1.0\n fcsPilotDef['summer']['pilotPitch_norm']['max'] = 1.0\n\n fcsPilotDef['gain']['cmdPitch_rps'] = {}\n fcsPilotDef['gain']['cmdPitch_rps']['input'] = 'fcs/pilotPitch_norm'\n fcsPilotDef['gain']['cmdPitch_rps']['gain'] = oFdm['FCS']['Pilot']['kPitch']\n\n fcsPilotDef['summer']['pilotYaw_norm'] = {}\n fcsPilotDef['summer']['pilotYaw_norm']['inputList'] = ['fcs/rudder-cmd-norm', 'fcs/yaw-trim-cmd-norm']\n fcsPilotDef['summer']['pilotYaw_norm']['min'] = -1.0\n fcsPilotDef['summer']['pilotYaw_norm']['max'] = 1.0\n\n fcsPilotDef['gain']['cmdYaw_rps'] = {}\n fcsPilotDef['gain']['cmdYaw_rps']['input'] = 'fcs/pilotYaw_norm'\n fcsPilotDef['gain']['cmdYaw_rps']['gain'] = oFdm['FCS']['Pilot']['kYaw']\n\n fcsPilotDef['summer']['pilotFlap_norm'] = {}\n fcsPilotDef['summer']['pilotFlap_norm']['inputList'] = ['fcs/flap-cmd-norm']\n fcsPilotDef['summer']['pilotFlap_norm']['min'] = -1.0\n fcsPilotDef['summer']['pilotFlap_norm']['max'] = 1.0\n\n fcsPilotDef['gain']['cmdFlap_rad'] = {}\n fcsPilotDef['gain']['cmdFlap_rad']['input'] = 'fcs/pilotFlap_norm'\n fcsPilotDef['gain']['cmdFlap_rad']['gain'] = oFdm['FCS']['Pilot']['kFlap']\n\n\n # Create the JSB-ML\n elemFCS = ET.Element('flight_control', name = 'Generic Flight Control')\n\n pilot = ET.SubElement(elemFCS, 'channel', name = 'Pilot_Inputs')\n for type in fcsPilotDef:\n if type == 'summer':\n for key in fcsPilotDef['summer'].keys():\n entry = fcsPilotDef['summer'][key]\n\n summer = ET.SubElement(pilot, 'summer', name = key)\n\n for input in entry['inputList']:\n ET.SubElement(summer, 'input').text = input\n\n if ('min' in entry.keys()) or ('max' in entry.keys()):\n clipto = ET.SubElement(summer, 'clipto')\n if ('min' in entry.keys()): ET.SubElement(clipto, 'min').text = str(entry['min'])\n if ('max' in entry.keys()): ET.SubElement(clipto, 'max').text = str(entry['max'])\n\n ET.SubElement(summer, 'output').text = 'fcs/' + key\n\n if type == 'gain':\n for key in fcsPilotDef['gain'].keys():\n entry = fcsPilotDef['gain'][key]\n\n gain = ET.SubElement(pilot, 'pure_gain', name = key)\n\n ET.SubElement(gain, 'input').text = entry['input']\n ET.SubElement(gain, 'gain').text = str(entry['gain'])\n\n if ('min' in entry.keys()) or ('max' in entry.keys()):\n clipto = ET.SubElement(gain, 'clipto')\n if ('min' in entry.keys()): ET.SubElement(clipto, 'min').text = str(entry['min'])\n if ('max' in entry.keys()): ET.SubElement(clipto, 'max').text = str(entry['max'])\n\n ET.SubElement(gain, 'output').text = 'fcs/' + key\n\n\n # Control System Surface Mixer\n mixer = 
ET.SubElement(elemFCS, 'channel', name = 'Control Mixer')\n\n fcsMixerDef = oFdm['FCS']['Mixer']\n\n for iSurf, surf in enumerate(fcsMixerDef['surfNames']):\n cmdSurf = 'cmd' + surf + '_rad'\n keyList = []\n for iInput, input in enumerate(fcsMixerDef['inputs']):\n val = fcsMixerDef['surfMix'][iSurf][iInput]\n\n key = input + '_2_' + surf\n\n if val != 0.0:\n keyList.append(key)\n gain = ET.SubElement(mixer, 'pure_gain', name = key.replace('fcs/',''))\n\n ET.SubElement(gain, 'input').text = 'fcs/' + input\n ET.SubElement(gain, 'gain').text = str(val)\n\n ET.SubElement(gain, 'output').text = 'fcs/' + key\n\n if any(keyList):\n summer = ET.SubElement(mixer, 'summer', name = cmdSurf)\n for key in keyList:\n ET.SubElement(summer, 'input').text = 'fcs/' + key\n ET.SubElement(summer, 'output').text = 'fcs/' + cmdSurf\n\n\n\n # Inputs for External Commands, this just add property to create the node in the tree\n for iSurf, surf in enumerate(fcsMixerDef['surfNames']):\n cmdSurfExt = 'cmd' + surf + '_ext_rad'\n prop = ET.SubElement(elemFCS, 'property').text = 'fcs/' + cmdSurfExt\n\n name = 'Motor'\n cmdMotorExt = 'cmd' + name + '_ext_nd'\n motor = ET.SubElement(elemFCS, 'property').text = 'fcs/' + cmdMotorExt # Add the Motor external command\n\n\n # Inputs for External Commands, this just add property to create the node in the tree\n extern = ET.SubElement(elemFCS, 'channel', name = 'External Input Summations')\n for iSurf, surf in enumerate(fcsMixerDef['surfNames']):\n cmdSurf = 'cmd' + surf + '_rad'\n cmdSurfExt = 'cmd' + surf + '_ext_rad'\n\n summer = ET.SubElement(extern, 'summer')\n ET.SubElement(summer, 'input').text = 'fcs/' + cmdSurf\n ET.SubElement(summer, 'input').text = 'fcs/' + cmdSurfExt\n ET.SubElement(summer, 'output').text = 'fcs/' + cmdSurf\n\n name = 'Motor'\n cmdMotor = 'cmd' + name + '_nd'\n cmdMotorExt = 'cmd' + name + '_ext_nd'\n summer = ET.SubElement(extern, 'summer')\n ET.SubElement(summer, 'input').text = 'fcs/throttle-cmd-norm'\n ET.SubElement(summer, 'input').text = 'fcs/' + cmdMotorExt\n ET.SubElement(summer, 'output').text = 'fcs/throttle-pos-norm'\n\n return(elemFCS)\n\n\n#%% Effectors, for each surface define the 2nd order TF, and an 'actuator'\ndef Effectors(oFdm):\n\n sysEffDef = oFdm['Act']\n\n effectors = ET.Element('system', name = 'Effectors')\n channel = ET.SubElement(effectors, 'channel', name = 'Actuator Models')\n\n for surf in sysEffDef.keys():\n cmdSurf = 'cmd' + surf + '_rad'\n posSurf = 'pos' + surf + '_rad'\n\n entry = sysEffDef[surf]\n\n # Actuator - delay and freeplay\n actuator = ET.SubElement(channel, 'actuator', name = 'act' + surf)\n ET.SubElement(actuator, 'input').text = 'fcs/' + cmdSurf\n\n ET.SubElement(actuator, 'lag').text = str(entry['lag_nd'])\n ET.SubElement(actuator, 'hysteresis_width').text = str(entry['freeplay_rad'])\n ET.SubElement(actuator, 'delay').text = str(entry['delay_s'])\n\n if ('min' in entry.keys()) or ('max' in entry.keys()):\n clipto = ET.SubElement(actuator, 'clipto')\n if ('min' in entry.keys()): ET.SubElement(clipto, 'min').text = str(entry['min'])\n if ('max' in entry.keys()): ET.SubElement(clipto, 'max').text = str(entry['max'])\n\n ET.SubElement(actuator, 'output').text = 'fcs/' + posSurf\n\n return(effectors)\n\n\n#%%\ndef Winch(oFdm):\n external_reactions = ET.Element('external_reactions')\n\n # Winch\n force = ET.SubElement(external_reactions, 'force', name='hitch' , frame = 'BODY', unit='N')\n location = ET.SubElement(force, 'location', unit = 'M')\n ET.SubElement(location, 'x').text = 
str(oFdm['Winch']['rHook_S_m'][0])\n ET.SubElement(location, 'y').text = str(oFdm['Winch']['rHook_S_m'][1])\n ET.SubElement(location, 'z').text = str(oFdm['Winch']['rHook_S_m'][2])\n direction = ET.SubElement(force, 'direction')\n ET.SubElement(direction, 'x').text = str(oFdm['Winch']['sHook_deg'][0])\n ET.SubElement(direction, 'y').text = str(oFdm['Winch']['sHook_deg'][1])\n ET.SubElement(direction, 'z').text = str(oFdm['Winch']['sHook_deg'][2])\n\n return(external_reactions)\n\n\n#%% IMU\ndef SensorImu(oFdm):\n imu = ET.Element('system', name = 'Sensor - IMU')\n\n # Create time in us\n function = ET.SubElement(imu, 'function', name = 'sensor/imu/time_us')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'\n ET.SubElement(product, 'value').text = str(1e6)\n\n # Accelerometers\n if 'Accel' in oFdm['Sensor']['Imu'].keys() :\n channel = ET.SubElement(imu, 'channel', name = 'Temp Accelerometers')\n\n axisList = ['X', 'Y', 'Z']\n\n for axisName in axisList:\n accel = ET.SubElement(channel, 'accelerometer', name = 'Accel' + axisName)\n\n ET.SubElement(accel, 'axis').text = axisName\n\n location = ET.SubElement(accel, 'location', unit = 'M')\n ET.SubElement(location, 'x').text = str(oFdm['Sensor']['Imu']['Accel']['r_S_m'][0])\n ET.SubElement(location, 'y').text = str(oFdm['Sensor']['Imu']['Accel']['r_S_m'][1])\n ET.SubElement(location, 'z').text = str(oFdm['Sensor']['Imu']['Accel']['r_S_m'][2])\n\n orientation = ET.SubElement(accel, 'orientation', unit='DEG')\n ET.SubElement(orientation, 'roll').text = str(oFdm['Sensor']['Imu']['Accel']['s_deg'][0])\n ET.SubElement(orientation, 'pitch').text = str(oFdm['Sensor']['Imu']['Accel']['s_deg'][1])\n ET.SubElement(orientation, 'yaw').text = str(oFdm['Sensor']['Imu']['Accel']['s_deg'][2])\n\n ET.SubElement(accel, 'output').text = 'sensor/imu/accel' + axisName + '_true_fps2'\n\n\n # Convert Units Accelerometer to mps2\n for axisName in axisList:\n function = ET.SubElement(imu, 'function', name = 'sensor/imu/accel' + axisName + '_true_mps2')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'sensor/imu/accel' + axisName + '_true_fps2'\n ET.SubElement(product, 'value').text = str(ft2m)\n\n\n # Accelerometer Error Model\n channel = ET.SubElement(imu, 'channel', name = 'Accelerometer Error Model')\n\n errMod = oFdm['Sensor']['Imu']['Accel']\n for iAxis, axisName in enumerate(axisList):\n sensor = ET.SubElement(channel, 'sensor', name = 'Accel' + axisName)\n ET.SubElement(sensor, 'input').text = 'sensor/imu/accel' + axisName + '_true_mps2'\n\n ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])\n ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])\n ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])\n ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])\n ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])\n ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])\n\n ET.SubElement(sensor, 'output').text = 'sensor/imu/accel' + axisName + '_mps2'\n\n\n # Gyros\n if 'Gyro' in oFdm['Sensor']['Imu'].keys() :\n errMod = oFdm['Sensor']['Imu']['Gyro']\n channel = ET.SubElement(imu, 'channel', name = 'Gyros')\n\n for iAxis, axisName in enumerate(axisList):\n gyro = ET.SubElement(channel, 'gyro', name = 'Gyro' + axisName)\n\n ET.SubElement(gyro, 'axis').text = axisName\n\n location = ET.SubElement(gyro, 'location', 
unit = 'M')\n ET.SubElement(location, 'x').text = str(errMod['r_S_m'][0])\n ET.SubElement(location, 'y').text = str(errMod['r_S_m'][1])\n ET.SubElement(location, 'z').text = str(errMod['r_S_m'][2])\n\n orientation = ET.SubElement(gyro, 'orientation', unit='DEG')\n ET.SubElement(orientation, 'roll').text = str(errMod['s_deg'][0])\n ET.SubElement(orientation, 'pitch').text = str(errMod['s_deg'][1])\n ET.SubElement(orientation, 'yaw').text = str(errMod['s_deg'][2])\n\n ET.SubElement(gyro, 'lag').text = str(errMod['lag'][iAxis])\n ET.SubElement(gyro, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])\n ET.SubElement(gyro, 'drift_rate').text = str(errMod['drift_ps'][iAxis])\n ET.SubElement(gyro, 'gain').text = str(errMod['gain_nd'][iAxis])\n ET.SubElement(gyro, 'bias').text = str(errMod['bias'][iAxis])\n ET.SubElement(gyro, 'delay').text = str(errMod['delay_s'][iAxis])\n\n ET.SubElement(gyro, 'output').text = 'sensor/imu/gyro' + axisName + '_rps'\n\n # Magnetometers\n if 'Mag' in oFdm['Sensor']['Imu'].keys() :\n errMod = oFdm['Sensor']['Imu']['Mag']\n channel = ET.SubElement(imu, 'channel', name = 'Magnetometers')\n\n for iAxis, axisName in enumerate(axisList):\n mag = ET.SubElement(channel, 'magnetometer', name = 'Mag' + axisName)\n\n ET.SubElement(mag, 'axis').text = axisName\n\n location = ET.SubElement(mag, 'location', unit = 'M')\n ET.SubElement(location, 'x').text = str(errMod['r_S_m'][0])\n ET.SubElement(location, 'y').text = str(errMod['r_S_m'][1])\n ET.SubElement(location, 'z').text = str(errMod['r_S_m'][2])\n\n orientation = ET.SubElement(mag, 'orientation', unit='DEG')\n ET.SubElement(orientation, 'roll').text = str(errMod['s_deg'][0])\n ET.SubElement(orientation, 'pitch').text = str(errMod['s_deg'][1])\n ET.SubElement(orientation, 'yaw').text = str(errMod['s_deg'][2])\n\n ET.SubElement(mag, 'lag').text = str(errMod['lag'][iAxis])\n ET.SubElement(mag, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])\n ET.SubElement(mag, 'drift_rate').text = str(errMod['drift_ps'][iAxis])\n ET.SubElement(mag, 'gain').text = str(errMod['gain_nd'][iAxis])\n ET.SubElement(mag, 'bias').text = str(errMod['bias'][iAxis])\n ET.SubElement(mag, 'delay').text = str(errMod['delay_s'][iAxis])\n\n ET.SubElement(mag, 'output').text = 'sensor/imu/mag' + axisName + '_nT'\n\n # Magnetometer unit conversion\n for axisName in axisList:\n function = ET.SubElement(imu, 'function', name = 'sensor/imu/mag' + axisName + '_uT')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'sensor/imu/mag' + axisName + '_nT'\n ET.SubElement(product, 'value').text = str(0.001)\n\n return(imu)\n\n#%% GPS\ndef SensorGps(oFdm):\n\n gps = ET.Element('system', name = 'Sensor - GPS')\n\n # Create time in us\n function = ET.SubElement(gps, 'function', name = 'sensor/gps/time_us')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'\n ET.SubElement(product, 'value').text = str(1e6)\n\n # GPS Position\n function = ET.SubElement(gps, 'function', name = 'sensor/gps/lat_true_rad')\n ET.SubElement(function, 'property').text = 'position/lat-geod-rad'\n function = ET.SubElement(gps, 'function', name = 'sensor/gps/long_true_rad')\n ET.SubElement(function, 'property').text = 'position/long-gc-rad'\n\n function = ET.SubElement(gps, 'function', name = 'sensor/gps/alt_true_m')\n product = ET.SubElement(function, 'product')\n 
ET.SubElement(product, 'property').text = 'position/h-sl-ft'\n ET.SubElement(product, 'value').text = str(ft2m)\n\n # GPS Velocity\n function = ET.SubElement(gps, 'function', name = 'sensor/gps/vNorth_true_mps')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'velocities/v-north-fps'\n ET.SubElement(product, 'value').text = str(ft2m)\n\n function = ET.SubElement(gps, 'function', name = 'sensor/gps/vEast_true_mps')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'velocities/v-east-fps'\n ET.SubElement(product, 'value').text = str(ft2m)\n\n function = ET.SubElement(gps, 'function', name = 'sensor/gps/vDown_true_mps')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'velocities/v-down-fps'\n ET.SubElement(product, 'value').text = str(ft2m)\n\n\n # GPS Error Model\n channel = ET.SubElement(gps, 'channel', name = 'GPS Error Models')\n\n axisList = ['lat_rad', 'long_rad', 'alt_m']\n errMod = oFdm['Sensor']['Gps']['Pos']\n for iAxis, axisName in enumerate(axisList):\n sensor = ET.SubElement(channel, 'sensor', name = axisName)\n\n ET.SubElement(sensor, 'input').text = 'sensor/gps/' + axisName.replace('_', '_true_')\n\n ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])\n ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])\n ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])\n ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])\n ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])\n ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])\n\n ET.SubElement(sensor, 'output').text = 'sensor/gps/' + axisName\n\n axisList = ['vNorth_mps', 'vEast_mps', 'vDown_mps']\n errMod = oFdm['Sensor']['Gps']['Vel']\n for iAxis, axisName in enumerate(axisList):\n sensor = ET.SubElement(channel, 'sensor', name = axisName)\n\n ET.SubElement(sensor, 'input').text = 'sensor/gps/' + axisName.replace('_', '_true_')\n\n ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])\n ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])\n ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])\n ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])\n ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])\n ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])\n\n ET.SubElement(sensor, 'output').text = 'sensor/gps/' + axisName\n\n\n return(gps)\n\n#%%\n\ndef SensorPitot(oFdm):\n\n pitot = ET.Element('system', name = 'Sensor - Pitot-Static Probe')\n\n # Create time in us\n function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/time_us')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'\n ET.SubElement(product, 'value').text = str(1e6)\n\n # Airdata Static\n function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/presStatic_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'atmosphere/P-psf'\n ET.SubElement(product, 'value').text = str(psf2pa)\n\n # Airdata Tip (Dynamic ~= Impact)\n function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/presTip_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'aero/qbar-psf'\n 
ET.SubElement(product, 'value').text = str(psf2pa)\n\n # Airdata Temperature\n function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/temp_true_C')\n product = ET.SubElement(function, 'product')\n summation = ET.SubElement(product, 'sum')\n ET.SubElement(summation, 'property').text = 'atmosphere/T-R'\n ET.SubElement(summation, 'value').text = str(-491.67)\n ET.SubElement(product, 'value').text = str(5.0/9.0)\n\n # Pitot Error Model\n channel = ET.SubElement(pitot, 'channel', name = 'Pitot Error Models')\n\n axisList = ['presStatic_Pa', 'presTip_Pa', 'temp_C']\n errMod = oFdm['Sensor']['Gps']['Vel']\n for iAxis, axisName in enumerate(axisList):\n sensor = ET.SubElement(channel, 'sensor', name = axisName)\n\n ET.SubElement(sensor, 'input').text = 'sensor/pitot/' + axisName.replace('_', '_true_')\n\n ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])\n ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])\n ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])\n ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])\n ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])\n ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])\n\n ET.SubElement(sensor, 'output').text = 'sensor/pitot/' + axisName\n\n\n return(pitot)\n\n#%%\n\ndef Sensor5Hole(oFdm):\n\n fiveHole = ET.Element('system', name = 'Sensor - 5Hole Probe')\n\n # Determine whether method #1 or method #2\n\n if 'alphaK1' and 'betaK1' in oFdm['Sensor']['5Hole'].keys():\n method = 1\n elif 'alphaK2' and 'betaK2' in oFdm['Sensor']['5Hole'].keys():\n method = 2\n else:\n print('5Hole Probe: Need either (alphaK1 and betaK1) or (alphaK2 and betaK2)')\n\n # Create time in us\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/time_us')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'\n ET.SubElement(product, 'value').text = str(1e6)\n\n # Airdata Static\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presStatic_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'atmosphere/P-psf'\n ET.SubElement(product, 'value').text = str(psf2pa)\n\n # Airdata Tip (Dynamic ~= Impact)\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presTip_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'aero/qbar-psf'\n ET.SubElement(product, 'value').text = str(psf2pa)\n\n # Airdata Temperature\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/temp_true_C')\n product = ET.SubElement(function, 'product')\n summation = ET.SubElement(product, 'sum')\n ET.SubElement(summation, 'property').text = 'atmosphere/T-R'\n ET.SubElement(summation, 'value').text = str(-491.67)\n ET.SubElement(product, 'value').text = str(5.0/9.0)\n\n\n # [Method 1]\n if method == 1:\n axisList = ['presStatic_Pa', 'presTip_Pa', 'presAlphaBot_Pa', 'presAlphaTop_Pa', 'presBetaRight_Pa', 'presBetaLeft_Pa', 'temp_C']\n\n # Alpha Difference (presAlphaBot - presAlphaTop)\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlphaBot_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'aero/alpha-deg'\n ET.SubElement(product, 'property').text = 'aero/qbar-psf'\n ET.SubElement(product, 'value').text = str(psf2pa)\n 
ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK1'][0])\n\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlphaTop_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'aero/alpha-deg'\n ET.SubElement(product, 'property').text = 'aero/qbar-psf'\n ET.SubElement(product, 'value').text = str(psf2pa)\n ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK1'][1])\n\n # [Method 2] Beta Difference (presBetaRight - presBetaLeft)\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBetaRight_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'aero/beta-deg'\n ET.SubElement(product, 'property').text = 'aero/qbar-psf'\n ET.SubElement(product, 'value').text = str(psf2pa)\n ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK1'][0])\n\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBetaLeft_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'aero/beta-deg'\n ET.SubElement(product, 'property').text = 'aero/qbar-psf'\n ET.SubElement(product, 'value').text = str(psf2pa)\n ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK1'][1])\n\n\n # [Method 2]\n elif method == 2:\n axisList = ['presStatic_Pa', 'presTip_Pa', 'presAlpha_Pa', 'presBeta_Pa', 'temp_C']\n\n # Alpha Difference (presAlphaBot - presAlphaTop)\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlpha_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'aero/alpha-deg'\n ET.SubElement(product, 'property').text = 'aero/qbar-psf'\n ET.SubElement(product, 'value').text = str(psf2pa)\n ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK2'])\n\n # [Method 2] Beta Difference (presBetaRight - presBetaLeft)\n function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBeta_true_Pa')\n product = ET.SubElement(function, 'product')\n ET.SubElement(product, 'property').text = 'aero/beta-deg'\n ET.SubElement(product, 'property').text = 'aero/qbar-psf'\n ET.SubElement(product, 'value').text = str(psf2pa)\n ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK2'])\n\n\n # 5Hole Error Model\n channel = ET.SubElement(fiveHole, 'channel', name = '5Hole Error Models')\n\n errMod = oFdm['Sensor']['5Hole']\n for iAxis, axisName in enumerate(axisList):\n sensor = ET.SubElement(channel, 'sensor', name = axisName)\n\n ET.SubElement(sensor, 'input').text = 'sensor/fiveHole/' + axisName.replace('_', '_true_')\n\n ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])\n ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])\n ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])\n ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])\n ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])\n ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])\n\n ET.SubElement(sensor, 'output').text = 'sensor/fiveHole/' + axisName\n\n\n return(fiveHole)\n" ]
[ [ "numpy.shape" ] ]
CBICA/MUSE
[ "edd01964078f957101130993899c7f4de13d48b6" ]
[ "src/muse-combineRoiMapsIter.py" ]
[ "#!/usr/bin/env python\n#\n# @file muse_combineRoiMapsIter.py\n# @brief Combine roi probability maps for a single subject\n#\n# Copyright (c) 2011, 2012 University of Pennsylvania. All rights reserved.<br />\n# See http://www.cbica.upenn.edu/sbia/software/license.html or COPYING file.\n#\n# Contact: SBIA Group <sbia-software at uphs.upenn.edu>\n#\n\n\n\n#Usage \n# ############################################ #\n# muse_combineRoiMapsIter.py /Path/To/Input/List.txt /Path/To/Destination/outImgName\n################################################\n# will read roi files listed in 'List.txt' file\n#The list file must have full paths to the files\n\nimport nibabel as nib\nimport numpy as np\nimport sys\nimport re\nimport time\n\nprint(str(sys.argv))\n\nInputList=str(sys.argv[1])\nDestFile=str(sys.argv[2])\n\n### Sanity check on the arguments\nif not InputList or not DestFile:\n\tprint(\"ERROR: Required input options not provided!!!\")\n\tsys.exit(0) \n\n### Printing input arguments\nprint('\\n\\n')\nprint('Subject Input List :', InputList)\nprint('Destination File :', DestFile)\nprint('\\n\\n')\n\n### Reading input file first line\nf=open(InputList)\nfline = f.readline()\nf.close()\n\n### Extract roi no\nmatch=re.search('([\\w.-]+)ROI_(\\d+)_([\\w.-]+)', fline)\nif match:\n\trnos = match.group(2)\n\trno = int(rnos)\nelse:\n\tprint('ERROR: No ROI_{roino} in file name !')\n\texit(1)\n\n### Read img, vectorize\nimg = nib.load(str.rstrip(fline))\na=img.get_data()\nb=np.reshape(a,-1)\nisize = a.shape\nvsize = b.shape\n\n### Set index of voxels belonging to that roi, set also max values\nimgMAX = b\nimgIND = np.zeros(vsize)\nimgIND[b>0] = rno\n\n### Reading input file list\nf=open(InputList)\nlines = f.readlines()\nf.close()\nctr=1\n\n### Combine roi images\nfor line in lines:\n \n\tprint(line)\n\t\n\t### Extract roi no\n\tmatch=re.search('([\\w.-]+)ROI_(\\d+)_([\\w.-]+)', line)\n\tif match:\n\t\trnos = match.group(2)\n\t\trno = int(rnos)\n\telse:\n\t\tprint('ERROR: No ROI_{roino} in file name !')\n\t\texit(1)\n\t\n\t### Read img, vectorize\n\timg = nib.load(str.rstrip(line))\n\ta=img.get_data()\n\tb=np.reshape(a,-1)\n\n\t### Set index of voxels belonging to that roi, set also max values\n\timgIND.put((b>imgMAX).nonzero(), rno)\n\timgMAX = np.maximum(b,imgMAX)\n\n\n### Write out img\nimgINDM = np.reshape(imgIND,isize)\n\naff = img.get_affine()\nhdr = img.get_header()\n#hdr.set_data_dtype(np.int16)\nimg2 = nib.Nifti1Image(imgINDM, aff, hdr)\nimg2.to_filename(DestFile);\n" ]
[ [ "numpy.maximum", "numpy.reshape", "numpy.zeros" ] ]
y3sar/painter_gan
[ "374fb91927ca584b4ef3fd8ba10922c7b5201780" ]
[ "generator.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torchvision.transforms import ToTensor, ToPILImage\n\n\n\n\nclass Generator(nn.Module):\n def __init__(self):\n super().__init__()\n\n\n self.conv_block = nn.Sequential(\n\n nn.ConvTranspose2d(100, 512, 4, 1, 0),\n nn.BatchNorm2d(512),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(512, 256, 4, 2, 1),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(256, 128, 4, 2, 1),\n nn.BatchNorm2d(128),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(128, 64, 4, 2, 1),\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, 3, 4, 2, 1),\n nn.BatchNorm2d(3),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(3, 3, 4, 2, 1),\n nn.Tanh(),\n\n \n\n )\n\n def forward(self, x):\n x = self.conv_block(x)\n \n\n\n return x\n\n\nif __name__ == '__main__':\n\n img = torch.randn(1, 100, 1, 1)\n\n gen = Generator()\n\n print(gen(img).shape)\n\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.randn", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.ConvTranspose2d" ] ]
siemens/drace
[ "2679067783b1d8f39e4c370cd72a7626ebf5f8e8" ]
[ "tools/ReportConverter/ReportConverter.py" ]
[ "# \n# ReportConverter: A graphical report generator for DRace\n# \n# Copyright 2019 Siemens AG\n# \n# Authors:\n# <Philip Harr> <[email protected]>\n# \n# SPDX-License-Identifier: MIT\n#\n\n## \\package ReportConverter\n## \\brief Python XML to HTML report converter for the better visualization of drace result data\n\n\n\nimport xml.etree.ElementTree as ET\nimport shutil\nimport argparse\nimport pathlib\nimport datetime\nimport html\nimport sys\nfrom subprocess import check_call, STDOUT, DEVNULL\nfrom functools import lru_cache\n\ntry:\n import matplotlib\n import matplotlib.pyplot as plt\n from matplotlib.lines import Line2D\n noMatplotLib = False\nexcept ImportError:\n noMatplotLib = True\n print(\"Matplotlib is not installed.\")\n\n#look for resources path\nif getattr(sys, 'frozen', False):\n SCRIPTPATH = pathlib.Path(sys.executable)\n SCRIPTPATH = pathlib.Path(SCRIPTPATH / \"..\")\nelse :\n SCRIPTPATH = pathlib.Path(pathlib.Path(__file__).resolve().parents[0])\n\n\nif pathlib.Path(SCRIPTPATH / '../resources').is_dir():\n resourcesPath = pathlib.Path(SCRIPTPATH / '../resources')\n\nelse:\n if pathlib.Path(SCRIPTPATH / 'resources').is_dir():\n resourcesPath = pathlib.Path(SCRIPTPATH / 'resources')\n else:\n print(\"path of resources not found\")\n sys.exit(-1)\n\n#Paths\ng_HTMLTEMPLATES = resourcesPath / \"entries.xml\"\ng_CSSPATH = resourcesPath / \"css\"\ng_JSPATH = resourcesPath / \"js\"\n\nDEBUG = False\n\n#info: blacklisting overrules whitelisting\nSOURCEFILE_BL = list()\nSOURCEFILE_WL = list()\nWHITELISTING = False\nNUMBEROFCODELINES = 400\nif NUMBEROFCODELINES % 2:\n print('Number of maximum of displayed code lines must be even, but is:')\n print(str(NUMBEROFCODELINES))\n sys.exit(-1)\n\n#Source Directories\nSOURCE_DIRECTORIES = list()\n\nclass ReportCreator:\n _htmlTemplatesPath = str(g_HTMLTEMPLATES)\n _topStackGraphFileName = 'topStackBarchart.png'\n _errorTimesPlot = 'errorTimes.png'\n\n\n try:\n if check_call(['code', '--version'], stdout=DEVNULL, stderr=STDOUT, shell=True) == 0: #check if vscode is installed, for sourcefile linking\n _vscodeFlag = True\n else:\n _vscodeFlag = False\n except:\n _vscodeFlag = False\n\n\n def __init__(self, pathOfReport, target):\n self.sourcefileList = list()\n self._callStackNumber = 0\n self._errorNumber = 0\n self._snippets = str()\n self.succesfullReportCreation = True\n \n try:\n self._htmlTemplates = (ET.parse(self._htmlTemplatesPath)).getroot()\n except FileNotFoundError:\n print(\"template file is missing\")\n self.succesfullReportCreation = False\n return\n \n self.SCM = SourceCodeManagement()\n self._pathOfReport = pathOfReport\n \n if self._inputValidation():\n hasErrors = self._reportRoot.find('error') != None\n if not noMatplotLib and hasErrors:\n self._makeHistogramm(target)\n self._countTopStackOccurences(target)\n \n self._createReport()\n else:\n print(\"input file is not valid\")\n self.succesfullReportCreation = False\n\n def _inputValidation(self):\n\n \n try:\n self._reportContent = ET.parse(self._pathOfReport)\n except ET.ParseError:\n return 0\n\n self._reportRoot = self._reportContent.getroot()\n \n if self._reportRoot.find('protocolversion') != None and \\\n self._reportRoot.find('protocoltool') != None and \\\n self._reportRoot.find('preamble') != None and \\\n self._reportRoot.find('pid') != None and \\\n self._reportRoot.find('tool') != None and \\\n self._reportRoot.find('args') != None and \\\n self._reportRoot.find('status') != None and \\\n self._reportRoot.tag == 'valgrindoutput':\n\n return 1\n else:\n 
return 0\n\n def _getHeader(self):\n header = list()\n status = self._reportRoot.findall('status')\n if len(status) == 2:\n status = status[1] ##second status contains finishing values\n strDatetime = status.find('time').text\n if \"T\" in strDatetime:\n date = strDatetime.split('T')[0]\n time = (strDatetime.split('T')[1])[0:-1] #last digit is 'Z' -> not needed\n else:\n date = \"\"\n time = strDatetime\n\n header.append(adjText(date))\n header.append(adjText(time))\n if status.find('duration') != None:\n header.append(adjText(status.find('duration').text))\n header.append(adjText(status.find('duration').get('unit')))\n else:\n header.append(\"\")\n header.append(\"\") \n else:\n header.append(\"\")\n header.append(\"\")\n header.append(\"\")\n header.append(\"\")\n\n arguments = str()\n for arg in self._reportRoot.find('args').find('vargv').findall('arg'):\n arguments += arg.text\n arguments += ' '\n header.append(adjText(arguments[0:-1])) #remove last ' '\n \n header.append(adjText(self._reportRoot.find('args').find('argv').find('exe').text))\n header.append(adjText(self._reportRoot.find('protocolversion').text))\n header.append(adjText(self._reportRoot.find('protocoltool').text))\n\n return header\n\n def _makeFileEntry(self, frame):\n strDir = adjText(self._frameValues[\"dir\"])\n strFile = adjText(self._frameValues[\"file\"])\n strLine = adjText(self._frameValues[\"line\"])\n offset = adjText(self._frameValues[\"offset\"])\n\n if self._vscodeFlag:\n entry = \"<a href='vscode://file/\" + strDir + \"/\" + strFile + \":\" + strLine + \":\" + offset +\"'>\"+ strFile +\":\" + strLine + \":\" + offset + \"</a>\"\n else:\n entry = \"<a href='file://\"+ strDir + \"/\" + strFile + \"'>\" + strFile + \":\" + strLine + \"</a>\"\n \n return entry\n\n def _readFrame(self, frame):\n \n if frame is None:\n self._frameValues = {\"obj\":\"\", \"fn\":\"\", \"ip\":\"\", \"dir\":\"\", \"file\":\"\", \"line\":\"\", \"offset\":\"\"}\n return\n\n obj = frame.find('obj')\n if obj is None:\n obj = \"\"\n else:\n if obj.text is None:\n obj = \"\"\n else:\n obj = obj.text\n fn = frame.find('fn')\n if fn is None:\n fn = \"\"\n else:\n if fn.text is None:\n fn = \"\"\n else:\n fn = fn.text\n ip = frame.find('ip')\n if ip is None:\n ip = \"\"\n else:\n if ip.text is None:\n ip = \"\"\n else:\n ip = ip.text \n\n direc = frame.find('dir')\n if direc is None:\n direc = \"\"\n else:\n if direc.text is None:\n direc = \"\"\n else:\n direc = direc.text \n\n filename = frame.find('file')\n if filename is None:\n filename = \"\"\n else:\n if filename.text is None:\n filename = \"\"\n else:\n filename = filename.text \n\n line = frame.find('line')\n if line is None:\n line = \"0\"\n else:\n if line.text is None:\n line = \"0\"\n else:\n line = line.text \n\n offset = frame.find('offset')\n if offset is None:\n offset = \"0\"\n else:\n if offset.text is None:\n offset = \"0\"\n else:\n offset = offset.text \n\n self._frameValues = {\"obj\":obj, \"fn\":fn, \"ip\":ip, \"dir\":direc, \"file\":filename, \"line\":line, \"offset\":offset}\n\n\n def _createSnippetEntry(self, frame, elementNumber, tag, codeIndex, buttonID):\n \n\n newSnippet = self._htmlTemplates.find('snippet_entry').text\n\n newSnippet = newSnippet.replace('*SNIPPET_VAR*', (\"snippet_\" + str(self._callStackNumber)))\n newSnippet = newSnippet.replace('*STACK_NUMBER*', adjText(hex(elementNumber)))\n newSnippet = newSnippet.replace('*OBJ*', adjText(self._frameValues[\"obj\"]))\n newSnippet = newSnippet.replace('*FUNCTION*', adjText(self._frameValues[\"fn\"]))\n 
newSnippet = newSnippet.replace('*INSTRUCTION_POINTER*', adjText(self._frameValues[\"ip\"]))\n newSnippet = newSnippet.replace('*CODE_TAG*', tag)\n newSnippet = newSnippet.replace('*SNIPPET_BUTTON_ID*', buttonID)\n\n if (self._frameValues[\"file\"] != \"\"):\n newSnippet = newSnippet.replace('*FILE_NAME_ENTRY*', self._makeFileEntry(frame))\n newSnippet = newSnippet.replace('*DIRECTORY*', adjText(self._frameValues[\"dir\"]))\n newSnippet = newSnippet.replace('*SHORT_DIR*', adjText(self._makeShortDir(self._frameValues[\"dir\"])))\n newSnippet = newSnippet.replace('*LINE_OF_CODE*', adjText(self._frameValues[\"line\"]))\n\n\n if(codeIndex != -1):\n newSnippet = newSnippet.replace('*CODE_ID_VAR*', \"snippet_\"+str(self._callStackNumber)+\"_code\")\n newSnippet = newSnippet.replace('*LANGUAGE*', self.SCM.determineLanguage(adjText(self._frameValues[\"file\"])))\n newSnippet = newSnippet.replace('*FIRST_LINE*', str(self.SCM.getFirstLineOfCodeSnippet(codeIndex)))\n else:\n newSnippet = newSnippet.replace('*CODE_ID_VAR*', \"'None'\")\n\n else:\n newSnippet = newSnippet.replace('*FILE_NAME_ENTRY*', 'no filename avail.')\n newSnippet = newSnippet.replace('*DIRECTORY*', 'no directory avail.')\n newSnippet = newSnippet.replace('*SHORT_DIR*', 'no directory avail.')\n\n self._snippets += newSnippet #append referenced code snippet\n\n def _makeShortDir(self, strDir):\n elements = None\n if \"\\\\\" in strDir:\n elements = strDir.split(\"\\\\\")\n else:\n if \"/\" in strDir:\n elements = strDir.split(\"/\")\n \n if elements != None:\n return elements[0] + \"/\" + elements[1] + \"/.../\" + elements[-1]\n else:\n return \"\"\n\n def _createCallStack(self, errorEntry, position, outputID):\n \n callStack = str()\n stackTemplate = self._htmlTemplates.find('stack_entry').text\n stackArray = errorEntry.findall('stack')\n stack = stackArray[position]\n elementNumber = 0\n\n frames = stack.findall('frame')\n if frames is None:\n return \"\"\n\n for frame in frames:\n self._readFrame(frame) #reads all frame values and fills member var\n \n # updates frame dir if valid sourceDirectories are given, otherwise returns same value\n newDir = self.SCM.searchSourceDirectories(self._frameValues[\"dir\"], self._frameValues[\"file\"])\n self._frameValues[\"dir\"] = adjText(newDir)\n\n noPreview = False \n buttonID = \"button_\" + str(self._errorNumber) + \"_\" + str(position) + \"_\" + str(elementNumber)\n strOutputID = outputID+str(position)\n \n if elementNumber == 0:\n ###make heading for the red box### \n if len(self._errorHeading) == 0:\n self._errorHeading += \"<br> Obj. 1: \" + (adjText(self._frameValues[\"obj\"]) + ': \"' + adjText(self._frameValues[\"fn\"])) + '\" <br> '\n else:\n self._errorHeading += \"Obj. 
2: \" + (adjText(self._frameValues[\"obj\"]) + ': \"' + adjText(self._frameValues[\"fn\"])) + '\"'\n \n #general entries (always available)\n newStackElement = stackTemplate.replace('*STACK_NUMBER*', adjText(hex(elementNumber))+\":\")\n newStackElement = newStackElement.replace('*SNIPPET_VAR*', (\"snippet_\" + str(self._callStackNumber)))\n newStackElement = newStackElement.replace('*OUTPUT_ID*', strOutputID)\n newStackElement = newStackElement.replace('*FUNCTION*', adjText(self._frameValues['fn']))\n newStackElement = newStackElement.replace('*BUTTON_ID*', buttonID)\n \n\n if (self._frameValues[\"file\"]!= \"\"): #file is in xml report defined\n codeIndex, tag = self.SCM.handleSourceCode(self._frameValues[\"file\"], self._frameValues[\"dir\"], self._frameValues[\"line\"])\n newStackElement = newStackElement.replace('*FILE*', adjText(self._frameValues[\"file\"]))\n\n if(codeIndex != -1):\n newStackElement = newStackElement.replace('*CODE_VAR*', str(codeIndex))\n newStackElement = newStackElement.replace('*CODE_ID_VAR*', \"'snippet_\"+str(self._callStackNumber)+\"_code'\")\n newStackElement = newStackElement.replace('*LINE_OF_CODE*', adjText(self._frameValues[\"line\"]))\n newStackElement = newStackElement.replace('*FIRST_LINE*', str(self.SCM.getFirstLineOfCodeSnippet(codeIndex))) \n \n else: #file is not available on device or file is blacklisted or not whitelisted\n noPreview = True\n \n \n else: #no filepath for file in xml is given \n codeIndex = -1\n tag = self._htmlTemplates.find('no_code_entry').text\n newStackElement = newStackElement.replace('*FILE*', 'no filename avail.')\n noPreview = True\n\n if noPreview:\n newStackElement = newStackElement.replace('*CODE_VAR*', \"'None'\")\n newStackElement = newStackElement.replace('*CODE_ID_VAR*', \"'None'\")\n newStackElement = newStackElement.replace('*LINE_OF_CODE*', \"'None'\")\n newStackElement = newStackElement.replace('*FIRST_LINE*', \"'NONE'\") \n searchStr = 'class=\"'\n insertPosition = newStackElement.find(searchStr)+len(searchStr) #to add the \".grey\" class the position before after class\n #insertPosition += newStackElement[insertPosition:].find('\"')\n newStackElement = newStackElement[:insertPosition] + \"grey-button \" + newStackElement[insertPosition:] \n\n \n self._createSnippetEntry(frame, elementNumber, tag, codeIndex, buttonID)\n callStack += newStackElement #append stack element\n elementNumber += 1\n self._callStackNumber += 1 #increase global call stack number (used for reference variables)\n\n return callStack\n\n def _makeHistogramm(self, target):\n errorTimes = dict()\n statusNode = self._reportRoot.findall('status')[1]\n \n if statusNode.find('duration') is None:\n self._errorTimesPlot = \"\"\n return\n \n totalDuration = int(statusNode.find('duration').text)\n errors = self._reportRoot.findall('error')\n \n for error in errors:\n timePoint = (round(float(100 * int(error.find('timestamp').text) /totalDuration))) #get occurance in %\n if errorTimes.get(timePoint) != None:\n value = errorTimes.pop(timePoint)\n errorTimes.update({timePoint: int(value)+1})\n else:\n errorTimes.update({timePoint: 1})\n \n x = list(errorTimes.keys())\n y = list(errorTimes.values())\n #make plot \n fig = plt.figure(figsize=(10,4)) \n ax = plt.axes() \n ax.scatter(x, y, color='#009999', edgecolor='black')\n\n xRangeEnd = max(y)+1\n if xRangeEnd < 3: #0, 1, 2 shall be always visible, even if max(y) is only 1\n xRangeEnd = 3\n ax.set_yticks([i for i in range(0, xRangeEnd)])\n ax.set_xticks([i for i in range(0, 110, 10)])\n \n 
plt.title('Error occurrences by time',fontfamily=\"monospace\", fontweight='bold')\n plt.ylabel('Occurrences', fontfamily=\"monospace\",fontweight='bold')\n plt.xlabel('Execution of program in %. \\n Total execution time = ' + str(totalDuration) + 'ms', fontfamily=\"monospace\",fontweight='bold') \n\n fig.add_axes(ax)\n #plt.show()\n figPath = pathlib.Path(target+'/'+self._errorTimesPlot)\n plt.savefig(str(figPath), dpi=300, format='png', bbox_inches='tight', orientation='landscape') # use format='svg' or 'pdf' for vectorial pictures\n\n def _countTopStackOccurences(self, target):\n topStackOccurences = dict()\n errors = self._reportRoot.findall('error')\n \n for error in errors:\n stacks = error.findall('stack')\n for i in range(0,2): \n topFrame = stacks[i].find('frame') #returns first element of with frame tag\n \n if(topFrame != None):\n self._readFrame(topFrame)\n tmp1 = self._frameValues[\"file\"]\n tmp2 = self._frameValues[\"fn\"]\n\n if(tmp1 != \"None\" and tmp2 != \"None\"):\n if(len(tmp2) > 20): #split function name in half if it is too long\n tmp2 = tmp2[:len(tmp2)//2] + '\\n' + tmp2[len(tmp2)//2:]\n identifier = tmp1 + \":\\n\" + tmp2\n\n if topStackOccurences.get(identifier) != None:\n value = topStackOccurences.pop(identifier)\n topStackOccurences.update({identifier: int(value)+1})\n else:\n topStackOccurences.update({identifier: 1})\n\n \n #sort dict\n sortedOccurences = sorted(topStackOccurences.items(), key=lambda kv: kv[1])\n x=list()\n y=list()\n for ele in sortedOccurences[-5:]: #append the 5 largest values in ascending order\n if len(ele[0]) < 250:\n x.append(ele[0]) #x values (basically the function names)\n else:\n x.append(ele[0][:250]+\". . .\")\n y.append(ele[1]) #y values occurrences (bar height)\n\n #make plot \n fig = plt.figure(figsize=(10,4)) \n ax = plt.axes() \n barWidth = 0.9 # the width of the bars \n xLoc = list(range(len(y))) # the x locations for the groups\n ax.barh([loc for loc in xLoc], y, barWidth, color='#009999')\n ax.set_yticks([loc for loc in xLoc])\n ax.set_yticklabels(reversed(['#'+str(rank) for rank in range(1,len(y)+1)]), minor=False)\n legend_lines = [Line2D([0], [0], color='#009999', lw=rank) for rank in range(len(y)+1, 1, -1)]\n ax.legend(legend_lines, reversed(x), loc='center', bbox_to_anchor=(0.5, -0.1*(len(y)+2)))\n \n plt.title('Top five functions by top of stack occurrences',fontfamily=\"monospace\", fontweight='bold')\n plt.xlabel('No. 
of top of stack occurrences', fontfamily=\"monospace\",fontweight='bold') \n\n for i,v in enumerate(y):\n ax.text(v, i, str(v), ha='left',color='black', fontweight='bold')\n \n \n fig.add_axes(ax)\n #plt.show()\n figPath = pathlib.Path(target+'/'+self._topStackGraphFileName)\n plt.savefig(str(figPath), dpi=300, format='png', bbox_inches='tight', orientation='landscape') # use format='svg' or 'pdf' for vectorial pictures\n \n def _createErrorList(self):\n self._strErrors = str()\n \n errorTemplate = self._htmlTemplates.find('error_entry').text\n errorList = self._reportRoot.findall('error')\n self._numberOfErrors = len(errorList)\n \n for error in errorList:\n \n outputID = \"output_\"+str(self._errorNumber)+\"_\"\n newError = errorTemplate.replace('*ERROR_ID*', adjText(error.find('unique').text))\n newError = newError.replace('*ERROR_TYPE*', adjText(error.find('kind').text))\n \n xwhat = error.findall('xwhat')\n errortext1 = xwhat[0].find('text').text\n #fall back to xauxhwaht -> valgrind format\n if(len(xwhat) == 1):\n element = error.find('xauxwhat')\n if element != None:\n errortext2 = element.find('text').text\n else:\n errortext2 = \"\"\n else:\n errortext2 = xwhat[1].find('text').text\n\n newError = newError.replace('*XWHAT_TEXT_1*', adjText(errortext1))\n newError = newError.replace('*XWHAT_TEXT_2*', adjText(errortext2))\n\n # Resolved Address info\n resolvedaddress = error.find('resolvedaddress')\n if resolvedaddress != None:\n raModname = resolvedaddress.find('modname')\n resolvedaddressEntry = \"<h5>Resolved Address</h5>\" + \"<p class='reduced-margin'><b>Module Name: </b>\" \\\n + adjText(raModname.text) + \"</p>\"\n \n raSymname = resolvedaddress.find('symname')\n if raSymname != None:\n resolvedaddressEntry = resolvedaddressEntry + \"<p class='reduced-margin'><b>Symbol Name: </b>\" \\\n + adjText(raSymname.text) + \"</p>\"\n\n raFile = resolvedaddress.find('file')\n if raFile != None:\n raLine = resolvedaddress.find('line')\n raOffset = resolvedaddress.find('offset')\n resolvedaddressEntry = resolvedaddressEntry + \"<p class='reduced-margin'><b>File: </b>\" + adjText(raFile.text) + \"</p> <p class='reduced-margin'><b>Line: </b>\" \\\n + adjText(raLine.text) + \"</p> <p class='reduced-margin'><b>Offset: </b>\" + adjText(raOffset.text) + \"</p>\"\n \n else:\n resolvedaddressEntry = \"\"\n\n newError = newError.replace('*RESOLVED_ADDRESS_ENTRY*', resolvedaddressEntry)\n\n self._errorHeading = str() #reset errorHeading, will be filled filled by _createCallStack\n newError = newError.replace('*CALL_STACK_ENTRIES_1*', self._createCallStack(error, 0, outputID))\n if errortext2 != \"\":\n newError = newError.replace('*CALL_STACK_ENTRIES_2*', self._createCallStack(error, 1, outputID))\n else:\n newError = newError.replace('*CALL_STACK_ENTRIES_2*', \"No Callstack Available\")\n \n newError = newError.replace('*OUTPUT_ID_1*', outputID+'0')\n newError = newError.replace('*OUTPUT_ID_2*', outputID+'1')\n newError = newError.replace('*ERROR_HEADING*', self._errorHeading)\n\n self._errorNumber += 1\n self._strErrors += newError\n\n self.SCM.searchSourceDirectories.cache_clear()\n\n def _createHeader(self):\n hasErrors = self._reportRoot.find('error') != None\n headerInformation = self._getHeader()\n self.htmlReport = self._htmlTemplates.find('base_entry').text\n self.htmlReport = self.htmlReport.replace('*DATE*', headerInformation[0])\n self.htmlReport = self.htmlReport.replace('*TIME*', headerInformation[1])\n self.htmlReport = self.htmlReport.replace('*DURATION*', headerInformation[2])\n 
self.htmlReport = self.htmlReport.replace('*DURATION_UNIT*', headerInformation[3])\n self.htmlReport = self.htmlReport.replace('*ARGS*', headerInformation[4])\n self.htmlReport = self.htmlReport.replace('*EXE*', headerInformation[5])\n self.htmlReport = self.htmlReport.replace('*PROTOCOLVERSION*', headerInformation[6])\n self.htmlReport = self.htmlReport.replace('*PROTOCOLTOOL*', headerInformation[7])\n self.htmlReport = self.htmlReport.replace('*NUMBER_OF_ERRORS*', str(self._numberOfErrors))\n self.htmlReport = self.htmlReport.replace('*ERROR_ENTRIES*', self._strErrors)\n if not noMatplotLib and hasErrors:\n matplotlib_snippet = self._htmlTemplates.find('matplotlib_entries').text\n matplotlib_snippet = matplotlib_snippet.replace('*TOP_OF_STACK_GRAPH*', self._topStackGraphFileName)\n matplotlib_snippet = matplotlib_snippet.replace('*ERROR_TIMES_PLOT*', self._errorTimesPlot)\n self.htmlReport = self.htmlReport.replace('*MATPLOTLIB_PICTURES*', matplotlib_snippet)\n else:\n self.htmlReport = self.htmlReport.replace('*MATPLOTLIB_PICTURES*', '')\n\n def _createReport(self):\n self._createErrorList()\n self._createHeader()\n self.htmlReport = self.htmlReport.replace(\"*SNIPPET_VARIABLES*\", self._snippets)\n self.htmlReport = self.SCM.createCodeVars(self.htmlReport)\n\n\nclass SourceCodeManagement:\n def __init__(self):\n self._sourcefilelist = list()\n self._htmlTemplatesPath = str(g_HTMLTEMPLATES)\n self._htmlTemplates = (ET.parse(self._htmlTemplatesPath)).getroot()\n\n def _createSourcefileEntry(self, path, line):\n #one entry consists of the full file path the line number of interest \n sourceFile = open(path, mode='r')\n sourceLineList = sourceFile.readlines()\n if len(sourceLineList) > NUMBEROFCODELINES:\n newElement = [path, int(line), False]\n else:\n newElement = [path, int(line), True]\n \n self._sourcefilelist.append(newElement)\n return self._sourcefilelist.index(newElement)\n\n \n def _returnCode(self, fullPath, justExistance, line = 0):\n returnSrc = False\n try: #may throw an an exception in earlier version (until 3.6), therefore try-catch \n fp = pathlib.Path(fullPath).resolve() #returns absolute path\n except FileNotFoundError:\n return -1\n except OSError: #if path is available, but for any reason not reachable (e.g. locked by bitlocker) OSError is thrown\n return -1\n\n if fp.is_file():\n for element in SOURCEFILE_BL: #blacklisting routine\n if str(element) in str(fp): #both are absoulte paths, so comparing is valid\n return -1\n \n if WHITELISTING:\n for element in SOURCEFILE_WL:\n if str(element) in str(fullPath):\n returnSrc = True\n break\n if not returnSrc:\n return -1\n if justExistance:\n sourceCode = self._getLines(fullPath, line)\n if sourceCode == -1: ##line was not found\n return -1\n return 0\n else:\n return -1\n \n #if we are here we want to return the source code\n return adjText(self._getLines(fullPath, line))\n\n\n def _getLines(self, path, line):\n sourceFile = open(path, mode='r')\n sourceLineList = sourceFile.readlines()\n\n if len(sourceLineList) < line: #the found file contains less lines than the target (e.g. 
wrong line number from drace)\n return -1\n\n if len(sourceLineList) > NUMBEROFCODELINES:\n if line <= NUMBEROFCODELINES//2:\n begin = 0\n end = NUMBEROFCODELINES\n else:\n begin = (line - NUMBEROFCODELINES//2) - 1 #-1 because array starts with 0\n end = begin + NUMBEROFCODELINES\n\n sourceLineList = sourceLineList[begin:end]\n \n sourceCode = str()\n for sourceLine in sourceLineList:\n sourceCode += sourceLine\n\n sourceFile.close()\n return sourceCode\n\n\n def handleSourceCode(self, filename, directory, line):\n fullPath = pathlib.Path(directory +'/'+ filename)\n\n src = self._returnCode(fullPath, 1, int(line))\n if src == -1:\n return -1, self._htmlTemplates.find('no_code_entry').text\n\n index = -1\n #check if source file is already in the list\n for item in self._sourcefilelist:\n if item[0] == fullPath:\n if item[2] or (int(line) - NUMBEROFCODELINES//10) <= item[1] <= (int(line) + NUMBEROFCODELINES//10): \n index = self._sourcefilelist.index(item)\n #entry = item\n\n if index == -1:\n index = self._createSourcefileEntry(fullPath, line)\n \n strIndex = 'code_' + str(index) \n return strIndex, (self._htmlTemplates.find('code_entry').text)\n\n def createCodeVars(self, report):\n codeString = str()\n\n for sourceObject in self._sourcefilelist:\n src = self._returnCode(sourceObject[0], justExistance=0, line = sourceObject[1])\n tmpCode = \"code_\" + str(self._sourcefilelist.index(sourceObject)) + ' = \"' + src + '\";\\n'\n codeString += tmpCode\n\n report = report.replace(\"*CODE_VARIABLES*\", codeString)\n return report\n\n def determineLanguage(self, filename):\n fileParts = filename.split('.')\n if len(fileParts) == 1:\n return 'cpp' #files without file endigs are treated as cpp files\n else:\n ending = fileParts[-1]\n if ending == 'c':\n return 'c'\n elif ending == 'cpp':\n return 'cpp'\n elif ending == 'h':\n return 'cpp'\n elif ending == 'cs':\n return 'csharp'\n elif ending == 'css':\n return 'css'\n elif ending == 'js':\n return 'javascript'\n elif ending == 'html':\n return 'markup'\n else:\n return 'cpp' \n\n def getFirstLineOfCodeSnippet(self, index):\n codeSnippet = int(index.split(\"_\")[-1]) #index is e.g. code_3\n srcObject = self._sourcefilelist[codeSnippet]\n\n if srcObject[2]:\n return 1\n else:\n firstLine = srcObject[1] - NUMBEROFCODELINES//2\n return firstLine #srcObject[1] is line of interest of snippet\n\n @lru_cache(maxsize=100)\n def searchSourceDirectories(self, dir, file): \n if pathlib.Path(pathlib.Path(dir) / file).is_file():\n # path to file in xml file is valid\n return dir \n else:\n # path to file in xml file is NOT valid\n if not SOURCE_DIRECTORIES:\n # no sourceDirectories args given\n print(f\"Cannot find file '{file}' in directory '{dir}'.\")\n return dir\n else:\n print(f\"Cannot find file '{file}' in directory '{dir}'. 
Searching through given source directories ...\")\n # search in sourceDirectories given from args if applicable\n for customDirPath in SOURCE_DIRECTORIES:\n customDir = pathlib.Path(customDirPath) \n fileInstances = customDir.glob(f'**/{file}') # generator for found file instances\n try:\n f1 = next(fileInstances)\n try:\n f2 = next(fileInstances)\n # Check if next found file f2 has a parent directory which supersets that of first found file f1\n if str(f1.resolve().parent) == str(f2.resolve().parent)[:len(str(f1.resolve().parent))]:\n return str(f2.resolve().parent) # second valid file instance in customDirPath\n else:\n return str(f1.resolve().parent) # first valid file instance in customDirPath\n\n except StopIteration:\n # Only one valid file instance found in customDirPath\n return str(f1.resolve().parent)\n except StopIteration:\n # No file instance found in customDirPath element\n continue\n\n # Search for file instances in given sourceDirectories failed \n print(f\"Cannot find file '{file}' in given source directories.\")\n return dir\n\n\ndef adjText(text): #change html symbols e.g. & -> &amp;\n text = text.replace('`', '\\'')\n text = text.replace('\\\\', '/')\n text = text.replace('\\n', '\\\\n')\n return html.escape(text)\n\ndef parseArgumentString(fileList, strEntries):\n strEntries = strEntries.replace(\"\\\\\",\"/\")\n listEntries = strEntries.split(',')\n for entry in listEntries: \n #remove potential leading and trailing whitespaces\n while entry[0] == ' ':\n entry = entry[1:]\n while entry[-1] == ' ':\n entry = entry[:-1]\n\n newObject = pathlib.Path(entry)\n newObject = newObject.resolve()\n fileList.append(newObject) \n\n return \n\ndef returnDateString():\n date = datetime.datetime.utcnow()\n return date.strftime('%Y%m%d_%H%M')\n\ndef main():\n global SOURCEFILE_BL, SOURCEFILE_WL, WHITELISTING, SOURCE_DIRECTORIES, DEBUG\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--inputFile\", help='define <input_file>', type=str)\n parser.add_argument(\"-o\", \"--outputDirectory\", help='define <output_directory>', type=str)\n parser.add_argument(\"-b\", \"--blacklist\", help='add blacklist entries <entry1,entry2 ...>', type=str)\n parser.add_argument(\"-w\", \"--whitelist\", help='add whitelist entries <entry1,entry2 ...>', type=str)\n parser.add_argument(\"-s\", \"--sourceDirectories\", help='add source directories <entry1,entry2 ...>', type=str)\n parser.add_argument(\"--Debug\", help='Debug Mode', action=\"store_true\")\n args = parser.parse_args()\n \n ###args handling\n if args.Debug:\n print(\"Debug Mode is on\")\n inFile = pathlib.Path(SCRIPTPATH / 'test_files/test.xml') \n targetDirectory = pathlib.Path(SCRIPTPATH / 'test_files/output') \n\n else:\n if args.inputFile != None:\n inFile = pathlib.Path(args.inputFile)\n else:\n print(\"You must specify an input file\")\n print()\n parser.print_help()\n sys.exit(-1)\n\n if not inFile.is_file():\n print(\"Your input file does not exist\")\n parser.print_help()\n sys.exit(-1)\n\n strDate = returnDateString()\n\n if not args.Debug:\n if args.outputDirectory != None:\n targetDirectory = pathlib.Path(args.outputDirectory+'/drace_report_'+strDate)\n else:\n targetDirectory = pathlib.Path('./drace_report_'+strDate)\n\n if args.blacklist != None:\n parseArgumentString(SOURCEFILE_BL, args.blacklist)\n \n if args.whitelist != None:\n parseArgumentString(SOURCEFILE_WL, args.whitelist)\n WHITELISTING = True\n \n if args.sourceDirectories != None:\n parseArgumentString(SOURCE_DIRECTORIES, args.sourceDirectories)\n 
#end of args handling\n\n \n if not targetDirectory.is_dir():\n targetDirectory.mkdir()\n\n #report gets generated here\n report = ReportCreator(str(inFile), str(targetDirectory))\n\n if report.succesfullReportCreation:\n\n #write report to destination\n output = open(str(targetDirectory)+'/index.html', mode='w')\n output.write(report.htmlReport)\n output.close()\n\n #copy needed files to destination\n cssPath = pathlib.Path(str(targetDirectory)+\"/css\")\n jsPath = pathlib.Path(str(targetDirectory)+\"/js\")\n\n if cssPath.is_dir():\n shutil.rmtree(str(cssPath))\n \n if jsPath.is_dir():\n shutil.rmtree(str(jsPath))\n\n shutil.copytree(str(g_CSSPATH.resolve()), str(targetDirectory / \"css\"))\n shutil.copytree(str(g_JSPATH.resolve()), str(targetDirectory / \"js\"))\n shutil.copy(str((resourcesPath / 'legend.png').resolve()), str(targetDirectory))\n print(\"Report creation successful\")\n print(\"Report is at:\")\n print(targetDirectory)\n return 0\n\n else:\n print(\"Report creation was NOT successful\")\n targetDirectory.rmdir()\n return -1\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.lines.Line2D", "matplotlib.pyplot.figure", "matplotlib.pyplot.axes", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel" ] ]
chao1224/SGNN-EBM
[ "bda4c486e8ecb9775b635757dbe1071878be7b8a" ]
[ "src/models/SGNN_EBM_models.py" ]
[ "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch_scatter import scatter_add\n\n\nclass NCE_C_Parameter(torch.nn.Module):\n def __init__(self, N):\n super(NCE_C_Parameter, self).__init__()\n self.NCE_C = nn.Parameter(torch.zeros(N, requires_grad=True))\n\n\nclass GNN_EBM_Layer_01(torch.nn.Module):\n def __init__(self, input_dim, output_dim):\n super(GNN_EBM_Layer_01, self).__init__()\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.edge_layer = torch.nn.Linear(input_dim, output_dim)\n self.node_layer = torch.nn.Linear(input_dim, output_dim)\n self.mlp = torch.nn.Linear(input_dim, output_dim)\n\n def node_message_passing(self, x, x_2nd_agg, edge):\n T = x.size()[1]\n node_in, node_out = edge[0], edge[1] # M, M\n\n update = (scatter_add(x_2nd_agg, node_out, dim=1, dim_size=T) +\n scatter_add(x_2nd_agg, node_in, dim=1, dim_size=T)) / 2 # B, T, d\n x = x + update # B, T, d\n\n return x\n\n def forward(self, x_1st, x_2nd, edge):\n '''\n :param x: (B, T, 2, d)\n :param x_2nd: (B, M, 4, d)\n :param edge: (M, 2)\n :return: (B, T, 2, d_out)\n '''\n aggregate_indice = torch.LongTensor([0, 0, 1, 1]).to(x_1st.device)\n node_i_indice = torch.LongTensor([0, 0, 1, 1]).to(x_1st.device)\n node_j_indice = torch.LongTensor([0, 1, 0, 1]).to(x_1st.device)\n\n x_1st_neg = x_1st[:, :, 0, :] # B, T, d\n x_1st_pos = x_1st[:, :, 1, :] # B, T, d\n\n x_2nd_agg = scatter_add(x_2nd, aggregate_indice, dim=2) # B, T, 2, d\n x_2nd_neg = x_2nd_agg[:, :, 0, :] # B, M, d\n x_2nd_pos = x_2nd_agg[:, :, 1, :] # B, M, d\n\n x_neg = self.node_message_passing(x_1st_neg, x_2nd_neg, edge) # B, T, d\n x_pos = self.node_message_passing(x_1st_pos, x_2nd_pos, edge) # B, T, d\n x = torch.stack([x_neg, x_pos], dim=2) # B, T, 2, d\n x = self.node_layer(x) # B, T, 2, d\n\n edge_i = torch.index_select(x_1st, 1, edge[0]) # B, M, 2, dim\n edge_i = torch.index_select(edge_i, 2, node_i_indice) # B, M, 4, dim\n\n edge_j = torch.index_select(x_1st, 1, edge[1]) # B, M, 2, dim\n edge_j = torch.index_select(edge_j, 2, node_j_indice) # B, M, 4, dim\n\n edge = x_2nd + self.mlp(edge_i + edge_j) # B, M, 4, d\n edge = self.edge_layer(edge)\n\n return x, edge\n\n\nclass GNN_Energy_Model_1st_Order_01(torch.nn.Module):\n def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, output_dim, dropout=0, concat=False):\n super(GNN_Energy_Model_1st_Order_01, self).__init__()\n self.ebm_GNN_dim = ebm_GNN_dim\n self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1\n self.dropout = dropout\n self.output_dim = output_dim\n self.concat = concat\n\n hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num\n\n self.hidden_layers = torch.nn.ModuleList()\n for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):\n self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))\n\n if self.concat:\n hidden_dim_sum = sum(hidden_layers_dim)\n else:\n hidden_dim_sum = ebm_GNN_dim\n self.node_readout = torch.nn.Sequential(\n torch.nn.Linear(2 * hidden_dim_sum, 2 * hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(hidden_dim_sum, output_dim)\n )\n return\n\n def forward(self, x_1st, x_2nd, edge):\n '''\n :param x_1st: B,T,2,dim\n :param x_2nd: B,M,4,dim\n :param edge: 2,M\n :return: B,T,1\n '''\n B, T = x_1st.size()[:2]\n h_node_list = [x_1st]\n x_node, x_edge = x_1st, x_2nd\n\n for i in range(self.ebm_GNN_layer_num):\n x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)\n if i < self.ebm_GNN_layer_num - 1:\n x_node = F.relu(x_node)\n # x_edge = 
F.relu(x_edge)\n x_node = F.dropout(x_node, self.dropout, training=self.training)\n # x_edge = F.dropout(x_edge, self.dropout, training=self.training)\n h_node_list.append(x_node)\n\n if self.concat:\n h = torch.cat(h_node_list, dim=3).view(B, T, -1) # B, T, 2*layer_num*d\n else:\n h = x_node.view(B, T, -1) # B, T, 2*d\n h = self.node_readout(h) # B, T, 1\n return h\n\n\nclass GNN_Energy_Model_1st_Order_02(torch.nn.Module):\n def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, output_dim, dropout=0, concat=False):\n super(GNN_Energy_Model_1st_Order_02, self).__init__()\n self.ebm_GNN_dim = ebm_GNN_dim\n self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1\n self.dropout = dropout\n self.output_dim = output_dim\n self.concat = concat\n\n hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num\n\n self.hidden_layers = torch.nn.ModuleList()\n for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):\n self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))\n\n if self.concat:\n hidden_dim_sum = sum(hidden_layers_dim)\n else:\n hidden_dim_sum = ebm_GNN_dim\n self.node_readout = torch.nn.Linear(2 * hidden_dim_sum, output_dim)\n return\n\n def forward(self, x_1st, x_2nd, edge):\n '''\n :param x_1st: B,T,2,dim\n :param x_2nd: B,M,4,dim\n :param edge: 2,M\n :return: B,T,1\n '''\n B, T = x_1st.size()[:2]\n h_node_list = [x_1st]\n x_node, x_edge = x_1st, x_2nd\n\n for i in range(self.ebm_GNN_layer_num):\n x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)\n if i < self.ebm_GNN_layer_num - 1:\n x_node = F.relu(x_node)\n # x_edge = F.relu(x_edge)\n x_node = F.dropout(x_node, self.dropout, training=self.training)\n # x_edge = F.dropout(x_edge, self.dropout, training=self.training)\n h_node_list.append(x_node)\n\n if self.concat:\n h = torch.cat(h_node_list, dim=3).view(B, T, -1) # B, T, 2*layer_num*d\n else:\n h = x_node.view(B, T, -1) # B, T, 2*d\n h = self.node_readout(h) # B, T, 1\n return h\n\n\nclass GNN_Energy_Model_2nd_Order_01(torch.nn.Module):\n def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):\n super(GNN_Energy_Model_2nd_Order_01, self).__init__()\n self.ebm_GNN_dim = ebm_GNN_dim\n self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1\n self.dropout = dropout\n self.concat = concat\n\n hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num\n\n self.hidden_layers = torch.nn.ModuleList()\n for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):\n self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))\n\n if self.concat:\n hidden_dim_sum = sum(hidden_layers_dim)\n else:\n hidden_dim_sum = ebm_GNN_dim\n self.node_readout = torch.nn.Sequential(\n torch.nn.Linear(hidden_dim_sum, 2 * hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(hidden_dim_sum, 1)\n )\n self.edge_readout = torch.nn.Sequential(\n torch.nn.Linear(hidden_dim_sum, 2 * hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(hidden_dim_sum, 1)\n )\n return\n\n def forward(self, x_1st, x_2nd, edge):\n '''\n :param x_1st: B,T,2,dim\n :param x_2nd: B,M,4,dim\n :param edge: 2,M\n :return: (B,T,2), (B,M,4)\n '''\n B, T = x_1st.size()[:2]\n M = edge.size()[1]\n h_node_list = [x_1st]\n h_edge_list = [x_2nd]\n x_node, x_edge = x_1st, x_2nd\n\n for i in range(self.ebm_GNN_layer_num):\n x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)\n if i < self.ebm_GNN_layer_num - 1:\n x_node = F.relu(x_node)\n # x_edge = F.relu(x_edge)\n x_node = 
F.dropout(x_node, self.dropout, training=self.training)\n # x_edge = F.dropout(x_edge, self.dropout, training=self.training)\n h_node_list.append(x_node)\n h_edge_list.append(x_edge)\n\n if self.concat:\n h_node = torch.cat(h_node_list, dim=3) # B, T, 2, layer_num*d\n h_edge = torch.cat(h_edge_list, dim=3) # B, M, 4, layer_num*d\n else:\n h_node = x_node # B, T, 2, d\n h_edge = x_edge # B, M, 4, d\n h_node = self.node_readout(h_node) # B, T, 2, 1\n h_edge = self.edge_readout(h_edge) # B, M, 4, 1\n h_node = h_node.squeeze(3) # B, T, 2\n h_edge = h_edge.squeeze(3) # B, M, 4\n return h_node, h_edge\n\n\nclass GNN_Energy_Model_2nd_Order_02(torch.nn.Module):\n def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):\n super(GNN_Energy_Model_2nd_Order_02, self).__init__()\n self.ebm_GNN_dim = ebm_GNN_dim\n self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1\n self.dropout = dropout\n self.concat = concat\n\n hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num\n\n self.hidden_layers = torch.nn.ModuleList()\n for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):\n self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))\n\n if self.concat:\n hidden_dim_sum = sum(hidden_layers_dim)\n else:\n hidden_dim_sum = ebm_GNN_dim\n self.node_readout = torch.nn.Sequential(\n torch.nn.Linear(2 * hidden_dim_sum, 2 * hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(hidden_dim_sum, 2)\n )\n self.edge_readout = torch.nn.Sequential(\n torch.nn.Linear(4 * hidden_dim_sum, 2 * hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),\n torch.nn.ReLU(),\n torch.nn.Linear(hidden_dim_sum, 4)\n )\n return\n\n def forward(self, x_1st, x_2nd, edge):\n '''\n :param x_1st: B,T,2,dim\n :param x_2nd: B,M,4,dim\n :param edge: 2,M\n :return: (B,T,2), (B,M,4)\n '''\n B, T = x_1st.size()[:2]\n M = x_2nd.size()[1]\n h_node_list = [x_1st]\n h_edge_list = [x_2nd]\n x_node, x_edge = x_1st, x_2nd\n\n for i in range(self.ebm_GNN_layer_num):\n x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)\n if i < self.ebm_GNN_layer_num - 1:\n x_node = F.relu(x_node)\n # x_edge = F.relu(x_edge)\n x_node = F.dropout(x_node, self.dropout, training=self.training)\n # x_edge = F.dropout(x_edge, self.dropout, training=self.training)\n h_node_list.append(x_node)\n h_edge_list.append(x_edge)\n\n if self.concat:\n h_node = torch.cat(h_node_list, dim=3).view(B, T, -1) # B, T, 2*layer_num*d\n h_edge = torch.cat(h_edge_list, dim=3).view(B, M, -1) # B, M, 4*layer_num*d\n else:\n h_node = x_node.view(B, T, -1) # B, T, 2*d\n h_edge = x_edge.view(B, M, -1) # B, M, 4*d\n h_node = self.node_readout(h_node) # B, T, 2\n h_edge = self.edge_readout(h_edge) # B, M, 4\n return h_node, h_edge\n\n\nclass GNN_Energy_Model_2nd_Order_03(torch.nn.Module):\n def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):\n super(GNN_Energy_Model_2nd_Order_03, self).__init__()\n self.ebm_GNN_dim = ebm_GNN_dim\n self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1\n self.dropout = dropout\n self.concat = concat\n\n hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num\n\n self.hidden_layers = torch.nn.ModuleList()\n for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):\n self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))\n\n if self.concat:\n hidden_dim_sum = sum(hidden_layers_dim)\n else:\n hidden_dim_sum = ebm_GNN_dim\n\n self.node_readout = nn.Linear(2 * hidden_dim_sum, 2)\n self.edge_readout = 
nn.Linear(4 * hidden_dim_sum, 4)\n return\n\n def forward(self, x_1st, x_2nd, edge):\n '''\n :param x_1st: B,T,2,dim\n :param x_2nd: B,M,4,dim\n :param edge: 2,M\n :return: (B,T,2), (B,M,4)\n '''\n B, T = x_1st.size()[:2]\n M = edge.size()[1]\n h_node_list = [x_1st]\n h_edge_list = [x_2nd]\n x_node, x_edge = x_1st, x_2nd\n\n for i in range(self.ebm_GNN_layer_num):\n x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)\n if i < self.ebm_GNN_layer_num - 1:\n x_node = F.relu(x_node)\n # x_edge = F.relu(x_edge)\n x_node = F.dropout(x_node, self.dropout, training=self.training)\n # x_edge = F.dropout(x_edge, self.dropout, training=self.training)\n h_node_list.append(x_node)\n h_edge_list.append(x_edge)\n\n if self.concat:\n h_node = torch.cat(h_node_list, dim=3) # B, T, 2, layer_num*d\n h_edge = torch.cat(h_edge_list, dim=3) # B, M, 4, layer_num*d\n else:\n h_node = x_node # B, T, 2, d\n h_edge = x_edge # B, M, 4, d\n\n h_node = h_node.view(B, T, -1) # B, T, 2*d\n h_edge = h_edge.view(B, M, -1) # B, M, 4*d\n\n h_node = self.node_readout(h_node) # B, T, 2\n h_edge = self.edge_readout(h_edge) # B, M, 4\n return h_node, h_edge\n\n\n# class GATNet(torch.nn.Module):\n# def __init__(self, embedding_dim=10, hidden_dim=10, num_head=8):\n# super(GATNet, self).__init__()\n# self.conv1 = GATConv(embedding_dim, hidden_dim, heads=num_head, dropout=0.6)\n# self.conv2 = GATConv(hidden_dim * num_head, hidden_dim, heads=1, concat=False, dropout=0.6)\n\n# def forward(self, data):\n# x = data.x\n# x = F.dropout(x, p=0.6, training=self.training)\n# x = F.elu(self.conv1(x, data.edge_index))\n# x = F.dropout(x, p=0.6, training=self.training)\n# x = self.conv2(x, data.edge_index)\n# return x\n\n\n# class MLP(nn.Sequential):\n# def __init__(self, input_dim, output_dim, hidden_dims=[1024, 512], dropout=0.1, use_batch_norm=False):\n# super(MLP, self).__init__()\n\n# self.input_dim = input_dim\n# self.output_dim = output_dim\n# self.hidden_dims = hidden_dims\n# self.use_batch_norm = use_batch_norm\n# self.dropout = nn.Dropout(0.1)\n\n# self.layer_size = len(self.hidden_dims) + 1\n# dims = [self.input_dim] + self.hidden_dims + [self.output_dim]\n\n# self.predictor = nn.ModuleList([nn.Linear(dims[i], dims[i + 1]) for i in range(self.layer_size)])\n# if use_batch_norm:\n# self.batch_norms = nn.ModuleList([nn.BatchNorm1d(dims[i + 1]) for i in range(self.layer_size)])\n# for m in self.modules():\n# if isinstance(m, nn.Linear):\n# nn.init.xavier_uniform_(m.weight.data)\n# if m.bias is not None:\n# m.bias.data.fill_(0.0)\n\n# def norm(self):\n# with torch.no_grad():\n# norm = 0\n# for m in self.modules():\n# if isinstance(m, nn.Linear):\n# norm += torch.norm(m.weight.data).item()\n# return norm\n\n# def forward(self, v):\n# '''\n# : params x: (batch_size, *, input_dim)\n# : output : (batch_size, *, output_dim)\n# '''\n# B, t, _ = v.size()\n# v = v.flatten(0, -2)\n# # print('input norm: %.5f' % (torch.norm(v).item()))\n# for i, l in enumerate(self.predictor):\n# v = l(v)\n# if i != self.layer_size - 1:\n# if self.use_batch_norm:\n# v = self.batch_norms[i](v)\n# v = F.relu(v)\n# v = self.dropout(v)\n# # print('layer %d norm: %.5f' % (i, torch.norm(v).item()))\n# v = v.reshape(B, t, -1)\n# return v\n\n\n# class GradKnowledgeGraphModel(nn.Module):\n# def __init__(self, num_tasks, args):\n# super(GradKnowledgeGraphModel, self).__init__()\n\n# self.num_tasks = num_tasks\n\n# self.weights = nn.Parameter(torch.ones(self.num_tasks, 1), requires_grad=True)\n# self.register_parameter('grad_KG', self.weights)\n# self.softmax = 
nn.Softmax(dim=0)\n# self.normalize_method = args.grad_KG_normalize_method\n\n# def forward(self, task_repr):\n# # ########## This won't train ##########\n# # task_repr = task_repr * self.weights.data\n# task_repr = task_repr * self.weights\n# return task_repr\n\n# def renormalize(self):\n# if self.normalize_method == 'sum':\n# ########## TODO: there might be negatives after backward ##########\n# normalize_coeff = self.num_tasks / self.weights.data.sum()\n# self.weights.data *= normalize_coeff\n# elif self.normalize_method == 'softmax':\n# self.weights.data = self.softmax(self.weights.data) * self.num_tasks\n# return\n\n# def reset_param(self):\n# self.weights.data.fill_(1)\n# return\n" ]
[ [ "torch.stack", "torch.nn.Linear", "torch.nn.functional.dropout", "torch.nn.functional.relu", "torch.nn.ModuleList", "torch.index_select", "torch.zeros", "torch.LongTensor", "torch.nn.ReLU", "torch.cat" ] ]
kathoma/AutomaticKneeMRISegmentation
[ "72ea3fa96fa5de34461b5999814aa706360f4a79" ]
[ "calculate_t2.py" ]
[ "from __future__ import print_function, division\n\nimport sys\nsys.path.insert(0, 'lib')\nimport numpy as np\nimport random\nimport scipy.io as sio\nimport os\nimport pandas as pd\nimport scipy.ndimage as ndimage\nimport math\nimport os\nimport scipy.linalg as la\nfrom joblib import Parallel, delayed\nfrom scipy.optimize import curve_fit\nfrom skimage import measure\nimport scipy.stats as ss\nimport skimage\n\n\n#########################################################\n# Calculating T2 Values for Segmented Voxels\n#########################################################\n\ndef exp_func(mri_time, A, m, b):\n return A*np.exp(-m*mri_time)\n\ndef running_mean(x):\n kernel = np.ones((3,))/3\n conv = np.convolve(x, kernel, mode = 'valid')\n temp = np.copy(x)\n temp[1:-1]=conv\n \n # Avoid boundary effects of convolution\n temp[0]=np.mean(x[0:2])\n temp[-1]=np.mean(x[-2:])\n \n return temp\n\ndef strictly_decreasing(vec):\n return np.all(np.diff(vec)<0)\n\ndef fit_t2(t2imgs, t2times, segmentation = None, n_jobs = 4, show_bad_pixels = True):\n \n '''\n Fits T2 curves to the T2_weighted images in each slice.\n IN:\n t2imgs - with T2 weighted images in numpy array (nr_slices, time_steps, width, heigth)\n t2times - list with aquisition times\n segmentation - segmentation matrix (nr_slices, width, heigth)\n n_jobs - number of parallel jobs\n OUT:\n matrix (nr_slices, width, heigth) with T2 values\n '''\n t2_tensor = np.zeros((t2imgs.shape[0], t2imgs.shape[2], t2imgs.shape[3]))\n\n def fit_per_slice(slice_idx, show_bad_pixels):\n scan = t2imgs[slice_idx,:,:,:]\n \n mri_time = np.array(t2times[slice_idx]) - t2times[slice_idx][0] #np.array(t2times[slice_idx])#\n \n if not segmentation is None: # if we have a segmentation\n segmentation_mask = segmentation[slice_idx,:,:]\n (cartilage_indices_r, cartilage_indices_c) = np.where(segmentation_mask)\n\n t2_matrix = np.full((scan.shape[1], scan.shape[2]), np.nan) \n if len(cartilage_indices_r)> 0:\n for i in np.arange(len(cartilage_indices_r)):\n ir = cartilage_indices_r[i]\n ic = cartilage_indices_c[i] \n \n if all(scan[:,ir,ic] == scan[0,ir,ic]): # if constant value, decay is 0 \n continue\n \n try:\n if strictly_decreasing(scan[1:,ir,ic]):\n echo_corrected = scan[1:,ir,ic]\n else:\n echo_corrected = running_mean(scan[1:,ir,ic])\n \n parameters,_ = curve_fit(exp_func, \n mri_time[1:], \n echo_corrected, \n p0 = [scan[0,ir,ic], .03, 0])#, \n# bounds = ([-np.inf, 0, -np.inf], [np.inf, 100, np.inf]))\n m = parameters[1]\n t2_ = 1./m\n t2_matrix[ir, ic] = t2_\n if show_bad_pixels:\n if ((t2_ > .100) or (t2_< -.100)): \n print(t2_)\n plt.plot(mri_time, scan[:,ir,ic])\n plt.plot(mri_time, exp_func(mri_time, *parameters), 'r-')\n plt.show()\n \n \n except RuntimeError:\n if show_bad_pixels:\n plt.plot(mri_time, scan[:,ir,ic])\n plt.title(\"Did not converge\")\n plt.show() \n\n return t2_matrix\n\n for i in range(t2imgs.shape[0]):\n t2_tensor[i,:,:] = fit_per_slice(i, show_bad_pixels)*1000 # in ms\n return t2_tensor" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.diff", "scipy.optimize.curve_fit", "numpy.copy", "numpy.exp", "numpy.where", "numpy.array", "numpy.convolve", "numpy.full", "numpy.mean" ] ]
uchida-takumi/recommender_system_verification
[ "a079e0c8764926e5dc66da01a809c6ba4fde7fb7" ]
[ "src/module/DeepFM.py" ]
[ "\"\"\"\n# install the package\npip install deepctr\n\n# tutorial\nhttps://deepctr-doc.readthedocs.io/en/latest/Quick-Start.html#getting-started-4-steps-to-deepctr\n\n# github\nhttps://github.com/shenweichen/DeepCTR\n\nしかし、これは binary しか出来ないので適応不可能。\nbinary を無理矢理適応させるばあいは、非クリックデータを何らかの方法で生成する必要がある。\n\n# ---- 次のアイデア ----\n# github\nhttps://github.com/ChenglongChen/tensorflow-DeepFM\n\"\"\"\n\nimport tensorflow as tf\nimport os\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport copy\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import mean_absolute_error\nfrom src.module.tensorflow_DeepFM.DeepFM import DeepFM as DeepFM_\n\n# インターフェース\nclass DeepFM:\n def __init__(self, set_train_test_users, set_train_test_items, dict_genre=None, first_half_fit_only_fm=False, ctr_prediction=True):\n \"\"\"\n import pandas as pd\n DIR_DATA = 'src/module/knowledge_graph_attention_network/Data/ml'\n df_train = pd.read_csv(os.path.join(DIR_DATA, 'train_rating.csv'))\n df_test = pd.read_csv(os.path.join(DIR_DATA, 'test_rating.csv'))\n\n set_train_test_users = set(np.concatenate([df_train['UserID'], df_test['UserID']]))\n set_train_test_items = set(np.concatenate([df_train['MovieID'], df_test['MovieID']]))\n dict_genre = pickle.load(open(os.path.join(DIR_DATA, 'genre.pickle'), 'rb'))\n \n self = DeepFM(set_train_test_users, set_train_test_items, dict_genre)\n self.dfm_params['epoch'] = 10\n self.dfm_params['batch_size'] = 64\n \n users = df_train['UserID'].values\n items = df_train['UserID'].values\n ratings = df_train['Rating'].values\n self.fit(users, items, ratings)\n predicted = self.predict(df_test['UserID'].values, df_test['UserID'].values)\n \n\n # MAE of test-set\n print( np.mean(np.abs(predicted - df_test['Rating'])) )\n\n # MAE of mean-prediction\n print( np.mean(np.abs(df_test['Rating'].mean() - df_test['Rating'])) ) \n\n\n ## まぁ、実際のテストをクリアできればOKとする。\n \"\"\"\n \n \"\"\"\n 参考として、Movielens1Mデータで検証されたハイパーパラメータは以下の通り\n Deep Matrix Factorization Approach for\n Collaborative Filtering Recommender Systems\n \n k(hidden-factor) = 8, γ(learning-rate) = 0.01, λ(regularization) = 0.045\n K = [9, 3, 3]; Γ= [0.01, 0.01, 0.01]; Λ = [0.1, 0.01, 0.1]\n \"\"\"\n self.set_train_test_users = set(set_train_test_users)\n self.set_train_test_items = set(set_train_test_items)\n \n self.dict_genre = dict_genre\n self.first_half_fit_only_fm = first_half_fit_only_fm\n self.data_manager = Data_manager(set_train_test_users, set_train_test_items, dict_genre)\n feature_size, field_size = self.data_manager.get_feature_size_field_size()\n self.dfm_params = {\n \"feature_size\" : feature_size,\n \"field_size\" : field_size,\n \"loss_type\" : \"mse\", # \"logloss\" なら {0,1} の判別問題。 \"mse\" なら regression。\n \"use_fm\": True, # fm-layer を使用\n \"use_deep\": True, # deep-layer を使用\n \"embedding_size\": 8,\n \"dropout_fm\": [1.0, 1.0],\n \"deep_layers\": [32, 32],\n \"dropout_deep\": [0.5, 0.5, 0.5],\n \"deep_layers_activation\": tf.nn.relu,\n \"epoch\": 30,\n \"batch_size\": 64,\n \"learning_rate\": 0.001,\n \"optimizer_type\": \"adam\",\n \"batch_norm\": 1,\n \"batch_norm_decay\": 0.995,\n \"l2_reg\": 0.0001,\n \"l2_reg_embedding\": 0.0001,\n \"l2_reg_bias\": 0.0001,\n \"verbose\": True,\n \"eval_metric\": mean_absolute_error,\n \"greater_is_better\": False, # 学習における損失スコアが大きい方が良いかどうか\n \"random_seed\": 2017,\n }\n self.ctr_prediction = ctr_prediction\n if self.ctr_prediction:\n self.dfm_params[\"loss_type\"] = \"logloss\"\n \n\n def fit(self, users, items, ratings, test_users=[], test_items=[], 
test_ratings=[], **kargs):\n \"\"\"\n users = [0,0,1]\n items = [0,3,3]\n ratings = [3.,4.,5.]\n \"\"\"\n global_mean_bias_init = np.float32(np.mean(ratings))\n global_mean_bias_init = 0.01\n self.model = DeepFM_(**self.dfm_params, global_mean_bias_init=global_mean_bias_init, first_half_fit_only_fm=self.first_half_fit_only_fm)\n \n # もし、CTR予測の場合は、y=0のデータをランダム生成する。\n if self.ctr_prediction:\n users = list(users) + list(np.random.choice(list(set(users)), size=len(users)))\n items = list(items) + list(np.random.choice(list(set(items)), size=len(items)))\n ratings = list((np.array(ratings)>0).astype(int)) + [0]*len(ratings)\n test_ratings = list((np.array(test_ratings)>0).astype(int))\n \n Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)\n \n if len(test_users)>0:\n test_Xi, test_Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(test_users, test_items)\n self.model.fit(Xi, Xv, ratings, test_Xi, test_Xv, test_ratings, early_stopping=True)\n else:\n self.model.fit(Xi, Xv, ratings, early_stopping=True, **kargs)\n \n # load data\n self.trained_users = list(set(users))\n self.trained_items = list(set(items))\n self.global_mean = self.model.predict(Xi, Xv).mean()\n \n \n def predict(self, users, items, *args, **kargs):\n Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)\n predicted = self.model.predict(Xi, Xv)\n return predicted\n\n# prepare training and validation data in the required format\nclass Data_manager:\n def __init__(self, users, items, dict_genre=None):\n \"\"\"\n users [array like object]:\n train, test set に含まれる user_id\n items [array like object]:\n train, test set に含まれる item_id\n dict_genre [dictionary]:\n ex) {item_id: [genre_id1, genre_id2]}\n \n tensorflow_DeepFM/example 内部のプログラム、特にDataReader.pyを読み、データの形式を解読した。\n 結論として、 item, user, genre の各IDは以下のように変換すればよい。\n 1) user = {0,1,2} → {0,1,2} *未変更\n 2) item = {0,1} → {3,4} *userからのインクリメントID\n 3) genre = {0,1} → {5,6} *itemからのインクリメントID\n 4) a interaction-sample [u,i,g] = [0,1,0]→[0,4,5]\n 5) Xi_train (X-index trainset) = [変換した[u,i,g]1, 変換した[u,i,g]2, ...]\n 6) Xv_train (X-value trainset) = [[1.,1.,1.], [1.,1.,1.], ...]\n user,item,genre はカテゴリ変数なのですべて1.となる。\n 7) y_train = [rating-score1, rating-score2, ...] 
*変換不要\n \n EXAMPLE\n -------------\n import pandas as pd\n df_rating = pd.read_csv(os.path.join(DIR_DATA, 'train_rating.csv'))\n dict_genre = pickle.load(open(os.path.join(DIR_DATA, 'genre.pickle'), 'rb'))\n users = df_rating['UserID']\n items = df_rating['MovieID']\n\n self = Data_manager(users, items, dict_genre=dict_genre)\n \"\"\" \n self.dict_genre = dict_genre\n # インクリメントインデックスを生成するオブジェクト self.inclement_index を生成する。\n if dict_genre:\n dict_genre = {i:gs for i,gs in dict_genre.items() if i in items}\n n_genre = max([max(gs) for i,gs in dict_genre.items() if gs]) + 1\n genres = list(range(n_genre))\n else:\n dict_genre = {}\n n_genre = 0\n genres = []\n \n self.inclement_index = inclement_index(users, items, genres)\n\n # userとitemをインクリメントIDに変更する\n dict_genre = {self.inclement_index.transform([i], field='item')[0]:gs for i,gs in dict_genre.items()}\n\n # user, itemはそれぞれで2フィールド、ジャンルはジャンルラベルごとに別々のフィールドにわける。\n self.re_dict_genre = {} \n for i,gs in dict_genre.items():\n # re_dict は {item_id:(field_id, genru_id)}となる。\n genre_one_hot_vec = [0] * n_genre\n for g in gs:\n genre_one_hot_vec[g] = 1 # カテゴリ変数はかならず整数の1とする。\n self.re_dict_genre[i] = genre_one_hot_vec\n \n self.genre_indexes = self.inclement_index.transform(genres, field='genre')\n self.feature_size = self.inclement_index.get_feature_size()\n self.field_size = 2 + n_genre\n \n def get_feature_size_field_size(self):\n return self.feature_size, self.field_size\n\n def transform_users_and_items_to_Xi_Xv(self, users, items):\n \"\"\"\n users = [0,0,1]\n items = [1,5,5]\n \"\"\"\n Xi, Xv = [], []\n users = self.inclement_index.transform(users, field='user')\n items = self.inclement_index.transform(items, field='item')\n for u,i in zip(users, items):\n if self.dict_genre:\n Xi.append([u, i] + self.genre_indexes)\n Xv.append([1, 1] + self.re_dict_genre[i])\n else:\n Xi.append([u, i])\n Xv.append([1, 1]) \n return Xi, Xv\n \n \n\nclass inclement_index:\n def __init__(self, users, items, genres=[]):\n \"\"\"\n users = ['u0','u1',3]\n items = ['i0', 3]\n genres = ['pop', 'sf']\n \n self = inclement_index(users, items, genres)\n self.transform(['u0', 'u1', 3], field='user', inverse=False)\n self.transform(['i0', 3], field='item', inverse=False)\n self.transform(['pop', 'sf'], field='genre', inverse=False)\n \n transformed = self.transform(['u0', 'u1', 3], field='user', inverse=False)\n self.transform(transformed, field='user', inverse=True)\n\n \"\"\"\n users = set(users)\n items = set(items)\n genres = set(genres)\n self.increment_cnt = 0\n self.user_dict = {u:self.get_incremate_index() for u in users}\n self.user_inverse_dict = {v:k for k,v in self.user_dict.items()}\n self.item_dict = {i:self.get_incremate_index() for i in items}\n self.item_inverse_dict = {v:k for k,v in self.item_dict.items()}\n self.genre_dict = {g:self.get_incremate_index() for g in genres}\n self.genre_inverse_dict = {v:k for k,v in self.genre_dict.items()}\n\n def transform(self, xs, field='user', inverse=False):\n \"\"\"\n xs = [0,2]\n\n self.transform(xs, type='user')\n \"\"\"\n if inverse:\n if field == 'user':\n _dict = self.user_inverse_dict\n elif field == 'item':\n _dict = self.item_inverse_dict\n elif field == 'genre':\n _dict = self.genre_inverse_dict\n else:\n if field == 'user':\n _dict = self.user_dict\n elif field == 'item':\n _dict = self.item_dict\n elif field == 'genre':\n _dict = self.genre_dict\n\n return [_dict[x] for x in xs] \n \n def get_incremate_index(self):\n now_index = copy.deepcopy(self.increment_cnt)\n self.increment_cnt += 1\n return 
now_index\n \n def get_feature_size(self):\n return self.increment_cnt\n\n\n \nif __name__ == 'how to use it.':\n ###########################\n # --- A very simple test ---\n sample_size = 1000\n users = np.random.choice(range(100), size=sample_size) \n items = np.random.choice(range(100), size=sample_size) \n genre_dict = None\n ratings = users - items\n \n self = DeepFM(set(users), set(items))\n self.dfm_params['batch_size'] = 64\n self.dfm_params['epoch'] = 100\n self.fit(users, items, ratings) \n self.predict([10, 5, 10], [10, 10, 2]) # the correct answers are [0, -5, 8]\n # Whether batch_size is small enough is very important;\n # this can be confirmed by the loss decrease during this training test.\n \n ###########################\n # --- Simple test 1 ---\n sample_size = 1000\n n_user = 500\n n_item = 20\n users = np.random.choice(range(n_user), size=sample_size) \n items = np.random.choice(range(n_item), size=sample_size) \n \n user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}\n item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}\n \n def rating(u, i):\n return 10*sum(user_embedding[u] * item_embedding[i]) + 3\n \n ratings = [rating(u, i) for u,i in zip(users, items)]\n \n self = DeepFM(list(range(n_user)), list(range(n_item)))\n self.dfm_params['epoch'] = 100\n self.dfm_params['embedding_size'] = 200\n self.dfm_params['l2_reg'] = 0.0045\n self.fit(users, items, ratings)\n \n test_users = np.random.choice(range(n_user), size=sample_size) \n test_items = np.random.choice(range(n_item), size=sample_size) \n test_ratings = [rating(u, i) for u,i in zip(test_users, test_items)]\n\n predicted = self.predict(test_users, test_items)\n print( np.mean(np.abs(test_ratings - predicted)) ) \n print( np.mean(np.abs(test_ratings - np.mean(ratings))) )\n \n # Does introducing a scaler improve things? → No particular improvement.\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n scaler.fit([[r] for r in ratings])\n s_ratings = scaler.transform([[r] for r in ratings])[:,0]\n \n self.fit(users, items, s_ratings) \n predicted = self.predict(test_users, test_items)\n predicted = scaler.inverse_transform(predicted[:,None])[:,0]\n print( np.mean(np.abs(test_ratings - predicted)) ) \n print( np.mean(np.abs(test_ratings - np.mean(ratings))) )\n\n ###########################\n # --- Simple test 2: with bias and embedding ---\n sample_size = 1000\n n_user = 500\n n_item = 20\n users = np.random.choice(range(n_user), size=sample_size) \n items = np.random.choice(range(n_item), size=sample_size) \n \n user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}\n item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}\n user_bias = {u:u/10 for u in range(n_user)} # bias simply grows with the id\n item_bias = {i:i for i in range(n_item)} # bias simply grows with the id\n \n def rating(u, i):\n return 10*sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i] \n ratings = [rating(u, i) for u,i in zip(users, items)]\n\n test_users = np.random.choice(range(n_user), size=sample_size) \n test_items = np.random.choice(range(n_item), size=sample_size) \n test_ratings = [rating(u, i) for u,i in zip(test_users, test_items)]\n \n self = DeepFM(list(range(n_user)), list(range(n_item)))\n self.dfm_params['epoch'] = 100\n self.dfm_params['embedding_size'] = 200\n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n \n # comparison against mean performance\n predicted = self.predict(test_users, test_items)\n print( np.mean(np.abs(test_ratings - predicted)) ) \n print( np.mean(np.abs(test_ratings - np.mean(ratings))) )\n \n # comparison against the oracle\n predicted = self.predict([200]*n_item, list(range(n_item)))\n answer = [rating(200,i) for 
i in range(n_item)]\n print(predicted)\n print(answer)\n print(predicted - answer)\n \n ## Inspect the internal embeddings.\n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n \n\n ###########################\n # --- Simple test 3: head-tail-new IDs ---\n sample_size = 1000\n n_user = 200\n n_item = 50\n ## the later the id, the lower its frequency in the training set.\n p_user = 1/np.array(range(1, n_user+1)); p_user /= p_user.sum()\n p_item = 1/np.array(range(1, n_item+1)); p_item /= p_item.sum()\n users = np.random.choice(range(n_user), size=sample_size, p=p_user) \n items = np.random.choice(range(n_item), size=sample_size, p=p_item) \n\n user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}\n item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}\n user_bias = {u:u/10 for u in range(n_user)} # bias simply grows with the id\n item_bias = {i:i for i in range(n_item)} # bias simply grows with the id\n def rating(u, i):\n return 10*sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i] \n ratings = [rating(u, i) for u,i in zip(users, items)]\n\n ## user=200 and item=50 are new IDs (absent from the training set)\n test_users = np.random.choice(range(n_user), size=sample_size) \n test_items = np.random.choice(range(n_item), size=sample_size) \n test_ratings = [rating(u, i) for u,i in zip(test_users, test_items)]\n\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))\n self.dfm_params['epoch'] = 300\n self.dfm_params['embedding_size'] = 4\n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n # comparison against mean-value prediction\n predicted = self.predict(test_users, test_items)\n print( np.mean(np.abs(test_ratings - predicted)) ) \n print( np.mean(np.abs(test_ratings - np.mean(ratings))) )\n\n ## Inspect the internal embeddings.\n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n\n ## Visualize (feature IDs up to 200 are users, the rest are items)\n import pandas as pd\n # [as expected] Some embeddings vary linearly with the ID; they have partially learned the bias effect.\n pd.DataFrame(feature_embeddings).plot() \n # [success] With the DeepFM biases initialized near 0, the user biases came out close to the oracle.\n # [?] 
the item biases, contrary to the oracle, decrease as the id increases → probably because the embeddings ended up learning the bias?\n pd.DataFrame(feature_bias).plot() \n \n # Check the new IDs → almost at the initial value near 0?\n ## new user\n feature_embeddings[200]\n feature_bias[200]\n ## new item\n feature_embeddings[-1]\n feature_bias[-1]\n \n ############################################## \n # --- Try training with random biases unrelated to the IDs ---\n sample_size = 1000\n n_user = 200\n n_item = 50\n ## the later the id, the lower its frequency in the training set.\n p_user = 1/np.array(range(1, n_user+1)); p_user /= p_user.sum()\n p_item = 1/np.array(range(1, n_item+1)); p_item /= p_item.sum()\n users = np.random.choice(range(n_user), size=sample_size, p=p_user) \n items = np.random.choice(range(n_item), size=sample_size, p=p_item) \n user_bias = {u:np.random.rand() for u in range(n_user)} \n item_bias = {i:np.random.rand() for i in range(n_item)} \n user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}\n item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}\n def rating(u, i):\n return 3*(sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i]) \n ratings = [rating(u, i) for u,i in zip(users, items)]\n\n ## user=200 and item=50 are new IDs (absent from the training set)\n test_users = np.random.choice(range(n_user), size=sample_size) \n test_items = np.random.choice(range(n_item), size=sample_size) \n test_ratings = [rating(u, i) for u,i in zip(test_users, test_items)] \n # ------------------------------\n ############################################## \n\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))\n self.dfm_params['epoch'] = 100\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.001\n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n\n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n\n \"\"\" Debug\n self.predict([1]*n_item, range(n_item))\n self.predict([0]*n_item, range(n_item))\n [rating(1, i) for i in range(n_item)]\n \"\"\"\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n \"\"\"\n This test produced the expected results and can be called a success.\n The success comes from the following two changes.\n [1] The initial values of each id's embedding and bias were changed to values near 0.\n [2] The embeddings and biases were added to the l2_reg targets. (Presumably the weights of minor IDs get suppressed, but the details are unclear.)\n \"\"\"\n\n # --- Check the influence of each parameter.\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))\n self.dfm_params['epoch'] = 10\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['use_deep'] = False\n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n\n\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))\n self.dfm_params['epoch'] = 10\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.001\n self.dfm_params['learning_rate'] = 0.001\n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))\n self.dfm_params['epoch'] = 10\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.100\n self.dfm_params['learning_rate'] = 0.001\n 
self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))\n self.dfm_params['epoch'] = 10\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.001\n self.dfm_params['learning_rate'] = 0.010\n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n\n\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)\n self.dfm_params['epoch'] = 20\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.010\n self.dfm_params['learning_rate'] = 0.010\n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n\n\n # --- only fm \n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)\n self.dfm_params['epoch'] = 20\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.010\n self.dfm_params['learning_rate'] = 0.010\n self.dfm_params['use_deep'] = False\n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n \n # ---- high l2-reg\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)\n self.dfm_params['epoch'] = 20\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.100\n self.dfm_params['learning_rate'] = 0.010\n self.dfm_params['use_deep'] = False\n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n\n # ---- high learning_rate\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False)\n self.dfm_params['epoch'] = 20\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.0100\n self.dfm_params['l2_reg_embedding'] = 0.0100\n self.dfm_params['l2_reg_bias'] = 0.0100\n self.dfm_params['learning_rate'] = 0.0100\n self.dfm_params['use_deep'] = False\n \n self.fit(users, items, ratings, test_users, test_items, test_ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = 
self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n \n ## Conclusion: differences in frequency do not affect the biases.\n\n \n\n # ---- high learning_rate\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False)\n self.dfm_params['epoch'] = 20\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.0100\n #self.dfm_params['l2_reg_embedding'] = 0.0100\n #self.dfm_params['l2_reg_bias'] = 0.0100\n self.dfm_params['learning_rate'] = 0.0020\n self.dfm_params['use_deep'] = False\n self.dfm_params['batch_size'] = 32\n self.dfm_params['loss_type'] = 'mse'\n self.dfm_params['optimizer_type'] = 'sgd'\n #self.dfm_params['optimizer_type'] = 'adam'\n \n self.fit(users, items, ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n \n self.predict([0,0,150,150],[0,10,0,10])\n\n\n\n ##########################\n # Redefine this as a MovieLens CTR problem and compare performance\n import numpy as np \n ctr_users = list(users) + list(np.random.choice(list(set(users)), size=len(users)))\n ctr_items = list(items) + list(np.random.choice(list(set(items)), size=len(items)))\n ctrs = [1]*len(users) + [0]*len(users)\n \n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)\n self.dfm_params['epoch'] = 20\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['l2_reg'] = 0.0010\n #self.dfm_params['l2_reg_embedding'] = 0.0020\n #self.dfm_params['l2_reg_bias'] = 0.0020\n self.dfm_params['learning_rate'] = 0.00010\n #self.dfm_params['use_deep'] = False\n self.dfm_params['batch_size'] = 16\n self.dfm_params['loss_type'] = 'logloss'\n self.dfm_params['greater_is_better'] = True\n \n self.fit(ctr_users, ctr_items, ctrs)\n\n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n\n self.predict([0,0,150,150],[0,10,0,10])\n \n ########################\n # Test of the CTR-capable mode\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False, ctr_prediction=True)\n self.dfm_params['epoch'] = 30\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['batch_size'] = 32\n self.dfm_params['dropout_fm'] = [0.5, 0.5]\n self.dfm_params['l2_reg'] = 0.0\n self.dfm_params['l2_reg_embedding'] = 0.0\n self.dfm_params['l2_reg_bias'] = 0.0\n \n self.fit(users, items, ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n\n pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n pd.DataFrame(self.predict([200]*50,list(range(50)))).plot() # only the new user is always constant.\n \n self.predict([0,0,150,150],[0,10,0,10])\n\n self.predict([50]*50,list(range(50)))\n self.predict([100]*50,list(range(50)))\n self.predict([150]*50,list(range(50)))\n self.predict([200]*50,list(range(50))) # only the new user is always constant.\n 
self.predict([199]*50,list(range(50))) # only the new user is always constant.\n self.predict([198]*50,list(range(50))) # only the new user is always constant.\n self.predict([197]*50,list(range(50))) # only the new user is always constant.\n self.predict(list(range(200)),[50]*200) # only the new user is always constant.\n\n feature_embeddings[200]\n feature_bias[200]\n \n feature_embeddings[150]\n feature_bias[150]\n\n feature_embeddings[220]\n feature_embeddings[222]\n \n feature_embeddings[223]\n \n ########################\n # TensorFlow behavior test\n weight = tf.Variable(initial_value=[[0,1,2,3], [0,10,20,30], [0,100,200,300]], trainable=True, name='test', dtype=tf.float32)\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n sess.run(weight)\n op = weight[1,3].assign(9999.)\n sess.run(op)\n sess.run(weight)\n \n ########################\n # It did not go well, so test again,\n # this time checking with realistic data\n ############################################## \n # --- Generate synthetic data ---\n sample_size = 10000\n n_user = 2000\n n_item = 500\n ## the later the id, the lower its frequency in the training set.\n p_user = 1/np.array(range(1, n_user+1)); p_user /= p_user.sum()\n p_item = 1/np.array(range(1, n_item+1)); p_item /= p_item.sum()\n users = np.random.choice(range(n_user), size=sample_size, p=p_user) \n items = np.random.choice(range(n_item), size=sample_size, p=p_item) \n user_bias = {u:np.random.rand() for u in range(n_user)} \n item_bias = {i:np.random.rand() for i in range(n_item)} \n user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}\n item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}\n def rating(u, i):\n return 3*(sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i]) \n ratings = [rating(u, i) for u,i in zip(users, items)]\n\n ## user=2000 and item=500 are new IDs (absent from the training set)\n test_users = np.random.choice(range(n_user), size=sample_size) \n test_items = np.random.choice(range(n_item), size=sample_size) \n test_ratings = [rating(u, i) for u,i in zip(test_users, test_items)] \n # ------------------------------\n ############################################## \n\n \n for i in range(5):\n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False, ctr_prediction=False)\n self.dfm_params['epoch'] = 10\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['deep_layers'] = [16, 16]\n self.dfm_params['l2_reg'] = 0.0100 #0.0040\n self.dfm_params['l2_reg_embedding'] = 0.0000 #0.001\n self.dfm_params['l2_reg_bias'] = 0.000 #0.001\n self.dfm_params['learning_rate'] = 0.00100 #0.001\n self.dfm_params['use_deep'] = False\n self.dfm_params['batch_size'] = 128\n self.dfm_params['loss_type'] = 'mse'\n #self.dfm_params['optimizer_type'] = 'sgd'\n \n self.fit(users, items, ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n concat_projection = self.model.sess.run(self.model.weights[\"concat_projection\"]) # [0,1] are the weights on the user and item biases\n \n #pd.DataFrame(feature_embeddings).plot() \n pd.DataFrame(feature_bias).plot() \n pd.DataFrame(concat_projection).plot() \n #pd.DataFrame(self.predict([200]*50,list(range(50)))).plot() # only the new user is always constant.\n df_result = pd.DataFrame()\n df_result['u=10'] = self.predict([10]*n_item,list(range(n_item)))\n df_result['u=100'] = self.predict([100]*n_item,list(range(n_item)))\n df_result['u=1000'] = self.predict([1000]*n_item,list(range(n_item)))\n df_result['u=2000'] = self.predict([2000]*n_item,list(range(n_item)))\n df_result.plot()\n\n\n\n\n \"\"\" Best 
setting? \n self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False, ctr_prediction=False)\n self.dfm_params['epoch'] = 10\n self.dfm_params['embedding_size'] = 4\n self.dfm_params['deep_layers'] = [16, 16]\n self.dfm_params['l2_reg'] = 0.04 #0.0040\n self.dfm_params['l2_reg_embedding'] = 0.001 #0.001\n self.dfm_params['l2_reg_bias'] = 0.001 #0.001\n self.dfm_params['learning_rate'] = 0.0010 #0.001\n self.dfm_params['use_deep'] = True\n self.dfm_params['batch_size'] = 64\n self.dfm_params['loss_type'] = 'mse'\n #self.dfm_params['optimizer_type'] = 'sgd'\n \n self.fit(users, items, ratings)\n \n feature_embeddings = self.model.sess.run(self.model.weights[\"feature_embeddings\"])\n feature_bias = self.model.sess.run(self.model.weights[\"feature_bias\"])\n concat_bias = self.model.sess.run(self.model.weights[\"concat_bias\"])\n \n #pd.DataFrame(feature_embeddings).plot() \n #pd.DataFrame(feature_bias).plot() \n pd.DataFrame(self.predict([200]*50,list(range(50)))).plot() # only the new user is always constant.\n \"\"\"" ]
[ [ "tensorflow.global_variables_initializer", "pandas.DataFrame", "numpy.abs", "numpy.random.rand", "tensorflow.Session", "tensorflow.Variable", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.mean" ] ]
AlexKoff88/open_model_zoo
[ "8944a46653427cfa53db10fa91d677826adf31e1", "8944a46653427cfa53db10fa91d677826adf31e1" ]
[ "demos/smartlab_demo/python/segmentor.py", "demos/colorization_demo/python/colorization_demo.py" ]
[ "\"\"\"\n Copyright (C) 2021-2022 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport logging as log\nfrom pathlib import Path\nfrom scipy.special import softmax\nfrom openvino.runtime import PartialShape\n\nclass SegmentorMstcn:\n def __init__(self, ie, device, i3d_path, mstcn_path):\n self.ActionTerms = [\n \"background\",\n \"noise_action\",\n \"remove_support_sleeve\",\n \"remove_pointer_sleeve\",\n \"adjust_rider\",\n \"adjust_nut\",\n \"adjust_balancing\",\n \"open_box\",\n \"close_box\",\n \"choose_weight\",\n \"put_left\",\n \"put_right\",\n \"take_left\",\n \"take_right\",\n \"install_support_sleeve\",\n \"install_pointer_sleeve\",\n ]\n\n self.EmbedBufferTop = np.zeros((1024, 0))\n self.EmbedBufferFront = np.zeros((1024, 0))\n self.ImgSizeHeight = 224\n self.ImgSizeWidth = 224\n self.EmbedBatchSize = 1\n self.SegBatchSize = 24\n self.EmbedWindowLength = 16\n self.EmbedWindowStride = 1\n self.EmbedWindowAtrous = 3\n self.TemporalLogits = np.zeros((0, len(self.ActionTerms)))\n\n net = ie.read_model(i3d_path)\n net.reshape({net.inputs[0]: PartialShape(\n [self.EmbedBatchSize, self.EmbedWindowLength, self.ImgSizeHeight, self.ImgSizeWidth, 3])})\n nodes = net.get_ops()\n net.add_outputs(nodes[13].output(0))\n self.i3d = ie.compile_model(model=net, device_name=device)\n\n self.mstcn_net = ie.read_model(mstcn_path)\n self.mstcn = ie.compile_model(model=self.mstcn_net, device_name=device)\n self.mstcn_input_keys = self.mstcn.inputs\n self.mstcn_output_key = self.mstcn.outputs\n self.mstcn_net.reshape({'input': PartialShape([1, 2048, 1])})\n self.reshape_mstcn = ie.compile_model(model=self.mstcn_net, device_name=device)\n file_path = Path(__file__).parent / 'init_his.npz'\n init_his_feature = np.load(file_path)\n self.his_fea = {f'fhis_in_{i}': init_his_feature[f'arr_{i}'] for i in range(4)}\n\n def inference(self, buffer_top, buffer_front, frame_index):\n \"\"\"\n Args:\n buffer_top: buffers of the input image arrays for the top view\n buffer_front: buffers of the input image arrays for the front view\n frame_index: frame index of the latest frame\n Returns: the temporal prediction results for each frame (including the historical predictions),\n length of predictions == frame_index()\n \"\"\"\n ### run encoder ###\n self.EmbedBufferTop = self.feature_embedding(\n img_buffer=buffer_top,\n embedding_buffer=self.EmbedBufferTop,\n frame_index=frame_index)\n self.EmbedBufferFront = self.feature_embedding(\n img_buffer=buffer_front,\n embedding_buffer=self.EmbedBufferFront,\n frame_index=frame_index)\n\n ### run mstcn++ only batch size 1###\n if min(self.EmbedBufferTop.shape[-1], self.EmbedBufferFront.shape[-1]) > 0:\n self.action_segmentation()\n\n # ### get label ###\n valid_index = self.TemporalLogits.shape[0]\n if valid_index == 0:\n return []\n else:\n frame_predictions = [self.ActionTerms[i] for i in np.argmax(self.TemporalLogits, axis=1)]\n frame_predictions = [\"background\" for i in range(self.EmbedWindowLength - 1)] + 
frame_predictions\n\n return frame_predictions[-1]\n\n def feature_embedding(self, img_buffer, embedding_buffer, frame_index):\n # minimal temporal length for processor\n min_t = (self.EmbedWindowLength - 1) * self.EmbedWindowAtrous\n\n infer_request = self.i3d.create_infer_request()\n if frame_index > min_t:\n num_embedding = embedding_buffer.shape[-1]\n img_buffer = list(img_buffer)\n curr_t = self.EmbedWindowStride * num_embedding + (self.EmbedWindowLength - 1) * self.EmbedWindowAtrous\n while curr_t < frame_index:\n # absolute index in temporal shaft\n start_index = self.EmbedWindowStride * num_embedding\n\n if frame_index > len(img_buffer):\n # absolute index in buffer shaft\n start_index = start_index - (frame_index - len(img_buffer))\n\n input_data = [\n [cv2.resize(img_buffer[start_index + i * self.EmbedWindowAtrous],\n (self.ImgSizeHeight, self.ImgSizeWidth)) for i in range(self.EmbedWindowLength)]\n for j in range(self.EmbedBatchSize)]\n input_data = np.asarray(input_data).transpose((0, 4, 1, 2, 3))\n input_data = input_data * 127.5 + 127.5\n\n input_dict = {self.i3d.inputs[0]: input_data}\n out_logits = infer_request.infer(input_dict)[self.i3d.outputs[1]]\n out_logits = out_logits.squeeze((0, 3, 4))\n\n # ndarray: C x num_embedding\n embedding_buffer = np.concatenate((embedding_buffer, out_logits), axis=1)\n\n curr_t += self.EmbedWindowStride\n return embedding_buffer\n\n def action_segmentation(self):\n # read buffer\n embed_buffer_top = self.EmbedBufferTop\n embed_buffer_front = self.EmbedBufferFront\n batch_size = self.SegBatchSize\n start_index = self.TemporalLogits.shape[0]\n end_index = min(embed_buffer_top.shape[-1], embed_buffer_front.shape[-1])\n num_batch = (end_index - start_index) // batch_size\n\n infer_request = self.reshape_mstcn.create_infer_request()\n if num_batch < 0:\n log.debug(\"Waiting for the next frame ...\")\n elif num_batch == 0:\n log.debug(f\"start_index: {start_index} end_index: {end_index}\")\n unit1 = embed_buffer_top[:, start_index:end_index]\n unit2 = embed_buffer_front[:, start_index:end_index]\n feature_unit = np.concatenate([unit1[:, ], unit2[:, ]], axis=0)\n input_mstcn = np.expand_dims(feature_unit, 0)\n\n feed_dict = {}\n if len(self.his_fea) != 0:\n for key in self.mstcn_input_keys:\n if 'fhis_in_' in str(key.names):\n string = list(key.names)[0]\n feed_dict[string] = self.his_fea[string]\n feed_dict['input'] = input_mstcn\n if input_mstcn.shape == (1, 2048, 1):\n out = infer_request.infer(feed_dict)\n\n predictions = out[list(out.keys())[-1]]\n for key in self.mstcn_output_key:\n if 'fhis_in_' in str(key.names):\n string = list(key.names)[0]\n self.his_fea[string] = out[string]\n\n \"\"\"\n predictions --> 4x1x64x24\n his_fea --> [12*[1x64x2048], 11*[1x64x2048], 11*[1x64x2048], 11*[1x64x2048]]\n \"\"\"\n temporal_logits = predictions[:, :, :len(self.ActionTerms), :] # 4x1x16xN\n temporal_logits = softmax(temporal_logits[-1], 1) # 1x16xN\n temporal_logits = temporal_logits.transpose((0, 2, 1)).squeeze(axis=0)\n self.TemporalLogits = np.concatenate([self.TemporalLogits, temporal_logits], axis=0)\n else:\n for batch_idx in range(num_batch):\n unit1 = embed_buffer_top[:,\n start_index + batch_idx * batch_size:start_index + batch_idx * batch_size + batch_size]\n unit2 = embed_buffer_front[:,\n start_index + batch_idx * batch_size:start_index + batch_idx * batch_size + batch_size]\n feature_unit = np.concatenate([unit1[:, ], unit2[:, ]], axis=0)\n\n feed_dict = {}\n if len(self.his_fea) != 0:\n for key in self.mstcn_input_keys:\n if 'fhis_in_' 
in str(key.names):\n string = list(key.names)[0]\n feed_dict[key] = self.his_fea[string]\n feed_dict['input'] = feature_unit\n out = infer_request.infer(feed_dict)\n predictions = out[list(out.keys())[-1]]\n for key in self.mstcn_output_key:\n if 'fhis_in_' in str(key.names):\n string = list(key.names)[0]\n self.his_fea[string] = out[string]\n\n temporal_logits = predictions[:, :, :len(self.ActionTerms), :] # 4x1x16xN\n temporal_logits = softmax(temporal_logits[-1], 1) # 1x16xN\n temporal_logits = temporal_logits.transpose((0, 2, 1)).squeeze(axis=0)\n self.TemporalLogits = np.concatenate([self.TemporalLogits, temporal_logits], axis=0)\n", "#!/usr/bin/env python3\n\"\"\"\n Copyright (c) 2018-2021 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom openvino.runtime import Core, get_version\nimport cv2 as cv\nimport numpy as np\nimport logging as log\nfrom time import perf_counter\nimport sys\nfrom argparse import ArgumentParser, SUPPRESS\nfrom pathlib import Path\n\nsys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python'))\n\nimport monitors\nfrom images_capture import open_images_capture\nfrom openvino.model_zoo.model_api.performance_metrics import PerformanceMetrics\n\nlog.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.DEBUG, stream=sys.stdout)\n\n\ndef build_arg():\n parser = ArgumentParser(add_help=False)\n in_args = parser.add_argument_group('Options')\n in_args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Help with the script.')\n in_args.add_argument(\"-m\", \"--model\", help=\"Required. Path to .xml file with pre-trained model.\",\n required=True, type=Path)\n in_args.add_argument(\"-d\", \"--device\",\n help=\"Optional. Specify target device for infer: CPU, GPU, HDDL or MYRIAD. \"\n \"Default: CPU\",\n default=\"CPU\", type=str)\n in_args.add_argument('-i', \"--input\", required=True,\n help='Required. An input to process. The input must be a single image, '\n 'a folder of images, video file or camera id.')\n in_args.add_argument('--loop', default=False, action='store_true',\n help='Optional. Enable reading the input in a loop.')\n in_args.add_argument('-o', '--output', required=False,\n help='Optional. Name of the output file(s) to save.')\n in_args.add_argument('-limit', '--output_limit', required=False, default=1000, type=int,\n help='Optional. Number of frames to store in output. '\n 'If 0 is set, all frames are stored.')\n in_args.add_argument(\"--no_show\", help=\"Optional. Don't show output.\",\n action='store_true', default=False)\n in_args.add_argument(\"-u\", \"--utilization_monitors\", default=\"\", type=str,\n help=\"Optional. 
List of monitors to show initially.\")\n return parser\n\ndef main(args):\n cap = open_images_capture(args.input, args.loop)\n\n log.info('OpenVINO Inference Engine')\n log.info('\\tbuild: {}'.format(get_version()))\n core = Core()\n\n log.info('Reading model {}'.format(args.model))\n model = core.read_model(args.model)\n\n input_tensor_name = 'data_l'\n input_shape = model.input(input_tensor_name).shape\n assert input_shape[1] == 1, \"Expected model input shape with 1 channel\"\n\n inputs = {}\n for input in model.inputs:\n inputs[input.get_any_name()] = np.zeros(input.shape)\n\n assert len(model.outputs) == 1, \"Expected number of outputs is equal 1\"\n\n compiled_model = core.compile_model(model, device_name=args.device)\n output_tensor = compiled_model.outputs[0]\n infer_request = compiled_model.create_infer_request()\n log.info('The model {} is loaded to {}'.format(args.model, args.device))\n\n _, _, h_in, w_in = input_shape\n\n frames_processed = 0\n imshow_size = (640, 480)\n graph_size = (imshow_size[0] // 2, imshow_size[1] // 4)\n presenter = monitors.Presenter(args.utilization_monitors, imshow_size[1] * 2 - graph_size[1], graph_size)\n metrics = PerformanceMetrics()\n\n video_writer = cv.VideoWriter()\n if args.output and not video_writer.open(args.output, cv.VideoWriter_fourcc(*'MJPG'),\n cap.fps(), (imshow_size[0] * 2, imshow_size[1] * 2)):\n raise RuntimeError(\"Can't open video writer\")\n\n start_time = perf_counter()\n original_frame = cap.read()\n if original_frame is None:\n raise RuntimeError(\"Can't read an image from the input\")\n\n while original_frame is not None:\n (h_orig, w_orig) = original_frame.shape[:2]\n\n if original_frame.shape[2] > 1:\n frame = cv.cvtColor(cv.cvtColor(original_frame, cv.COLOR_BGR2GRAY), cv.COLOR_GRAY2RGB)\n else:\n frame = cv.cvtColor(original_frame, cv.COLOR_GRAY2RGB)\n\n img_rgb = frame.astype(np.float32) / 255\n img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab)\n img_l_rs = cv.resize(img_lab.copy(), (w_in, h_in))[:, :, 0]\n\n inputs[input_tensor_name] = np.expand_dims(img_l_rs, axis=[0, 1])\n\n res = infer_request.infer(inputs)[output_tensor]\n\n update_res = np.squeeze(res)\n\n out = update_res.transpose((1, 2, 0))\n out = cv.resize(out, (w_orig, h_orig))\n img_lab_out = np.concatenate((img_lab[:, :, 0][:, :, np.newaxis], out), axis=2)\n img_bgr_out = np.clip(cv.cvtColor(img_lab_out, cv.COLOR_Lab2BGR), 0, 1)\n\n original_image = cv.resize(original_frame, imshow_size)\n grayscale_image = cv.resize(frame, imshow_size)\n colorize_image = (cv.resize(img_bgr_out, imshow_size) * 255).astype(np.uint8)\n lab_image = cv.resize(img_lab_out, imshow_size).astype(np.uint8)\n\n original_image = cv.putText(original_image, 'Original', (25, 50),\n cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)\n grayscale_image = cv.putText(grayscale_image, 'Grayscale', (25, 50),\n cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)\n colorize_image = cv.putText(colorize_image, 'Colorize', (25, 50),\n cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)\n lab_image = cv.putText(lab_image, 'LAB interpretation', (25, 50),\n cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)\n\n ir_image = [cv.hconcat([original_image, grayscale_image]),\n cv.hconcat([lab_image, colorize_image])]\n final_image = cv.vconcat(ir_image)\n\n metrics.update(start_time, final_image)\n\n frames_processed += 1\n if video_writer.isOpened() and (args.output_limit <= 0 or frames_processed <= args.output_limit):\n video_writer.write(final_image)\n\n 
presenter.drawGraphs(final_image)\n if not args.no_show:\n cv.imshow('Colorization Demo', final_image)\n key = cv.waitKey(1)\n if key in {ord(\"q\"), ord(\"Q\"), 27}:\n break\n presenter.handleKey(key)\n start_time = perf_counter()\n original_frame = cap.read()\n\n metrics.log_total()\n for rep in presenter.reportMeans():\n log.info(rep)\n\nif __name__ == \"__main__\":\n args = build_arg().parse_args()\n sys.exit(main(args) or 0)\n" ]
[ [ "numpy.load", "numpy.zeros", "scipy.special.softmax", "numpy.asarray", "numpy.argmax", "numpy.expand_dims", "numpy.concatenate" ], [ "numpy.squeeze", "numpy.concatenate", "numpy.expand_dims", "numpy.zeros" ] ]
eduardojdiniz/CompNeuro
[ "20269e66540dc4e802273735c97323020ee37406" ]
[ "CichyWanderers/dataloader.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n\n# Imports\nimport h5py\nimport scipy.io as sio\nimport os\nimport requests\nimport zipfile\nimport numpy as np\nimport glob\nimport shutil\nimport pickle\n\n\ndef loadmat(matfile):\n \"\"\"Function to load .mat files.\n\n Parameters\n ----------\n matfile : str\n path to `matfile` containing fMRI data for a given trial.\n\n Returns\n -------\n dict\n dictionary containing data in key 'vol' for a given trial.\n\n \"\"\"\n try:\n f = h5py.File(matfile)\n except (IOError, OSError):\n return sio.loadmat(matfile)\n else:\n return {name: np.transpose(f.get(name)) for name in f.keys()}\n\n\ndef download_Cichy(**kwargs):\n \"\"\"Function to download data from Cichy et al, 2014.\n\n Parameters\n ----------\n kwargs: dict\n 'data_dirpath': str, data directory path. Default: ./data\n 'data_url' : str, data url. Default: https://osf.io/7vpyh/download'\n 'label_url' : str, visual stimuli labels url. Default:\n http://wednesday.csail.mit.edu/MEG1_MEG_Clear_Data/visual_stimuli.mat\n\n Returns\n -------\n path_dict: dict\n 'fMRI' : str, fMRI filepath\n 'MEG' : str, MEG filepath\n 'label': str, visual stimuli filepath\n 'image': str, jpeg images dirpath\n 'tmp' : str, temporary data dirpath\n\n \"\"\"\n cwd = os.getcwd()\n data_dirpath = kwargs.pop('data_dirpath', os.path.join(cwd, \"data\"))\n if not os.path.exists(data_dirpath):\n os.makedirs(data_dirpath)\n\n tmp_dirpath = os.path.join(cwd, \"tmp\")\n if not os.path.exists(tmp_dirpath):\n os.makedirs(tmp_dirpath)\n\n data_url = kwargs.pop('data_url', 'https://osf.io/7vpyh/download')\n label_url = kwargs.pop(\n 'label_url',\n 'http://wednesday.csail.mit.edu/MEG1_MEG_Clear_Data/visual_stimuli.mat')\n data_filepath = os.path.join(tmp_dirpath, 'data.zip')\n label_filepath = os.path.join(tmp_dirpath, 'visual_stimuli.mat')\n if not os.path.exists(tmp_dirpath):\n os.makedirs(tmp_dirpath)\n\n # Download MEG and fMRI RDMs\n r = requests.get(data_url)\n with open(data_filepath, 'wb') as f:\n f.write(r.content)\n\n # Download visual stimuli\n r = requests.get(label_url)\n with open(label_filepath, 'wb') as f:\n f.write(r.content)\n\n # Extract directory '92_Image_Set' and 'MEG_decoding_RDMs.mat'\n with zipfile.ZipFile(data_filepath, 'r') as zip_file:\n zip_file.extractall(tmp_dirpath)\n\n # Move image files to permanent directory\n tmp_image_dirpath = os.path.join(tmp_dirpath, '92_Image_Set', '92images')\n image_dirpath = os.path.join(cwd, 'data', 'images')\n if not os.path.exists(image_dirpath):\n os.makedirs(image_dirpath)\n\n for f in os.listdir(tmp_image_dirpath):\n shutil.move(tmp_image_dirpath + f, image_dirpath)\n\n path_dict = {}\n fMRI_filepath = os.path.join(\n data_dirpath,\n '92_Image_Set',\n 'target_fmri.mat')\n path_dict['fMRI'] = fMRI_filepath\n path_dict['MEG'] = os.path.join(data_dirpath, 'MEG_decoding_RDMs')\n path_dict['label'] = label_filepath\n path_dict['image'] = image_dirpath\n path_dict['tmp'] = tmp_dirpath\n\n return path_dict\n\n\ndef get_stim_dict(**kwargs):\n \"\"\"Get category names and binary features describing the Cichy dataset\n\n Parameters\n ----------\n kwargs: dict\n 'fMRI' : str, fMRI filepath\n 'MEG' : str, MEG filepath\n 'label': str, visual stimuli filepath\n 'image': str, jpeg images dirpath\n 'tmp' : str, temporary data dirpath\n\n Returns\n -------\n stim_dict: dict\n 'category' : list[str], indicating category\n 'human' : list[int], indicating membership (0=not a member, 1=member)\n 'face' : list[int], indicating membership (0=not a member, 1=member)\n 'animate' : list[int], 
indicating membership (0=not a member, 1=member)\n 'natural' : list[int], indicating membership (0=not a member, 1=member)\n 'imagepath' : list[str], jpeg image filepaths\n\n \"\"\"\n stimuli_filepath = kwargs.pop('label', '')\n image_dirpath = kwargs.pop('image', '')\n\n stim_dat = loadmat(stimuli_filepath)['visual_stimuli']\n fields = ['category', 'human', 'face', 'animate', 'natural']\n\n stim_dict = {field: [] for field in fields}\n for ii in range(92):\n for jj, field in enumerate(fields):\n stim_dict[field].append(stim_dat[0, ii][jj][0])\n for field in fields[1:]:\n stim_dict[field] = np.array(stim_dict[field]).squeeze()\n # list.sort() sorts in place and returns None, so use sorted() here\n stim_dict['imagepath'] = sorted(glob.glob(image_dirpath + '/*.jpg'))\n\n return stim_dict\n\n\ndef get_RDM_dict(**kwargs):\n \"\"\"Get MEG and fMRI RDMs from the Cichy dataset\n\n Parameters\n ----------\n kwargs: dict\n 'fMRI' : str, fMRI filepath\n 'MEG' : str, MEG filepath\n 'label': str, visual stimuli filepath\n 'image': str, jpeg images dirpath\n 'tmp' : str, temporary data dirpath\n\n Returns\n -------\n RDM_dict: dict\n 'MEG' : ndarray, (16, 2, 1301, 92, 92)\n 16 subjects, 2 sessions, 1301 time points (from -100 ms to 1200 ms\n with respect to stimulus onset at 0 ms), 92 conditions by 92 conditions.\n The last 2 dimensions form representational dissimilarity matrices of\n decoding accuracies, symmetric across the diagonal, with the diagonal\n undefined (NaN).\n 'fMRI_EVC': ndarray, (15, 92, 92)\n 15 subjects, 92 conditions by 92 conditions.\n The last 2 dimensions form a representational dissimilarity matrix of\n spearman correlation for the EVC cortex, symmetric across the diagonal,\n with the diagonal undefined (NaN).\n 'fMRI_IT' : ndarray, (15, 92, 92)\n 15 subjects, 92 conditions by 92 conditions.\n The last 2 dimensions form a representational dissimilarity matrix of\n spearman correlation for the IT cortex, symmetric across the diagonal,\n with the diagonal undefined (NaN).\n\n \"\"\"\n fMRI_filepath = kwargs.pop('fMRI', '')\n MEG_filepath = kwargs.pop('MEG', '')\n\n RDM_dict = {}\n\n RDM_dict['MEG'] = loadmat(MEG_filepath)['MEG_decoding_RDMs']\n\n fMRI_RDMs = loadmat(fMRI_filepath)\n RDM_dict['fMRI_EVC'] = fMRI_RDMs['EVC_RDMs']\n RDM_dict['fMRI_IT'] = fMRI_RDMs['IT_RDMs']\n\n return RDM_dict\n\n\ndef main():\n \"\"\"Download and organize Cichy et al, 2014 dataset\n\n Parameters\n ----------\n None\n\n Returns\n -------\n data_dirpath: str, data directory containing an image directory with the 92\n visual stimuli and pickle files containing the two dictionaries,\n stim_dict.pkl and RDM_dict.pkl. See help(get_stim_dict) and\n help(get_RDM_dict) for details.\n\n \"\"\"\n url_dict = {\n 'data_url': 'https://osf.io/7vpyh/download',\n 'label_url': 'http://wednesday.csail.mit.edu/MEG1_MEG_Clear_Data/visual_stimuli.mat'}\n\n # Download Cichy et al, 2014 dataset\n path_dict = download_Cichy(**url_dict)\n # Get stimuli dictionary\n stim_dict = get_stim_dict(**path_dict)\n # Get RDM dictionary\n RDM_dict = get_RDM_dict(**path_dict)\n\n data_dirpath = path_dict['data_dirpath']\n stim_dict_pickle = os.path.join(data_dirpath, 'stim_dict.pkl')\n RDM_dict_pickle = os.path.join(data_dirpath, 'RDM_dict.pkl')\n\n with open(stim_dict_pickle, 'wb') as pkl:\n pickle.dump(stim_dict, pkl)\n\n with open(RDM_dict_pickle, 'wb') as pkl:\n pickle.dump(RDM_dict, pkl)\n\n # Clean temporary directory (the path lives in path_dict, not url_dict)\n shutil.rmtree(path_dict['tmp'])\n\n return data_dirpath\n\n\nif __name__ == \"__main__\":\n data_dirpath = main()\n" ]
[ [ "scipy.io.loadmat", "numpy.array" ] ]
gengkunling/tensorflow_poet
[ "5ef36da08ee0f50cdaa2d08753393c549c2e75b3" ]
[ "scripts/retrain.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Simple transfer learning with Inception v3 or Mobilenet models.\n\nWith support for TensorBoard.\n\nThis example shows how to take a Inception v3 or Mobilenet model trained on\nImageNet images, and train a new top layer that can recognize other classes of\nimages.\n\nThe top layer receives as input a 2048-dimensional vector (1001-dimensional for\nMobilenet) for each image. We train a softmax layer on top of this\nrepresentation. Assuming the softmax layer contains N labels, this corresponds\nto learning N + 2048*N (or 1001*N) model parameters corresponding to the\nlearned biases and weights.\n\nHere's an example, which assumes you have a folder containing class-named\nsubfolders, each full of images for each label. The example folder flower_photos\nshould have a structure like this:\n\n~/flower_photos/daisy/photo1.jpg\n~/flower_photos/daisy/photo2.jpg\n...\n~/flower_photos/rose/anotherphoto77.jpg\n...\n~/flower_photos/sunflower/somepicture.jpg\n\nThe subfolder names are important, since they define what label is applied to\neach image, but the filenames themselves don't matter. Once your images are\nprepared, you can run the training with a command like this:\n\n\n```bash\nbazel build tensorflow/examples/image_retraining:retrain && \\\nbazel-bin/tensorflow/examples/image_retraining/retrain \\\n --image_dir ~/flower_photos\n```\n\nOr, if you have a pip installation of tensorflow, `retrain.py` can be run\nwithout bazel:\n\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos\n```\n\nYou can replace the image_dir argument with any folder containing subfolders of\nimages. The label for each image is taken from the name of the subfolder it's\nin.\n\nThis produces a new model file that can be loaded and run by any TensorFlow\nprogram, for example the label_image sample code.\n\nBy default this script will use the high accuracy, but comparatively large and\nslow Inception v3 model architecture. It's recommended that you start with this\nto validate that you have gathered good training data, but if you want to deploy\non resource-limited platforms, you can try the `--architecture` flag with a\nMobilenet model. For example:\n\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos --architecture mobilenet_1.0_224\n```\n\nThere are 32 different Mobilenet models to choose from, with a variety of file\nsize and latency options. The first number can be '1.0', '0.75', '0.50', or\n'0.25' to control the size, and the second controls the input image size, either\n'224', '192', '160', or '128', with smaller sizes running faster. 
See\nhttps://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\nfor more information on Mobilenet.\n\nTo use with TensorBoard:\n\nBy default, this script will log summaries to /tmp/retrain_logs directory\n\nVisualize the summaries with this command:\n\ntensorboard --logdir /tmp/retrain_logs\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport hashlib\nimport os.path\nimport random\nimport re\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\nFLAGS = None\n\n# These are all parameters that are tied to the particular model architecture\n# we're using for Inception v3. These include things like tensor names and their\n# sizes. If you want to adapt this script to work with another model, you will\n# need to update these to reflect the values in the network you're using.\nMAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M\n\n\ndef create_image_lists(image_dir, testing_percentage, validation_percentage):\n \"\"\"Builds a list of training images from the file system.\n\n Analyzes the sub folders in the image directory, splits them into stable\n training, testing, and validation sets, and returns a data structure\n describing the lists of images for each label and their paths.\n\n Args:\n image_dir: String path to a folder containing subfolders of images.\n testing_percentage: Integer percentage of the images to reserve for tests.\n validation_percentage: Integer percentage of images reserved for validation.\n\n Returns:\n A dictionary containing an entry for each label subfolder, with images split\n into training, testing, and validation sets within each label.\n \"\"\"\n if not gfile.Exists(image_dir):\n tf.logging.error(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in gfile.Walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n if is_root_dir:\n is_root_dir = False\n continue\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG', 'png', 'PNG']\n file_list = []\n dir_name = os.path.basename(sub_dir)\n if dir_name == image_dir:\n continue\n tf.logging.info(\"Looking for images in '\" + dir_name + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(gfile.Glob(file_glob))\n if not file_list:\n tf.logging.warning('No files found')\n continue\n if len(file_list) < 20:\n tf.logging.warning(\n 'WARNING: Folder has less than 20 images, which may cause issues.')\n elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:\n tf.logging.warning(\n 'WARNING: Folder {} has more than {} images. Some images will '\n 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n training_images = []\n testing_images = []\n validation_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. 
For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_IMAGES_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_IMAGES_PER_CLASS))\n if percentage_hash < validation_percentage:\n validation_images.append(base_name)\n elif percentage_hash < (testing_percentage + validation_percentage):\n testing_images.append(base_name)\n else:\n training_images.append(base_name)\n result[label_name] = {\n 'dir': dir_name,\n 'training': training_images,\n 'testing': testing_images,\n 'validation': validation_images,\n }\n return result\n\n\ndef get_image_path(image_lists, label_name, index, image_dir, category):\n \"\"\"Returns a path to an image for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Int offset of the image we want. This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n\n \"\"\"\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path\n\n\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,\n category, architecture):\n \"\"\"Returns a path to a bottleneck file for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n category: Name string of set to pull images from - training, testing, or\n validation.\n architecture: The name of the model architecture.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n \"\"\"\n return get_image_path(image_lists, label_name, index, bottleneck_dir,\n category) + '_' + architecture + '.txt'\n\n\ndef create_model_graph(model_info):\n \"\"\"Creates a graph from saved GraphDef file and returns a Graph object.\n\n Args:\n model_info: Dictionary containing information about the model architecture.\n\n Returns:\n Graph holding the trained Inception network, and various tensors we'll be\n manipulating.\n \"\"\"\n with tf.Graph().as_default() as graph:\n model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])\n with gfile.FastGFile(model_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(\n graph_def,\n name='',\n return_elements=[\n model_info['bottleneck_tensor_name'],\n model_info['resized_input_tensor_name'],\n ]))\n return graph, bottleneck_tensor, resized_input_tensor\n\n\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Runs inference on an image to extract the 'bottleneck' summary layer.\n\n Args:\n sess: Current active TensorFlow Session.\n image_data: String of raw JPEG data.\n image_data_tensor: Input data layer in the graph.\n decoded_image_tensor: Output of initial image resizing and preprocessing.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: Layer before the final softmax.\n\n Returns:\n Numpy array of bottleneck values.\n \"\"\"\n # First decode the JPEG image, resize it, and rescale the pixel values.\n resized_input_values = sess.run(decoded_image_tensor,\n {image_data_tensor: image_data})\n # Then run it through the recognition network.\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: resized_input_values})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values\n\n\ndef maybe_download_and_extract(data_url):\n \"\"\"Download and extract model tar file.\n\n If the pretrained model we're using doesn't already exist, this function\n downloads it from the TensorFlow.org website and unpacks it into a directory.\n\n Args:\n data_url: Web location of the tar file containing the pretrained model.\n \"\"\"\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = data_url.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n tf.logging.info('Successfully downloaded %s %d bytes.', filename,\n statinfo.st_size)\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef ensure_dir_exists(dir_name):\n \"\"\"Makes sure the folder exists on disk.\n\n Args:\n dir_name: Path string to the folder we want to create.\n \"\"\"\n if not 
os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\nbottleneck_path_2_bottleneck_values = {}\n\n\ndef create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Create a single bottleneck file.\"\"\"\n tf.logging.info('Creating bottleneck at ' + bottleneck_path)\n image_path = get_image_path(image_lists, label_name, index,\n image_dir, category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n image_data = gfile.FastGFile(image_path, 'rb').read()\n try:\n bottleneck_values = run_bottleneck_on_image(\n sess, image_data, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor)\n except Exception as e:\n raise RuntimeError('Error during processing file %s (%s)' % (image_path,\n str(e)))\n bottleneck_string = ','.join(str(x) for x in bottleneck_values)\n with open(bottleneck_path, 'w') as bottleneck_file:\n bottleneck_file.write(bottleneck_string)\n\n\ndef get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,\n category, bottleneck_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor, architecture):\n \"\"\"Retrieves or calculates bottleneck values for an image.\n\n If a cached version of the bottleneck data exists on-disk, return that,\n otherwise calculate the data and save it to disk for future use.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. This will be modulo-ed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of which set to pull images from - training, testing,\n or validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: The tensor to feed loaded jpeg data into.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The output tensor for the bottleneck values.\n architecture: The name of the model architecture.\n\n Returns:\n Numpy array of values produced by the bottleneck layer for the image.\n \"\"\"\n label_lists = image_lists[label_name]\n sub_dir = label_lists['dir']\n sub_dir_path = os.path.join(bottleneck_dir, sub_dir)\n ensure_dir_exists(sub_dir_path)\n bottleneck_path = get_bottleneck_path(image_lists, label_name, index,\n bottleneck_dir, category, architecture)\n if not os.path.exists(bottleneck_path):\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n did_hit_error = False\n try:\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n except ValueError:\n tf.logging.warning('Invalid float found, recreating bottleneck')\n did_hit_error = True\n if did_hit_error:\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n 
bottleneck_string = bottleneck_file.read()\n # Allow exceptions to propagate here, since they shouldn't happen after a\n # fresh creation\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n return bottleneck_values\n\n\ndef cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,\n jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture):\n \"\"\"Ensures all the training, testing, and validation bottlenecks are cached.\n\n Because we're likely to read the same image multiple times (if there are no\n distortions applied during training) it can speed things up a lot if we\n calculate the bottleneck layer values once for each image during\n preprocessing, and then just read those cached values repeatedly during\n training. Here we go through all the images we've found, calculate those\n values, and save them off.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n image_dir: Root folder string of the subfolders containing the training\n images.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: Input tensor for jpeg data from file.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The penultimate output layer of the graph.\n architecture: The name of the model architecture.\n\n Returns:\n Nothing.\n \"\"\"\n how_many_bottlenecks = 0\n ensure_dir_exists(bottleneck_dir)\n for label_name, label_lists in image_lists.items():\n for category in ['training', 'testing', 'validation']:\n category_list = label_lists[category]\n for index, unused_base_name in enumerate(category_list):\n get_or_create_bottleneck(\n sess, image_lists, label_name, index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture)\n\n how_many_bottlenecks += 1\n if how_many_bottlenecks % 100 == 0:\n tf.logging.info(\n str(how_many_bottlenecks) + ' bottleneck files created.')\n\n\ndef get_random_cached_bottlenecks(sess, image_lists, how_many, category,\n bottleneck_dir, image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor, architecture):\n \"\"\"Retrieves bottleneck values for cached images.\n\n If no distortions are being applied, this function can retrieve the cached\n bottleneck values directly from disk for images. 
It picks a random set of\n images from the specified category.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: If positive, a random sample of this size will be chosen.\n If negative, all bottlenecks will be retrieved.\n category: Name string of which set to pull from - training, testing, or\n validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n image_dir: Root folder string of the subfolders containing the training\n images.\n jpeg_data_tensor: The layer to feed jpeg image data into.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n architecture: The name of the model architecture.\n\n Returns:\n List of bottleneck arrays, their corresponding ground truths, and the\n relevant filenames.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n filenames = []\n if how_many >= 0:\n # Retrieve a random sample of bottlenecks.\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(\n sess, image_lists, label_name, image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n filenames.append(image_name)\n else:\n # Retrieve all bottlenecks.\n for label_index, label_name in enumerate(image_lists.keys()):\n for image_index, image_name in enumerate(\n image_lists[label_name][category]):\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(\n sess, image_lists, label_name, image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n filenames.append(image_name)\n return bottlenecks, ground_truths, filenames\n\n\ndef get_random_distorted_bottlenecks(\n sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,\n distorted_image, resized_input_tensor, bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for training images, after distortions.\n\n If we're training with distortions like crops, scales, or flips, we have to\n recalculate the full model for every image, and so we can't use cached\n bottleneck values. 
Instead we find random images for the requested category,\n run them through the distortion graph, and then the full graph to get the\n bottleneck results for each.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: The integer number of bottleneck values to return.\n category: Name string of which set of images to fetch - training, testing,\n or validation.\n image_dir: Root folder string of the subfolders containing the training\n images.\n input_jpeg_tensor: The input layer we feed the image data to.\n distorted_image: The output node of the distortion graph.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays and their corresponding ground truths.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_path = get_image_path(image_lists, label_name, image_index, image_dir,\n category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n jpeg_data = gfile.FastGFile(image_path, 'rb').read()\n # Note that we materialize the distorted_image_data as a numpy array before\n # sending running inference on the image. This involves 2 memory copies and\n # might be optimized in other implementations.\n distorted_image_data = sess.run(distorted_image,\n {input_jpeg_tensor: jpeg_data})\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: distorted_image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck_values)\n ground_truths.append(ground_truth)\n return bottlenecks, ground_truths\n\n\ndef should_distort_images(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Whether any distortions are enabled, from the input flags.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n\n Returns:\n Boolean value indicating whether any distortions should be applied.\n \"\"\"\n return (flip_left_right or (random_crop != 0) or (random_scale != 0) or\n (random_brightness != 0))\n\n\ndef add_input_distortions(flip_left_right, random_crop, random_scale,\n random_brightness, input_width, input_height,\n input_depth, input_mean, input_std):\n \"\"\"Creates the operations to apply the specified distortions.\n\n During training it can help to improve the results if we run the images\n through simple distortions like crops, scales, and flips. These reflect the\n kind of variations we expect in the real world, and so can help train the\n model to cope with natural data more effectively. Here we take the supplied\n parameters and construct a network of operations to apply them to an image.\n\n Cropping\n ~~~~~~~~\n\n Cropping is done by placing a bounding box at a random position in the full\n image. The cropping parameter controls the size of that box relative to the\n input image. 
If it's zero, then the box is the same size as the input and no\n cropping is performed. If the value is 50%, then the crop box will be half the\n width and height of the input. In a diagram it looks like this:\n\n < width >\n +---------------------+\n | |\n | width - crop% |\n | < > |\n | +------+ |\n | | | |\n | | | |\n | | | |\n | +------+ |\n | |\n | |\n +---------------------+\n\n Scaling\n ~~~~~~~\n\n Scaling is a lot like cropping, except that the bounding box is always\n centered and its size varies randomly within the given range. For example if\n the scale percentage is zero, then the bounding box is the same size as the\n input and no scaling is applied. If it's 50%, then the bounding box will be in\n a random range between half the width and height and full size.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n graph.\n input_width: Horizontal size of expected input image to model.\n input_height: Vertical size of expected input image to model.\n input_depth: How many channels the expected input image should have.\n input_mean: Pixel value that should be zero in the image for the graph.\n input_std: How much to divide the pixel values by before recognition.\n\n Returns:\n The jpeg input layer and the distorted result tensor.\n \"\"\"\n\n jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n margin_scale = 1.0 + (random_crop / 100.0)\n resize_scale = 1.0 + (random_scale / 100.0)\n margin_scale_value = tf.constant(margin_scale)\n resize_scale_value = tf.random_uniform(tensor_shape.scalar(),\n minval=1.0,\n maxval=resize_scale)\n scale_value = tf.multiply(margin_scale_value, resize_scale_value)\n precrop_width = tf.multiply(scale_value, input_width)\n precrop_height = tf.multiply(scale_value, input_height)\n precrop_shape = tf.stack([precrop_height, precrop_width])\n precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)\n precropped_image = tf.image.resize_bilinear(decoded_image_4d,\n precrop_shape_as_int)\n precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])\n cropped_image = tf.random_crop(precropped_image_3d,\n [input_height, input_width, input_depth])\n if flip_left_right:\n flipped_image = tf.image.random_flip_left_right(cropped_image)\n else:\n flipped_image = cropped_image\n brightness_min = 1.0 - (random_brightness / 100.0)\n brightness_max = 1.0 + (random_brightness / 100.0)\n brightness_value = tf.random_uniform(tensor_shape.scalar(),\n minval=brightness_min,\n maxval=brightness_max)\n brightened_image = tf.multiply(flipped_image, brightness_value)\n offset_image = tf.subtract(brightened_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')\n return jpeg_data, distort_result\n\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n 
tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\ndef add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor,\n bottleneck_tensor_size):\n \"\"\"Adds a new softmax and fully-connected layer for training.\n\n We need to retrain the top layer to identify our new classes, so this function\n adds the right operations to the graph, along with some variables to hold the\n weights, and then sets up all the gradients for the backward pass.\n\n The set up for the softmax and fully-connected layers is based on:\n https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html\n\n Args:\n class_count: Integer of how many categories of things we're trying to\n recognize.\n final_tensor_name: Name string for the new final node that produces results.\n bottleneck_tensor: The output of the main CNN graph.\n bottleneck_tensor_size: How many entries in the bottleneck vector.\n\n Returns:\n The tensors for the training and cross entropy results, and tensors for the\n bottleneck input and ground truth input.\n \"\"\"\n with tf.name_scope('input'):\n bottleneck_input = tf.placeholder_with_default(\n bottleneck_tensor,\n shape=[None, bottleneck_tensor_size],\n name='BottleneckInputPlaceholder')\n\n ground_truth_input = tf.placeholder(tf.float32,\n [None, class_count],\n name='GroundTruthInput')\n\n # Organizing the following ops as `final_training_ops` so they're easier\n # to see in TensorBoard\n layer_name = 'final_training_ops'\n with tf.name_scope(layer_name):\n with tf.name_scope('weights'):\n initial_value = tf.truncated_normal(\n [bottleneck_tensor_size, class_count], stddev=0.001)\n\n layer_weights = tf.Variable(initial_value, name='final_weights')\n\n variable_summaries(layer_weights)\n with tf.name_scope('biases'):\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\n variable_summaries(layer_biases)\n with tf.name_scope('Wx_plus_b'):\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\n tf.summary.histogram('pre_activations', logits)\n\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\n tf.summary.histogram('activations', final_tensor)\n\n with tf.name_scope('cross_entropy'):\n #cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n # labels=ground_truth_input, logits=logits)\n cross_entropy = tf.nn.weighted_cross_entropy_with_logits(targets=ground_truth_input, logits=logits, pos_weight=4)\n with tf.name_scope('total'):\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n tf.summary.scalar('cross_entropy', cross_entropy_mean)\n\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n train_step = optimizer.minimize(cross_entropy_mean)\n\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,\n final_tensor)\n\n\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\n \"\"\"Inserts the operations we need to evaluate the accuracy of our results.\n\n Args:\n result_tensor: The new final node that produces results.\n ground_truth_tensor: The node we feed ground truth data\n into.\n\n Returns:\n Tuple of (evaluation step, prediction).\n \"\"\"\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n prediction = tf.argmax(result_tensor, 1)\n correct_prediction = tf.equal(\n prediction, tf.argmax(ground_truth_tensor, 1))\n with tf.name_scope('accuracy'):\n evaluation_step = 
tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', evaluation_step)\n return evaluation_step, prediction\n\n\ndef save_graph_to_file(sess, graph, graph_file_name):\n output_graph_def = graph_util.convert_variables_to_constants(\n sess, graph.as_graph_def(), [FLAGS.final_tensor_name])\n with gfile.FastGFile(graph_file_name, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n return\n\n\ndef prepare_file_system():\n # Setup the directory we'll write summaries to for TensorBoard\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n if FLAGS.intermediate_store_frequency > 0:\n ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)\n return\n\n\ndef create_model_info(architecture):\n \"\"\"Given the name of a model architecture, returns information about it.\n\n There are different base image recognition pretrained models that can be\n retrained using transfer learning, and this function translates from the name\n of a model to the attributes that are needed to download and train with it.\n\n Args:\n architecture: Name of a model architecture.\n\n Returns:\n Dictionary of information about the model, or None if the name isn't\n recognized\n\n Raises:\n ValueError: If architecture name is unknown.\n \"\"\"\n architecture = architecture.lower()\n if architecture == 'inception_v3':\n # pylint: disable=line-too-long\n data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n # pylint: enable=line-too-long\n bottleneck_tensor_name = 'pool_3/_reshape:0'\n bottleneck_tensor_size = 2048\n input_width = 299\n input_height = 299\n input_depth = 3\n resized_input_tensor_name = 'Mul:0'\n model_file_name = 'classify_image_graph_def.pb'\n input_mean = 128\n input_std = 128\n elif architecture.startswith('mobilenet_'):\n parts = architecture.split('_')\n if len(parts) != 3 and len(parts) != 4:\n tf.logging.error(\"Couldn't understand architecture name '%s'\",\n architecture)\n return None\n version_string = parts[1]\n if (version_string != '1.0' and version_string != '0.75' and\n version_string != '0.50' and version_string != '0.25'):\n tf.logging.error(\n \"\"\"\"The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',\n but found '%s' for architecture '%s'\"\"\",\n version_string, architecture)\n return None\n size_string = parts[2]\n if (size_string != '224' and size_string != '192' and\n size_string != '160' and size_string != '128'):\n tf.logging.error(\n \"\"\"The Mobilenet input size should be '224', '192', '160', or '128',\n but found '%s' for architecture '%s'\"\"\",\n size_string, architecture)\n return None\n if len(parts) == 3:\n is_quantized = False\n else:\n if parts[3] != 'quantized':\n tf.logging.error(\n \"Couldn't understand architecture suffix '%s' for '%s'\", parts[3],\n architecture)\n return None\n is_quantized = True\n data_url = 'http://download.tensorflow.org/models/mobilenet_v1_'\n data_url += version_string + '_' + size_string + '_frozen.tgz'\n bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'\n bottleneck_tensor_size = 1001\n input_width = int(size_string)\n input_height = int(size_string)\n input_depth = 3\n resized_input_tensor_name = 'input:0'\n if is_quantized:\n model_base_name = 'quantized_graph.pb'\n else:\n model_base_name = 'frozen_graph.pb'\n model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string\n model_file_name = os.path.join(model_dir_name, model_base_name)\n input_mean = 
127.5\n input_std = 127.5\n else:\n tf.logging.error(\"Couldn't understand architecture name '%s'\", architecture)\n raise ValueError('Unknown architecture', architecture)\n\n return {\n 'data_url': data_url,\n 'bottleneck_tensor_name': bottleneck_tensor_name,\n 'bottleneck_tensor_size': bottleneck_tensor_size,\n 'input_width': input_width,\n 'input_height': input_height,\n 'input_depth': input_depth,\n 'resized_input_tensor_name': resized_input_tensor_name,\n 'model_file_name': model_file_name,\n 'input_mean': input_mean,\n 'input_std': input_std,\n }\n\n\ndef add_jpeg_decoding(input_width, input_height, input_depth, input_mean,\n input_std):\n \"\"\"Adds operations that perform JPEG decoding and resizing to the graph.\n\n Args:\n input_width: Desired width of the image fed into the recognizer graph.\n input_height: Desired height of the image fed into the recognizer graph.\n input_depth: Desired channels of the image fed into the recognizer graph.\n input_mean: Pixel value that should be zero in the image for the graph.\n input_std: How much to divide the pixel values by before recognition.\n\n Returns:\n Tensors for the node to feed JPEG data into, and the output of the\n preprocessing steps.\n \"\"\"\n jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n offset_image = tf.subtract(resized_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n return jpeg_data, mul_image\n\n\ndef main(_):\n # Needed to make sure the logging output is visible.\n # See https://github.com/tensorflow/tensorflow/issues/3047\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Prepare necessary directories that can be used during training\n prepare_file_system()\n\n # Gather information about the model architecture we'll be using.\n model_info = create_model_info(FLAGS.architecture)\n if not model_info:\n tf.logging.error('Did not recognize architecture flag')\n return -1\n\n # Set up the pre-trained graph.\n maybe_download_and_extract(model_info['data_url'])\n graph, bottleneck_tensor, resized_image_tensor = (\n create_model_graph(model_info))\n\n # Look at the folder structure, and create lists of all the images.\n image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,\n FLAGS.validation_percentage)\n class_count = len(image_lists.keys())\n if class_count == 0:\n tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)\n return -1\n if class_count == 1:\n tf.logging.error('Only one valid folder of images found at ' +\n FLAGS.image_dir +\n ' - multiple classes are needed for classification.')\n return -1\n\n # See if the command-line flags mean we're applying any distortions.\n do_distort_images = should_distort_images(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness)\n\n with tf.Session(graph=graph) as sess:\n # Set up the image decoding sub-graph.\n jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(\n model_info['input_width'], model_info['input_height'],\n model_info['input_depth'], model_info['input_mean'],\n model_info['input_std'])\n\n if do_distort_images:\n # We will be applying 
distortions, so setup the operations we'll need.\n (distorted_jpeg_data_tensor,\n distorted_image_tensor) = add_input_distortions(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness, model_info['input_width'],\n model_info['input_height'], model_info['input_depth'],\n model_info['input_mean'], model_info['input_std'])\n else:\n # We'll make sure we've calculated the 'bottleneck' image summaries and\n # cached them on disk.\n cache_bottlenecks(sess, image_lists, FLAGS.image_dir,\n FLAGS.bottleneck_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor,\n bottleneck_tensor, FLAGS.architecture)\n\n # Add the new layer that we'll be training.\n (train_step, cross_entropy, bottleneck_input, ground_truth_input,\n final_tensor) = add_final_training_ops(\n len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor,\n model_info['bottleneck_tensor_size'])\n\n # Create the operations we need to evaluate the accuracy of our new layer.\n evaluation_step, prediction = add_evaluation_step(\n final_tensor, ground_truth_input)\n\n # Merge all the summaries and write them out to the summaries_dir\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n\n validation_writer = tf.summary.FileWriter(\n FLAGS.summaries_dir + '/validation')\n\n # Set up all our weights to their initial default values.\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Run the training for as many cycles as requested on the command line.\n for i in range(FLAGS.how_many_training_steps):\n # Get a batch of input bottleneck values, either calculated fresh every\n # time with distortions applied, or from the cache stored on disk.\n if do_distort_images:\n (train_bottlenecks,\n train_ground_truth) = get_random_distorted_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.image_dir, distorted_jpeg_data_tensor,\n distorted_image_tensor, resized_image_tensor, bottleneck_tensor)\n else:\n (train_bottlenecks,\n train_ground_truth, _) = get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.architecture)\n # Feed the bottlenecks and ground truth into the graph, and run a training\n # step. 
Capture training summaries for TensorBoard with the `merged` op.\n train_summary, _ = sess.run(\n [merged, train_step],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n train_writer.add_summary(train_summary, i)\n\n # Every so often, print out how well the graph is training.\n is_last_step = (i + 1 == FLAGS.how_many_training_steps)\n if (i % FLAGS.eval_step_interval) == 0 or is_last_step:\n train_accuracy, cross_entropy_value = sess.run(\n [evaluation_step, cross_entropy],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %\n (datetime.now(), i, train_accuracy * 100))\n tf.logging.info('%s: Step %d: Cross entropy = %f' %\n (datetime.now(), i, cross_entropy_value))\n validation_bottlenecks, validation_ground_truth, _ = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.validation_batch_size, 'validation',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.architecture))\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n validation_summary, validation_accuracy = sess.run(\n [merged, evaluation_step],\n feed_dict={bottleneck_input: validation_bottlenecks,\n ground_truth_input: validation_ground_truth})\n validation_writer.add_summary(validation_summary, i)\n tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %\n (datetime.now(), i, validation_accuracy * 100,\n len(validation_bottlenecks)))\n\n # Store intermediate results\n intermediate_frequency = FLAGS.intermediate_store_frequency\n\n if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)\n and i > 0):\n intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +\n 'intermediate_' + str(i) + '.pb')\n tf.logging.info('Save intermediate result to : ' +\n intermediate_file_name)\n save_graph_to_file(sess, graph, intermediate_file_name)\n\n # We've completed all our training, so run a final test evaluation on\n # some new images we haven't used before.\n test_bottlenecks, test_ground_truth, test_filenames = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.test_batch_size, 'testing',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.architecture))\n test_accuracy, predictions = sess.run(\n [evaluation_step, prediction],\n feed_dict={bottleneck_input: test_bottlenecks,\n ground_truth_input: test_ground_truth})\n tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %\n (test_accuracy * 100, len(test_bottlenecks)))\n\n if FLAGS.print_misclassified_test_images:\n tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')\n for i, test_filename in enumerate(test_filenames):\n if predictions[i] != test_ground_truth[i].argmax():\n tf.logging.info('%70s %s' %\n (test_filename,\n list(image_lists.keys())[predictions[i]]))\n\n # Write out the trained graph and labels with the weights stored as\n # constants.\n save_graph_to_file(sess, graph, FLAGS.output_graph)\n with gfile.FastGFile(FLAGS.output_labels, 'w') as f:\n f.write('\\n'.join(image_lists.keys()) + '\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--image_dir',\n type=str,\n default='',\n help='Path to folders of labeled images.'\n )\n parser.add_argument(\n '--output_graph',\n type=str,\n default='/tmp/output_graph.pb',\n help='Where to 
save the trained graph.'\n )\n parser.add_argument(\n '--intermediate_output_graphs_dir',\n type=str,\n default='/tmp/intermediate_graph/',\n help='Where to save the intermediate graphs.'\n )\n parser.add_argument(\n '--intermediate_store_frequency',\n type=int,\n default=0,\n help=\"\"\"\\\n How many steps to store intermediate graph. If \"0\" then will not\n store.\\\n \"\"\"\n )\n parser.add_argument(\n '--output_labels',\n type=str,\n default='/tmp/output_labels.txt',\n help='Where to save the trained graph\\'s labels.'\n )\n parser.add_argument(\n '--summaries_dir',\n type=str,\n default='/tmp/retrain_logs',\n help='Where to save summary logs for TensorBoard.'\n )\n parser.add_argument(\n '--how_many_training_steps',\n type=int,\n default=4000,\n help='How many training steps to run before ending.'\n )\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.01,\n help='How large a learning rate to use when training.'\n )\n parser.add_argument(\n '--testing_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a test set.'\n )\n parser.add_argument(\n '--validation_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a validation set.'\n )\n parser.add_argument(\n '--eval_step_interval',\n type=int,\n default=10,\n help='How often to evaluate the training results.'\n )\n parser.add_argument(\n '--train_batch_size',\n type=int,\n default=100,\n help='How many images to train on at a time.'\n )\n parser.add_argument(\n '--test_batch_size',\n type=int,\n default=-1,\n help=\"\"\"\\\n How many images to test on. This test set is only used once, to evaluate\n the final accuracy of the model after training completes.\n A value of -1 causes the entire test set to be used, which leads to more\n stable results across runs.\\\n \"\"\"\n )\n parser.add_argument(\n '--validation_batch_size',\n type=int,\n default=100,\n help=\"\"\"\\\n How many images to use in an evaluation batch. 
This validation set is\n used much more often than the test set, and is an early indicator of how\n accurate the model is during training.\n A value of -1 causes the entire validation set to be used, which leads to\n more stable results across training iterations, but may be slower on large\n training sets.\\\n \"\"\"\n )\n parser.add_argument(\n '--print_misclassified_test_images',\n default=False,\n help=\"\"\"\\\n Whether to print out a list of all misclassified test images.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--model_dir',\n type=str,\n default='/tmp/imagenet',\n help=\"\"\"\\\n Path to classify_image_graph_def.pb,\n imagenet_synset_to_human_label_map.txt, and\n imagenet_2012_challenge_label_map_proto.pbtxt.\\\n \"\"\"\n )\n parser.add_argument(\n '--bottleneck_dir',\n type=str,\n default='/tmp/bottleneck',\n help='Path to cache bottleneck layer values as files.'\n )\n parser.add_argument(\n '--final_tensor_name',\n type=str,\n default='final_result',\n help=\"\"\"\\\n The name of the output classification layer in the retrained graph.\\\n \"\"\"\n )\n parser.add_argument(\n '--flip_left_right',\n default=False,\n help=\"\"\"\\\n Whether to randomly flip half of the training images horizontally.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--random_crop',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much of a margin to randomly crop off the\n training images.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_scale',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly scale up the size of the\n training images by.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_brightness',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly multiply the training image\n input pixels up or down by.\\\n \"\"\"\n )\n parser.add_argument(\n '--architecture',\n type=str,\n default='inception_v3',\n help=\"\"\"\\\n Which model architecture to use. 'inception_v3' is the most accurate, but\n also the slowest. For faster or smaller models, choose a MobileNet with the\n form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,\n 'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224\n pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much\n less accurate, but smaller and faster network that's 920 KB on disk and\n takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\n for more information on Mobilenet.\\\n \"\"\")\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.reduce_max", "tensorflow.logging.set_verbosity", "tensorflow.logging.error", "tensorflow.matmul", "tensorflow.squeeze", "tensorflow.name_scope", "tensorflow.image.resize_bilinear", "tensorflow.Variable", "tensorflow.summary.FileWriter", "tensorflow.nn.softmax", "tensorflow.summary.histogram", "tensorflow.global_variables_initializer", "tensorflow.python.platform.gfile.Glob", "tensorflow.multiply", "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.Graph", "tensorflow.logging.warning", "tensorflow.image.decode_jpeg", "tensorflow.import_graph_def", "tensorflow.gfile.MakeDirs", "tensorflow.reduce_min", "tensorflow.constant", "tensorflow.GraphDef", "tensorflow.stack", "tensorflow.random_crop", "numpy.zeros", "tensorflow.subtract", "tensorflow.app.run", "tensorflow.logging.fatal", "tensorflow.image.random_flip_left_right", "tensorflow.expand_dims", "tensorflow.cast", "tensorflow.python.util.compat.as_bytes", "tensorflow.Session", "tensorflow.python.platform.gfile.FastGFile", "tensorflow.placeholder_with_default", "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.logging.info", "numpy.squeeze", "tensorflow.summary.merge_all", "tensorflow.python.platform.gfile.Walk", "tensorflow.truncated_normal", "tensorflow.reduce_mean", "tensorflow.python.platform.gfile.Exists", "tensorflow.train.GradientDescentOptimizer", "tensorflow.argmax", "tensorflow.nn.weighted_cross_entropy_with_logits", "tensorflow.square", "tensorflow.gfile.Exists", "tensorflow.gfile.DeleteRecursively" ] ]
iriszero/DepthAwareCNNplus
[ "5dcc0a9279d53a2826d76631f097959d52982f8b" ]
[ "models/Deeplab.py" ]
[ "import torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport torch\nfrom .base_model import BaseModel\nimport numpy as np\nfrom . import losses\nimport shutil\nfrom utils.util import *\nfrom torch.autograd import Variable\nfrom collections import OrderedDict\nfrom tensorboardX import SummaryWriter\nimport os\nfrom . import VGG_Deeplab\n\n\nclass Deeplab_VGG(nn.Module):\n def __init__(self, num_classes, depthconv=False):\n super(Deeplab_VGG,self).__init__()\n self.Scale = VGG_Deeplab.vgg16(num_classes=num_classes,depthconv=depthconv)\n\n def forward(self,x, depth=None):\n output = self.Scale(x,depth) # for original scale\n return output\n\n#------------------------------------------------------#\n\nclass Deeplab_Solver(BaseModel):\n def __init__(self, opt, dataset=None, encoder='VGG'):\n BaseModel.initialize(self, opt)\n self.encoder = encoder\n if encoder == 'VGG':\n self.model = Deeplab_VGG(self.opt.label_nc, self.opt.depthconv)\n\n if self.opt.isTrain:\n self.criterionSeg = torch.nn.CrossEntropyLoss(ignore_index=255).cuda()\n # self.criterionSeg = torch.nn.CrossEntropyLoss(ignore_index=255).cuda()\n # self.criterionSeg = nn.NLLLoss2d(ignore_index=255)#.cuda()\n\n if encoder == 'VGG':\n self.optimizer = torch.optim.SGD([{'params': self.model.Scale.get_1x_lr_params_NOscale(), 'lr': self.opt.lr},\n {'params': self.model.Scale.get_10x_lr_params(), 'lr': self.opt.lr},\n {'params': self.model.Scale.get_2x_lr_params_NOscale(), 'lr': self.opt.lr, 'weight_decay': 0.},\n {'params': self.model.Scale.get_20x_lr_params(), 'lr': self.opt.lr, 'weight_decay': 0.}\n ],\n lr=self.opt.lr, momentum=self.opt.momentum, weight_decay=self.opt.wd)\n\n # self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.opt.lr, momentum=self.opt.momentum, weight_decay=self.opt.wd)\n\n self.old_lr = self.opt.lr\n self.averageloss = []\n # copy scripts\n self.model_path = './models' #os.path.dirname(os.path.realpath(__file__))\n self.data_path = './data' #os.path.dirname(os.path.realpath(__file__))\n shutil.copyfile(os.path.join(self.model_path, 'Deeplab.py'), os.path.join(self.model_dir, 'Deeplab.py'))\n\n if encoder == 'VGG':\n shutil.copyfile(os.path.join(self.model_path, 'VGG_Deeplab.py'), os.path.join(self.model_dir, 'VGG_Deeplab.py'))\n shutil.copyfile(os.path.join(self.model_path, 'model_utils.py'), os.path.join(self.model_dir, 'model_utils.py'))\n shutil.copyfile(os.path.join(self.data_path, dataset.datafile), os.path.join(self.model_dir, dataset.datafile))\n shutil.copyfile(os.path.join(self.data_path, 'base_dataset.py'), os.path.join(self.model_dir, 'base_dataset.py'))\n\n self.writer = SummaryWriter(self.tensorborad_dir)\n self.counter = 0\n\n if not self.isTrain or self.opt.continue_train:\n if self.opt.pretrained_model!='':\n self.load_pretrained_network(self.model, self.opt.pretrained_model, self.opt.which_epoch, strict=False)\n print(\"Successfully loaded from pretrained model with given path!\")\n else:\n self.load()\n print(\"Successfully loaded model, continue training....!\")\n\n self.model.cuda()\n self.normweightgrad=0.\n # if len(opt.gpu_ids):#opt.isTrain and\n # self.model = torch.nn.DataParallel(self.model, device_ids=opt.gpu_ids)\n\n def forward(self, data, isTrain=True):\n self.model.zero_grad()\n\n self.image = Variable(data['image'], volatile=not isTrain).cuda()\n if 'depth' in data.keys():\n self.depth = Variable(data['depth'], volatile=not isTrain).cuda()\n else:\n self.depth = None\n if data['seg'] is not None:\n self.seggt = Variable(data['seg'], 
volatile=not isTrain).cuda()\n else:\n self.seggt = None\n\n input_size = self.image.size()\n self.segpred = self.model(self.image,self.depth)\n self.segpred = nn.functional.upsample(self.segpred, size=(input_size[2], input_size[3]), mode='bilinear')\n # self.segpred = nn.functional.log_softmax(nn.functional.upsample(self.segpred, size=(input_size[2], input_size[3]), mode='bilinear'))\n\n if self.opt.isTrain:\n self.loss = self.criterionSeg(self.segpred, torch.squeeze(self.seggt,1).long())\n self.averageloss += [self.loss.data[0]]\n# self.averageloss += [self.loss.item()]\n \n segpred = self.segpred.max(1, keepdim=True)[1]\n return self.seggt, segpred\n\n\n def backward(self, step, total_step):\n self.loss.backward()\n self.optimizer.step()\n # print self.model.Scale.classifier.fc6_2.weight.grad.mean().data.cpu().numpy()\n # self.normweightgrad +=self.model.Scale.classifier.norm.scale.grad.mean().data.cpu().numpy()\n # print self.normweightgrad#self.model.Scale.classifier.norm.scale.grad.mean().data.cpu().numpy()\n if step % self.opt.iterSize == 0:\n self.update_learning_rate(step, total_step)\n trainingavgloss = np.mean(self.averageloss)\n if self.opt.verbose:\n print (' Iter: %d, Loss: %f' % (step, trainingavgloss) )\n\n def get_visuals(self, step):\n ############## Display results and errors ############\n if self.opt.isTrain:\n self.trainingavgloss = np.mean(self.averageloss)\n if self.opt.verbose:\n print (' Iter: %d, Loss: %f' % (step, self.trainingavgloss) )\n self.writer.add_scalar(self.opt.name+'/trainingloss/', self.trainingavgloss, step)\n self.averageloss = []\n\n if self.depth is not None:\n return OrderedDict([('image', tensor2im(self.image.data[0], inputmode=self.opt.inputmode)),\n ('depth', tensor2im(self.depth.data[0], inputmode='divstd-mean')),\n ('segpred', tensor2label(self.segpred.data[0], self.opt.label_nc)),\n ('seggt', tensor2label(self.seggt.data[0], self.opt.label_nc))])\n else:\n return OrderedDict([('image', tensor2im(self.image.data[0], inputmode=self.opt.inputmode)),\n ('segpred', tensor2label(self.segpred.data[0], self.opt.label_nc)),\n ('seggt', tensor2label(self.seggt.data[0], self.opt.label_nc))])\n\n def update_tensorboard(self, data, step):\n if self.opt.isTrain:\n self.writer.add_scalar(self.opt.name+'/Accuracy/', data[0], step)\n self.writer.add_scalar(self.opt.name+'/Accuracy_Class/', data[1], step)\n self.writer.add_scalar(self.opt.name+'/Mean_IoU/', data[2], step)\n self.writer.add_scalar(self.opt.name+'/FWAV_Accuracy/', data[3], step)\n\n self.trainingavgloss = np.mean(self.averageloss)\n self.writer.add_scalars(self.opt.name+'/loss', {\"train\": self.trainingavgloss,\n \"val\": np.mean(self.averageloss)}, step)\n\n self.writer.add_scalars('trainingavgloss/', {self.opt.name: self.trainingavgloss}, step)\n self.writer.add_scalars('valloss/', {self.opt.name: np.mean(self.averageloss)}, step)\n self.writer.add_scalars('val_MeanIoU/', {self.opt.name: data[2]}, step)\n\n file_name = os.path.join(self.save_dir, 'MIoU.txt')\n with open(file_name, 'wt') as opt_file:\n opt_file.write('%f\\n' % (data[2]))\n # self.writer.add_scalars('losses/'+self.opt.name, {\"train\": self.trainingavgloss,\n # \"val\": np.mean(self.averageloss)}, step)\n self.averageloss = []\n\n def save(self, which_epoch):\n # self.save_network(self.netG, 'G', which_epoch, self.gpu_ids)\n self.save_network(self.model, 'net', which_epoch, self.gpu_ids)\n\n def load(self):\n self.load_network(self.model, 'net',self.opt.which_epoch)\n\n def update_learning_rate(self, step, total_step):\n\n 
lr = max(self.opt.lr * ((1 - float(step) / total_step) ** (self.opt.lr_power)), 1e-6)\n\n # drop_ratio = (1. * float(total_step - step) / (total_step - step + 1)) ** self.opt.lr_power\n # lr = self.old_lr * drop_ratio\n\n self.writer.add_scalar(self.opt.name+'/Learning_Rate/', lr, step)\n \n self.optimizer.param_groups[0]['lr'] = lr\n self.optimizer.param_groups[1]['lr'] = lr\n self.optimizer.param_groups[2]['lr'] = lr\n self.optimizer.param_groups[3]['lr'] = lr\n\t# self.optimizer.param_groups[0]['lr'] = lr\n\t# self.optimizer.param_groups[1]['lr'] = lr*10\n\t# self.optimizer.param_groups[2]['lr'] = lr*2 #* 100\n\t# self.optimizer.param_groups[3]['lr'] = lr*20\n\t# self.optimizer.param_groups[4]['lr'] = lr*100\n\n\n # torch.nn.utils.clip_grad_norm(self.model.Scale.get_1x_lr_params_NOscale(), 1.)\n # torch.nn.utils.clip_grad_norm(self.model.Scale.get_10x_lr_params(), 1.)\n if self.opt.verbose:\n print(' update learning rate: %f -> %f' % (self.old_lr, lr))\n\n self.old_lr = lr\n" ]
[ [ "torch.autograd.Variable", "torch.nn.functional.upsample", "torch.nn.CrossEntropyLoss", "torch.squeeze", "numpy.mean" ] ]
laipaang/Paddle
[ "0ec3a42e9740a5f5066053bb49a923d538eba24a" ]
[ "python/paddle/incubate/hapi/tests/test_loss.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\nimport os\nimport six\nimport numpy as np\nimport shutil\nimport copy\n\nimport paddle\nfrom paddle import fluid\n\nfrom paddle.incubate.hapi.model import Model, Input\nfrom paddle.incubate.hapi.loss import CrossEntropy, SoftmaxWithCrossEntropy\n\n\ndef stable_softmax(x):\n \"\"\"Compute the softmax of vector x in a numerically stable way.\"\"\"\n # clip to shiftx, otherwise, when calc loss with\n # log(exp(shiftx)), may get log(0)=INF\n shiftx = (x - np.max(x)).clip(-64.)\n exps = np.exp(shiftx)\n return exps / np.sum(exps)\n\n\ndef randomize_probability(batch_size, class_num, dtype='float32'):\n prob = np.random.uniform(\n 0.1, 1.0, size=(batch_size, class_num)).astype(dtype)\n prob_sum = prob.sum(axis=1)\n for i in six.moves.xrange(len(prob)):\n prob[i] /= prob_sum[i]\n return prob\n\n\ndef numpy_ce(x, label):\n return np.asmatrix(\n [[-np.log(x[i][label[i][0]])] for i in range(x.shape[0])],\n dtype=\"float32\").mean()\n\n\nclass TestLoss(unittest.TestCase):\n def test_cross_entropy(self):\n class_num = 100\n batch_size = 128\n inputs = [randomize_probability(128, class_num) for _ in range(2)]\n\n labels = [\n np.random.randint(\n 0, class_num, (batch_size, 1), dtype=\"int64\") for _ in range(2)\n ]\n\n gt_out = [numpy_ce(inputs[i], labels[i]) for i in range(2)]\n\n fluid.enable_dygraph()\n cross_entropy = CrossEntropy()\n out = cross_entropy(\n [fluid.dygraph.to_variable(x) for x in inputs],\n [fluid.dygraph.to_variable(label) for label in labels])\n out = [o.numpy() for o in out]\n\n for o, g in zip(out, gt_out):\n np.testing.assert_allclose(o, g, atol=1e-5)\n\n def test_soft_cross_entronpy(self):\n class_num = 100\n batch_size = 128\n\n inputs = [randomize_probability(128, class_num) for _ in range(2)]\n\n labels = [\n np.random.randint(\n 0, class_num, (batch_size, 1), dtype=\"int64\") for _ in range(2)\n ]\n\n fluid.enable_dygraph()\n softmax_cross_entropy = SoftmaxWithCrossEntropy()\n\n softmax_cross_entropy(\n [fluid.dygraph.to_variable(x) for x in inputs],\n [fluid.dygraph.to_variable(label) for label in labels])\n\n softmax_cross_entropy = SoftmaxWithCrossEntropy(average=False)\n\n inputs = [randomize_probability(128, class_num)]\n\n labels = [\n np.random.randint(\n 0, class_num, (batch_size, 1), dtype=\"int64\")\n ]\n\n softmax_cross_entropy([fluid.dygraph.to_variable(x) for x in inputs],\n fluid.dygraph.to_variable(labels[0]))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.random.uniform", "numpy.sum", "numpy.exp", "numpy.max", "numpy.log", "numpy.testing.assert_allclose", "numpy.random.randint" ] ]
YexuZhou/TimeSeriesClassification_Transformer
[ "c20e00cfac4cfdb849e57e14c184f7d424257409" ]
[ "models/embedding.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport seaborn as sns\nimport matplotlib.pylab as plt\nimport numpy as np\n\n# TODO 所有循环结构应该呈现灵活性,每一层都不能一样!\nactivation_dict = {\"relu\" : nn.ReLU,\n \"leakyrelu\" : nn.LeakyReLU,\n \"prelu\" : nn.PReLU,\n \"rrelu\" : nn.RReLU,\n \"elu\" : nn.ELU,\n \"gelu\" : nn.GELU,\n \"hardswish\" : nn.Hardswish,\n \"mish\" : nn.Mish}\nNorm_dict = {\"layer\" : nn.LayerNorm,\n \"batch\" : nn.BatchNorm1d}\n\n\nclass DW_PW_projection(nn.Module):\n def __init__(self, c_in, c_out, kernel_size, stride=1, bias = False, padding_mode = \"replicate\"):\n super(DW_PW_projection, self).__init__()\n\n self.dw_conv1d = nn.Conv1d(in_channels = c_in,\n out_channels = c_in,\n kernel_size = kernel_size,\n padding = int(kernel_size/2),\n groups = c_in,\n stride = stride,\n bias = bias, \n padding_mode = padding_mode)\n\n self.pw_conv1d = nn.Conv1d(in_channels = c_in,\n out_channels = c_out,\n kernel_size = 1,\n padding = 0,\n groups = 1,\n bias = bias, \n padding_mode = padding_mode)\n def forward(self, x):\n\n\n x = self.dw_conv1d(x)\n x = self.pw_conv1d(x)\n\n return x\n\nclass Forward_block(nn.Module):\n def __init__(self,\n c_in,\n c_out,\n kernel_size,\n stride = 1, \n conv_bias = False,\n activation = \"relu\",\n norm_type = \"batch\",\n max_pool = False,\n pooling_kernel_size = 3, \n pooling_stride = 2,\n pooling_padding = 1,\n padding_mode = 'replicate',\n light_weight = False):\n \"\"\"\n embedding的block 由 conv --> norm --> activation --> maxpooling组成\n \"\"\"\n super(Forward_block, self).__init__() \n if light_weight:\n self.conv = DW_PW_projection(c_in = c_in, \n c_out = c_out,\n kernel_size = kernel_size,\n stride = stride,\n bias = conv_bias, \n padding_mode = padding_mode)\n else:\n self.conv = nn.Conv1d(in_channels = c_in, \n out_channels = c_out,\n kernel_size = kernel_size,\n padding = int(kernel_size/2),\n stride = stride,\n bias = conv_bias,\n padding_mode = padding_mode)\n self.norm_type = norm_type\n self.norm = Norm_dict[norm_type](c_out)\n self.activation = activation_dict[activation]()\n self.max_pool = max_pool\n if max_pool:\n self.maxpooling = nn.MaxPool1d(kernel_size = pooling_kernel_size,\n stride = pooling_stride,\n padding = pooling_padding)\n def forward(self, x):\n\n x = self.conv(x.permute(0, 2, 1)).permute(0, 2, 1)\n\n if self.norm_type == \"layer\":\n x = self.activation(self.norm(x))\n else :\n x = self.activation(self.norm(x.permute(0, 2, 1)).permute(0, 2, 1))\n\n if self.max_pool:\n x = self.maxpooling(x.permute(0, 2, 1)).permute(0, 2, 1)\n return x\n\n\nclass Freq_Forward_block(nn.Module):\n def __init__(self, \n c_in, \n c_out, # 主要是把channel的dim压平\n kernel_size, \n stride=1, \n bias = False, \n padding_mode = \"replicate\"):\n \n super(Freq_Forward_block, self).__init__()\n \n # depthwise\n self.dw_conv = nn.Conv2d(in_channels = c_in,\n out_channels = c_in,\n kernel_size = [kernel_size,kernel_size],\n padding = [int(kernel_size/2),int(kernel_size/2)],\n groups = c_in,\n stride = [1,stride], #缩短长度\n bias = bias, \n padding_mode = padding_mode)\n self.batch_norm_1 = nn.BatchNorm2d(c_in)\n self.act_1 = nn.ReLU()\n # pointwise\n self.pw_conv = nn.Conv2d(in_channels = c_in,\n out_channels = c_out, # 压平\n kernel_size = 1,\n padding = 0,\n stride = 1,\n bias = bias, \n padding_mode = padding_mode)\n self.batch_norm_2 = nn.BatchNorm2d(c_out)\n self.act_2 = nn.ReLU()\n \n def forward(self, x):\n\n x = self.dw_conv(x)\n x = self.batch_norm_1(x)\n x = self.act_1(x)\n\n x = self.pw_conv(x)\n x = 
self.batch_norm_2(x)\n x = self.act_2(x)\n\n return x\n\n\nclass TokenEmbedding(nn.Module):\n def __init__(self,\n c_in, \n token_d_model,\n kernel_size = 3, \n stride = 1, \n conv_bias = False,\n activation = \"relu\",\n norm_type = \"batch\",\n n_conv_layers = 1,\n in_planes = None,\n max_pool = False,\n pooling_kernel_size = 3, \n pooling_stride = 2,\n pooling_padding = 1,\n padding_mode = 'replicate',\n light_weight = False):\n \"\"\"\n c_in : 模型输入的维度\n token_d_model : embedding的维度 TODO看看后面是需要被相加还是被cat\n kernel_size : 每一层conv的kernel大小\n \n \"\"\"\n super(TokenEmbedding, self).__init__()\n in_planes = in_planes or int(token_d_model/2)\n n_filter_list = [c_in] + [in_planes for _ in range(n_conv_layers - 1)] + [token_d_model]\n padding = int(kernel_size/2)\n\n\n self.conv_layers = []\n for i in range(n_conv_layers):\n self.conv_layers.append(Forward_block(c_in = n_filter_list[i],\n c_out = n_filter_list[i + 1], \n kernel_size = kernel_size,\n stride = stride, \n conv_bias = conv_bias,\n activation = activation,\n norm_type = norm_type,\n max_pool = max_pool,\n pooling_kernel_size = pooling_kernel_size, \n pooling_stride = pooling_stride,\n pooling_padding = pooling_padding,\n padding_mode = padding_mode,\n light_weight = light_weight))\n\n self.conv_layers = nn.ModuleList(self.conv_layers)\n\n #for m in self.modules():\n # if isinstance(m, nn.Conv1d):\n # nn.init.kaiming_normal_(m.weight)\n\n\n\n def forward(self, x):\n\n\n for layer in self.conv_layers:\n x = layer(x)\n return x\n\n def sequence_length(self, length=100, n_channels=3):\n return self.forward(torch.zeros((1, length,n_channels))).shape[1]\n\n\n\nclass Freq_TokenEmbedding(nn.Module):\n def __init__(self,\n c_in, \n token_d_model,\n kernel_size = 3, \n stride = 1, #横向方向缩短距离\n conv_bias = False,\n n_conv_layers = 1,\n f_max = 100,\n padding_mode = 'replicate',\n light_weight = False):\n \"\"\"\n c_in : 模型输入的维度\n token_d_model : embedding的维度 TODO看看后面是需要被相加还是被cat\n kernel_size : 每一层conv的kernel大小\n \n \"\"\"\n super(Freq_TokenEmbedding, self).__init__()\n\n n_filter_list = [c_in] + [max(1,int(c_in/2**(i+1))) for i in range(n_conv_layers - 1)] + [1]\n print(n_filter_list)\n self.conv_layers = []\n for i in range(n_conv_layers):\n self.conv_layers.append(Freq_Forward_block(c_in = n_filter_list[i], \n c_out = n_filter_list[i + 1], # 主要是把channel的dim压平\n kernel_size = kernel_size, \n stride = stride, \n bias = conv_bias,\n padding_mode = padding_mode))\n\n self.conv_layers = nn.ModuleList(self.conv_layers)\n\n self.conv = nn.Conv1d(in_channels = self.channel(c_in = c_in, freq = int(f_max/2), length=100), \n out_channels = token_d_model,\n kernel_size = kernel_size,\n padding = int(kernel_size/2),\n stride = 1,\n bias = conv_bias,\n padding_mode = padding_mode)\n self.norm = nn.LayerNorm(token_d_model)\n self.activation = nn.ReLU()\n def forward(self, x):\n\n\n for layer in self.conv_layers:\n x = layer(x)\n\n x = torch.squeeze(x, 1)\n\n x = self.conv(x) # B C L\n x = self.activation(self.norm(x.permute(0, 2, 1)))\n\n return x\n \n def sequence_length(self, c_in = 100, freq = 50, length=100):\n x = torch.rand(1,c_in,freq,length).float()\n for layer in self.conv_layers:\n x = layer(x)\n return x.shape[3]\n\n def channel(self, c_in = 100, freq = 50, length=100):\n x = torch.rand(1,c_in,freq,length).float()\n for layer in self.conv_layers:\n x = layer(x)\n print(\"channel ,\", x.shape[2])\n return x.shape[2]\n\nclass PositionalEmbedding(nn.Module):\n \"\"\"\n input shape should be (batch, seq_length, feature_channel)\n \n \"\"\"\n def 
__init__(self, pos_d_model, max_len=5000):\n super(PositionalEmbedding, self).__init__()\n # Compute the positional encodings once in log space.\n \n \n pe = torch.zeros(max_len, pos_d_model).float()\n pe.require_grad = False\n\n position = torch.arange(0, max_len).float().unsqueeze(1)\n div_term = (torch.arange(0, pos_d_model, 2).float() * -(math.log(10000.0) / pos_d_model)).exp()\n\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n\n pe = pe.unsqueeze(0)# [1, max_len, pos_d_model]\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n return self.pe[:, :x.size(1)] # select the the length same as input\n\n\n def vis_pos_heat(self, length):\n heat = self.pe[:, :length]\n plt.figure(figsize=(15,5))\n sns.heatmap(heat.detach().numpy()[0], linewidth=0)\n plt.ylabel(\"length\")\n plt.xlabel(\"embedding\")\n\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool1d", "torch.cos", "matplotlib.pylab.ylabel", "torch.rand", "matplotlib.pylab.figure", "torch.nn.Conv1d", "torch.sin", "torch.nn.LayerNorm", "torch.nn.ModuleList", "torch.nn.Conv2d", "matplotlib.pylab.xlabel", "torch.arange", "torch.zeros", "torch.nn.ReLU", "torch.squeeze" ] ]
techshot25/gpytorch
[ "092d523027a844939ba85d7ea8c8c7b7511843d5" ]
[ "test/kernels/test_rbf_kernel_grad.py" ]
[ "#!/usr/bin/env python3\n\nimport torch\nimport unittest\nfrom gpytorch.kernels import RBFKernelGrad\nfrom gpytorch.test.base_kernel_test_case import BaseKernelTestCase\n\n\nclass TestRBFKernelGrad(unittest.TestCase, BaseKernelTestCase):\n def create_kernel_no_ard(self, **kwargs):\n return RBFKernelGrad(**kwargs)\n\n def create_kernel_ard(self, num_dims, **kwargs):\n return RBFKernelGrad(ard_num_dims=num_dims, **kwargs)\n\n def test_kernel(self, cuda=False):\n a = torch.tensor([[[1, 2], [2, 4]]], dtype=torch.float)\n b = torch.tensor([[[1, 3], [0, 4]]], dtype=torch.float)\n\n actual = torch.tensor(\n [\n [0.35321, 0, -0.73517, 0.0054977, 0.011443, -0.022886],\n [0, 0.73517, 0, -0.011443, -0.012374, 0.047633],\n [0.73517, 0, -0.79499, 0.022886, 0.047633, -0.083824],\n [0.12476, 0.25967, 0.25967, 0.015565, 0.064793, 0],\n [-0.25967, -0.2808, -0.54047, -0.064793, -0.23732, 0],\n [-0.25967, -0.54047, -0.2808, 0, 0, 0.032396],\n ]\n )\n\n kernel = RBFKernelGrad()\n\n if cuda:\n a = a.cuda()\n b = b.cuda()\n actual = actual.cuda()\n kernel = kernel.cuda()\n\n res = kernel(a, b).evaluate()\n\n self.assertLess(torch.norm(res - actual), 1e-5)\n\n def test_kernel_cuda(self):\n if torch.cuda.is_available():\n self.test_kernel(cuda=True)\n\n def test_kernel_batch(self):\n a = torch.tensor([[[1, 2, 3], [2, 4, 0]], [[-1, 1, 2], [2, 1, 4]]], dtype=torch.float)\n b = torch.tensor([[[1, 3, 1]], [[2, -1, 0]]], dtype=torch.float).repeat(1, 2, 1)\n\n kernel = RBFKernelGrad()\n res = kernel(a, b).evaluate()\n\n # Compute each batch separately\n actual = torch.zeros(2, 8, 8)\n actual[0, :, :] = kernel(a[0, :, :].squeeze(), b[0, :, :].squeeze()).evaluate()\n actual[1, :, :] = kernel(a[1, :, :].squeeze(), b[1, :, :].squeeze()).evaluate()\n\n self.assertLess(torch.norm(res - actual), 1e-5)\n\n def test_initialize_lengthscale(self):\n kernel = RBFKernelGrad()\n kernel.initialize(lengthscale=3.14)\n actual_value = torch.tensor(3.14).view_as(kernel.lengthscale)\n self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)\n\n def test_initialize_lengthscale_batch(self):\n kernel = RBFKernelGrad(batch_shape=torch.Size([2]))\n ls_init = torch.tensor([3.14, 4.13])\n kernel.initialize(lengthscale=ls_init)\n actual_value = ls_init.view_as(kernel.lengthscale)\n self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.Size", "torch.tensor", "torch.norm", "torch.cuda.is_available", "torch.zeros" ] ]
JamesFengi/handPose_Eric
[ "3e329181930ebc7ef0fed2abb9a9d092a8541f9c" ]
[ "lib/wyw2s_lib/make_facebank_tools/make_facebank.py" ]
[ "# make facebank\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport torch\nfrom model import Backbone\nimport argparse\nfrom pathlib import Path\nfrom torchvision import transforms as trans\nfrom PIL import Image\nimport numpy as np\ndef prepare_facebank(path_images,facebank_path, model, mtcnn, device , tta = True):\n #\n test_transform_ = trans.Compose([\n trans.ToTensor(),\n trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n #\n model.eval()\n embeddings = []\n names = ['Unknown']\n idx = 0\n for path in path_images.iterdir():\n if path.is_file():\n continue\n else:\n idx += 1\n print(\"idx {} : {}\".format(idx,path))\n embs = []\n for file in path.iterdir():\n # print(file)\n if not file.is_file():\n continue\n else:\n\n try:\n # print(\"---------------------------\")\n img = Image.open(file)\n print(\" {}) {}\".format(idx,file))\n except:\n continue\n\n with torch.no_grad():\n if tta:\n mirror = trans.functional.hflip(img)\n emb = model(test_transform_(img).to(device).unsqueeze(0))\n emb_mirror = model(test_transform_(mirror).to(device).unsqueeze(0))\n embs.append(l2_norm(emb + emb_mirror))\n else:\n embs.append(model(test_transform_(img).to(device).unsqueeze(0)))\n if len(embs) == 0:\n continue\n embedding = torch.cat(embs).mean(0,keepdim=True)\n embeddings.append(embedding)\n names.append(path.name)\n embeddings = torch.cat(embeddings)\n names = np.array(names)\n torch.save(embeddings, facebank_path+'/facebank.pth')\n np.save(facebank_path + '/names', names)\n return embeddings, names\n\nif __name__ == '__main__':\n # 需要制作人脸库对应的 图片地址\n path_images = \"./images/\"\n\n\n # 定义模型\n device_ = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model_ = Backbone(50, 1., \"ir_se\").to(device_)\n # 加载模型\n if os.access(\"./model_ir_se50.pth\",os.F_OK):\n model_.load_state_dict(torch.load(\"./model_ir_se50.pth\"))\n\n model_.eval()\n facebank_path = \"./facebank/\" # 人脸库对应地址\n targets, names = prepare_facebank(Path(path_images), facebank_path,model_, \"\" ,device_, tta = False) # 构建 人脸 底库\n" ]
[ [ "numpy.save", "torch.load", "torch.save", "torch.no_grad", "torch.cuda.is_available", "numpy.array", "torch.cat" ] ]
firmai/universal-portfolios
[ "b1d99d6dbcf553582d399cf3851ac4ba35a93d3e" ]
[ "universal/algo.py" ]
[ "import sys\nimport numpy as np\nimport pandas as pd\nimport itertools\nimport logging\nimport inspect\nimport copy\nfrom .result import AlgoResult, ListResult\nfrom scipy.misc import comb\nfrom . import tools\n\n\nclass Algo(object):\n \"\"\" Base class for algorithm calculating weights for online portfolio.\n You have to subclass either step method to calculate weights sequentially\n or weights method, which does it at once. weights method might be useful\n for better performance when using matrix calculation, but be careful about\n look-ahead bias.\n\n Upper case letters stand for matrix and lower case for vectors (such as\n B and b for weights).\n \"\"\"\n\n # if true, replace missing values by last values\n REPLACE_MISSING = False\n\n # type of prices going into weights or step function\n # ratio: pt / pt-1\n # log: log(pt / pt-1)\n # raw: pt\n PRICE_TYPE = 'ratio'\n\n def __init__(self, min_history=None, frequency=1):\n \"\"\" Subclass to define algo specific parameters here.\n :param min_history: If not None, use initial weights for first min_window days. Use\n this if the algo needs some history for proper parameter estimation.\n :param frequency: algorithm should trade every `frequency` periods\n \"\"\"\n self.min_history = min_history or 0\n self.frequency = frequency\n\n def init_weights(self, m):\n \"\"\" Set initial weights.\n :param m: Number of assets.\n \"\"\"\n return np.zeros(m)\n\n def init_step(self, X):\n \"\"\" Called before step method. Use to initialize persistent variables.\n :param X: Entire stock returns history.\n \"\"\"\n pass\n\n def step(self, x, last_b, history):\n \"\"\" Calculate new portfolio weights. If history parameter is omited, step\n method gets passed just parameters `x` and `last_b`. This significantly\n increases performance.\n :param x: Last returns.\n :param last_b: Last weights.\n :param history: All returns up to now. You can omit this parameter to increase\n performance.\n \"\"\"\n raise NotImplementedError('Subclass must implement this!')\n\n def _use_history_step(self):\n \"\"\" Use history parameter in step method? \"\"\"\n step_args = inspect.getargspec(self.step)[0]\n return len(step_args) >= 4\n\n def weights(self, X, min_history=None, log_progress=True):\n \"\"\" Return weights. Call step method to update portfolio sequentially. Subclass\n this method only at your own risk. \"\"\"\n min_history = self.min_history if min_history is None else min_history\n\n # init\n B = X.copy() * 0.\n last_b = self.init_weights(X.shape[1])\n if isinstance(last_b, np.ndarray):\n last_b = pd.Series(last_b, X.columns)\n\n # use history in step method?\n use_history = self._use_history_step()\n\n # run algo\n self.init_step(X)\n for t, (_, x) in enumerate(X.iterrows()):\n # save weights\n B.ix[t] = last_b\n\n # keep initial weights for min_history\n if t < min_history:\n continue\n\n # trade each `frequency` periods\n if (t + 1) % self.frequency != 0:\n continue\n\n # predict for t+1\n if use_history:\n history = X.iloc[:t+1]\n last_b = self.step(x, last_b, history)\n else:\n last_b = self.step(x, last_b)\n\n # convert last_b to suitable format if needed\n if type(last_b) == np.matrix:\n # remove dimension\n last_b = np.squeeze(np.array(last_b))\n\n # show progress by 10 pcts\n if log_progress:\n tools.log_progress(t, len(X), by=10)\n\n return B\n\n def _split_index(self, ix, nr_chunks, freq):\n \"\"\" Split index into chunks so that each chunk except of the last has length\n divisible by freq. 
\"\"\"\n chunksize = int(len(ix) / freq / nr_chunks + 1) * freq\n return [ix[i*chunksize:(i+1)*chunksize] for i in range(len(ix) / chunksize + 1)]\n\n def run(self, S, n_jobs=1, log_progress=True):\n \"\"\" Run algorithm and get weights.\n :params S: Absolute stock prices. DataFrame with stocks in columns.\n :param show_progress: Log computation progress. Works only for algos with\n defined step method.\n :param n_jobs: run step method in parallel (step method can't depend on last weights)\n \"\"\"\n if log_progress:\n logging.debug('Running {}...'.format(self.__class__.__name__))\n\n if isinstance(S, ListResult):\n P = S.to_dataframe()\n else:\n P = S\n\n # convert prices to proper format\n X = self._convert_prices(P, self.PRICE_TYPE, self.REPLACE_MISSING)\n\n # get weights\n if n_jobs == 1:\n try:\n B = self.weights(X, log_progress=log_progress)\n except TypeError: # weights are missing log_progress parameter\n B = self.weights(X)\n else:\n with tools.mp_pool(n_jobs) as pool:\n ix_blocks = self._split_index(X.index, pool._processes * 2, self.frequency)\n min_histories = np.maximum(np.cumsum([0] + map(len, ix_blocks[:-1])) - 1, self.min_history)\n\n B_blocks = pool.map(_parallel_weights, [(self, X.ix[:ix_block[-1]], min_history, log_progress)\n for ix_block, min_history in zip(ix_blocks, min_histories)])\n\n # join weights to one dataframe\n B = pd.concat([B_blocks[i].ix[ix] for i, ix in enumerate(ix_blocks)])\n\n # cast to dataframe if weights return numpy array\n if not isinstance(B, pd.DataFrame):\n B = pd.DataFrame(B, index=P.index, columns=P.columns)\n\n if log_progress:\n logging.debug('{} finished successfully.'.format(self.__class__.__name__))\n\n # if we are aggregating strategies, combine weights from strategies\n # and use original assets\n if isinstance(S, ListResult):\n B = sum(result.B.mul(B[col], axis=0) for result, col in zip(S, B.columns))\n return AlgoResult(S[0].X, B)\n else:\n return AlgoResult(self._convert_prices(S, 'ratio'), B)\n\n def next_weights(self, S, last_b, **kwargs):\n \"\"\" Calculate weights for next day. \"\"\"\n # use history in step method?\n use_history = self._use_history_step()\n history = self._convert_prices(S, self.PRICE_TYPE, self.REPLACE_MISSING)\n x = history.iloc[-1]\n\n if use_history:\n b = self.step(x, last_b, history, **kwargs)\n else:\n b = self.step(x, last_b, **kwargs)\n return pd.Series(b, index=S.columns)\n\n def run_subsets(self, S, r, generator=False):\n \"\"\" Run algorithm on all stock subsets of length r. 
Note that number of such tests can be\n very large.\n :param S: stock prices\n :param r: number of stocks in a subset\n :param generator: yield results\n \"\"\"\n def subset_generator():\n total_subsets = comb(S.shape[1], r)\n\n for i, S_sub in enumerate(tools.combinations(S, r)):\n # run algorithm on given subset\n result = self.run(S_sub, log_progress=False)\n name = ', '.join(S_sub.columns.astype(str))\n\n # log progress by 1 pcts\n tools.log_progress(i, total_subsets, by=1)\n\n yield result, name\n\n if generator:\n return subset_generator()\n else:\n results = []\n names = []\n for result, name in subset_generator():\n results.append(result)\n names.append(name)\n return ListResult(results, names)\n\n @classmethod\n def _convert_prices(cls, S, method, replace_missing=False):\n \"\"\" Convert prices to format suitable for weight or step function.\n Available price types are:\n ratio: pt / pt_1\n log: log(pt / pt_1)\n raw: pt (normalized to start with 1)\n \"\"\"\n if method == 'raw':\n # normalize prices so that they start with 1.\n r = {}\n for name, s in S.items():\n init_val = s.loc[s.first_valid_index()]\n r[name] = s / init_val\n X = pd.DataFrame(r)\n\n if replace_missing:\n X.iloc[0] = 1.\n X = X.fillna(method='ffill')\n\n return X\n\n elif method == 'absolute':\n return S\n\n elif method in ('ratio', 'log'):\n # be careful about NaN values\n X = S / S.shift(1).fillna(method='ffill')\n for name, s in X.items():\n X[name].iloc[s.index.get_loc(s.first_valid_index()) - 1] = 1.\n\n if replace_missing:\n X = X.fillna(1.)\n\n return np.log(X) if method == 'log' else X\n\n else:\n raise ValueError('invalid price conversion method')\n\n @classmethod\n def run_combination(cls, S, **kwargs):\n \"\"\" Get equity of algo using all combinations of parameters. All\n values in lists specified in kwargs will be optimized. Other types\n will be passed as they are to algo __init__ (like numbers, strings,\n tuples).\n Return ListResult object, which is basically a wrapper of list of AlgoResult objects.\n It is possible to pass ListResult to Algo or run_combination again\n to get AlgoResult. This is useful for chaining of Algos.\n\n Example:\n S = ...load data...\n list_results = Anticor.run_combination(S, alpha=[0.01, 0.1, 1.])\n result = CRP().run(list_results)\n\n :param S: Stock prices.\n :param kwargs: Additional arguments to algo.\n :param n_jobs: Use multiprocessing (-1 = use all cores). 
Use all cores by default.\n \"\"\"\n if isinstance(S, ListResult):\n S = S.to_dataframe()\n\n n_jobs = kwargs.pop('n_jobs', -1)\n\n # extract simple parameters (collect keys first to avoid mutating kwargs mid-iteration)\n simple_keys = [k for k, v in kwargs.items() if not isinstance(v, list)]\n simple_params = {k: kwargs.pop(k) for k in simple_keys}\n\n # iterate over all combinations\n names = []\n params_to_try = []\n for seq in itertools.product(*kwargs.values()):\n params = dict(zip(kwargs.keys(), seq))\n\n # run algo\n all_params = dict(params, **simple_params)\n params_to_try.append(all_params)\n\n # create name with format param:value\n name = ','.join([str(k) + '=' + str(v) for k, v in params.items()])\n names.append(name)\n\n # try all combinations in parallel\n with tools.mp_pool(n_jobs) as pool:\n results = pool.map(_run_algo_params, [(S, cls, all_params) for all_params in params_to_try])\n\n return ListResult(results, names)\n\n def copy(self):\n return copy.deepcopy(self)\n\n\ndef _parallel_weights(tuple_args):\n self, X, min_history, log_progress = tuple_args\n try:\n return self.weights(X, min_history=min_history, log_progress=log_progress)\n except TypeError: # weights are missing log_progress parameter\n return self.weights(X, min_history=min_history)\n\n\ndef _run_algo_params(tuple_args):\n S, cls, params = tuple_args\n logging.debug('Run combination of parameters: {}'.format(params))\n return cls(**params).run(S)\n" ]
[ [ "pandas.Series", "numpy.zeros", "scipy.misc.comb", "pandas.DataFrame", "numpy.log", "numpy.array" ] ]
Pandinosaurus/RandPerson
[ "1c6e935d64d8210ee4cddbf803da054016090675" ]
[ "trainCode/Source/reid/models/resmap.py" ]
[ "from __future__ import absolute_import\n\nfrom torch import nn\nimport torchvision\n\nfea_dims_small = {'layer2': 128, 'layer3': 256, 'layer4': 512}\nfea_dims = {'layer2': 512, 'layer3': 1024, 'layer4': 2048}\n\n\nclass ResNet(nn.Module):\n __factory = {\n 18: torchvision.models.resnet18,\n 34: torchvision.models.resnet34,\n 50: torchvision.models.resnet50,\n 101: torchvision.models.resnet101,\n 152: torchvision.models.resnet152,\n }\n\n def __init__(self, depth, final_layer='layer3', neck=128, pretrained=True):\n super(ResNet, self).__init__()\n\n self.depth = depth\n self.final_layer = final_layer\n self.neck = neck\n self.pretrained = pretrained\n\n # Construct base (pretrained) resnet\n if depth not in ResNet.__factory:\n raise KeyError(\"Unsupported depth:\", depth)\n self.base = ResNet.__factory[depth](pretrained=pretrained)\n\n if depth < 50:\n out_planes = fea_dims_small[final_layer]\n else:\n out_planes = fea_dims[final_layer]\n\n if neck > 0:\n self.neck_conv = nn.Conv2d(out_planes, neck, kernel_size=3, padding=1, bias=False)\n out_planes = neck\n self.neck_bn = nn.BatchNorm2d(out_planes)\n\n self.num_features = out_planes\n\n def forward(self, inputs):\n x = inputs\n for name, module in self.base._modules.items():\n x = module(x)\n if name == self.final_layer:\n break\n\n if self.neck > 0:\n x = self.neck_conv(x)\n x = self.neck_bn(x)\n\n return x\n\n\ndef resnet18(**kwargs):\n return ResNet(18, **kwargs)\n\n\ndef resnet34(**kwargs):\n return ResNet(34, **kwargs)\n\n\ndef resnet50(**kwargs):\n return ResNet(50, **kwargs)\n\n\ndef resnet101(**kwargs):\n return ResNet(101, **kwargs)\n\n\ndef resnet152(**kwargs):\n return ResNet(152, **kwargs)\n\n\n__factory = {\n 'resnet18': resnet18,\n 'resnet34': resnet34,\n 'resnet50': resnet50,\n 'resnet101': resnet101,\n 'resnet152': resnet152,\n}\n\n\ndef names():\n return sorted(__factory.keys())\n\n\ndef create(name, *args, **kwargs):\n \"\"\"\n Create a model instance.\n\n Parameters\n ----------\n name : str\n Model name. Can be one of 'inception', 'resnet18', 'resnet34',\n 'resnet50', 'resnet101', and 'resnet152'.\n pretrained : bool, optional\n Only applied for 'resnet*' models. If True, will use ImageNet pretrained\n model. Default: True\n cut_at_pooling : bool, optional\n If True, will cut the model before the last global pooling layer and\n ignore the remaining kwargs. Default: False\n num_features : int, optional\n If positive, will append a Linear layer after the global pooling layer,\n with this number of output units, followed by a BatchNorm layer.\n Otherwise these layers will not be appended. Default: 256 for\n 'inception', 0 for 'resnet*'\n norm : bool, optional\n If True, will normalize the feature to be unit L2-norm for each sample.\n Otherwise will append a ReLU layer after the above Linear layer if\n num_features > 0. Default: False\n dropout : float, optional\n If positive, will append a Dropout layer with this dropout rate.\n Default: 0\n num_classes : int, optional\n If positive, will append a Linear layer at the end as the classifier\n with this number of output units. Default: 0\n \"\"\"\n if name not in __factory:\n raise KeyError(\"Unknown model:\", name)\n return __factory[name](*args, **kwargs)\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Conv2d" ] ]
wanirepo/Neurosynth
[ "5b770ec31c5095c16e27ebe664fa5d515c662298" ]
[ "neurosynth/analysis/reduce.py" ]
[ "import numpy as np\n\n\"\"\" Dimensional/data reduction methods. \"\"\"\n\ndef average_within_regions(dataset, img, threshold=None, remove_zero=True):\n \"\"\" Averages over all voxels within each ROI in the input image.\n\n Takes a Dataset and a Nifti image that defines distinct regions, and \n returns a numpy matrix of ROIs x mappables, where the value at each ROI is \n the proportion of active voxels in that ROI. Each distinct ROI must have a \n unique value in the image; non-contiguous voxels with the same value will \n be assigned to the same ROI.\n\n Args:\n dataset: A Dataset instance\n img: A NIFTI or Analyze-format image that provides the ROI definitions\n threshold: An optional float in the range of 0 - 1. If passed, the array \n will be binarized, with ROI values above the threshold assigned to True \n and values below the threshold assigned to False. (E.g., if threshold = \n 0.05, only ROIs in which more than 5% of voxels are active will be \n considered active.).\n remove_zero: An optional boolean; when True, assume that voxels with value \n of 0 should not be considered as a separate ROI, and will be ignored. \n\n Returns:\n If replace == True, nothing is returned (the Dataset is modified in-place).\n Otherwise, returns a 2D numpy array with ROIs in rows and mappables in columns.\n \"\"\"\n regions = dataset.volume.mask(img)\n labels = np.unique(regions)\n if remove_zero: labels = labels[np.nonzero(labels)]\n n_regions = labels.size\n m = np.zeros((regions.size, n_regions))\n for i in range(n_regions):\n m[regions==labels[i],i] = 1.0/np.sum(regions==labels[i])\n # produces roi x study matrix\n result = np.transpose(m) * dataset.get_image_data(ids=None, dense=False)\n if threshold is not None:\n result[result < threshold] = 0.0\n result = result.astype(bool)\n return result\n\ndef get_random_voxels(dataset, n_voxels):\n \"\"\" Returns mappable data for a random subset of voxels.\n\n May be useful as a baseline in predictive analyses--e.g., to compare performance \n of a more principled feature selection method with simple random selection.\n\n Args:\n dataset: A Dataset instance\n n_voxels: An integer specifying the number of random voxels to select.\n\n Returns:\n A 2D numpy array with (randomly-selected) voxels in rows and mappables in columns.\n \"\"\"\n voxels = np.range(dataset.volume.num_vox_in_mask)\n selected = np.random.shuffle(voxels)[0:n_voxels]\n return dataset.get_image_data(voxels=selected)\n" ]
[ [ "numpy.sum", "numpy.random.shuffle", "numpy.transpose", "numpy.zeros", "numpy.range", "numpy.nonzero", "numpy.unique" ] ]
jdey4/progressive-learning
[ "410b3525ab63e1f7c32e9838460b2c9af7b9d256", "410b3525ab63e1f7c32e9838460b2c9af7b9d256" ]
[ "replaying/test.py", "src/lifelong_dnn.py" ]
[ "#%%\nimport random\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport seaborn as sns \n\nimport numpy as np\nimport pickle\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom math import log2, ceil \n\nimport sys\n#sys.path.append(\"../src/\")\nsys.path.append(\"../src_sampling/\")\nfrom lifelong_dnn import LifeLongDNN\nfrom joblib import Parallel, delayed\n\n#%%\ndef unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\ndef get_colors(colors, inds):\n c = [colors[i] for i in inds]\n return c\n\ndef generate_2d_rotation(theta=0, acorn=None):\n if acorn is not None:\n np.random.seed(acorn)\n \n R = np.array([\n [np.cos(theta), np.sin(theta)],\n [-np.sin(theta), np.cos(theta)]\n ])\n \n return R\n\n\ndef generate_gaussian_parity(n, mean=np.array([-1, -1]), cov_scale=1, angle_params=None, k=1, acorn=None):\n if acorn is not None:\n np.random.seed(acorn)\n \n d = len(mean)\n \n if mean[0] == -1 and mean[1] == -1:\n mean = mean + 1 / 2**k\n \n mnt = np.random.multinomial(n, 1/(4**k) * np.ones(4**k))\n cumsum = np.cumsum(mnt)\n cumsum = np.concatenate(([0], cumsum))\n \n Y = np.zeros(n)\n X = np.zeros((n, d))\n \n for i in range(2**k):\n for j in range(2**k):\n temp = np.random.multivariate_normal(mean, cov_scale * np.eye(d), \n size=mnt[i*(2**k) + j])\n temp[:, 0] += i*(1/2**(k-1))\n temp[:, 1] += j*(1/2**(k-1))\n \n X[cumsum[i*(2**k) + j]:cumsum[i*(2**k) + j + 1]] = temp\n \n if i % 2 == j % 2:\n Y[cumsum[i*(2**k) + j]:cumsum[i*(2**k) + j + 1]] = 0\n else:\n Y[cumsum[i*(2**k) + j]:cumsum[i*(2**k) + j + 1]] = 1\n \n if d == 2:\n if angle_params is None:\n angle_params = np.random.uniform(0, 2*np.pi)\n \n R = generate_2d_rotation(angle_params)\n X = X @ R\n \n else:\n raise ValueError('d=%i not implemented!'%(d))\n \n return X, Y.astype(int)\n\n\n#%%\ndef experiment(n_xor, n_nxor, n_test, reps, n_trees, max_depth, acorn=None):\n #print(1)\n if n_xor==0 and n_nxor==0:\n raise ValueError('Wake up and provide samples to train!!!')\n \n if acorn != None:\n np.random.seed(acorn)\n \n errors = np.zeros((reps,4),dtype=float)\n \n for i in range(reps):\n l2f = LifeLongDNN(parallel=False)\n uf1 = LifeLongDNN(parallel=False)\n uf2 = LifeLongDNN(parallel=False)\n #source data\n xor, label_xor = generate_gaussian_parity(n_xor,cov_scale=0.1,angle_params=0)\n test_xor, test_label_xor = generate_gaussian_parity(n_test,cov_scale=0.1,angle_params=0)\n\n '''min_xor = np.min(xor)\n xor = (xor - min_xor)\n max_xor = np.max(xor)\n xor = xor/max_xor\n test_xor = (test_xor-min_xor)/max_xor'''\n #target data\n if n_nxor!=0:\n nxor, label_nxor = generate_gaussian_parity(n_nxor,cov_scale=0.1,angle_params=np.pi/2)\n test_nxor, test_label_nxor = generate_gaussian_parity(n_test,cov_scale=0.1,angle_params=np.pi/2)\n\n '''min_nxor = np.min(nxor)\n nxor = (nxor - min_nxor)\n max_nxor = np.max(nxor)\n nxor = nxor/max_nxor\n test_nxor = (test_nxor-min_nxor)/max_nxor'''\n\n if n_xor == 0:\n l2f.new_forest(nxor, label_nxor, n_estimators=n_trees,max_depth=max_depth)\n \n errors[i,0] = 0.5\n errors[i,1] = 0.5\n \n uf_task2=l2f.predict(test_nxor, representation=0, decider=0)\n l2f_task2=l2f.predict(test_nxor, representation='all', decider=0)\n \n errors[i,2] = 1 - np.sum(uf_task2 == test_label_nxor)/n_test\n errors[i,3] = 1 - np.sum(l2f_task2 == test_label_nxor)/n_test\n elif n_nxor == 0:\n l2f.new_forest(xor, label_xor, n_estimators=n_trees,max_depth=max_depth)\n \n uf_task1=l2f.predict(test_xor, representation=0, 
decider=0)\n l2f_task1=l2f.predict(test_xor, representation='all', decider=0)\n \n errors[i,0] = 1 - np.sum(uf_task1 == test_label_xor)/n_test\n errors[i,1] = 1 - np.sum(l2f_task1 == test_label_xor)/n_test\n errors[i,2] = 0.5\n errors[i,3] = 0.5\n else:\n l2f.new_forest(xor, label_xor, n_estimators=n_trees,max_depth=max_depth)\n\n delta = .001\n #sample the grid\n x = np.arange(-1,1,step=delta)\n y = np.arange(-1,1,step=delta)\n x,y = np.meshgrid(x,y)\n sample = np.concatenate(\n (\n x.reshape(-1,1),\n y.reshape(-1,1)\n ),\n axis=1\n )\n #sample_label = l2f.predict(sample, representation=0,decider=0)\n sample_label = l2f._estimate_posteriors(sample, representation='all', decider=0)\n l2f.X_across_tasks[0] = sample\n l2f.y_across_tasks[0] = sample_label\n ############################\n\n l2f.new_forest(nxor, label_nxor, n_estimators=n_trees,max_depth=max_depth)\n \n uf1.new_forest(xor, label_xor, n_estimators=n_trees,max_depth=max_depth)\n uf2.new_forest(nxor, label_nxor, n_estimators=n_trees,max_depth=max_depth)\n\n uf_task1=uf1.predict(test_xor, representation=0, decider=0)\n l2f_task1=l2f.predict(test_xor, representation='all', decider=0)\n uf_task2=uf2.predict(test_nxor, representation=0, decider=0)\n l2f_task2=l2f.predict(test_nxor, representation='all', decider=1)\n \n errors[i,0] = 1 - np.sum(uf_task1 == test_label_xor)/n_test\n errors[i,1] = 1 - np.sum(l2f_task1 == test_label_xor)/n_test\n errors[i,2] = 1 - np.sum(uf_task2 == test_label_nxor)/n_test\n errors[i,3] = 1 - np.sum(l2f_task2 == test_label_nxor)/n_test\n\n return np.mean(errors,axis=0)\n\n#%%\nmc_rep = 1000\nn_test = 1000\nn_trees = 10\nn_xor = (100*np.arange(0.5, 7.25, step=0.25)).astype(int)\nn_nxor = (100*np.arange(0.5, 7.5, step=0.25)).astype(int)\n\nmean_error = np.zeros((4, len(n_xor)+len(n_nxor)))\nstd_error = np.zeros((4, len(n_xor)+len(n_nxor)))\n\nmean_te = np.zeros((2, len(n_xor)+len(n_nxor)))\nstd_te = np.zeros((2, len(n_xor)+len(n_nxor)))\n\nfor i,n1 in enumerate(n_xor):\n print('starting to compute %s xor\\n'%n1)\n error = np.array(\n Parallel(n_jobs=-1,verbose=1)(\n delayed(experiment)(n1,0,n_test,1,n_trees=n_trees,max_depth=200) for _ in range(mc_rep)\n )\n )\n mean_error[:,i] = np.mean(error,axis=0)\n std_error[:,i] = np.std(error,ddof=1,axis=0)\n mean_te[0,i] = np.mean(error[:,0]/error[:,1])\n mean_te[1,i] = np.mean(error[:,2]/error[:,3])\n std_te[0,i] = np.std(error[:,0]/error[:,1],ddof=1)\n std_te[1,i] = np.std(error[:,2]/error[:,3],ddof=1)\n \n if n1==n_xor[-1]:\n for j,n2 in enumerate(n_nxor):\n print('starting to compute %s nxor\\n'%n2)\n \n error = np.array(\n Parallel(n_jobs=-1,verbose=1)(\n delayed(experiment)(n1,n2,n_test,1,n_trees=n_trees,max_depth=200) for _ in range(mc_rep)\n )\n )\n mean_error[:,i+j+1] = np.mean(error,axis=0)\n std_error[:,i+j+1] = np.std(error,ddof=1,axis=0)\n mean_te[0,i+j+1] = np.mean(error[:,0]/error[:,1])\n mean_te[1,i+j+1] = np.mean(error[:,2]/error[:,3])\n std_te[0,i+j+1] = np.std(error[:,0]/error[:,1],ddof=1)\n std_te[1,i+j+1] = np.std(error[:,2]/error[:,3],ddof=1)\n \nwith open('./result/mean_xor_nxor.pickle','wb') as f:\n pickle.dump(mean_error,f)\n \nwith open('./result/std_xor_nxor.pickle','wb') as f:\n pickle.dump(std_error,f)\n \nwith open('./result/mean_te_xor_nxor.pickle','wb') as f:\n pickle.dump(mean_te,f)\n \nwith open('./result/std_te_xor_nxor.pickle','wb') as f:\n pickle.dump(std_te,f)\n\n#%% Plotting the result\n#mc_rep = 50\nmean_error = unpickle('result/mean_xor_nxor.pickle')\nstd_error = unpickle('result/std_xor_nxor.pickle')\n\nn_xor = 
(100*np.arange(0.5, 7.25, step=0.25)).astype(int)\nn_nxor = (100*np.arange(0.5, 7.5, step=0.25)).astype(int)\n\nn1s = n_xor\nn2s = n_nxor\n\nns = np.concatenate((n1s, n2s + n1s[-1]))\nls=['-', '--']\nalgorithms = ['Uncertainty Forest', 'Lifelong Forest']\n\n\nTASK1='XOR'\nTASK2='N-XOR'\n\nfontsize=30\nlabelsize=28\n\ncolors = sns.color_palette(\"Set1\", n_colors = 2)\n\nfig = plt.figure(constrained_layout=True,figsize=(21,14))\ngs = fig.add_gridspec(14, 21)\nax1 = fig.add_subplot(gs[7:,:6])\n# for i, algo in enumerate(algorithms):\nax1.plot(ns, mean_error[0], label=algorithms[0], c=colors[1], ls=ls[np.sum(0 > 1).astype(int)], lw=3)\n#ax1.fill_between(ns, \n# mean_error[0] + 1.96*std_error[0], \n# mean_error[0] - 1.96*std_error[0], \n# where=mean_error[0] + 1.96*std_error[0] >= mean_error[0] - 1.96*std_error[0], \n# facecolor=colors[1], \n# alpha=0.15,\n# interpolate=True)\n\nax1.plot(ns, mean_error[1], label=algorithms[1], c=colors[0], ls=ls[np.sum(1 > 1).astype(int)], lw=3)\n#ax1.fill_between(ns, \n# mean_error[1] + 1.96*std_error[1, ], \n# mean_error[1] - 1.96*std_error[1, ], \n# where=mean_error[1] + 1.96*std_error[1] >= mean_error[1] - 1.96*std_error[1], \n# facecolor=colors[0], \n# alpha=0.15,\n# interpolate=True)\n\nax1.set_ylabel('Generalization Error (%s)'%(TASK1), fontsize=fontsize)\nax1.legend(loc='upper right', fontsize=20, frameon=False)\nax1.set_ylim(0.1, 0.21)\nax1.set_xlabel('Total Sample Size', fontsize=fontsize)\nax1.tick_params(labelsize=labelsize)\nax1.set_yticks([0.15, 0.2])\nax1.set_xticks([250,750, 1500])\nax1.axvline(x=750, c='gray', linewidth=1.5, linestyle=\"dashed\")\nax1.set_title('XOR', fontsize=30)\n\nright_side = ax1.spines[\"right\"]\nright_side.set_visible(False)\ntop_side = ax1.spines[\"top\"]\ntop_side.set_visible(False)\n\nax1.text(250, np.mean(ax1.get_ylim()), \"%s\"%(TASK1), fontsize=26)\nax1.text(900, np.mean(ax1.get_ylim()), \"%s\"%(TASK2), fontsize=26)\n\n#plt.tight_layout()\n\n#plt.savefig('./result/figs/generalization_error_xor.pdf',dpi=500)\n\n#####\nmean_error = unpickle('result/mean_xor_nxor.pickle')\nstd_error = unpickle('result/std_xor_nxor.pickle')\n\nalgorithms = ['Uncertainty Forest', 'Lifelong Forest']\n\nTASK1='XOR'\nTASK2='N-XOR'\n\nax1 = fig.add_subplot(gs[7:,7:13])\n# for i, algo in enumerate(algorithms):\nax1.plot(ns[len(n1s):], mean_error[2, len(n1s):], label=algorithms[0], c=colors[1], ls=ls[1], lw=3)\n#ax1.fill_between(ns[len(n1s):], \n# mean_error[2, len(n1s):] + 1.96*std_error[2, len(n1s):], \n# mean_error[2, len(n1s):] - 1.96*std_error[2, len(n1s):], \n# where=mean_error[2, len(n1s):] + 1.96*std_error[2, len(n1s):] >= mean_error[2, len(n1s):] - 1.96*std_error[2, len(n1s):], \n# facecolor=colors[1], \n# alpha=0.15,\n# interpolate=True)\n\nax1.plot(ns[len(n1s):], mean_error[3, len(n1s):], label=algorithms[1], c=colors[0], ls=ls[1], lw=3)\n#ax1.fill_between(ns[len(n1s):], \n# mean_error[3, len(n1s):] + 1.96*std_error[3, len(n1s):], \n# mean_error[3, len(n1s):] - 1.96*std_error[3, len(n1s):], \n# where=mean_error[3, len(n1s):] + 1.96*std_error[3, len(n1s):] >= mean_error[3, len(n1s):] - 1.96*std_error[3, len(n1s):], \n# facecolor=colors[0], \n# alpha=0.15,\n# interpolate=True)\n\nax1.set_ylabel('Generalization Error (%s)'%(TASK2), fontsize=fontsize)\nax1.legend(loc='upper right', fontsize=20, frameon=False)\n# ax1.set_ylim(-0.01, 0.22)\nax1.set_xlabel('Total Sample Size', fontsize=fontsize)\nax1.tick_params(labelsize=labelsize)\n# ax1.set_yticks([0.15, 0.25, 0.35])\nax1.set_yticks([0.15, 0.2])\nax1.set_xticks([250,750, 
1500])\nax1.axvline(x=750, c='gray', linewidth=1.5, linestyle=\"dashed\")\n\nax1.set_ylim(0.11, 0.21)\n\nax1.set_xlim(-10)\nright_side = ax1.spines[\"right\"]\nright_side.set_visible(False)\ntop_side = ax1.spines[\"top\"]\ntop_side.set_visible(False)\n\n# ax1.set_ylim(0.14, 0.36)\nax1.text(250, np.mean(ax1.get_ylim()), \"%s\"%(TASK1), fontsize=26)\nax1.text(900, np.mean(ax1.get_ylim()), \"%s\"%(TASK2), fontsize=26)\n\nax1.set_title('N-XOR', fontsize=30)\n#plt.tight_layout()\n\n#plt.savefig('./result/figs/generalization_error_nxor.pdf',dpi=500)\n\n#####\nmean_error = unpickle('result/mean_te_xor_nxor.pickle')\nstd_error = unpickle('result/std_te_xor_nxor.pickle')\n\nalgorithms = ['Backward Transfer', 'Forward Transfer']\n\nTASK1='XOR'\nTASK2='N-XOR'\n\nax1 = fig.add_subplot(gs[7:,14:])\n\nax1.plot(ns, mean_error[0], label=algorithms[0], c=colors[0], ls=ls[0], lw=3)\n#ax1.fill_between(ns, \n# mean_error[0] + 1.96*std_error[0], \n# mean_error[0] - 1.96*std_error[0], \n# where=mean_error[1] + 1.96*std_error[0] >= mean_error[0] - 1.96*std_error[0], \n# facecolor=colors[0], \n# alpha=0.15,\n# interpolate=True)\n\nax1.plot(ns[len(n1s):], mean_error[1, len(n1s):], label=algorithms[1], c=colors[0], ls=ls[1], lw=3)\n#ax1.fill_between(ns[len(n1s):], \n# mean_error[1, len(n1s):] + 1.96*std_error[1, len(n1s):], \n# mean_error[1, len(n1s):] - 1.96*std_error[1, len(n1s):], \n# where=mean_error[1, len(n1s):] + 1.96*std_error[1, len(n1s):] >= mean_error[1, len(n1s):] - 1.96*std_error[1, len(n1s):], \n# facecolor=colors[0], \n# alpha=0.15,\n# interpolate=True)\n\nax1.set_ylabel('Transfer Efficiency', fontsize=fontsize)\nax1.legend(loc='upper right', fontsize=20, frameon=False)\nax1.set_ylim(.99, 1.4)\nax1.set_xlabel('Total Sample Size', fontsize=fontsize)\nax1.tick_params(labelsize=labelsize)\nax1.set_yticks([1,1.2,1.4])\nax1.set_xticks([250,750, 1500])\nax1.axvline(x=750, c='gray', linewidth=1.5, linestyle=\"dashed\")\nright_side = ax1.spines[\"right\"]\nright_side.set_visible(False)\ntop_side = ax1.spines[\"top\"]\ntop_side.set_visible(False)\nax1.hlines(1, 50,1500, colors='gray', linestyles='dashed',linewidth=1.5)\n\nax1.text(250, np.mean(ax1.get_ylim()), \"%s\"%(TASK1), fontsize=26)\nax1.text(900, np.mean(ax1.get_ylim()), \"%s\"%(TASK2), fontsize=26)\n\n#plt.tight_layout()\n\n#plt.savefig('./result/figs/TE.pdf',dpi=500)\n\n#####\ncolors = sns.color_palette('Dark2', n_colors=2)\n\nX, Y = generate_gaussian_parity(750, cov_scale=0.1, angle_params=0)\nZ, W = generate_gaussian_parity(750, cov_scale=0.1, angle_params=np.pi/2)\n\nax = fig.add_subplot(gs[:6,4:10])\nax.scatter(X[:, 0], X[:, 1], c=get_colors(colors, Y), s=50)\n\nax.set_xticks([])\nax.set_yticks([])\nax.set_title('Gaussian XOR', fontsize=30)\n\nplt.tight_layout()\nax.axis('off')\n#plt.savefig('./result/figs/gaussian-xor.pdf')\n\n###\ncolors = sns.color_palette('Dark2', n_colors=2)\n\nax = fig.add_subplot(gs[:6,11:16])\nax.scatter(Z[:, 0], Z[:, 1], c=get_colors(colors, W), s=50)\n\nax.set_xticks([])\nax.set_yticks([])\nax.set_title('Gaussian N-XOR', fontsize=30)\nax.axis('off')\n#plt.tight_layout()\nplt.savefig('./result/figs/xor_nxor_exp_sampling.pdf')\n\n# %%\n", "'''\nPrimary Author: Will LeVine\nEmail: [email protected]\n'''\n\nfrom sklearn.base import clone \n\nimport numpy as np\n\nfrom joblib import Parallel, delayed\n\nclass LifeLongDNN():\n def __init__(self, acorn = None, verbose = False, model = \"uf\", parallel = True, n_jobs = None):\n self.X_across_tasks = []\n self.y_across_tasks = []\n \n self.transformers_across_tasks = []\n \n 
#element [i, j] votes on decider from task i under representation from task j\n self.voters_across_tasks_matrix = []\n self.n_tasks = 0\n \n self.classes_across_tasks = []\n \n if acorn is not None:\n np.random.seed(acorn)\n \n self.verbose = verbose\n \n self.model = model\n \n self.parallel = parallel\n \n self.n_jobs = n_jobs\n \n def check_task_idx_(self, task_idx):\n if task_idx >= self.n_tasks:\n raise Exception(\"Invalid Task IDX\")\n \n def new_forest(self, \n X, \n y, \n epochs = 100, \n lr = 5e-4, \n n_estimators = 100, \n max_samples = .63,\n bootstrap = False,\n max_depth = 30,\n min_samples_leaf = 1,\n acorn = None,\n parallel = False,\n n_jobs = None):\n \n if self.model == \"dnn\":\n from honest_dnn import HonestDNN \n if self.model == \"uf\":\n from uncertainty_forest import UncertaintyForest\n \n self.X_across_tasks.append(X)\n self.y_across_tasks.append(y)\n \n if self.model == \"dnn\":\n new_honest_dnn = HonestDNN(verbose = self.verbose)\n new_honest_dnn.fit(X, y, epochs = epochs, lr = lr)\n if self.model == \"uf\":\n new_honest_dnn = UncertaintyForest(n_estimators = n_estimators,\n max_samples = max_samples,\n bootstrap = bootstrap,\n max_depth = max_depth,\n min_samples_leaf = min_samples_leaf,\n parallel = parallel,\n n_jobs = n_jobs)\n new_honest_dnn.fit(X, y)\n new_transformer = new_honest_dnn.get_transformer()\n new_voter = new_honest_dnn.get_voter()\n new_classes = new_honest_dnn.classes_\n \n self.transformers_across_tasks.append(new_transformer)\n self.classes_across_tasks.append(new_classes)\n \n #add one voter to previous task voter lists under the new transformation\n for task_idx in range(self.n_tasks):\n X_of_task, y_of_task = self.X_across_tasks[task_idx], self.y_across_tasks[task_idx]\n if self.model == \"dnn\":\n X_of_task_under_new_transform = new_transformer.predict(X_of_task) \n if self.model == \"uf\":\n X_of_task_under_new_transform = new_transformer(X_of_task) \n unfit_task_voter_under_new_transformation = clone(new_voter)\n if self.model == \"uf\":\n unfit_task_voter_under_new_transformation.classes_ = self.voters_across_tasks_matrix[task_idx][0].classes_\n task_voter_under_new_transformation = unfit_task_voter_under_new_transformation.fit(\n X_of_task_under_new_transform, \n y_of_task,\n tree_id_to_leaf_profile = new_voter.tree_id_to_leaf_profile\n )\n #print(task_voter_under_new_transformation.tree_id_to_leaf_profile,'hi\\n',task_voter_under_new_transformation.tree_idx_to_node_ids_to_posterior_map)\n self.voters_across_tasks_matrix[task_idx].append(task_voter_under_new_transformation)\n \n #add n_tasks voters to new task voter list under previous transformations \n new_voters_under_previous_task_transformation = []\n for task_idx in range(self.n_tasks):\n transformer_of_task = self.transformers_across_tasks[task_idx]\n if self.model == \"dnn\":\n X_under_task_transformation = transformer_of_task.predict(X)\n if self.model == \"uf\":\n X_under_task_transformation = transformer_of_task(X)\n unfit_new_task_voter_under_task_transformation = clone(self.voters_across_tasks_matrix[task_idx][task_idx])\n if self.model == \"uf\":\n unfit_new_task_voter_under_task_transformation.classes_ = new_voter.classes_\n new_task_voter_under_task_transformation = unfit_new_task_voter_under_task_transformation.fit(\n X_under_task_transformation,\n y,\n tree_id_to_leaf_profile = self.voters_across_tasks_matrix[task_idx][task_idx].tree_id_to_leaf_profile\n )\n new_voters_under_previous_task_transformation.append(new_task_voter_under_task_transformation)\n \n #make sure 
to add the voter of the new task under its own transformation\n new_voters_under_previous_task_transformation.append(new_voter)\n \n self.voters_across_tasks_matrix.append(new_voters_under_previous_task_transformation)\n \n self.n_tasks += 1\n \n def _estimate_posteriors(self, X, representation = 0, decider = 0):\n self.check_task_idx_(decider)\n \n if representation == \"all\":\n representation = range(self.n_tasks)\n elif isinstance(representation, int):\n representation = np.array([representation])\n \n def worker(transformer_task_idx):\n transformer = self.transformers_across_tasks[transformer_task_idx]\n voter = self.voters_across_tasks_matrix[decider][transformer_task_idx]\n if self.model == \"dnn\":\n return voter.predict_proba(transformer.predict(X))\n if self.model == \"uf\":\n return voter.predict_proba(transformer(X))\n \n if self.parallel:\n posteriors_across_tasks = np.array(\n Parallel(n_jobs=self.n_jobs if self.n_jobs != None else len(representation))(\n delayed(worker)(transformer_task_idx) for transformer_task_idx in representation\n )\n ) \n else:\n posteriors_across_tasks = np.array([worker(transformer_task_idx) for transformer_task_idx in representation]) \n \n return np.mean(posteriors_across_tasks, axis = 0)\n \n def predict(self, X, representation = 0, decider = 0):\n task_classes = self.classes_across_tasks[decider]\n return task_classes[np.argmax(self._estimate_posteriors(X, representation, decider), axis = -1)]\n \n" ]
[ [ "numpy.random.uniform", "numpy.ones", "numpy.eye", "numpy.sum", "numpy.cumsum", "numpy.sin", "numpy.zeros", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "numpy.random.seed", "numpy.cos", "numpy.arange", "numpy.array", "numpy.std", "numpy.concatenate", "numpy.meshgrid", "numpy.mean" ], [ "numpy.array", "sklearn.base.clone", "numpy.random.seed", "numpy.mean" ] ]
stefan-woerner/aqua
[ "12e1b867e254977d9c5992612a7919d8fe016cb4" ]
[ "qiskit/optimization/applications/ising/knapsack.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nConvert knapsack parameters instances into Pauli list\nThe parameters are a list of values a list of weights and a maximum weight of the knapsack.\n\nIn the Knapsack Problem we are given a list of objects that each has a weight and a value.\nWe are also given a maximum weight we can carry. We need to pick a subset of the objects\nso as to maximize the total value without going over the maximum weight.\n\nIf we have the weights w[i], the values v[i] and the maximum weight W_max.\nWe express the solution as a binary array x[i]\nwhere we have a 1 for the items we take in the solution set.\nWe need to maximize sum(x[i]*v[i]) while respecting W_max >= sum(x[i]*w[i])\n\n\"\"\"\n\nimport logging\nimport math\nimport numpy as np\n\nfrom qiskit.quantum_info import Pauli\nfrom qiskit.aqua.operators import WeightedPauliOperator\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_operator(values, weights, max_weight):\n \"\"\"\n Generate Hamiltonian for the knapsack problem.\n\n Notes:\n To build the cost function for the Hamiltonian we add a term S\n that will vary with our solution. In order to make it change wit the solution\n we enhance X with a number of additional bits X' = [x_0,..x_{n-1},y_{n}..y_{n+m-1}].\n The bytes y[i] will be the binary representation of S.\n In this way the optimizer will be able to optimize S as well as X.\n\n The cost function is\n $$C(X') = M(W_{max} - \\\\sum_{i=0}^{n-1} x_{i}w_{i} - S)^2 - \\\\sum_{i}^{n-1} x_{i}v_{i}$$\n where S = sum(2**j * y[j]), j goes from n to n+log(W_max).\n M is a number large enough to dominate the sum of values.\n\n Because S can only be positive, when W_max >= sum(x[i]*w[i])\n the optimizer can find an S (or better the y[j] that compose S)\n so that it will take the first term to 0.\n This way the function is dominated by the sum of values.\n If W_max < sum(x[i]*w[i]) then the first term can never be 0\n and, multiplied by a large M, will always dominate the function.\n\n The minimum value of the function will be that where the constraint is respected\n and the sum of values is maximized.\n\n Args:\n values (list of non-negative integers) : a list of values\n weights (list of non-negative integers) : a list of weights\n max_weight (non negative integer) : the maximum weight the knapsack can carry\n\n Returns:\n WeightedPauliOperator: operator for the Hamiltonian\n float: a constant shift for the obj function.\n\n Raises:\n ValueError: values and weights have different lengths\n ValueError: A value or a weight is negative\n ValueError: All values are zero\n ValueError: max_weight is negative\n\n \"\"\"\n if len(values) != len(weights):\n raise ValueError(\"The values and weights must have the same length\")\n\n if any(v < 0 for v in values) or any(w < 0 for w in weights):\n raise ValueError(\"The values and weights cannot be negative\")\n\n if all(v == 0 for v in values):\n raise ValueError(\"The values cannot all be 0\")\n\n if max_weight < 0:\n raise ValueError(\"max_weight cannot be negative\")\n\n y_size = int(math.log(max_weight, 2)) + 
1 if max_weight > 0 else 1\n n = len(values)\n num_values = n + y_size\n pauli_list = []\n shift = 0\n\n # pylint: disable=invalid-name\n M = 10 * np.sum(values)\n\n # term for sum(x_i*w_i)**2\n for i in range(n):\n for j in range(n):\n coefficient = -1 * 0.25 * weights[i] * weights[j] * M\n pauli_op = _get_pauli_op(num_values, [j])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n pauli_op = _get_pauli_op(num_values, [i])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n coefficient = -1 * coefficient\n pauli_op = _get_pauli_op(num_values, [i, j])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n # term for sum(2**j*y_j)**2\n for i in range(y_size):\n for j in range(y_size):\n coefficient = -1 * 0.25 * (2 ** i) * (2 ** j) * M\n\n pauli_op = _get_pauli_op(num_values, [n + j])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n pauli_op = _get_pauli_op(num_values, [n + i])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n coefficient = -1 * coefficient\n pauli_op = _get_pauli_op(num_values, [n + i, n + j])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n # term for -2*W_max*sum(x_i*w_i)\n for i in range(n):\n coefficient = max_weight * weights[i] * M\n\n pauli_op = _get_pauli_op(num_values, [i])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n # term for -2*W_max*sum(2**j*y_j)\n for j in range(y_size):\n coefficient = max_weight * (2 ** j) * M\n\n pauli_op = _get_pauli_op(num_values, [n + j])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n for i in range(n):\n for j in range(y_size):\n coefficient = -1 * 0.5 * weights[i] * (2 ** j) * M\n\n pauli_op = _get_pauli_op(num_values, [n + j])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n pauli_op = _get_pauli_op(num_values, [i])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n coefficient = -1 * coefficient\n pauli_op = _get_pauli_op(num_values, [i, n + j])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n # term for sum(x_i*v_i)\n for i in range(n):\n coefficient = 0.5 * values[i]\n\n pauli_op = _get_pauli_op(num_values, [i])\n pauli_list.append([coefficient, pauli_op])\n shift -= coefficient\n\n return WeightedPauliOperator(paulis=pauli_list), shift\n\n\ndef get_solution(x, values):\n \"\"\"\n Get the solution to the knapsack problem\n from the bitstring that represents\n to the ground state of the Hamiltonian\n\n Args:\n x (numpy.ndarray): the ground state of the Hamiltonian.\n values (numpy.ndarray): the list of values\n\n Returns:\n numpy.ndarray: a bit string that has a '1' at the indexes\n corresponding to values that have been taken in the knapsack.\n i.e. 
if the solution has a '1' at index i then\n the value values[i] has been taken in the knapsack\n \"\"\"\n return x[:len(values)]\n\n\ndef knapsack_value_weight(solution, values, weights):\n \"\"\"\n Get the total value and weight of the items taken in the knapsack.\n\n Args:\n solution (numpy.ndarray) : binary string that represents the solution to the problem.\n values (numpy.ndarray) : the list of values\n weights (numpy.ndarray) : the list of weights\n\n Returns:\n tuple: the total value and weight of the items in the knapsack\n \"\"\"\n value = np.sum(solution * values)\n weight = np.sum(solution * weights)\n return value, weight\n\n\ndef _get_pauli_op(num_values, indexes):\n pauli_x = np.zeros(num_values, dtype=bool)\n pauli_z = np.zeros(num_values, dtype=bool)\n for i in indexes:\n pauli_z[i] = not pauli_z[i]\n\n return Pauli((pauli_z, pauli_x))\n" ]
[ [ "numpy.sum", "numpy.zeros" ] ]
theHamsta/PYRO-NN-Layers
[ "c776c3d7315f483937a7cebf667c6d491ecd57e6" ]
[ "cuda_operator.py" ]
[ "# Copyright [2019] [Christopher Syben]\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Makes every implemented operator in python available under the namespace pyronn_layers\n# PYRO-NN is developed as an Open Source project under the Apache License, Version 2.0.\n#\nimport os.path\nimport tensorflow as tf\nimport pyronn_layers\n\n\nif tf.test.is_built_with_cuda():\n _pyronn_layers_module = tf.load_op_library(os.path.dirname(__file__)+'/pyronn_layers.so')\n ''' TODO: Improve the getattr method to add only real kernel methods and not everything '''\n for obj in dir(_pyronn_layers_module):\n setattr(pyronn_layers, obj, getattr(_pyronn_layers_module, obj))\n\n\n" ]
[ [ "tensorflow.test.is_built_with_cuda" ] ]
EnjoyLifeFund/macHighSierra-py36-pkgs
[ "5668b5785296b314ea1321057420bcd077dba9ea", "5668b5785296b314ea1321057420bcd077dba9ea", "5668b5785296b314ea1321057420bcd077dba9ea" ]
[ "torch/utils/model_zoo.py", "mir_eval/segment.py", "cvxpy_tinoco/functions/log_sum_exp.py" ]
[ "import torch\n\nimport hashlib\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nif sys.version_info[0] == 2:\n from urlparse import urlparse\n from urllib2 import urlopen\nelse:\n from urllib.request import urlopen\n from urllib.parse import urlparse\ntry:\n from tqdm import tqdm\nexcept ImportError:\n tqdm = None # defined below\n\n# matches bfd8deac from resnet18-bfd8deac.pth\nHASH_REGEX = re.compile(r'-([a-f0-9]*)\\.')\n\n\ndef load_url(url, model_dir=None, map_location=None):\n r\"\"\"Loads the Torch serialized object at the given URL.\n\n If the object is already present in `model_dir`, it's deserialized and\n returned. The filename part of the URL should follow the naming convention\n ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more\n digits of the SHA256 hash of the contents of the file. The hash is used to\n ensure unique names and to verify the contents of the file.\n\n The default value of `model_dir` is ``$TORCH_HOME/models`` where\n ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be\n overriden with the ``$TORCH_MODEL_ZOO`` environment variable.\n\n Args:\n url (string): URL of the object to download\n model_dir (string, optional): directory in which to save the object\n map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load)\n\n Example:\n >>> state_dict = torch.utils.model_zoo.load_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')\n\n \"\"\"\n if model_dir is None:\n torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))\n model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n parts = urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file):\n sys.stderr.write('Downloading: \"{}\" to {}\\n'.format(url, cached_file))\n hash_prefix = HASH_REGEX.search(filename).group(1)\n _download_url_to_file(url, cached_file, hash_prefix)\n return torch.load(cached_file, map_location=map_location)\n\n\ndef _download_url_to_file(url, dst, hash_prefix):\n u = urlopen(url)\n meta = u.info()\n if hasattr(meta, 'getheaders'):\n file_size = int(meta.getheaders(\"Content-Length\")[0])\n else:\n file_size = int(meta.get_all(\"Content-Length\")[0])\n\n f = tempfile.NamedTemporaryFile(delete=False)\n try:\n sha256 = hashlib.sha256()\n with tqdm(total=file_size) as pbar:\n while True:\n buffer = u.read(8192)\n if len(buffer) == 0:\n break\n f.write(buffer)\n sha256.update(buffer)\n pbar.update(len(buffer))\n\n f.close()\n digest = sha256.hexdigest()\n if digest[:len(hash_prefix)] != hash_prefix:\n raise RuntimeError('invalid hash value (expected \"{}\", got \"{}\")'\n .format(hash_prefix, digest))\n shutil.move(f.name, dst)\n finally:\n f.close()\n if os.path.exists(f.name):\n os.remove(f.name)\n\n\nif tqdm is None:\n # fake tqdm if it's not installed\n class tqdm(object):\n\n def __init__(self, total):\n self.total = total\n self.n = 0\n\n def update(self, n):\n self.n += n\n sys.stderr.write(\"\\r{0:.1f}%\".format(100 * self.n / float(self.total)))\n sys.stderr.flush()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n sys.stderr.write('\\n')\n", "# CREATED:2013-08-13 12:02:42 by Brian McFee <[email protected]>\n'''\nEvaluation criteria for structural segmentation fall into two categories:\nboundary annotation and structural annotation. 
Boundary annotation is the task\nof predicting the times at which structural changes occur, such as when a verse\ntransitions to a refrain. Metrics for boundary annotation compare estimated\nsegment boundaries to reference boundaries. Structural annotation is the task\nof assigning labels to detected segments. The estimated labels may be\narbitrary strings - such as A, B, C, - and they need not describe functional\nconcepts. Metrics for structural annotation are similar to those used for\nclustering data.\n\nConventions\n-----------\n\nBoth boundary and structural annotation metrics require two dimensional arrays\nwith two columns, one for boundary start times and one for boundary end times.\nStructural annotation further requires lists of reference and estimated segment\nlabels, which must have a length equal to the number of rows in the\ncorresponding list of boundary edges. In both tasks, we assume that\nannotations express a partitioning of the track into intervals. The function\n:func:`mir_eval.util.adjust_intervals` can be used to pad or crop the segment\nboundaries to span the duration of the entire track.\n\n\nMetrics\n-------\n\n* :func:`mir_eval.segment.detection`: An estimated boundary is considered\n correct if it falls within a window around a reference boundary\n* :func:`mir_eval.segment.deviation`: Computes the median absolute time\n difference from a reference boundary to its nearest estimated boundary, and\n vice versa\n* :func:`mir_eval.segment.pairwise`: For classifying pairs of sampled time\n instants as belonging to the same structural component\n* :func:`mir_eval.segment.rand_index`: Clusters reference and estimated\n annotations and compares them by the Rand Index\n* :func:`mir_eval.segment.ari`: Computes the Rand index, adjusted for chance\n* :func:`mir_eval.segment.nce`: Interprets sampled reference and estimated\n labels as samples of random variables :math:`Y_R, Y_E` from which the\n conditional entropy of :math:`Y_R` given :math:`Y_E` (Under-Segmentation) and\n :math:`Y_E` given :math:`Y_R` (Over-Segmentation) are estimated\n* :func:`mir_eval.segment.mutual_information`: Computes the standard,\n normalized, and adjusted mutual information of sampled reference and\n estimated segments\n'''\n\nimport collections\nimport warnings\n\nimport numpy as np\nimport scipy.stats\nimport scipy.sparse\nimport scipy.misc\nimport scipy.special\n\nfrom . import util\n\n\ndef validate_boundary(reference_intervals, estimated_intervals, trim):\n \"\"\"Checks that the input annotations to a segment boundary estimation\n metric (i.e. 
one that only takes in segment intervals) look like valid\n segment times, and throws helpful errors if not.\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n\n trim : bool\n will the start and end events be trimmed?\n\n \"\"\"\n\n if trim:\n # If we're trimming, then we need at least 2 intervals\n min_size = 2\n else:\n # If we're not trimming, then we only need one interval\n min_size = 1\n\n if len(reference_intervals) < min_size:\n warnings.warn(\"Reference intervals are empty.\")\n\n if len(estimated_intervals) < min_size:\n warnings.warn(\"Estimated intervals are empty.\")\n\n for intervals in [reference_intervals, estimated_intervals]:\n util.validate_intervals(intervals)\n\n\ndef validate_structure(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels):\n \"\"\"Checks that the input annotations to a structure estimation metric (i.e.\n one that takes in both segment boundaries and their labels) look like valid\n segment times and labels, and throws helpful errors if not.\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n \"\"\"\n for (intervals, labels) in [(reference_intervals, reference_labels),\n (estimated_intervals, estimated_labels)]:\n\n util.validate_intervals(intervals)\n if intervals.shape[0] != len(labels):\n raise ValueError('Number of intervals does not match number '\n 'of labels')\n\n # Check only when intervals are non-empty\n if intervals.size > 0:\n # Make sure intervals start at 0\n if not np.allclose(intervals.min(), 0.0):\n raise ValueError('Segment intervals do not start at 0')\n\n if reference_intervals.size == 0:\n warnings.warn(\"Reference intervals are empty.\")\n if estimated_intervals.size == 0:\n warnings.warn(\"Estimated intervals are empty.\")\n # Check only when intervals are non-empty\n if reference_intervals.size > 0 and estimated_intervals.size > 0:\n if not np.allclose(reference_intervals.max(),\n estimated_intervals.max()):\n raise ValueError('End times do not match')\n\n\ndef detection(reference_intervals, estimated_intervals,\n window=0.5, beta=1.0, trim=False):\n \"\"\"Boundary detection hit-rate.\n\n A hit is counted whenever a reference boundary is within ``window`` of an\n estimated boundary. 
Note that each boundary is matched at most once: this\n is achieved by computing the size of a maximal matching between reference\n and estimated boundary points, subject to the window constraint.\n\n Examples\n --------\n >>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # With 0.5s windowing\n >>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,\n ... est_intervals,\n ... window=0.5)\n >>> # With 3s windowing\n >>> P3, R3, F3 = mir_eval.segment.detection(ref_intervals,\n ... est_intervals,\n ... window=3)\n >>> # Ignoring hits for the beginning and end of track\n >>> P, R, F = mir_eval.segment.detection(ref_intervals,\n ... est_intervals,\n ... window=0.5,\n ... trim=True)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n window : float > 0\n size of the window of 'correctness' around ground-truth boundaries\n (in seconds)\n (Default value = 0.5)\n beta : float > 0\n weighting constant for F-measure.\n (Default value = 1.0)\n trim : boolean\n if ``True``, the first and last boundary times are ignored.\n Typically, these denote start (0) and end-markers.\n (Default value = False)\n\n Returns\n -------\n precision : float\n precision of estimated predictions\n recall : float\n recall of reference boundaries\n f_measure : float\n F-measure (weighted harmonic mean of ``precision`` and ``recall``)\n\n \"\"\"\n\n validate_boundary(reference_intervals, estimated_intervals, trim)\n\n # Convert intervals to boundaries\n reference_boundaries = util.intervals_to_boundaries(reference_intervals)\n estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)\n\n # Suppress the first and last intervals\n if trim:\n reference_boundaries = reference_boundaries[1:-1]\n estimated_boundaries = estimated_boundaries[1:-1]\n\n # If we have no boundaries, we get no score.\n if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:\n return 0.0, 0.0, 0.0\n\n matching = util.match_events(reference_boundaries,\n estimated_boundaries,\n window)\n\n precision = float(len(matching)) / len(estimated_boundaries)\n recall = float(len(matching)) / len(reference_boundaries)\n\n f_measure = util.f_measure(precision, recall, beta=beta)\n\n return precision, recall, f_measure\n\n\ndef deviation(reference_intervals, estimated_intervals, trim=False):\n \"\"\"Compute the median deviations between reference\n and estimated boundary times.\n\n Examples\n --------\n >>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')\n >>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,\n ... 
est_intervals)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n trim : boolean\n if ``True``, the first and last intervals are ignored.\n Typically, these denote start (0.0) and end-of-track markers.\n (Default value = False)\n\n Returns\n -------\n reference_to_estimated : float\n median time from each reference boundary to the\n closest estimated boundary\n estimated_to_reference : float\n median time from each estimated boundary to the\n closest reference boundary\n\n \"\"\"\n\n validate_boundary(reference_intervals, estimated_intervals, trim)\n\n # Convert intervals to boundaries\n reference_boundaries = util.intervals_to_boundaries(reference_intervals)\n estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)\n\n # Suppress the first and last intervals\n if trim:\n reference_boundaries = reference_boundaries[1:-1]\n estimated_boundaries = estimated_boundaries[1:-1]\n\n # If we have no boundaries, we get no score.\n if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:\n return np.nan, np.nan\n\n dist = np.abs(np.subtract.outer(reference_boundaries,\n estimated_boundaries))\n\n estimated_to_reference = np.median(dist.min(axis=0))\n reference_to_estimated = np.median(dist.min(axis=1))\n\n return reference_to_estimated, estimated_to_reference\n\n\ndef pairwise(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels,\n frame_size=0.1, beta=1.0):\n \"\"\"Frame-clustering segmentation evaluation by pair-wise agreement.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,\n ... ref_labels,\n ... est_intervals,\n ... 
est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n beta : float > 0\n beta value for F-measure\n (Default value = 1.0)\n\n Returns\n -------\n precision : float > 0\n Precision of detecting whether frames belong in the same cluster\n recall : float > 0\n Recall of detecting whether frames belong in the same cluster\n f : float > 0\n F-measure of detecting whether frames belong in the same cluster\n\n \"\"\"\n validate_structure(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels)\n\n # Check for empty annotations. Don't need to check labels because\n # validate_structure makes sure they're the same size as intervals\n if reference_intervals.size == 0 or estimated_intervals.size == 0:\n return 0., 0., 0.\n\n # Generate the cluster labels\n y_ref = util.intervals_to_samples(reference_intervals,\n reference_labels,\n sample_size=frame_size)[-1]\n\n y_ref = util.index_labels(y_ref)[0]\n\n # Map to index space\n y_est = util.intervals_to_samples(estimated_intervals,\n estimated_labels,\n sample_size=frame_size)[-1]\n\n y_est = util.index_labels(y_est)[0]\n\n # Build the reference label agreement matrix\n agree_ref = np.equal.outer(y_ref, y_ref)\n # Count the unique pairs\n n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0\n\n # Repeat for estimate\n agree_est = np.equal.outer(y_est, y_est)\n n_agree_est = (agree_est.sum() - len(y_est)) / 2.0\n\n # Find where they agree\n matches = np.logical_and(agree_ref, agree_est)\n n_matches = (matches.sum() - len(y_ref)) / 2.0\n\n precision = n_matches / n_agree_est\n recall = n_matches / n_agree_ref\n f_measure = util.f_measure(precision, recall, beta=beta)\n\n return precision, recall, f_measure\n\n\ndef rand_index(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels,\n frame_size=0.1, beta=1.0):\n \"\"\"(Non-adjusted) Rand index.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> rand_index = mir_eval.structure.rand_index(ref_intervals,\n ... ref_labels,\n ... est_intervals,\n ... 
est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n beta : float > 0\n beta value for F-measure\n (Default value = 1.0)\n\n Returns\n -------\n rand_index : float > 0\n Rand index\n\n \"\"\"\n\n validate_structure(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels)\n\n # Check for empty annotations. Don't need to check labels because\n # validate_structure makes sure they're the same size as intervals\n if reference_intervals.size == 0 or estimated_intervals.size == 0:\n return 0.\n\n # Generate the cluster labels\n y_ref = util.intervals_to_samples(reference_intervals,\n reference_labels,\n sample_size=frame_size)[-1]\n\n y_ref = util.index_labels(y_ref)[0]\n\n # Map to index space\n y_est = util.intervals_to_samples(estimated_intervals,\n estimated_labels,\n sample_size=frame_size)[-1]\n\n y_est = util.index_labels(y_est)[0]\n\n # Build the reference label agreement matrix\n agree_ref = np.equal.outer(y_ref, y_ref)\n\n # Repeat for estimate\n agree_est = np.equal.outer(y_est, y_est)\n\n # Find where they agree\n matches_pos = np.logical_and(agree_ref, agree_est)\n\n # Find where they disagree\n matches_neg = np.logical_and(~agree_ref, ~agree_est)\n\n n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0\n\n n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0\n n_matches_neg = matches_neg.sum() / 2.0\n rand = (n_matches_pos + n_matches_neg) / n_pairs\n\n return rand\n\n\ndef _contingency_matrix(reference_indices, estimated_indices):\n \"\"\"Computes the contingency matrix of a true labeling vs an estimated one.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n contingency_matrix : np.ndarray\n Contingency matrix, shape=(#reference indices, #estimated indices)\n .. note:: Based on sklearn.metrics.cluster.contingency_matrix\n\n \"\"\"\n ref_classes, ref_class_idx = np.unique(reference_indices,\n return_inverse=True)\n est_classes, est_class_idx = np.unique(estimated_indices,\n return_inverse=True)\n n_ref_classes = ref_classes.shape[0]\n n_est_classes = est_classes.shape[0]\n # Using coo_matrix is faster than histogram2d\n return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),\n (ref_class_idx, est_class_idx)),\n shape=(n_ref_classes, n_est_classes),\n dtype=int).toarray()\n\n\ndef _adjusted_rand_index(reference_indices, estimated_indices):\n \"\"\"Compute the Rand index, adjusted for chance.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n ari : float\n Adjusted Rand index\n\n .. 
note:: Based on sklearn.metrics.cluster.adjusted_rand_score\n\n \"\"\"\n n_samples = len(reference_indices)\n ref_classes = np.unique(reference_indices)\n est_classes = np.unique(estimated_indices)\n # Special limit cases: no clustering since the data is not split;\n # or trivial clustering where each document is assigned a unique cluster.\n # These are perfect matches hence return 1.0.\n if (ref_classes.shape[0] == est_classes.shape[0] == 1 or\n ref_classes.shape[0] == est_classes.shape[0] == 0 or\n (ref_classes.shape[0] == est_classes.shape[0] ==\n len(reference_indices))):\n return 1.0\n\n contingency = _contingency_matrix(reference_indices, estimated_indices)\n\n # Compute the ARI using the contingency data\n sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1) for n_c in\n contingency.sum(axis=1))\n sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1) for n_k in\n contingency.sum(axis=0))\n\n sum_comb = sum((scipy.special.comb(n_ij, 2, exact=1) for n_ij in\n contingency.flatten()))\n prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.special.comb(n_samples, 2))\n mean_comb = (sum_comb_k + sum_comb_c)/2.\n return ((sum_comb - prod_comb)/(mean_comb - prod_comb))\n\n\ndef ari(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels,\n frame_size=0.1):\n \"\"\"Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> ari_score = mir_eval.structure.ari(ref_intervals, ref_labels,\n ... est_intervals, est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n\n Returns\n -------\n ari_score : float > 0\n Adjusted Rand index between segmentations.\n\n \"\"\"\n validate_structure(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels)\n\n # Check for empty annotations. 
Don't need to check labels because\n # validate_structure makes sure they're the same size as intervals\n if reference_intervals.size == 0 or estimated_intervals.size == 0:\n return 0.\n\n # Generate the cluster labels\n y_ref = util.intervals_to_samples(reference_intervals,\n reference_labels,\n sample_size=frame_size)[-1]\n\n y_ref = util.index_labels(y_ref)[0]\n\n # Map to index space\n y_est = util.intervals_to_samples(estimated_intervals,\n estimated_labels,\n sample_size=frame_size)[-1]\n\n y_est = util.index_labels(y_est)[0]\n\n return _adjusted_rand_index(y_ref, y_est)\n\n\ndef _mutual_info_score(reference_indices, estimated_indices, contingency=None):\n \"\"\"Compute the mutual information between two sequence labelings.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n estimated_indices : np.ndarray\n Array of estimated indices\n contingency : np.ndarray\n Pre-computed contingency matrix. If None, one will be computed.\n (Default value = None)\n\n Returns\n -------\n mi : float\n Mutual information\n\n .. note:: Based on sklearn.metrics.cluster.mutual_info_score\n\n \"\"\"\n if contingency is None:\n contingency = _contingency_matrix(reference_indices,\n estimated_indices).astype(float)\n contingency_sum = np.sum(contingency)\n pi = np.sum(contingency, axis=1)\n pj = np.sum(contingency, axis=0)\n outer = np.outer(pi, pj)\n nnz = contingency != 0.0\n # normalized contingency\n contingency_nm = contingency[nnz]\n log_contingency_nm = np.log(contingency_nm)\n contingency_nm /= contingency_sum\n # log(a / b) should be calculated as log(a) - log(b) to avoid\n # possible loss of precision\n log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())\n mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +\n contingency_nm * log_outer)\n return mi.sum()\n\n\ndef _entropy(labels):\n \"\"\"Calculates the entropy for a labeling.\n\n Parameters\n ----------\n labels : list-like\n List of labels.\n\n Returns\n -------\n entropy : float\n Entropy of the labeling.\n\n .. note:: Based on sklearn.metrics.cluster.entropy\n\n \"\"\"\n if len(labels) == 0:\n return 1.0\n label_idx = np.unique(labels, return_inverse=True)[1]\n pi = np.bincount(label_idx).astype(float)\n pi = pi[pi > 0]\n pi_sum = np.sum(pi)\n # log(a / b) should be calculated as log(a) - log(b) to avoid\n # possible loss of precision\n return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))\n\n\ndef _adjusted_mutual_info_score(reference_indices, estimated_indices):\n \"\"\"Compute the mutual information between two sequence labelings, adjusted for\n chance.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n ami : float <= 1.0\n Adjusted mutual information\n\n .. 
note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score\n and sklearn.metrics.cluster.expected_mutual_info_score\n\n \"\"\"\n n_samples = len(reference_indices)\n ref_classes = np.unique(reference_indices)\n est_classes = np.unique(estimated_indices)\n # Special limit cases: no clustering since the data is not split.\n # This is a perfect match hence return 1.0.\n if (ref_classes.shape[0] == est_classes.shape[0] == 1 or\n ref_classes.shape[0] == est_classes.shape[0] == 0):\n return 1.0\n contingency = _contingency_matrix(reference_indices,\n estimated_indices).astype(float)\n # Calculate the MI for the two clusterings\n mi = _mutual_info_score(reference_indices, estimated_indices,\n contingency=contingency)\n # The following code is based on\n # sklearn.metrics.cluster.expected_mutual_information\n R, C = contingency.shape\n N = float(n_samples)\n a = np.sum(contingency, axis=1).astype(np.int32)\n b = np.sum(contingency, axis=0).astype(np.int32)\n # There are three major terms to the EMI equation, which are multiplied to\n # and then summed over varying nij values.\n # While nijs[0] will never be used, having it simplifies the indexing.\n nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float')\n # Stops divide by zero warnings. As its not used, no issue.\n nijs[0] = 1\n # term1 is nij / N\n term1 = nijs / N\n # term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b)\n # term2 uses the outer product\n log_ab_outer = np.log(np.outer(a, b))\n # term2 uses N * nij\n log_Nnij = np.log(N * nijs)\n # term3 is large, and involved many factorials. Calculate these in log\n # space to stop overflows.\n gln_a = scipy.special.gammaln(a + 1)\n gln_b = scipy.special.gammaln(b + 1)\n gln_Na = scipy.special.gammaln(N - a + 1)\n gln_Nb = scipy.special.gammaln(N - b + 1)\n gln_N = scipy.special.gammaln(N + 1)\n gln_nij = scipy.special.gammaln(nijs + 1)\n # start and end values for nij terms for each summation.\n start = np.array([[v - N + w for w in b] for v in a], dtype='int')\n start = np.maximum(start, 1)\n end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1\n # emi itself is a summation over the various values.\n emi = 0\n for i in range(R):\n for j in range(C):\n for nij in range(start[i, j], end[i, j]):\n term2 = log_Nnij[nij] - log_ab_outer[i, j]\n # Numerators are positive, denominators are negative.\n gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j] -\n gln_N - gln_nij[nij] -\n scipy.special.gammaln(a[i] - nij + 1) -\n scipy.special.gammaln(b[j] - nij + 1) -\n scipy.special.gammaln(N - a[i] - b[j] + nij + 1))\n term3 = np.exp(gln)\n emi += (term1[nij] * term2 * term3)\n # Calculate entropy for each labeling\n h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)\n ami = (mi - emi) / (max(h_true, h_pred) - emi)\n return ami\n\n\ndef _normalized_mutual_info_score(reference_indices, estimated_indices):\n \"\"\"Compute the mutual information between two sequence labelings, adjusted for\n chance.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n nmi : float <= 1.0\n Normalized mutual information\n\n .. 
note:: Based on sklearn.metrics.cluster.normalized_mutual_info_score\n\n \"\"\"\n ref_classes = np.unique(reference_indices)\n est_classes = np.unique(estimated_indices)\n # Special limit cases: no clustering since the data is not split.\n # This is a perfect match hence return 1.0.\n if (ref_classes.shape[0] == est_classes.shape[0] == 1 or\n ref_classes.shape[0] == est_classes.shape[0] == 0):\n return 1.0\n contingency = _contingency_matrix(reference_indices,\n estimated_indices).astype(float)\n contingency = np.array(contingency, dtype='float')\n # Calculate the MI for the two clusterings\n mi = _mutual_info_score(reference_indices, estimated_indices,\n contingency=contingency)\n # Calculate entropy for each labeling\n h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)\n nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)\n return nmi\n\n\ndef mutual_information(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels,\n frame_size=0.1):\n \"\"\"Frame-clustering segmentation: mutual information metrics.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> mi, ami, nmi = mir_eval.structure.mutual_information(ref_intervals,\n ... ref_labels,\n ... est_intervals,\n ... est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n\n Returns\n -------\n MI : float > 0\n Mutual information between segmentations\n AMI : float\n Adjusted mutual information between segmentations.\n NMI : float > 0\n Normalized mutual information between segmentations\n\n \"\"\"\n validate_structure(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels)\n\n # Check for empty annotations. 
Don't need to check labels because\n # validate_structure makes sure they're the same size as intervals\n if reference_intervals.size == 0 or estimated_intervals.size == 0:\n return 0., 0., 0.\n\n # Generate the cluster labels\n y_ref = util.intervals_to_samples(reference_intervals,\n reference_labels,\n sample_size=frame_size)[-1]\n\n y_ref = util.index_labels(y_ref)[0]\n\n # Map to index space\n y_est = util.intervals_to_samples(estimated_intervals,\n estimated_labels,\n sample_size=frame_size)[-1]\n\n y_est = util.index_labels(y_est)[0]\n\n # Mutual information\n mutual_info = _mutual_info_score(y_ref, y_est)\n\n # Adjusted mutual information\n adj_mutual_info = _adjusted_mutual_info_score(y_ref, y_est)\n\n # Normalized mutual information\n norm_mutual_info = _normalized_mutual_info_score(y_ref, y_est)\n\n return mutual_info, adj_mutual_info, norm_mutual_info\n\n\ndef nce(reference_intervals, reference_labels, estimated_intervals,\n estimated_labels, frame_size=0.1, beta=1.0):\n \"\"\"Frame-clustering segmentation: normalized conditional entropy\n\n Computes cross-entropy of cluster assignment, normalized by the\n max-entropy.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> S_over, S_under, S_F = mir_eval.structure.nce(ref_intervals,\n ... ref_labels,\n ... est_intervals,\n ... est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n beta : float > 0\n beta for F-measure\n (Default value = 1.0)\n\n Returns\n -------\n S_over\n Over-clustering score:\n ``1 - H(y_est | y_ref) / log(|y_est|)``\n If `|y_est|==1`, then `S_over` will be 0.\n S_under\n Under-clustering score:\n ``1 - H(y_ref | y_est) / log(|y_ref|)``\n If `|y_ref|==1`, then `S_under` will be 0.\n S_F\n F-measure for (S_over, S_under)\n\n \"\"\"\n\n validate_structure(reference_intervals, reference_labels,\n estimated_intervals, estimated_labels)\n\n # Check for empty annotations. 
Don't need to check labels because\n # validate_structure makes sure they're the same size as intervals\n if reference_intervals.size == 0 or estimated_intervals.size == 0:\n return 0., 0., 0.\n\n # Generate the cluster labels\n y_ref = util.intervals_to_samples(reference_intervals,\n reference_labels,\n sample_size=frame_size)[-1]\n\n y_ref = util.index_labels(y_ref)[0]\n\n # Map to index space\n y_est = util.intervals_to_samples(estimated_intervals,\n estimated_labels,\n sample_size=frame_size)[-1]\n\n y_est = util.index_labels(y_est)[0]\n\n # Make the contingency table: shape = (n_ref, n_est)\n contingency = _contingency_matrix(y_ref, y_est).astype(float)\n\n # Normalize by the number of frames\n contingency = contingency / len(y_ref)\n\n # Compute the marginals\n p_est = contingency.sum(axis=0)\n p_ref = contingency.sum(axis=1)\n\n # H(true | prediction) = sum_j P[estimated = j] *\n # sum_i P[true = i | estimated = j] log P[true = i | estimated = j]\n # entropy sums over axis=0, which is true labels\n\n # The following scipy.stats.entropy calls are equivalent to\n # scipy.stats.entropy(contingency, base=2)\n # However the `base` kwarg has only been introduced in scipy 0.14.0\n true_given_est = p_est.dot(scipy.stats.entropy(contingency) / np.log(2))\n pred_given_ref = p_ref.dot(scipy.stats.entropy(contingency.T) / np.log(2))\n\n score_under = 0.0\n if contingency.shape[0] > 1:\n score_under = 1. - true_given_est / np.log2(contingency.shape[0])\n\n score_over = 0.0\n if contingency.shape[1] > 1:\n score_over = 1. - pred_given_ref / np.log2(contingency.shape[1])\n\n f_measure = util.f_measure(score_over, score_under, beta=beta)\n\n return score_over, score_under, f_measure\n\n\ndef evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):\n \"\"\"Compute all metrics for the given reference and estimated annotations.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> scores = mir_eval.segment.evaluate(ref_intervals, ref_labels,\n ... 
est_intervals, est_labels)\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n ref_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n est_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n est_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n \"\"\"\n\n # Adjust timespan of estimations relative to ground truth\n ref_intervals, ref_labels = \\\n util.adjust_intervals(ref_intervals, labels=ref_labels, t_min=0.0)\n\n est_intervals, est_labels = \\\n util.adjust_intervals(est_intervals, labels=est_labels, t_min=0.0,\n t_max=ref_intervals.max())\n\n # Now compute all the metrics\n scores = collections.OrderedDict()\n\n # Boundary detection\n # Force these values for window\n kwargs['window'] = .5\n scores['Precision@0.5'], scores['Recall@0.5'], scores['F-measure@0.5'] = \\\n util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)\n\n kwargs['window'] = 3.0\n scores['Precision@3.0'], scores['Recall@3.0'], scores['F-measure@3.0'] = \\\n util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)\n\n # Boundary deviation\n scores['Ref-to-est deviation'], scores['Est-to-ref deviation'] = \\\n util.filter_kwargs(deviation, ref_intervals, est_intervals, **kwargs)\n\n # Pairwise clustering\n (scores['Pairwise Precision'],\n scores['Pairwise Recall'],\n scores['Pairwise F-measure']) = util.filter_kwargs(pairwise,\n ref_intervals,\n ref_labels,\n est_intervals,\n est_labels, **kwargs)\n\n # Rand index\n scores['Rand Index'] = util.filter_kwargs(rand_index, ref_intervals,\n ref_labels, est_intervals,\n est_labels, **kwargs)\n # Adjusted rand index\n scores['Adjusted Rand Index'] = util.filter_kwargs(ari, ref_intervals,\n ref_labels,\n est_intervals,\n est_labels, **kwargs)\n\n # Mutual information metrics\n (scores['Mutual Information'],\n scores['Adjusted Mutual Information'],\n scores['Normalized Mutual Information']) = \\\n util.filter_kwargs(mutual_information, ref_intervals, ref_labels,\n est_intervals, est_labels, **kwargs)\n\n # Conditional entropy metrics\n scores['NCE Over'], scores['NCE Under'], scores['NCE F-measure'] = \\\n util.filter_kwargs(nce, ref_intervals, ref_labels, est_intervals,\n est_labels, **kwargs)\n\n return scores\n", "#***********************************************************************#\n# Copyright (C) 2010-2012 Tomas Tinoco De Rubira #\n# #\n# This file is part of CVXPY # \n# #\n# CVXPY is free software: you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation, either version 3 of the License, or # \n# (at your option) any later version. # \n# #\n# CVXPY is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. 
#\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program. If not, see <http://www.gnu.org/licenses/>. #\n#***********************************************************************#\n\nimport numpy as np\nfrom ..defs import *\nfrom ..utils import *\nfrom ..sets import *\nfrom ..interface import *\nfrom ..arrays import cvxpy_array\nfrom ..arrays import cvxpy_matrix\n\n# log_sum_exp\ndef log_sum_exp(x):\n \"\"\"\n | :math:`\\mbox{log\\_sum\\_exp} :\n \\mathbb{R}^n \\\\to \\mathbb{R},\n \\ \\mbox{log\\_sum\\_exp}(x) = \n \\mbox{log} \\\\big( \\sum_{i = 1}^n e^{x_i} \\\\big)`.\n | Convex and increasing.\n\n :param x: number, \n :ref:`scalar object<scalar_ref>` or\n :ref:`multidimensional object<multi_ref>`.\n :return: number or \n :ref:`scalar object<scalar_ref>`.\n \"\"\"\n\n # Check input\n if (np.isscalar(x) or \n type(x).__name__ in SCALAR_OBJS):\n return x\n elif (type(x) is not cvxpy_matrix and\n type(x).__name__ not in ARRAY_OBJS):\n raise TypeError('Invalid argument type')\n\n # Must be column\n if x.shape[1] != 1:\n raise ValueError('Invalid argument dimensions')\n\n # Single element\n if x.shape == (1,1):\n return x[0,0]\n\n # Construct program\n t = variable()\n z = variable(x.shape[0],1)\n v = variable(x.shape[0],1)\n w = variable(x.shape[0],1)\n constr = [equals(sum(w),1)]\n for i in range(0,x.shape[0],1):\n constr += [belongs(vstack((v[i,0]-t,1.,w[i,0])),\n exp_cone),\n less_equals(z[i,0],v[i,0])]\n p = program(minimize(t),\n constr,\n [z], \n name='log_sum_exp')\n\n # Return \n return p(x)\n" ]
[ [ "torch.load" ], [ "numpy.subtract.outer", "numpy.sum", "numpy.sqrt", "numpy.log2", "numpy.bincount", "numpy.ones", "numpy.maximum", "numpy.equal.outer", "numpy.logical_and", "numpy.exp", "numpy.log", "numpy.max", "numpy.array", "numpy.outer", "numpy.unique", "numpy.resize" ], [ "numpy.isscalar" ] ]
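The mir_eval code in the record above scores boundary detection by matching boundary times under a tolerance window. Below is a minimal numpy-only sketch of that idea, for illustration: to_boundaries is a hypothetical stand-in for util.intervals_to_boundaries, and the greedy nearest-neighbour count simplifies the maximal one-to-one matching that util.match_events computes, so it can overcount when several reference boundaries fall near a single estimate.

import numpy as np

def to_boundaries(intervals):
    # Hypothetical helper: flatten an (n, 2) interval array into
    # sorted unique boundary times.
    return np.unique(np.asarray(intervals).ravel())

ref_b = to_boundaries([[0.0, 10.0], [10.0, 20.0]])  # [0., 10., 20.]
est_b = to_boundaries([[0.0, 10.2], [10.2, 20.0]])  # [0., 10.2, 20.]

window = 0.5
# Count reference boundaries that have some estimate within the window.
hits = sum(np.abs(est_b - b).min() <= window for b in ref_b)
precision = hits / len(est_b)  # 1.0 on this toy input
recall = hits / len(ref_b)     # 1.0 on this toy input
f_measure = 2 * precision * recall / (precision + recall)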
iksteen/pyxclib
[ "2948162dd780f8230a785abfd2ee57e8ab5cc156" ]
[ "xclib/classifier/_svm.py" ]
[ "from sklearn.svm import LinearSVC\nimport numpy as np\n\n\ndef apply_threshold(data, threshold):\n data[np.where(np.abs(data) < threshold)] = 0\n\ndef train_one(data, loss, C, verbose, max_iter, threshold, dual, tol):\n def _get_features(obj):\n # Index samples only if they are required\n # Helpful in reducing memory footprint\n if obj['ind'] is None:\n return obj['data']\n else:\n return obj['data'].take(obj['ind'], axis=0)\n X, y = _get_features(data), data['Y']\n clf = LinearSVC(tol=tol,\n loss=loss,\n dual=dual,\n C=C,\n multi_class='ovr',\n fit_intercept=True,\n intercept_scaling=1,\n class_weight=None,\n verbose=verbose,\n random_state=0,\n max_iter=max_iter)\n try:\n clf.fit(X, y)\n weight, bias = clf.coef_, clf.intercept_\n except ValueError:\n # TODO Find a solution for this; choosing randomly, maybe?\n weight, bias = np.zeros((1, X.shape[1]), dtype=np.float32), np.zeros(\n (1), dtype=np.float32)\n del clf\n apply_threshold(weight, threshold)\n return weight, bias\n " ]
[ [ "sklearn.svm.LinearSVC", "numpy.abs", "numpy.zeros" ] ]
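A hedged usage sketch for the train_one routine in the record above. The data dict layout ('data', 'ind', 'Y') follows the _get_features accessor in that file; the import path is assumed from the listed file_path, and all hyperparameter values and shapes here are illustrative assumptions rather than the library's documented defaults.

import numpy as np
from xclib.classifier._svm import train_one  # path assumed from the record's file_path

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 32)).astype(np.float32)  # dense feature matrix
Y = (rng.random((200, 4)) > 0.7).astype(np.int32)      # 4 binary label columns

# One-vs-rest: fit one linear SVM per label column, pruning near-zero weights.
weights, biases = [], []
for l in range(Y.shape[1]):
    data = {'data': X, 'ind': None, 'Y': Y[:, l]}  # 'ind' is None -> use all rows
    w, b = train_one(data, loss='squared_hinge', C=1.0, verbose=0,
                     max_iter=100, threshold=0.01, dual=True, tol=1e-4)
    weights.append(w)
    biases.append(b)

W = np.vstack(weights)  # (n_labels, n_features), sparsified by apply_threshold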
isi-vista/adam
[ "91f392f2529a98cd50c095a18769ae4b55ce4292" ]
[ "adam/learner/semantics_utils.py" ]
[ "from typing import Optional, Any, Dict\n\nimport numpy as np\nimport pandas as pd\nfrom more_itertools import first\nfrom networkx import Graph, to_numpy_matrix\nimport matplotlib.pyplot as plt\nimport seaborn as sb\n\nfrom adam.semantics import Concept, KindConcept, ObjectConcept, ActionConcept\n\n\nclass SemanticsManager:\n def __init__(self, semantics_graph: Graph) -> None:\n self.semantics_graph: Graph = Graph()\n # Create a new type of edge for each edge in the original semantics graph\n # If any of the nodes is an action concept, we want to make a distinct new node to track syntax\n for u, v, data in semantics_graph.edges(data=True):\n syntactic_position = data[\"slot\"]\n new_u = (\n self.concept_as_str_node(u, syntactic_position)\n if isinstance(u, ActionConcept)\n else self.concept_as_str_node(u)\n )\n new_v = (\n self.concept_as_str_node(v, syntactic_position)\n if isinstance(v, ActionConcept)\n else self.concept_as_str_node(v)\n )\n self.semantics_graph.add_edge(new_u, new_v, weight=data[\"weight\"])\n\n self.nodes = list(self.semantics_graph.nodes)\n self.semantics_matrix = to_numpy_matrix(self.semantics_graph)\n\n def object_concept_embedding(self, concept: str) -> Any:\n # Get a numpy array weighted adjacency embedding of the concept from the graph\n return self.semantics_matrix[self.nodes.index(concept)]\n\n def kind_concept_embedding(self, concept: str) -> Any:\n # Get a numpy array weighted adjacency embedding averaging the members of a kind concept in the graph\n member_embeddings = np.vstack(\n [\n self.object_concept_embedding(member)\n for member in self.semantics_graph.neighbors(concept)\n ]\n )\n return np.mean(member_embeddings, axis=0)\n\n def evaluate_kind_membership(self, word: str, kind: str) -> float:\n word_node = self.concept_as_str_node(ObjectConcept(word))\n kind_node = self.concept_as_str_node(KindConcept(kind))\n if kind_node not in self.nodes or word_node not in self.nodes:\n return 0\n return cos_sim(\n self.object_concept_embedding(word_node),\n self.kind_concept_embedding(kind_node),\n )\n\n @staticmethod\n def concept_as_str_node(concept: Concept, syntactic_position=\"\") -> str:\n if syntactic_position:\n return f\"{concept.debug_string}_{str(type(concept))}_{syntactic_position}\"\n else:\n return f\"{concept.debug_string}_{str(type(concept))}\"\n\n\ndef get_concept_node_from_graph(\n identifier: str, semantics_graph: Graph\n) -> Optional[Concept]:\n return first([n for n in semantics_graph.nodes if n.debug_string == identifier], None)\n\n\ndef cos_sim(a, b) -> float:\n dot = np.dot(a.reshape(1, -1), b.reshape(-1, 1))\n norma = np.linalg.norm(a.reshape(1, -1))\n normb = np.linalg.norm(b.reshape(1, -1))\n return dot / (norma * normb)\n\n\ndef generate_heatmap(nodes_to_embeddings: Dict[Concept, Any], filename: str):\n if not nodes_to_embeddings:\n return\n similarity_matrix = np.zeros((len(nodes_to_embeddings), len(nodes_to_embeddings)))\n for i, (_, embedding_1) in enumerate(nodes_to_embeddings.items()):\n for j, (_, embedding_2) in enumerate(nodes_to_embeddings.items()):\n similarity_matrix[i][j] = cos_sim(embedding_1, embedding_2)\n names = [n.debug_string for n in nodes_to_embeddings.keys()]\n df = pd.DataFrame(data=similarity_matrix, index=names, columns=names)\n plt.rcParams[\"figure.figsize\"] = (20.0, 20.0)\n plt.rcParams[\"font.family\"] = \"serif\"\n sb.clustermap(df, row_cluster=True, col_cluster=True)\n plt.savefig(f\"plots/{filename}.png\")\n plt.close()\n" ]
[ [ "numpy.mean", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.close" ] ]
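A small numeric check of the cos_sim helper in the record above: it reshapes its arguments into a row and a column vector, so np.dot yields a 1x1 array and the quotient is the usual cosine similarity. The snippet inlines the same arithmetic on two toy vectors rather than importing the module.

import numpy as np

# a.b = 1, |a| = |b| = sqrt(2), so cos(a, b) = 1 / 2 = 0.5
a = np.array([1.0, 0.0, 1.0])
b = np.array([1.0, 1.0, 0.0])

dot = np.dot(a.reshape(1, -1), b.reshape(-1, 1))  # 1x1 array, as in cos_sim
sim = dot / (np.linalg.norm(a) * np.linalg.norm(b))
print(float(sim))  # 0.5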
alexlee-gk/Theano
[ "e4e08782d3a10d010d3a99bc87fd0fc3b0465405" ]
[ "theano/gpuarray/tests/test_dnn.py" ]
[ "from __future__ import absolute_import, print_function, division\nimport logging\n\nfrom nose.plugins.skip import SkipTest\nfrom nose_parameterized import parameterized\nimport numpy\nfrom itertools import product, chain\n\nimport theano\nfrom six import StringIO\nimport theano.tensor as T\nimport theano.tests.unittest_tools as utt\nfrom theano.sandbox.neighbours import images2neibs\nfrom theano.tensor.signal.pool import pool_2d\nfrom theano.tensor.signal.pool import MaxPoolGrad, AveragePoolGrad\n\nfrom .. import dnn\nfrom ..basic_ops import GpuAllocEmpty\n\nfrom .config import mode_with_gpu, mode_without_gpu, test_ctx_name\nfrom . import test_nnet\n\nfrom theano.configdefaults import SUPPORTED_DNN_CONV_ALGO_FWD\n\n\ndef test_dnn_conv_desc_merge():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n kern_shp = T.as_tensor_variable(\n numpy.asarray([3, 1, 2, 2]).astype('int64'))\n desc1 = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(2, 2),\n conv_mode='conv')(kern_shp)\n desc2 = dnn.GpuDnnConvDesc(border_mode='full', subsample=(1, 1),\n conv_mode='cross')(kern_shp)\n # CDataType is not DeepCopyable so this will crash if we don't use\n # borrow=True\n f = theano.function([], [theano.Out(desc1, borrow=True),\n theano.Out(desc2, borrow=True)])\n\n d1, d2 = f()\n\n # This will be the case if they are merged, which would be bad.\n assert d1 != d2\n\n\ndef test_dnn_conv_merge():\n # This test that we merge correctly multiple dnn_conv.\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img_shp = [2, 5, 6, 8]\n kern_shp = [3, 5, 5, 6]\n img = T.ftensor4('img')\n kern = T.ftensor4('kern')\n out = T.ftensor4('out')\n desc = dnn.GpuDnnConvDesc(\n border_mode='valid')(kern.shape)\n\n # Test forward op\n o1 = dnn.dnn_conv(img, kern)\n o2 = dnn.dnn_conv(img, kern)\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),\n numpy.random.rand(*kern_shp).astype('float32'))\n topo = f.maker.fgraph.toposort()\n assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]) == 1\n\n # Test grad w op\n o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc)\n o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc)\n f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]) == 1\n\n # Test grad i op\n o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc)\n o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc)\n f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]) == 1\n\n\ndef test_dnn_conv_inplace():\n \"\"\"This test that we have inplace work correctly even when\n GpuAllocEmpty get merged together.\n\n \"\"\"\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img_shp = [2, 5, 6, 8]\n kern_shp = [3, 5, 5, 6]\n img = T.ftensor4('img')\n kern = T.ftensor4('kern')\n out = T.ftensor4('out')\n desc1 = dnn.GpuDnnConvDesc(border_mode='valid', conv_mode='conv')(\n kern.shape)\n desc2 = dnn.GpuDnnConvDesc(\n border_mode='valid', conv_mode='cross')(kern.shape)\n\n # Test forward op\n o1 = dnn.dnn_conv(img, kern, conv_mode='conv')\n o2 = dnn.dnn_conv(img, kern, conv_mode='cross')\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),\n 
numpy.random.rand(*kern_shp).astype('float32'))\n topo = f.maker.fgraph.toposort()\n convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]\n assert len(convs) == 2\n assert all([node.op.inplace for node in convs])\n assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2\n\n # Test grad w op\n out = GpuAllocEmpty(kern.dtype, test_ctx_name)(*kern.shape)\n o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc1)\n o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc2)\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]\n assert len(convs) == 2\n assert all([node.op.inplace for node in convs])\n assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2\n\n # Test grad i op\n out = GpuAllocEmpty(img.dtype, test_ctx_name)(*img.shape)\n o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc1)\n o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc2)\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]\n assert len(convs) == 2\n assert all([node.op.inplace for node in convs])\n assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2\n\n\ndef pool_2d_i2n(input, ds=(2, 2), strides=None,\n pad=(0, 0),\n pool_function=T.max, mode='ignore_borders'):\n if strides is None:\n strides = ds\n\n if strides[0] > ds[0] or strides[1] > ds[1]:\n raise RuntimeError(\n \"strides should be smaller than or equal to ds,\"\n \" strides=(%d, %d) and ds=(%d, %d)\" %\n (strides + ds))\n shape = input.shape\n if pad != (0, 0):\n assert pool_function is T.max\n pad_x = pad[0]\n pad_y = pad[1]\n a = T.alloc(-numpy.inf, shape[0], shape[1], shape[2] + pad_x * 2,\n shape[3] + pad_y * 2)\n input = T.set_subtensor(a[:, :,\n pad_x:pad_x + shape[2],\n pad_y:pad_y + shape[3]],\n input)\n shape = input.shape\n\n neibs = images2neibs(input, ds, strides, mode=mode)\n pooled_neibs = pool_function(neibs, axis=1)\n\n output_width = (shape[2] - ds[0]) // strides[0] + 1\n output_height = (shape[3] - ds[1]) // strides[1] + 1\n\n pooled_output = pooled_neibs.reshape((shape[0], shape[1],\n output_width, output_height))\n return pooled_output\n\n\ndef test_pooling():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n\n # 'average_exc_pad' is disabled for versions < 4004\n if dnn.version(raises=False) < 4004:\n modes = ('max', 'average_inc_pad')\n else:\n modes = ('max', 'average_inc_pad', 'average_exc_pad')\n\n x = T.ftensor4()\n for mode, pad in product(modes,\n ((0, 0), (1, 0), (1, 0), (2, 3), (3, 2))):\n if mode == 'max':\n func = T.max\n else:\n func = T.mean\n\n if pad != (0, 0) and func is T.mean:\n continue\n\n for ws in (4, 2, 5):\n for stride in (2, 3):\n if stride > ws:\n continue\n if pad[0] > stride or pad[1] > stride:\n # Not implemented\n continue\n # We will check that the opt introduced it.\n out1 = pool_2d(x, (ws, ws),\n st=(stride, stride),\n ignore_border=True,\n padding=pad, mode=mode)\n out2 = pool_2d_i2n(x, ds=(ws, ws), strides=(stride, stride),\n pad=pad,\n pool_function=func)\n mode_without_gpu2 = mode_without_gpu.including()\n mode_without_gpu2.check_isfinite = False\n f1 = theano.function([x], out1, mode=mode_with_gpu)\n assert any([isinstance(node.op, dnn.GpuDnnPool)\n for node in f1.maker.fgraph.apply_nodes])\n f2 = theano.function([x], out2, mode=mode_without_gpu2)\n assert not any([isinstance(node.op, dnn.GpuDnnPool)\n for node 
in f2.maker.fgraph.apply_nodes])\n for shp in [(1, 10, 100, 100),\n (1, 3, 99, 99),\n (32, 1, 147, 197),\n ]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\")\n a = f1(data)\n b = f2(data)\n\n utt.assert_allclose(a, b)\n\n # Test the grad\n for shp in [(1, 1, 2, 2),\n (1, 1, 3, 3)]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\") * 10\n\n ws = 2\n stride = 2\n if pad[0] > stride or pad[1] > stride:\n # Not implemented\n continue\n\n # This test the CPU grad + opt + GPU implemtentation\n def fn(x):\n return pool_2d(x, (ws, ws), ignore_border=True,\n padding=pad, mode=mode)\n utt.verify_grad(fn, [data],\n cast_to_output_type=False,\n mode=mode_with_gpu)\n # Confirm that the opt would have inserted it.\n fg = theano.function([x], theano.grad(fn(x).sum(), x),\n mode=mode_with_gpu)\n assert any([isinstance(node.op, dnn.GpuDnnPoolGrad)\n for node in fg.maker.fgraph.toposort()])\n\n # Test the GPU grad + GPU implementation\n def fn(x):\n dnn_op = dnn.dnn_pool(\n x, ws=(ws, ws),\n stride=(stride, stride),\n pad=pad,\n mode=mode)\n return dnn_op\n utt.verify_grad(fn, [data],\n cast_to_output_type=False,\n mode=mode_with_gpu)\n # Confirm that we get the good op.\n fg = theano.function([x], theano.grad(fn(x).sum(), x),\n mode=mode_with_gpu)\n assert any([isinstance(node.op, dnn.GpuDnnPoolGrad)\n for node in fg.maker.fgraph.toposort()])\n g_out = fg(data)\n\n # Compare against the CPU result\n out = pool_2d(x, (ws, ws),\n padding=pad,\n ignore_border=True, mode=mode)\n fc = theano.function([x], theano.grad(out.sum(), x),\n mode=mode_without_gpu)\n if mode == 'max':\n assert any([isinstance(node.op, MaxPoolGrad)\n for node in fc.maker.fgraph.toposort()])\n else:\n assert any([isinstance(node.op, AveragePoolGrad)\n for node in fc.maker.fgraph.toposort()])\n c_out = fc(data)\n utt.assert_allclose(c_out, g_out)\n\n\ndef test_pooling_with_tensor_vars():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n x = T.ftensor4()\n ws = theano.shared(numpy.array([2, 2], dtype='int32'))\n st = theano.shared(numpy.array([1, 1], dtype='int32'))\n pad = theano.shared(numpy.array([0, 0], dtype='int32'))\n mode = 'max'\n\n def fn(x):\n dnn_op = dnn.dnn_pool(x,\n ws=ws,\n stride=st,\n pad=pad,\n mode=mode)\n return dnn_op\n\n for shp in [(1, 1, 2, 2),\n (1, 1, 3, 3)]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\") * 10\n theano.tests.unittest_tools.verify_grad(\n fn, [data],\n cast_to_output_type=False,\n mode=mode_with_gpu)\n\n out2 = pool_2d_i2n(x, ds=(2, 2), strides=(1, 1),\n pad=(0, 0),\n pool_function=T.max)\n\n mode_without_gpu2 = mode_without_gpu.including()\n mode_without_gpu2.check_isfinite = False\n\n f1 = theano.function([x], fn(x), mode=mode_with_gpu)\n assert any([isinstance(node.op, dnn.GpuDnnPool)\n for node in f1.maker.fgraph.apply_nodes])\n f2 = theano.function([x], out2, mode=mode_without_gpu2)\n assert not any([isinstance(node.op, dnn.GpuDnnPool)\n for node in f2.maker.fgraph.apply_nodes])\n for shp in [(1, 10, 100, 100),\n (1, 3, 99, 99),\n (32, 1, 147, 197),\n ]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\")\n a = f1(data).__array__()\n\n b = f2(data).__array__()\n utt.assert_allclose(a, b)\n\n\ndef test_pooling_opt():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n\n x = T.fmatrix()\n\n f = theano.function(\n [x],\n pool_2d(x, ds=(2, 2), mode='average_inc_pad',\n ignore_border=True),\n mode=mode_with_gpu)\n\n assert any([isinstance(n.op, dnn.GpuDnnPool)\n for n in 
f.maker.fgraph.toposort()])\n\n f(numpy.zeros((10, 10), dtype='float32'))\n\n f = theano.function(\n [x],\n T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',\n ignore_border=True).sum(),\n x),\n mode=mode_with_gpu.including(\"cudnn\"))\n\n assert any([isinstance(n.op, dnn.GpuDnnPoolGrad)\n for n in f.maker.fgraph.toposort()])\n\n f(numpy.zeros((10, 10), dtype='float32'))\n\n\ndef test_dnn_tag():\n \"\"\"\n Test that if cudnn isn't avail we crash and that if it is avail, we use it.\n \"\"\"\n x = T.ftensor4()\n old = theano.config.on_opt_error\n theano.config.on_opt_error = \"raise\"\n\n sio = StringIO()\n handler = logging.StreamHandler(sio)\n logging.getLogger('theano.compile.tests.test_dnn').addHandler(handler)\n # Silence original handler when intentionnally generating warning messages\n logging.getLogger('theano').removeHandler(theano.logging_default_handler)\n raised = False\n try:\n f = theano.function(\n [x],\n pool_2d(x, ds=(2, 2), ignore_border=True),\n mode=mode_with_gpu.including(\"cudnn\"))\n except (AssertionError, RuntimeError):\n assert not dnn.dnn_available(test_ctx_name)\n raised = True\n finally:\n theano.config.on_opt_error = old\n logging.getLogger(\n 'theano.compile.tests.test_dnn').removeHandler(handler)\n logging.getLogger('theano').addHandler(theano.logging_default_handler)\n\n if not raised:\n assert dnn.dnn_available(test_ctx_name)\n assert any([isinstance(n.op, dnn.GpuDnnPool)\n for n in f.maker.fgraph.toposort()])\n\n\nclass TestDnnInferShapes(utt.InferShapeTester):\n\n border_modes = ['valid', 'full', 'half']\n conv_modes = ['conv', 'cross']\n\n def setUp(self):\n super(TestDnnInferShapes, self).setUp()\n self.mode = mode_with_gpu\n\n def test_softmax(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n t = T.ftensor4('t')\n rand_tensor = numpy.asarray(\n numpy.random.rand(5, 4, 3, 2),\n dtype='float32'\n )\n self._compile_and_check(\n [t],\n [dnn.GpuDnnSoftmax('accurate', 'channel')(t)],\n [rand_tensor],\n dnn.GpuDnnSoftmax\n )\n\n self._compile_and_check(\n [t],\n [\n T.grad(\n dnn.GpuDnnSoftmax(\n 'accurate',\n 'channel'\n )(t).mean(),\n t\n )\n ],\n [rand_tensor],\n dnn.GpuDnnSoftmaxGrad\n )\n\n def _test_conv(self, img, kerns, out, img_val, kern_vals, border_mode, conv_mode, subsamples, algo):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n\n img_val = numpy.asarray(img_val, dtype='float32')\n kern_vals = numpy.asarray(kern_vals, dtype='float32')\n\n for subsample in subsamples:\n out_vals = numpy.zeros(\n dnn.GpuDnnConv.get_out_shape(img_val.shape, kern_vals.shape,\n border_mode=border_mode,\n subsample=subsample),\n dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=border_mode,\n subsample=subsample,\n conv_mode=conv_mode\n )(kerns.shape)\n conv = dnn.GpuDnnConv(algo=algo)(img, kerns, out, desc)\n self._compile_and_check(\n [img, kerns, out],\n [conv],\n [img_val, kern_vals, out_vals],\n dnn.GpuDnnConv\n )\n\n @parameterized.expand(chain(product([SUPPORTED_DNN_CONV_ALGO_FWD[0]],\n border_modes,\n conv_modes),\n product(SUPPORTED_DNN_CONV_ALGO_FWD[1:],\n [border_modes[0]],\n [conv_modes[0]])),\n testcase_func_name=utt.custom_name_func)\n def test_conv(self, algo, border_mode, conv_mode):\n if algo == 'winograd' and dnn.version(raises=False) < 5000:\n raise SkipTest(dnn.dnn_available.msg)\n\n self._test_conv(T.ftensor4('img'),\n T.ftensor4('kerns'),\n T.ftensor4('out'),\n numpy.random.rand(7, 2, 8, 4),\n numpy.random.rand(8, 2, 4, 3),\n border_mode,\n conv_mode,\n [(1, 
1), (2, 2)],\n algo)\n\n @parameterized.expand(product(border_modes, conv_modes), utt.custom_name_func)\n def test_conv3d_none(self, border_mode, conv_mode):\n ftensor5 = T.TensorType(dtype=\"float32\", broadcastable=(False,) * 5)\n self._test_conv(ftensor5('img'),\n ftensor5('kerns'),\n ftensor5('out'),\n numpy.random.rand(10, 2, 6, 4, 11),\n numpy.random.rand(8, 2, 4, 3, 1),\n border_mode,\n conv_mode,\n [(1, 1, 1), (2, 2, 2)],\n 'none')\n\n def _test_conv_gradw(self, img, kerns, out, img_val, kern_vals, border_mode, conv_mode, subsample):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n\n img_val = numpy.asarray(\n img_val,\n dtype='float32'\n )\n kern_vals = numpy.asarray(\n kern_vals,\n dtype='float32'\n )\n\n temp_img = img.dimshuffle(1, 0, 2, 3)\n temp_kerns = kerns\n if conv_mode == 'conv':\n temp_kerns = temp_kerns[:, :, ::-1, ::-1]\n temp_kerns = temp_kerns.dimshuffle(1, 0, 2, 3)\n shape = (\n kern_vals.shape[1], img_val.shape[1],\n img_val.shape[2] - kern_vals.shape[2] + 1,\n img_val.shape[3] - kern_vals.shape[3] + 1\n )\n out_vals = numpy.zeros(shape, dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=border_mode,\n subsample=subsample,\n conv_mode=conv_mode\n )(out.shape)\n conv_grad_w = dnn.GpuDnnConvGradW()(\n temp_img,\n temp_kerns,\n out,\n desc,\n )\n self._compile_and_check(\n [temp_img, temp_kerns, out],\n [conv_grad_w],\n [img_val, kern_vals, out_vals],\n dnn.GpuDnnConvGradW\n )\n\n @parameterized.expand(product(border_modes, conv_modes), utt.custom_name_func)\n def test_conv_gradw(self, border_mode, conv_mode):\n self._test_conv_gradw(T.ftensor4('img'),\n T.ftensor4('kerns'),\n T.ftensor4('out'),\n numpy.random.rand(2, 5, 6, 8),\n numpy.random.rand(2, 1, 5, 6),\n border_mode,\n conv_mode,\n (1, 1))\n\n def test_conv_gradi(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n kerns = T.ftensor4('kerns')\n out = T.ftensor4('out')\n kern_vals = numpy.asarray(\n numpy.random.rand(13, 14, 15, 16),\n dtype='float32'\n )\n out_vals = numpy.asarray(\n numpy.random.rand(3, 13, 5, 6),\n dtype='float32'\n )\n\n for params in product(\n ['valid'], # Should this work for 'full'?\n [(1, 1)],\n ['conv', 'cross']\n ):\n shape = (\n out_vals.shape[0], kern_vals.shape[1],\n out_vals.shape[2] + kern_vals.shape[2] - 1,\n out_vals.shape[3] + kern_vals.shape[3] - 1\n )\n img_vals = numpy.zeros(shape, dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(kerns.shape)\n conv_grad_i = dnn.GpuDnnConvGradI()(\n kerns,\n out,\n img,\n desc,\n )\n self._compile_and_check(\n [kerns, img, out],\n [conv_grad_i],\n [kern_vals, img_vals, out_vals],\n dnn.GpuDnnConvGradI\n )\n\n def test_pool(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n img_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n\n # 'average_exc_pad' is disabled for versions < 4004\n if dnn.version(raises=False) < 4004:\n modes = ['max', 'average_inc_pad']\n else:\n modes = ['max', 'average_inc_pad', 'average_exc_pad']\n\n for params in product(\n [(1, 1), (2, 2), (3, 3)],\n [(1, 1), (2, 2), (3, 3)],\n modes\n ):\n self._compile_and_check(\n [img],\n [dnn.GpuDnnPool(mode=params[2])(img, params[0], params[1], (0, 0))],\n [img_val],\n dnn.GpuDnnPool\n )\n\n def test_pool_grad(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = 
T.ftensor4('img')\n img_grad = T.ftensor4('img_grad')\n out = T.ftensor4('out')\n img_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n img_grad_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n out_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n\n for params in product(\n [(1, 1), (2, 2), (3, 3)],\n [(1, 1), (2, 2), (3, 3)],\n ['max', 'average_inc_pad']\n ):\n pool_grad = dnn.GpuDnnPoolGrad(mode=params[2])(\n img,\n out,\n img_grad,\n params[0],\n params[1],\n (0, 0)\n )\n self._compile_and_check(\n [img, img_grad, out],\n [pool_grad],\n [img_val, img_grad_val, out_val],\n dnn.GpuDnnPoolGrad\n )\n\n\n# this has been a problem in the past\ndef test_dnn_conv_border_mode():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4()\n kern = T.ftensor4()\n\n dnn.dnn_conv(img, kern, border_mode=1)\n dnn.dnn_conv(img, kern, border_mode=(2, 3))\n dnn.dnn_conv(img, kern, border_mode='full')\n dnn.dnn_conv(img, kern, border_mode='valid')\n dnn.dnn_conv(img, kern, border_mode='half')\n\n\ndef test_dnn_conv_alpha_output_merge():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4()\n kern = T.ftensor4()\n out = T.ftensor4()\n\n b = 1\n c = 4\n f = 3\n ih = 5\n iw = 8\n kh = 2\n kw = 6\n img_val = numpy.random.random((b, c, ih, iw)).astype('float32')\n kern_val = numpy.random.random((f, c, kh, kw)).astype('float32')\n out_val = numpy.random.random((b, f, ih - kh + 1,\n iw - kw + 1)).astype('float32')\n\n conv = dnn.dnn_conv(img, kern)\n gw = theano.grad(conv.sum(), kern)\n gi = theano.grad(conv.sum(), img)\n\n lr = numpy.asarray(0.05, dtype='float32')\n\n fr = lr * (conv + out)\n wr = kern + lr * gw\n ir = img + lr * gi\n\n f1 = theano.function([img, kern, out], [fr, wr, ir], mode=mode_with_gpu)\n assert isinstance(f1.maker.fgraph.outputs[0].owner.inputs[0].owner.op,\n dnn.GpuDnnConv)\n assert isinstance(f1.maker.fgraph.outputs[1].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradW)\n assert isinstance(f1.maker.fgraph.outputs[2].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradI)\n\n mode = mode_with_gpu\n mode = mode.excluding('local_dnn_conv_alpha_merge')\n mode = mode.excluding('local_dnn_convw_alpha_merge')\n mode = mode.excluding('local_dnn_convi_alpha_merge')\n mode = mode.excluding('local_dnn_conv_output_merge')\n mode = mode.excluding('local_dnn_convw_output_merge')\n mode = mode.excluding('local_dnn_convi_output_merge')\n\n f2 = theano.function([img, kern, out], [fr, wr, ir], mode=mode)\n\n assert not isinstance(f2.maker.fgraph.outputs[0].owner.inputs[0].owner.op,\n dnn.GpuDnnConv)\n assert not isinstance(f2.maker.fgraph.outputs[1].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradW)\n assert not isinstance(f2.maker.fgraph.outputs[2].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradI)\n\n out_f1 = f1(img_val, kern_val, out_val)\n out_f2 = f2(img_val, kern_val, out_val)\n\n assert len(out_f1) == len(out_f2)\n\n for v1, v2 in zip(out_f1, out_f2):\n utt.assert_allclose(v1, v2)\n\n\ndef test_dnn_conv_grad():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n b = 1\n c = 4\n f = 3\n ih = 2\n iw = 8\n kh = 2\n kw = 2\n img_val = numpy.random.random((b, c, ih, iw)).astype('float32')\n kern_val = numpy.random.random((f, c, kh, kw)).astype('float32')\n out_val = numpy.random.random((b, f, ih - kw + 1,\n iw - kw + 1)).astype('float32')\n\n def dconv(img, kern, out):\n desc = 
dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode='conv')(kern.shape)\n return dnn.GpuDnnConv()(img, kern, out, desc, alpha=0.5, beta=0.75)\n\n def dconvi(img, kern, out):\n desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode='conv')(kern.shape)\n return dnn.GpuDnnConvGradI()(kern, out, img, desc, alpha=-1.0,\n beta=0.0)\n\n def dconvw(img, kern, out):\n desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode='conv')(kern.shape)\n return dnn.GpuDnnConvGradW()(img, out, kern, desc, alpha=0.75,\n beta=-1.0)\n\n utt.verify_grad(dconv, [img_val, kern_val, out_val])\n utt.verify_grad(dconvi, [img_val, kern_val, out_val])\n utt.verify_grad(dconvw, [img_val, kern_val, out_val])\n\n\ndef test_version():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n assert isinstance(dnn.version(), int)\n\n\nclass test_SoftMax(test_nnet.test_SoftMax):\n gpu_op = dnn.GpuDnnSoftmax\n gpu_grad_op = dnn.GpuDnnSoftmaxGrad\n mode = mode_with_gpu\n\n def setUp(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n\n def test_softmax_shape_0(self):\n raise SkipTest(\"Cudnn doesn't support 0 shapes\")\n\n def test_softmax_grad(self):\n def cmp(n, m, f, f_gpu):\n data = numpy.arange(n * m, dtype='float32').reshape(n, m)\n gdata = numpy.asarray(data)[:, :, None, None]\n\n out = f(data)\n gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]\n utt.assert_allclose(out, gout)\n\n x = T.matrix('x', 'float32')\n x_gpu = T.tensor4('x_gpu', 'float32')\n f_z = T.nnet.softmax_op\n f_gpu = dnn.GpuDnnSoftmax(\n 'accurate',\n 'channel'\n )\n\n # Verify the grad operation\n dims = (2, 3, 4, 5)\n gdata = numpy.arange(\n numpy.product(dims),\n dtype='float32'\n ).reshape(dims)\n T.verify_grad(f_gpu, [gdata], rng=numpy.random,\n mode=mode_with_gpu)\n\n # Verify that the CPU and GPU implementations return the same results\n # up to a tolerance.\n\n self._test_softmax(\n x,\n x_gpu,\n f_z,\n f_gpu,\n cmp\n )\n\n self._test_softmax(\n x, x, f_z, f_z, self._cmp\n )\n\n # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad\n # optimization is applied when cudnn is required\n y = T.fvector('y')\n f = theano.function(\n [y],\n T.grad(T.nnet.softmax(y).mean(), y),\n mode=mode_with_gpu\n )\n sorted_f = f.maker.fgraph.toposort()\n val = numpy.random.rand(5).astype('float32')\n out_dnn = f(val)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n self.gpu_grad_op)\n ]) == 1)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.tensor.nnet.SoftmaxGrad)\n ]) == 0)\n\n # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad\n # optimization is not applied when cudnn is excluded or not\n # available\n mode_wo_cudnn = mode_with_gpu.excluding(\"cudnn\")\n y = T.fvector('y')\n f = theano.function(\n [y],\n T.grad(T.nnet.softmax(y).mean(), y),\n mode=mode_wo_cudnn\n )\n sorted_f = f.maker.fgraph.toposort()\n out_cpu = f(val)\n utt.assert_allclose(out_dnn, out_cpu)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n self.gpu_grad_op)\n ]) == 0)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.tensor.nnet.SoftmaxGrad)\n ]) == 1)\n\n # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad do not\n # crash with manual graph\n y = T.fvector('y')\n o = theano.tensor.nnet.SoftmaxGrad()(y, y * 2)\n f = theano.function([y], o, mode=mode_with_gpu)\n sorted_f = f.maker.fgraph.toposort()\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n self.gpu_grad_op)\n ]) == 1)\n 
assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.tensor.nnet.SoftmaxGrad)\n ]) == 0)\n\n def test_log_softmax(self):\n # This is a test for an optimization that depends on cuDNN v3 or\n # more recent. Don't test if the cuDNN version is too old.\n if dnn.version(raises=False) < 3000:\n raise SkipTest(\"Log-softmax is only in cudnn v3+\")\n\n x = T.ftensor4()\n softmax_out = dnn.GpuDnnSoftmax('accurate', 'channel')(x)\n log_out = T.log(T.as_tensor_variable(softmax_out))\n\n f = theano.function([x], log_out, mode=mode_with_gpu)\n\n # Ensure that the optimization has been applied\n dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, dnn.GpuDnnSoftmax)]\n assert len(dnn_softmax_nodes) == 1\n assert dnn_softmax_nodes[0].op.algo == \"log\"\n\n # Ensure that the output of the function is valid\n input_shapes = [(3, 4, 5, 6),\n (1025, 2, 3, 4),\n (2, 1025, 3, 4),\n (2, 3, 1025, 4),\n (2, 3, 4, 1025),\n (66000, 2, 3, 4),\n (2, 66000, 3, 4),\n (2, 3, 66000, 4),\n (2, 3, 4, 66000)]\n\n for inp_shape in input_shapes:\n input_val = numpy.random.normal(0, 1, inp_shape).astype(\"float32\")\n\n out = f(input_val)\n expected_out = numpy.log(numpy.exp(input_val) /\n numpy.exp(input_val).sum(1)[:, None, :, :])\n\n utt.assert_allclose(out, expected_out)\n\n def test_log_softmax2(self):\n # Test that the op LogSoftmax is correctly replaced by the op\n # DnnSoftmax with the 'log' mode.\n\n # This is a test for an optimization that depends on cuDNN v3 or\n # more recent. Don't test if the cuDNN version is too old.\n if dnn.version(raises=False) < 3000:\n raise SkipTest(\"Log-softmax is only in cudnn v3+\")\n\n # Compile a reference function, on the CPU, to be used to validate the\n # results of the other function.\n x = T.fmatrix()\n f_ref = theano.function([x], T.nnet.LogSoftmax()(x))\n\n # Build the first graph and ensure that the optimization is applied\n log_softmax_out = T.nnet.LogSoftmax()(x)\n f = theano.function([x], log_softmax_out, mode=mode_with_gpu)\n\n dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, dnn.GpuDnnSoftmax)]\n assert len(dnn_softmax_nodes) == 1\n assert dnn_softmax_nodes[0].op.algo == \"log\"\n\n # Compare the output of the function with the reference function\n inp = numpy.random.normal(0, 1, (5, 6)).astype(\"float32\")\n utt.assert_allclose(f(inp), f_ref(inp))\n\n # Build the first graph and ensure that the optimization is applied\n log_softmax_out = T.log(T.nnet.Softmax()(x))\n f = theano.function([x], log_softmax_out, mode=mode_with_gpu)\n\n dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, dnn.GpuDnnSoftmax)]\n assert len(dnn_softmax_nodes) == 1\n assert dnn_softmax_nodes[0].op.algo == \"log\"\n\n # Compare the output of the function with the reference function\n inp = numpy.random.normal(0, 1, (5, 6)).astype(\"float32\")\n utt.assert_allclose(f(inp), f_ref(inp))\n\n\ndef test_dnn_batchnorm_train():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n if dnn.version(raises=False) < 5000:\n raise SkipTest(\"batch normalization requires cudnn v5+\")\n utt.seed_rng()\n\n for mode in ('per-activation', 'spatial'):\n for vartype in (T.ftensor4, T.ftensor3, T.fmatrix, T.fvector):\n x, scale, bias = (vartype(n) for n in ('x', 'scale', 'bias'))\n ndim = x.ndim\n eps = 5e-3 # some non-standard value to test if it's used\n\n # forward pass\n out, x_mean, x_invstd = dnn.dnn_batch_normalization_train(\n x, scale, bias, mode, eps)\n # reference forward pass\n 
if mode == 'per-activation':\n axes = (0,)\n elif mode == 'spatial':\n axes = (0,) + tuple(range(2, ndim))\n x_mean2 = x.mean(axis=axes, keepdims=True)\n x_invstd2 = T.inv(T.sqrt(x.var(axis=axes, keepdims=True) + eps))\n scale2 = T.addbroadcast(scale, *axes)\n bias2 = T.addbroadcast(bias, *axes)\n out2 = (x - x_mean2) * (scale2 * x_invstd2) + bias2\n # backward pass\n dy = vartype('dy')\n grads = T.grad(None, wrt=[x, scale, bias], known_grads={out: dy})\n # reference backward pass\n grads2 = T.grad(None, wrt=[x, scale, bias], known_grads={out2: dy})\n # compile\n f = theano.function([x, scale, bias, dy],\n [out, x_mean, x_invstd, out2, x_mean2, x_invstd2] +\n grads + grads2, mode=mode_with_gpu)\n # run\n for data_shape in ((10, 20, 30, 40), (4, 3, 1, 1), (1, 1, 5, 5)):\n data_shape = data_shape[:ndim]\n param_shape = tuple(1 if d in axes else s\n for d, s in enumerate(data_shape))\n X = 4 + 3 * numpy.random.randn(*data_shape).astype('float32')\n Dy = -1 + 2 * numpy.random.randn(*data_shape).astype('float32')\n Scale = numpy.random.randn(*param_shape).astype('float32')\n Bias = numpy.random.randn(*param_shape).astype('float32')\n outputs = f(X, Scale, Bias, Dy)\n # compare outputs\n utt.assert_allclose(outputs[0], outputs[0 + 3]) # out\n utt.assert_allclose(outputs[1], outputs[1 + 3]) # mean\n utt.assert_allclose(outputs[2], outputs[2 + 3]) # invstd\n # compare gradients\n utt.assert_allclose(outputs[6], outputs[6 + 3]) # dx\n utt.assert_allclose(outputs[7], outputs[7 + 3], rtol=3e-3) # dscale\n utt.assert_allclose(outputs[8], outputs[8 + 3]) # dbias\n\n\ndef test_batchnorm_inference():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n if dnn.version(raises=False) < 5000:\n raise SkipTest(\"batch normalization requires cudnn v5+\")\n utt.seed_rng()\n\n for mode in ('per-activation', 'spatial'):\n for vartype in (T.ftensor4, T.ftensor3, T.fmatrix, T.fvector):\n x, scale, bias, mean, var = (vartype(n) for n in ('x', 'scale',\n 'bias', 'mean',\n 'var'))\n ndim = x.ndim\n eps = 5e-3 # some non-standard value to test if it's used\n\n # forward pass\n out = dnn.dnn_batch_normalization_test(x, scale, bias, mean,\n var, mode, eps)\n # reference forward pass\n if mode == 'per-activation':\n axes = (0,)\n elif mode == 'spatial':\n axes = (0,) + tuple(range(2, ndim))\n scale2, bias2, mean2, var2 = (T.addbroadcast(t, *axes)\n for t in (scale, bias, mean, var))\n out2 = (x - mean2) * (scale2 / T.sqrt(var2 + eps)) + bias2\n # backward pass\n dy = vartype('dy')\n grads = T.grad(None, wrt=[x, scale, bias, mean, var], known_grads={out: dy})\n # reference backward pass\n grads2 = T.grad(None, wrt=[x, scale, bias, mean, var], known_grads={out2: dy})\n # compile\n f = theano.function([x, scale, bias, mean, var, dy],\n [out, out2] + grads + grads2, mode=mode_with_gpu)\n # run\n for data_shape in ((10, 20, 30, 40), (4, 3, 1, 1), (1, 1, 5, 5)):\n data_shape = data_shape[:ndim]\n param_shape = tuple(1 if d in axes else s\n for d, s in enumerate(data_shape))\n X = 4 + 3 * numpy.random.randn(*data_shape).astype('float32')\n Dy = -1 + 2 * numpy.random.randn(*data_shape).astype('float32')\n Scale = numpy.random.randn(*param_shape).astype('float32')\n Bias = numpy.random.randn(*param_shape).astype('float32')\n Mean = numpy.random.randn(*param_shape).astype('float32')\n Var = numpy.random.rand(*param_shape).astype('float32')\n outputs = f(X, Scale, Bias, Mean, Var, Dy)\n # compare outputs\n utt.assert_allclose(outputs[0], outputs[1]) # out\n # compare gradients\n 
utt.assert_allclose(outputs[2], outputs[2 + 5]) # dx\n utt.assert_allclose(outputs[3], outputs[3 + 5]) # dscale\n utt.assert_allclose(outputs[4], outputs[4 + 5]) # dbias\n utt.assert_allclose(outputs[5], outputs[5 + 5]) # dmean\n utt.assert_allclose(outputs[6], outputs[6 + 5], atol=2e-5) # dvar\n" ]
[ [ "numpy.zeros", "numpy.random.normal", "numpy.asarray", "numpy.exp", "numpy.random.randn", "numpy.random.random", "numpy.arange", "numpy.random.rand", "numpy.product", "numpy.array" ] ]
Babelscape/crocodile
[ "424ae33c68fdf22eb305e75b2f498831526d87f8" ]
[ "add_filter_relations.py" ]
[ "import jsonlines\nimport re\nimport transformers\nimport torch\nfrom tqdm import trange, tqdm\nimport argparse\nimport os, sys\n\ndef get_case_insensitive_key_value(input_dict, key):\n return next((value for dict_key, value in input_dict.items() if dict_key.lower() == key.lower()), None)\n\ndef filter_triples(model, tokenizer, texts):\n if max([len(text) for text in texts])>256:\n range_length = 12\n else:\n range_length = 64\n result = []\n for batch in range(0,len(texts),range_length):\n encoded_input = tokenizer(\n [ex[0] for ex in texts[batch: batch + range_length]], [ex[1] for ex in texts[batch: batch + range_length]],\n return_tensors=\"pt\",\n add_special_tokens=True,\n max_length=256,\n padding='longest',\n return_token_type_ids=False,\n truncation_strategy='only_first')\n for tensor in encoded_input:\n encoded_input[tensor] = encoded_input[tensor].cuda()\n with torch.no_grad(): # remove this if you need gradients.\n outputs = model(**encoded_input, return_dict=True, output_attentions=False, output_hidden_states = False)\n result.append(outputs['logits'].softmax(dim=1))\n del outputs\n logits = torch.cat(result)\n # if language == 'ko':\n # return logits.argmax(1) == get_case_insensitive_key_value(model.config.label2id, 'entailment')# [:,get_case_insensitive_key_value(model.config.label2id, 'entailment')]>0.75\n return logits[:,get_case_insensitive_key_value(model.config.label2id, 'entailment')]#>0.75\n\ndef prepare_triplet(subject_entity, object_entity, article_text, predicate):\n text_triplet = ''\n text_triplet += re.compile(\"(?<!\\d)\\.(?!\\d)\").split(article_text[:min(subject_entity['boundaries'][0], object_entity['boundaries'][0])])[-1]\n text_triplet += article_text[min(subject_entity['boundaries'][0], object_entity['boundaries'][0]):max(subject_entity['boundaries'][1], object_entity['boundaries'][1])]\n text_triplet += re.compile(\"(?<!\\d)\\.(?!\\d)\").split(article_text[max(subject_entity['boundaries'][1], object_entity['boundaries'][1]):])[0]\n if language == 'ko' or language == 'kosource':\n return (text_triplet.strip('\\n'), ' '.join([str(subject_entity['surfaceform']), str(object_entity['surfaceform']), str(predicate['surfaceform'])]))\n # return (text_triplet.strip('\\n'), ' '.join([str(object_entity['surfaceform']), str(predicate['surfaceform']), str(subject_entity['surfaceform'])]))\n return (text_triplet.strip('\\n'), ' '.join([str(subject_entity['surfaceform']), str(predicate['surfaceform']), str(object_entity['surfaceform'])]))\n\ndef main(folder_input = 'out/ko'):\n global language \n language = folder_input.split('/')[1]\n if language == 'ko' or language == 'kosource':\n model_name_or_path = '/home/huguetcabot/sentence_transformers/test-glue/XNLI'\n # model_name_or_path = '/home/huguetcabot/sentence_transformers/test-glue/run-1/checkpoint-3910'\n else:\n model_name_or_path = 'joeddav/xlm-roberta-large-xnli'\n\n tokenizer = transformers.AutoTokenizer.from_pretrained(\n model_name_or_path)\n model_config = transformers.AutoConfig.from_pretrained(\n model_name_or_path,\n # num_labels=2,\n output_hidden_states=True,\n output_attentions=True,\n )\n model = transformers.AutoModelForSequenceClassification.from_pretrained(model_name_or_path, config = model_config)\n model.cuda()\n model.eval()\n model.half()\n with jsonlines.open(f'out_clean/{\"/\".join(folder_input.split(\"/\")[1:])}.jsonl', mode='w') as writer:\n for k,j,y in os.walk(folder_input):\n for file_name in y:\n with jsonlines.open(k + '/' + file_name) as reader:\n for i, article in 
tqdm(enumerate(reader)):\n previous = []\n triples_list = []\n texts = []\n for triple in article['triples']:\n if triple['subject']['boundaries'] != None and triple['object']['boundaries'] != None and (triple['subject']['boundaries'], triple['object']['boundaries']) not in previous:\n previous.append((triple['subject']['boundaries'], triple['object']['boundaries']))\n triples_list.append(triple)\n texts.append(prepare_triplet(triple['subject'], triple['object'], article['text'], triple[\"predicate\"]))\n elif (triple['subject']['boundaries'], triple['object']['boundaries']) not in previous:\n distance = 1000000\n for entity in article['entities']:\n if entity['uri'] == triple['subject']['uri']:\n if abs(min(triple['object']['boundaries'])-min(entity['boundaries'])) < distance:\n subject_entity = entity\n distance = abs(min(triple['object']['boundaries'])-min(entity['boundaries']))\n triple['subject'] = subject_entity\n previous.append((triple['subject']['boundaries'], triple['object']['boundaries']))\n triples_list.append(triple)\n texts.append(prepare_triplet(subject_entity, triple['object'], article['text'], triple[\"predicate\"]))\n indexes = filter_triples(model, tokenizer, texts)\n if len(indexes) == 0:\n continue\n for pred, trip in zip(indexes, triples_list):\n trip['confidence'] = pred.item()\n # article['triples'] = [x for i,x in zip(indexes, triples_list) if (i == True) or x[\"predicate\"][\"uri\"] in [\"P569\", \"P570\"]]\n article['triples'] = triples_list\n writer.write(article)\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=__doc__)\n parser.add_argument(\"--folder_input\", \n help=\"input file\")\n args = parser.parse_args()\n\n main(args.folder_input)\n" ]
[ [ "torch.no_grad", "torch.cat" ] ]
panchiittp/pyross
[ "d5a455ae36a61e2fba29b30f1da774f1b284f1e2" ]
[ "tests/quick_test.py" ]
[ "#!python\n\"\"\"Unittesting for the pyross module. Run as python -m unittest pyross.test.\"\"\"\nimport sys\n#remove pwd from path that tries to import .pyx files\nfor i in sys.path:\n if 'pyross' in i or i == '':\n sys.path.remove(i)\n# print(sys.path)\nimport pyross\nimport unittest\nimport inspect\nimport numpy as np\nimport scipy as sp\n\n\nclass DeterministicTest(unittest.TestCase):\n \"\"\"testing deterministic.pyx.\"\"\"\n N = np.asarray([10000], dtype=np.float64)\n M = 1\n alpha = 0\n beta = 0.0071\n gIa = 0.008\n gIs = 0.008\n gI = 0.008\n gE = 0.007\n gIc = 0.1\n gIhp= 0.1\n gIsp= 0.1\n gIcp= 0.1\n gIh = 0.1\n gA = 0\n tE = 0\n tIa = 0\n tIs = 0\n Tf = 100\n Nf = 1000\n fsa = 0\n fh = 1\n sa = 0\n iaa = 0\n hh = 0\n cc = 0\n mm = 0\n tE = 0\n tA = 0\n tIa = 0\n tIs = 0\n kI = 1\n kE = 1\n k = 1\n ep = 0\n parameters = {'N': N, 'M': M, 'alpha': alpha,\n 'beta': beta, 'gIa': gIa, 'gIs': gIs,\n 'gIsp':gIsp,'gIhp':gIhp,'gIcp':gIcp,\n 'gI': gI, 'iaa': iaa,\n 'gE': gE, 'gA': gA, 'tE': tE,\n 'gIc': gIc, 'gIh': gIh, 'fh': fh,\n 'tIa': tIa, 'tIs': tIs, 'fsa': fsa,\n 'sa': sa, 'hh': hh, 'cc': cc,\n 'mm': mm, 'tA': tA, 'tE': tE,\n 'tIa': tIa, 'tIs': tIs, 'kI': kI,\n 'kE': kE, 'ep': ep, 'k': k}\n\n\n def __init__(self, *args, **kwargs):\n super(DeterministicTest, self).__init__(*args, **kwargs)\n # self.parameters = self.parameters\n\n def contactMatrix(self, t): return np.identity(self.M)\n\n def test_decay(self):\n \"\"\"\n Exponential decay from infected to recovered. Paths agree within .1%.\n \"\"\"\n SIR = pyross.deterministic.SIR(self.parameters, self.M, self.N)\n sim = SIR.simulate(np.zeros(1), np.zeros(1), self.N,\n self.contactMatrix, self.Tf,\n self.Nf, integrator='solve_ivp')\n time_points = np.linspace(0, self.Tf, self.Nf)\n exp_decay = sp.integrate.solve_ivp(lambda t, y: -self.gIs * y,\n (0, self.Tf), self.N,\n t_eval=time_points)\n diff = (sim['X'][:, 2] - exp_decay.y)/self.N\n self.assertTrue((diff < 0.001).all(), msg=\"paths differ > .1%\")\n\n def test_integrators(self):\n \"\"\"\n All integration methods produce paths which agree within .1%\n \"\"\"\n integrators = ['solve_ivp', 'odeint', 'odespy',\n 'odespy-rkf45', 'odespy-rk4']\n paths = []\n model = pyross.deterministic.SIR(self.parameters, self.M, self.N)\n for integrator in integrators:\n try:\n data = model.simulate(np.zeros(1), np.zeros(1), self.N,\n self.contactMatrix, self.Tf,\n self.Nf, integrator=integrator)\n except ImportError:\n print(f\"{integrator} is not installed, skipping...\")\n pass\n paths.append(data['X'])\n for i in range(len(paths)):\n for j in range(len(paths)):\n if i != j:\n diff = (paths[i]-paths[j])/self.N\n self.assertTrue((np.asarray(diff) < 0.001).all(),\n msg=f\"path {i} not equal to path {j}\")\n\n def test_SIRS(self):\n \"\"\"Test to make sure SIRS collapses down to SIR\"\"\"\n self.parameters['ep'] = 0\n self.parameters['sa'] = 0\n self.parameters['iaa'] = 0\n SIR = pyross.deterministic.SIR(self.parameters, self.M, self.N)\n SIRS = pyross.deterministic.SIRS(self.parameters, self.M, self.N)\n SIRdata = SIR.simulate(self.N, np.ones(1), np.zeros(1),\n self.contactMatrix, self.Tf,\n self.Nf)['X']\n SIRSdata = SIRS.simulate(self.N, np.ones(1), np.zeros(1),\n self.contactMatrix, self.Tf,\n self.Nf)['X']\n self.assertTrue((SIRdata-SIRSdata[:, 0:3] < 0.001).all(),\n msg=\"paths differ > .1%\")\n\n def test_init_models(self):\n \"\"\"Test initialisation of deterministic models\"\"\"\n deterministic_models = dict(inspect.getmembers(pyross.deterministic,\n inspect.isclass))\n for name, model in 
deterministic_models.items():\n if name.startswith('S') and name != 'Spp':\n m = model(self.parameters, self.M, self.N)\n\n def test_run_models(self):\n \"\"\"Runs all deterministic models\"\"\"\n deterministic_models = dict(inspect.getmembers(pyross.deterministic,\n inspect.isclass))\n traj_dict={}\n for name, model in deterministic_models.items():\n if name.startswith('S') and name != 'Spp':\n m = model(self.parameters, self.M, self.N)\n x0 = np.array([*self.N, *np.ones(self.M),\n *np.zeros(m.nClass -2)], dtype=np.float64).reshape((m.nClass,1))\n traj_dict[name] = m.simulate(*x0, self.contactMatrix, 100, 100)\n\n\nclass StochasticTest(unittest.TestCase):\n \"\"\"testing stochastic.pyx\"\"\"\n nloops=10\n iinfec = 3000\n Tf = 10\n\n def __init__(self, *args, **kwargs):\n super(StochasticTest, self).__init__(*args, **kwargs)\n self.parameters = DeterministicTest.parameters\n self.stochastic_models = dict(inspect.getmembers(pyross.stochastic,\n inspect.isclass))\n\n def contactMatrix(self, t): return np.identity(self.parameters['M'])\n\n def test_init_models(self):\n \"\"\"Initializes all stochastic models\"\"\"\n traj_dict={}\n for name, model in self.stochastic_models.items():\n if name.startswith('S'):\n params, M, N = self.parameters, self.parameters['M'], self.parameters['N']\n m = model(params, M, N)\n # x0 = np.array([*self.N, *np.ones(self.M),\n # *np.zeros(m.nClass -2)], dtype=np.float64).reshape((m.nClass,1))\n # traj_dict[name] = m.simulate(*x0, self.contactMatrix, 100, 100)\n\n def test_run_models(self):\n \"\"\"Runs all stochastic models\"\"\"\n traj_dict={}\n for name, model in self.stochastic_models.items():\n \n if name.startswith('S'):\n params, M, N = self.parameters, self.parameters['M'], self.parameters['N']\n m = model(params, M, N + M*10)\n x0 = np.array([*self.parameters['N'],\n *np.ones(self.parameters['M'])*10,\n *np.zeros(m.nClass -2)],\n dtype=np.float64).reshape((m.nClass,1))\n traj_dict[name] = m.simulate(*x0, self.contactMatrix, 100, 100)\n \n def test_stochastic_mean_gillespie(self):\n \"\"\"Runs stochastic models a few times and compares mean to \n deterministic\"\"\"\n deterministic_models = dict(inspect.getmembers(pyross.deterministic,\n inspect.isclass))\n params, M, N = self.parameters, self.parameters['M'], self.parameters['N']\n for name, model in self.stochastic_models.items():\n if name.startswith('S'):\n mS = model(params, M, N + M*self.iinfec)\n # print(mS.kk)\n mD = deterministic_models[name](params, M, N + M*self.iinfec)\n x0 = np.array([*self.parameters['N'],\n *np.ones(self.parameters['M'])*self.iinfec,\n *np.zeros(mS.nClass -2)],\n dtype=np.float64).reshape((mS.nClass,1))\n trajectories = []\n for i in range(self.nloops):\n traj = mS.simulate(*x0, self.contactMatrix, self.Tf, self.Tf)['X']\n trajectories.append(traj)\n traj_mean = np.mean(trajectories, axis=0)[:-1]\n mean = mD.simulate(*x0, self.contactMatrix, self.Tf, self.Tf)['X']\n absdiff = np.abs(traj_mean -mean)/(N*self.Tf)\n # print(name, np.sum(absdiff[:,:-1]))\n self.assertTrue(np.sum(absdiff[:,:-1])<0.01, \n msg=f\"{name} model disagreement\")\n\n def test_stochastic_mean_tau(self):\n \"\"\"Runs stochastic models a few times and compares mean to \n deterministic using tau leaping\"\"\"\n deterministic_models = dict(inspect.getmembers(pyross.deterministic,\n inspect.isclass))\n params, M, N = self.parameters, self.parameters['M'], self.parameters['N']\n for name, model in self.stochastic_models.items():\n if name.startswith('S'):\n mS = model(params, M, N + M*self.iinfec)\n # print(mS.kk)\n mD 
= deterministic_models[name](params, M, N + M*self.iinfec)\n x0 = np.array([*self.parameters['N'],\n *np.ones(self.parameters['M'])*self.iinfec,\n *np.zeros(mS.nClass -2)],\n dtype=np.float64).reshape((mS.nClass,1))\n trajectories = []\n for i in range(self.nloops):\n traj = mS.simulate(*x0, self.contactMatrix, self.Tf, self.Tf,\n method='tau_leaping')['X']\n trajectories.append(traj)\n traj_mean = np.mean(trajectories, axis=0)[:-1]\n mean = mD.simulate(*x0, self.contactMatrix, self.Tf, self.Tf)['X']\n absdiff = np.abs(traj_mean -mean)/(N*self.Tf)\n # print(name, np.sum(absdiff[:,:-1]))\n self.assertTrue(np.sum(absdiff[:,:-1])<0.01, \n msg=f\"{name} model disagreement\")\n\n def test_stochastic_integrators(self):\n \"\"\"Compare tau leaping to Gillespie.\n This will fail because there is a problem with SIkR\n Also, difference is an order of magnitude greater than\n Gillespie from the mean.\n \"\"\"\n self.nloops=10\n params, M, N = self.parameters, self.parameters['M'], self.parameters['N']\n for name, model in self.stochastic_models.items():\n if name.startswith('S'):\n mS = model(params, M, N + M*self.iinfec)\n x0 = np.array([*self.parameters['N'],\n *np.ones(self.parameters['M'])*self.iinfec,\n *np.zeros(mS.nClass -2)],\n dtype=np.float64).reshape((mS.nClass,1))\n gtraj = []\n tautraj = []\n for i in range(self.nloops):\n gtraj.append(mS.simulate(*x0, self.contactMatrix, self.Tf, self.Tf, \n method='gillespie')['X'])\n tautraj.append(mS.simulate(*x0, self.contactMatrix, self.Tf, self.Tf, \n method='tau_leaping', epsilon=1E-3)['X'])\n gmean = np.sum(gtraj, axis=0)\n taumean= np.sum(tautraj, axis=0)\n absdiff = np.abs(gmean - taumean)/(N*self.Tf)\n # print(name, np.sum(absdiff), np.shape(gmean), np.shape(taumean))\n self.assertTrue(np.sum(absdiff)<.1, msg=f\"{name} model disagreement\")\n\n\nclass ControlTest(unittest.TestCase):\n \"\"\"testing control.pyx\"\"\"\n \n def __init__(self, *args, **kwargs):\n super(ControlTest, self).__init__(*args, **kwargs)\n self.parameters = DeterministicTest.parameters\n self.control_models = dict(inspect.getmembers(pyross.control,\n inspect.isclass))\n\n def contactMatrix(self, t): return np.identity(self.parameters['M'])\n\n def test_init_models(self):\n \"\"\"Initializes all control models\"\"\"\n for name, model in self.control_models.items():\n if name.startswith('S'):\n params, M, N = self.parameters, self.parameters['M'], self.parameters['N']\n m = model(params, M, N)\n\n\nclass InferenceTest(unittest.TestCase):\n \"\"\"testing inference.pyx\"\"\"\n \n def __init__(self, *args, **kwargs):\n super(InferenceTest, self).__init__(*args, **kwargs)\n self.parameters = DeterministicTest.parameters\n self.control_models = dict(inspect.getmembers(pyross.inference,\n inspect.isclass))\n \n def contactMatrix(self, t): return np.identity(self.parameters['M'])\n\n def test_init_models(self):\n \"\"\"Initializes all inference models\"\"\"\n for name, model in self.control_models.items():\n if name.startswith('S') and name != \"SIR_type\":\n params, M, Ni = self.parameters, self.parameters['M'], self.parameters['N']\n N = int(np.sum(Ni))\n fi = Ni/N\n steps = 1\n m = model(params, M, fi, N, steps)\n\n\nclass ForecastTest(unittest.TestCase):\n \"\"\"testing forcast.pyx\"\"\"\n \n def __init__(self, *args, **kwargs):\n super(ForecastTest, self).__init__(*args, **kwargs)\n self.parameters = DeterministicTest.parameters\n self.control_models = dict(inspect.getmembers(pyross.forecast,\n inspect.isclass))\n self.parameters['cov'] = np.identity(2)\n \n def 
contactMatrix(self, t): return np.identity(self.parameters['M'])\n\n def test_init_models(self):\n \"\"\"Initializes all forcast models\"\"\"\n for name, model in self.control_models.items():\n if name.startswith('S') and name != \"SIR_type\":\n params, M, Ni = self.parameters, self.parameters['M'], self.parameters['N']\n N = int(np.sum(Ni))\n fi = Ni/N\n steps = 1\n m = model(params, M, Ni)\n\n\nclass UtilsPythonTest(unittest.TestCase):\n \"\"\"Testing the minimization function in utils_python.py\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(UtilsPythonTest, self).__init__(*args, **kwargs)\n\n def test_minimization(self):\n \"\"\"Test the minimization(...) function in utils_python.py with a few simple examples\"\"\"\n\n # A simple example\n f1 = lambda x, grad=0: 1 + np.linalg.norm(x)**2 \n # A multi-modal example\n f2 = lambda x, grad=0: 1 + np.linalg.norm(x)**2 + 0.1*np.abs(np.sin(4*np.pi*np.linalg.norm(x)))\n\n # Test global optimisation\n guess = np.array([1.0, 1.0])\n bounds = np.array([[-2.0, 2.0], [-2.0, 2.0]])\n x, y = pyross.utils_python.minimization(f1, guess, bounds, enable_global=True, enable_local=False,\n ftol=1e-4, cma_random_seed=1, verbose=False)\n self.assertTrue(np.abs(y - 1.0) < 1e-3)\n\n x, y = pyross.utils_python.minimization(f2, guess, bounds, enable_global=True, enable_local=False,\n ftol=1e-4, verbose=False, cma_random_seed=2)\n self.assertTrue(np.abs(y - 1.0) < 1e-3)\n\n # Test local optimisation\n guess = np.array([2.0, 2.0])\n bounds = np.array([[-5.0, 5.0], [-5.0, 5.0]])\n x, y = pyross.utils_python.minimization(f1, guess, bounds, enable_global=False, enable_local=True,\n ftol=1e-5, verbose=False)\n self.assertTrue(np.abs(y - 1.0) < 1e-4)\n\n # And now combined\n x, y = pyross.utils_python.minimization(f2, guess, bounds, enable_global=True, enable_local=True,\n ftol=1e-5, global_ftol_factor=100, verbose=False, cma_random_seed=4)\n self.assertTrue(np.abs(y - 1.0) < 1e-4)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.ones", "numpy.sum", "numpy.linalg.norm", "numpy.zeros", "scipy.integrate.solve_ivp", "numpy.abs", "numpy.asarray", "numpy.array", "numpy.identity", "numpy.linspace", "numpy.mean" ] ]
jhong93/vpd
[ "1ed3e8631c46e078ecb9a7756dba1f1c14aead5b" ]
[ "dummy_2d_features.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"\nConvert COCO17 2D poses to dummy embeddings for 2D-VPD.\n\"\"\"\n\nimport os\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom util.io import store_pickle, load_gz_json\nfrom vipe_dataset.dataset_base import normalize_2d_skeleton\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('pose_dir', type=str)\n parser.add_argument('-o', '--out_dir', type=str)\n parser.add_argument('--no_flip', action='store_true')\n return parser.parse_args()\n\n\ndef main(pose_dir, out_dir, no_flip):\n for video_name in tqdm(sorted(os.listdir(pose_dir))):\n if video_name.endswith('.json.gz'):\n # Flat case\n video_pose_path = os.path.join(pose_dir, video_name)\n video_name = video_name.split('.json.gz')[0]\n else:\n # Nested case\n video_pose_path = os.path.join(\n pose_dir, video_name, 'coco_keypoints.json.gz')\n\n if not os.path.exists(video_pose_path):\n print('Not found:', video_pose_path)\n continue\n\n embs = []\n for frame_num, pose_data in load_gz_json(video_pose_path):\n raw_2d = np.array(pose_data[0][-1])\n pose_2d = normalize_2d_skeleton(raw_2d, False, to_tensor=False)\n emb = pose_2d[:, :2].flatten() # drop confidence column\n meta = {'is_2d': True, 'kp_score': np.mean(pose_2d[:, 2] + 0.5).item()}\n if not no_flip:\n emb2 = normalize_2d_skeleton(\n raw_2d, True, to_tensor=False)[:, :2].flatten()\n emb = np.stack([emb, emb2])\n embs.append((frame_num, emb, meta))\n\n if out_dir is not None:\n os.makedirs(out_dir, exist_ok=True)\n store_pickle(os.path.join(out_dir, video_name + '.emb.pkl'), embs)\n print('Done!')\n\n\nif __name__ == '__main__':\n main(**vars(get_args()))" ]
[ [ "numpy.array", "numpy.stack", "numpy.mean" ] ]
JPompeus/Stone-Soup
[ "030c60aaf5ff92d7bb53f06e350c0bf58c9af037" ]
[ "stonesoup/simulator/simple.py" ]
[ "# -*- coding: utf-8 -*-\nimport datetime\n\nimport numpy as np\n\nfrom ..base import Property\nfrom ..models.measurement import MeasurementModel\nfrom ..models.transition import TransitionModel\nfrom ..reader import GroundTruthReader\nfrom ..types.detection import TrueDetection, Clutter\nfrom ..types.groundtruth import GroundTruthPath, GroundTruthState\nfrom ..types.numeric import Probability\nfrom ..types.state import GaussianState, State\nfrom .base import DetectionSimulator, GroundTruthSimulator\nfrom stonesoup.buffered_generator import BufferedGenerator\n\n\nclass SingleTargetGroundTruthSimulator(GroundTruthSimulator):\n \"\"\"Target simulator that produces a single target\"\"\"\n transition_model = Property(\n TransitionModel, doc=\"Transition Model used as propagator for track.\")\n initial_state = Property(\n State,\n doc=\"Initial state to use to generate ground truth\")\n timestep = Property(\n datetime.timedelta,\n default=datetime.timedelta(seconds=1),\n doc=\"Time step between each state. Default one second.\")\n number_steps = Property(\n int, default=100, doc=\"Number of time steps to run for\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.index = 0\n\n @BufferedGenerator.generator_method\n def groundtruth_paths_gen(self):\n time = self.initial_state.timestamp or datetime.datetime.now()\n\n gttrack = GroundTruthPath([\n GroundTruthState(self.initial_state.state_vector, timestamp=time,\n metadata={\"index\": self.index})])\n yield time, {gttrack}\n\n for _ in range(self.number_steps - 1):\n time += self.timestep\n # Move track forward\n trans_state_vector = self.transition_model.function(\n gttrack[-1].state_vector,\n time_interval=self.timestep)\n gttrack.append(GroundTruthState(\n trans_state_vector, timestamp=time,\n metadata={\"index\": self.index}))\n yield time, {gttrack}\n\n\nclass SwitchOneTargetGroundTruthSimulator(SingleTargetGroundTruthSimulator):\n \"\"\"Target simulator that produces a single target. This target switches\n between multiple transition models based on a markov matrix\n (:attr:`model_probs`)\"\"\"\n transition_models = Property(\n [TransitionModel], doc=\"List of transition models to be used, ensure\\\n that they all have the same dimensions.\")\n model_probs = Property([float], doc=\"A matrix of probabilities.\\\n The element in the ith row and the jth column is the probability of\\\n switching from the ith transition model in :attr:`transition_models`\\\n to the jth\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.index = 0\n\n @property\n def transition_model(self):\n self.index = np.random.choice(range(0, len(self.transition_models)),\n p=self.model_probs[self.index])\n return self.transition_models[self.index]\n\n\nclass MultiTargetGroundTruthSimulator(SingleTargetGroundTruthSimulator):\n \"\"\"Target simulator that produces multiple targets.\n\n Targets are created and destroyed randomly, as defined by the birth rate\n and death probability.\"\"\"\n transition_model = Property(\n TransitionModel, doc=\"Transition Model used as propagator for track.\")\n\n initial_state = Property(\n GaussianState,\n doc=\"Initial state to use to generate states\")\n birth_rate = Property(\n float, default=1.0, doc=\"Rate at which tracks are born. Expected \"\n \"number of occurrences (λ) in Poisson distribution. Default 1.0.\")\n death_probability = Property(\n Probability, default=0.1,\n doc=\"Probability of track dying in each time step. 
Default 0.1.\")\n\n @BufferedGenerator.generator_method\n def groundtruth_paths_gen(self):\n groundtruth_paths = set()\n time = self.initial_state.timestamp or datetime.datetime.now()\n\n for _ in range(self.number_steps):\n # Random drop tracks\n groundtruth_paths.difference_update(\n gttrack\n for gttrack in groundtruth_paths.copy()\n if np.random.rand() <= self.death_probability)\n\n # Move tracks forward\n for gttrack in groundtruth_paths:\n self.index = gttrack[-1].metadata.get(\"index\")\n trans_state_vector = self.transition_model.function(\n gttrack[-1].state_vector,\n time_interval=self.timestep)\n gttrack.append(GroundTruthState(\n trans_state_vector, timestamp=time,\n metadata={\"index\": self.index}))\n\n # Random create\n for _ in range(np.random.poisson(self.birth_rate)):\n self.index = 0\n gttrack = GroundTruthPath()\n gttrack.append(GroundTruthState(\n self.initial_state.state_vector +\n np.sqrt(self.initial_state.covar) @\n np.random.randn(self.initial_state.ndim, 1),\n timestamp=time, metadata={\"index\": self.index}))\n groundtruth_paths.add(gttrack)\n\n yield time, groundtruth_paths\n time += self.timestep\n\n\nclass SwitchMultiTargetGroundTruthSimulator(MultiTargetGroundTruthSimulator):\n \"\"\"Functions identically to :class:`~.MultiTargetGroundTruthSimulator`,\n but has the transition model switching ability from\n :class:`.SwitchOneTargetGroundTruthSimulator`\"\"\"\n transition_models = Property(\n [TransitionModel], doc=\"List of transition models to be used, ensure\\\n that they all have the same dimensions.\")\n model_probs = Property([float], doc=\"A matrix of probabilities.\\\n The element in the ith row and the jth column is the probability of\\\n switching from the ith transition model in :attr:`transition_models`\\\n to the jth\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.index = 0\n\n @property\n def transition_model(self):\n self.index = np.random.choice(range(0, len(self.transition_models)),\n p=self.model_probs[self.index])\n return self.transition_models[self.index]\n\n\nclass SimpleDetectionSimulator(DetectionSimulator):\n \"\"\"A simple detection simulator.\n\n Parameters\n ----------\n groundtruth : GroundTruthReader\n Source of ground truth tracks used to generate detections for.\n measurement_model : MeasurementModel\n Measurement model used in generating detections.\n \"\"\"\n groundtruth = Property(GroundTruthReader)\n measurement_model = Property(MeasurementModel)\n meas_range = Property(np.ndarray)\n detection_probability = Property(Probability, default=0.9)\n clutter_rate = Property(float, default=2.0)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.real_detections = set()\n self.clutter_detections = set()\n self.index = 0\n\n @property\n def clutter_spatial_density(self):\n \"\"\"returns the clutter spatial density of the measurement space - num\n clutter detections per unit volume per timestep\"\"\"\n return self.clutter_rate/np.prod(np.diff(self.meas_range))\n\n @BufferedGenerator.generator_method\n def detections_gen(self):\n for time, tracks in self.groundtruth:\n self.real_detections.clear()\n self.clutter_detections.clear()\n\n for track in tracks:\n self.index = track[-1].metadata.get(\"index\")\n if np.random.rand() < self.detection_probability:\n detection = TrueDetection(\n self.measurement_model.function(\n track[-1].state_vector),\n timestamp=track[-1].timestamp,\n groundtruth_path=track)\n detection.clutter = False\n 
self.real_detections.add(detection)\n\n # generate clutter\n for _ in range(np.random.poisson(self.clutter_rate)):\n detection = Clutter(\n np.random.rand(self.measurement_model.ndim_meas, 1) *\n np.diff(self.meas_range) + self.meas_range[:, :1],\n timestamp=time)\n self.clutter_detections.add(detection)\n\n yield time, self.real_detections | self.clutter_detections\n\n\nclass SwitchDetectionSimulator(SimpleDetectionSimulator):\n\n \"\"\"Functions identically as the :class:`SimpleDetectionSimulator`, but for\n ground truth paths formed using multiple transition models it allows the\n user to assign a detection probability to each transition models.\n For example, if you wanted a higher detection probability when the\n simulated object makes a turn\"\"\"\n\n detection_probabilities = Property([Probability], doc=\"List of\\\n probabilities that correspond to the detection probability of the\\\n simulated object while undergoing each transition model\")\n\n @property\n def detection_probability(self):\n return self.detection_probabilities[self.index]\n" ]
[ [ "numpy.diff", "numpy.random.randn", "numpy.random.poisson", "numpy.random.rand", "numpy.sqrt" ] ]
hello-ag/stretch_body
[ "4d9a1f10617b8f7155b8498c5333821818ce24ab" ]
[ "body/test/test_dxl_comms.py" ]
[ "# Logging level must be set before importing any stretch_body class\nimport stretch_body.robot_params\n#stretch_body.robot_params.RobotParams.set_logging_level(\"DEBUG\")\n\nimport unittest\nimport stretch_body.device\nimport stretch_body.robot as robot\nimport numpy as np\n\nclass TestTimingStats(unittest.TestCase):\n def test_thread_starvation_group_sync_read(self):\n robot = stretch_body.robot.Robot()\n robot.end_of_arm.params['use_group_sync_read']=1\n print(robot.end_of_arm.joints)\n print('Starting test_thread_starvation')\n print('Latency timer of %f'%robot.end_of_arm.params['dxl_latency_timer'])\n print('Testing on tool %s'%robot.params['tool'])\n robot.startup()\n try:\n for itr in range(100): #Make large CPU load\n x = np.random.rand(3, 1000, 1000)\n x.tolist()\n except (IndexError, IOError) as e:\n self.fail(\"IndexError or IOError failure in comms\")\n self.assertTrue(robot.end_of_arm.comm_errors.status['n_rx']<2)\n robot.end_of_arm.comm_errors.pretty_print()\n robot.stop()\n" ]
[ [ "numpy.random.rand" ] ]
jsun94/nimble
[ "e5c899a69677818b1becc58100577441e15ede13", "e5c899a69677818b1becc58100577441e15ede13" ]
[ "benchmarks/operator_benchmark/pt/qbatchnorm_test.py", "torch/utils/data/dataloader.py" ]
[ "\nimport operator_benchmark as op_bench\nimport torch\n\n\n\"\"\"Microbenchmarks for quantized batchnorm operator.\"\"\"\n\nbatchnorm_configs_short = op_bench.config_list(\n attr_names=[\"M\", \"N\", \"K\"],\n attrs=[\n [1, 256, 3136],\n ],\n cross_product_configs={\n 'device': ['cpu'],\n 'dtype': (torch.qint8,),\n },\n tags=[\"short\"]\n)\n\n\nclass QBatchNormBenchmark(op_bench.TorchBenchmarkBase):\n def init(self, M, N, K, device, dtype):\n self._init(M, N, K, device)\n x_scale = 0.1\n x_zero_point = 0\n self.q_input_one = torch.quantize_per_tensor(\n self.input_one, scale=x_scale, zero_point=x_zero_point, dtype=dtype)\n self.mean = torch.rand(N)\n self.var = torch.rand(N)\n self.weight = torch.rand(N)\n self.bias = torch.rand(N)\n self.eps = 1e-5\n self.Y_scale = 0.1\n self.Y_zero_point = 0\n\n def _init(self, M, N, K, device):\n pass\n\n def forward(self):\n pass\n\n\nclass QBatchNorm1dBenchmark(QBatchNormBenchmark):\n def _init(self, M, N, K, device):\n self.set_module_name(\"QBatchNorm1d\")\n self.input_one = torch.rand(M, N, K, device=device, requires_grad=self.auto_set())\n\n def forward(self):\n return torch.ops.quantized.batch_norm1d(\n self.q_input_one, self.weight, self.bias, self.mean, self.var, self.eps,\n self.Y_scale, self.Y_zero_point)\n\n\nclass QBatchNorm2dBenchmark(QBatchNormBenchmark):\n def _init(self, M, N, K, device):\n self.set_module_name(\"QBatchNorm2d\")\n # Note: quantized implementation requires rank 4, which is why we\n # add a 1 as the last dimension\n self.input_one = torch.rand(M, N, K, 1, device=device, requires_grad=self.auto_set())\n\n def forward(self):\n return torch.ops.quantized.batch_norm2d(\n self.q_input_one, self.weight, self.bias, self.mean, self.var, self.eps,\n self.Y_scale, self.Y_zero_point)\n\n\nop_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm1dBenchmark)\nop_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm2dBenchmark)\n\nif __name__ == \"__main__\":\n op_bench.benchmark_runner.main()\n", "r\"\"\"Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter\n\nTo support these two classes, in `./_utils` we define many utility methods and\nfunctions to be run in multiprocessing. E.g., the data loading worker loop is\nin `./_utils/worker.py`.\n\"\"\"\n\nimport threading\nimport itertools\nimport warnings\nfrom typing import Any, Callable, TypeVar, Generic, Sequence, List, Optional\n\nimport multiprocessing as python_multiprocessing\nimport torch\nimport torch.multiprocessing as multiprocessing\nfrom torch._utils import ExceptionWrapper\nfrom torch._six import queue, string_classes\n\nfrom . import IterableDataset, Sampler, SequentialSampler, RandomSampler, BatchSampler, Dataset\nfrom . import _utils\n\nT_co = TypeVar('T_co', covariant=True)\nT = TypeVar('T')\n_worker_init_fn_t = Callable[[int], None]\n\n# Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that\n# type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'.\n# See https://github.com/python/mypy/issues/3737.\n_collate_fn_t = Callable[[List[T]], Any]\n\n\n# This function used to be defined in this file. However, it was moved to\n# _utils/collate.py. Although it is rather hard to access this from user land\n# (one has to explicitly directly `import torch.utils.data.dataloader`), there\n# probably is user code out there using it. 
This aliasing maintains BC in this\n# aspect.\ndefault_collate: _collate_fn_t = _utils.collate.default_collate\n\nget_worker_info = _utils.worker.get_worker_info\n\nclass _DatasetKind(object):\n Map = 0\n Iterable = 1\n\n @staticmethod\n def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last):\n if kind == _DatasetKind.Map:\n return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)\n else:\n return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)\n\n\nclass _InfiniteConstantSampler(Sampler):\n r\"\"\"Analogous to ``itertools.repeat(None, None)``.\n Used as sampler for :class:`~torch.utils.data.IterableDataset`.\n\n Arguments:\n data_source (Dataset): dataset to sample from\n \"\"\"\n\n def __init__(self):\n super(_InfiniteConstantSampler, self).__init__(None)\n\n def __iter__(self):\n while True:\n yield None\n\n\nclass DataLoader(Generic[T_co]):\n r\"\"\"\n Data loader. Combines a dataset and a sampler, and provides an iterable over\n the given dataset.\n\n The :class:`~torch.utils.data.DataLoader` supports both map-style and\n iterable-style datasets with single- or multi-process loading, customizing\n loading order and optional automatic batching (collation) and memory pinning.\n\n See :py:mod:`torch.utils.data` documentation page for more details.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: ``1``).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: ``False``).\n sampler (Sampler or Iterable, optional): defines the strategy to draw\n samples from the dataset. Can be any ``Iterable`` with ``__len__``\n implemented. If specified, :attr:`shuffle` must not be specified.\n batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but\n returns a batch of indices at a time. Mutually exclusive with\n :attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`,\n and :attr:`drop_last`.\n num_workers (int, optional): how many subprocesses to use for data\n loading. ``0`` means that the data will be loaded in the main process.\n (default: ``0``)\n collate_fn (callable, optional): merges a list of samples to form a\n mini-batch of Tensor(s). Used when using batched loading from a\n map-style dataset.\n pin_memory (bool, optional): If ``True``, the data loader will copy Tensors\n into CUDA pinned memory before returning them. If your data elements\n are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,\n see the example below.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: ``False``)\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: ``0``)\n worker_init_fn (callable, optional): If not ``None``, this will be called on each\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n input, after seeding and before data loading. (default: ``None``)\n prefetch_factor (int, optional, keyword-only arg): Number of sample loaded\n in advance by each worker. ``2`` means there will be a total of\n 2 * num_workers samples prefetched across all workers. 
(default: ``2``)\n persistent_workers (bool, optional): If ``True``, the data loader will not shutdown\n the worker processes after a dataset has been consumed once. This allows to \n maintain the workers `Dataset` instances alive. (default: ``False``)\n\n\n .. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn`\n cannot be an unpicklable object, e.g., a lambda function. See\n :ref:`multiprocessing-best-practices` on more details related\n to multiprocessing in PyTorch.\n\n .. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used.\n When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`,\n it instead returns an estimate based on ``len(dataset) / batch_size``, with proper\n rounding depending on :attr:`drop_last`, regardless of multi-process loading\n configurations. This represents the best guess PyTorch can make because PyTorch\n trusts user :attr:`dataset` code in correctly handling multi-process\n loading to avoid duplicate data.\n\n However, if sharding results in multiple workers having incomplete last batches,\n this estimate can still be inaccurate, because (1) an otherwise complete batch can\n be broken into multiple ones and (2) more than one batch worth of samples can be\n dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such\n cases in general.\n\n See `Dataset Types`_ for more details on these two types of datasets and how\n :class:`~torch.utils.data.IterableDataset` interacts with\n `Multi-process data loading`_.\n \"\"\"\n dataset: Dataset[T_co]\n batch_size: Optional[int]\n num_workers: int\n pin_memory: bool\n drop_last: bool\n timeout: float\n sampler: Sampler\n prefetch_factor: int\n _iterator : Optional['_BaseDataLoaderIter']\n __initialized = False\n\n def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1,\n shuffle: bool = False, sampler: Optional[Sampler[int]] = None,\n batch_sampler: Optional[Sampler[Sequence[int]]] = None,\n num_workers: int = 0, collate_fn: _collate_fn_t = None,\n pin_memory: bool = False, drop_last: bool = False,\n timeout: float = 0, worker_init_fn: _worker_init_fn_t = None,\n multiprocessing_context=None, generator=None,\n *, prefetch_factor: int = 2,\n persistent_workers: bool = False):\n torch._C._log_api_usage_once(\"python.data_loader\") # type: ignore\n\n if num_workers < 0:\n raise ValueError('num_workers option should be non-negative; '\n 'use num_workers=0 to disable multiprocessing.')\n\n if timeout < 0:\n raise ValueError('timeout option should be non-negative')\n\n if num_workers == 0 and prefetch_factor != 2:\n raise ValueError('prefetch_factor option could only be specified in multiprocessing.'\n 'let num_workers > 0 to enable multiprocessing.')\n assert prefetch_factor > 0\n\n if persistent_workers and num_workers == 0:\n raise ValueError('persistent_workers option needs num_workers > 0')\n\n self.dataset = dataset\n self.num_workers = num_workers\n self.prefetch_factor = prefetch_factor\n self.pin_memory = pin_memory\n self.timeout = timeout\n self.worker_init_fn = worker_init_fn\n self.multiprocessing_context = multiprocessing_context\n\n # Arg-check dataset related before checking samplers because we want to\n # tell users that iterable-style datasets are incompatible with custom\n # samplers first, so that they don't learn that this combo doesn't work\n # after spending time fixing the custom sampler errors.\n if isinstance(dataset, IterableDataset):\n self._dataset_kind = _DatasetKind.Iterable\n # NOTE [ Custom 
Samplers and IterableDataset ]\n #\n # `IterableDataset` does not support custom `batch_sampler` or\n # `sampler` since the key is irrelevant (unless we support\n # generator-style dataset one day...).\n #\n # For `sampler`, we always create a dummy sampler. This is an\n # infinite sampler even when the dataset may have an implemented\n # finite `__len__` because in multi-process data loading, naive\n # settings will return duplicated data (which may be desired), and\n # thus using a sampler with length matching that of dataset will\n # cause data lost (you may have duplicates of the first couple\n # batches, but never see anything afterwards). Therefore,\n # `Iterabledataset` always uses an infinite sampler, an instance of\n # `_InfiniteConstantSampler` defined above.\n #\n # A custom `batch_sampler` essentially only controls the batch size.\n # However, it is unclear how useful it would be since an iterable-style\n # dataset can handle that within itself. Moreover, it is pointless\n # in multi-process data loading as the assignment order of batches\n # to workers is an implementation detail so users can not control\n # how to batchify each worker's iterable. Thus, we disable this\n # option. If this turns out to be useful in future, we can re-enable\n # this, and support custom samplers that specify the assignments to\n # specific workers.\n if shuffle is not False:\n raise ValueError(\n \"DataLoader with IterableDataset: expected unspecified \"\n \"shuffle option, but got shuffle={}\".format(shuffle))\n elif sampler is not None:\n # See NOTE [ Custom Samplers and IterableDataset ]\n raise ValueError(\n \"DataLoader with IterableDataset: expected unspecified \"\n \"sampler option, but got sampler={}\".format(sampler))\n elif batch_sampler is not None:\n # See NOTE [ Custom Samplers and IterableDataset ]\n raise ValueError(\n \"DataLoader with IterableDataset: expected unspecified \"\n \"batch_sampler option, but got batch_sampler={}\".format(batch_sampler))\n else:\n self._dataset_kind = _DatasetKind.Map\n\n if sampler is not None and shuffle:\n raise ValueError('sampler option is mutually exclusive with '\n 'shuffle')\n\n if batch_sampler is not None:\n # auto_collation with custom batch_sampler\n if batch_size != 1 or shuffle or sampler is not None or drop_last:\n raise ValueError('batch_sampler option is mutually exclusive '\n 'with batch_size, shuffle, sampler, and '\n 'drop_last')\n batch_size = None\n drop_last = False\n elif batch_size is None:\n # no auto_collation\n if drop_last:\n raise ValueError('batch_size=None option disables auto-batching '\n 'and is mutually exclusive with drop_last')\n\n if sampler is None: # give default samplers\n if self._dataset_kind == _DatasetKind.Iterable:\n # See NOTE [ Custom Samplers and IterableDataset ]\n sampler = _InfiniteConstantSampler()\n else: # map-style\n if shuffle:\n # Cannot statically verify that dataset is Sized\n # Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]\n sampler = RandomSampler(dataset, generator=generator) # type: ignore\n else:\n sampler = SequentialSampler(dataset)\n\n if batch_size is not None and batch_sampler is None:\n # auto_collation without custom batch_sampler\n batch_sampler = BatchSampler(sampler, batch_size, drop_last)\n\n self.batch_size = batch_size\n self.drop_last = drop_last\n self.sampler = sampler\n self.batch_sampler = batch_sampler\n self.generator = generator\n\n if collate_fn is None:\n if self._auto_collation:\n collate_fn = 
_utils.collate.default_collate\n else:\n collate_fn = _utils.collate.default_convert\n\n self.collate_fn = collate_fn\n self.persistent_workers = persistent_workers\n\n self.__initialized = True\n self._IterableDataset_len_called = None # See NOTE [ IterableDataset and __len__ ]\n\n self._iterator = None\n\n def _get_iterator(self) -> '_BaseDataLoaderIter':\n if self.num_workers == 0:\n return _SingleProcessDataLoaderIter(self)\n else:\n return _MultiProcessingDataLoaderIter(self)\n\n @property\n def multiprocessing_context(self):\n return self.__multiprocessing_context\n\n @multiprocessing_context.setter\n def multiprocessing_context(self, multiprocessing_context):\n if multiprocessing_context is not None:\n if self.num_workers > 0:\n if not multiprocessing._supports_context:\n raise ValueError('multiprocessing_context relies on Python >= 3.4, with '\n 'support for different start methods')\n\n if isinstance(multiprocessing_context, string_classes):\n valid_start_methods = multiprocessing.get_all_start_methods()\n if multiprocessing_context not in valid_start_methods:\n raise ValueError(\n ('multiprocessing_context option '\n 'should specify a valid start method in {!r}, but got '\n 'multiprocessing_context={!r}').format(valid_start_methods, multiprocessing_context))\n # error: Argument 1 to \"get_context\" has incompatible type \"Union[str, bytes]\"; expected \"str\" [arg-type]\n multiprocessing_context = multiprocessing.get_context(multiprocessing_context) # type: ignore\n\n if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext):\n raise TypeError(('multiprocessing_context option should be a valid context '\n 'object or a string specifying the start method, but got '\n 'multiprocessing_context={}').format(multiprocessing_context))\n else:\n raise ValueError(('multiprocessing_context can only be used with '\n 'multi-process loading (num_workers > 0), but got '\n 'num_workers={}').format(self.num_workers))\n\n self.__multiprocessing_context = multiprocessing_context\n\n def __setattr__(self, attr, val):\n if self.__initialized and attr in (\n 'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'):\n raise ValueError('{} attribute should not be set after {} is '\n 'initialized'.format(attr, self.__class__.__name__))\n\n super(DataLoader, self).__setattr__(attr, val)\n\n # We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up\n # since '_BaseDataLoaderIter' references 'DataLoader'.\n def __iter__(self) -> '_BaseDataLoaderIter':\n # When using a single worker the returned iterator should be\n # created everytime to avoid reseting its state\n # However, in the case of a multiple workers iterator\n # the iterator is only created once in the lifetime of the\n # DataLoader object so that workers can be reused\n if self.persistent_workers and self.num_workers > 0:\n if self._iterator is None:\n self._iterator = self._get_iterator()\n else:\n self._iterator._reset(self)\n return self._iterator\n else:\n return self._get_iterator()\n\n @property\n def _auto_collation(self):\n return self.batch_sampler is not None\n\n @property\n def _index_sampler(self):\n # The actual sampler used for generating indices for `_DatasetFetcher`\n # (see _utils/fetch.py) to read data at each time. 
This would be\n # `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise.\n # We can't change `.sampler` and `.batch_sampler` attributes for BC\n # reasons.\n if self._auto_collation:\n return self.batch_sampler\n else:\n return self.sampler\n\n def __len__(self) -> int:\n if self._dataset_kind == _DatasetKind.Iterable:\n # NOTE [ IterableDataset and __len__ ]\n #\n # For `IterableDataset`, `__len__` could be inaccurate when one naively\n # does multi-processing data loading, since the samples will be duplicated.\n # However, no real use case should be actually using that behavior, so\n # it should count as a user error. We should generally trust user\n # code to do the proper thing (e.g., configure each replica differently\n # in `__iter__`), and give us the correct `__len__` if they choose to\n # implement it (this will still throw if the dataset does not implement\n # a `__len__`).\n #\n # To provide a further warning, we track if `__len__` was called on the\n # `DataLoader`, save the returned value in `self._len_called`, and warn\n # if the iterator ends up yielding more than this number of samples.\n\n # Cannot statically verify that dataset is Sized\n length = self._IterableDataset_len_called = len(self.dataset) # type: ignore\n if self.batch_size is not None: # IterableDataset doesn't allow custom sampler or batch_sampler\n from math import ceil\n if self.drop_last:\n length = length // self.batch_size\n else:\n length = ceil(length / self.batch_size)\n return length\n else:\n return len(self._index_sampler)\n\n\nclass _BaseDataLoaderIter(object):\n def __init__(self, loader: DataLoader) -> None:\n self._dataset = loader.dataset\n self._dataset_kind = loader._dataset_kind\n self._IterableDataset_len_called = loader._IterableDataset_len_called\n self._auto_collation = loader._auto_collation\n self._drop_last = loader.drop_last\n self._index_sampler = loader._index_sampler\n self._num_workers = loader.num_workers\n self._prefetch_factor = loader.prefetch_factor\n self._pin_memory = loader.pin_memory and torch.cuda.is_available()\n self._timeout = loader.timeout\n self._collate_fn = loader.collate_fn\n self._sampler_iter = iter(self._index_sampler)\n self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item()\n self._persistent_workers = loader.persistent_workers\n self._num_yielded = 0\n\n def __iter__(self) -> '_BaseDataLoaderIter':\n return self\n\n def _reset(self, loader, first_iter=False):\n self._sampler_iter = iter(self._index_sampler)\n self._num_yielded = 0\n self._IterableDataset_len_called = loader._IterableDataset_len_called\n\n def _next_index(self):\n return next(self._sampler_iter) # may raise StopIteration\n\n def _next_data(self):\n raise NotImplementedError\n\n def __next__(self) -> Any:\n if self._sampler_iter is None:\n self._reset()\n data = self._next_data()\n self._num_yielded += 1\n if self._dataset_kind == _DatasetKind.Iterable and \\\n self._IterableDataset_len_called is not None and \\\n self._num_yielded > self._IterableDataset_len_called:\n warn_msg = (\"Length of IterableDataset {} was reported to be {} (when accessing len(dataloader)), but {} \"\n \"samples have been fetched. \").format(self._dataset, self._IterableDataset_len_called,\n self._num_yielded)\n if self._num_workers > 0:\n warn_msg += (\"For multiprocessing data-loading, this could be caused by not properly configuring the \"\n \"IterableDataset replica at each worker. 
Please see \"\n \"https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset for examples.\")\n warnings.warn(warn_msg)\n return data\n\n next = __next__ # Python 2 compatibility\n\n def __len__(self) -> int:\n return len(self._index_sampler)\n\n def __getstate__(self):\n # TODO: add limited pickling support for sharing an iterator\n # across multiple threads for HOGWILD.\n # Probably the best way to do this is by moving the sample pushing\n # to a separate thread and then just sharing the data queue\n # but signalling the end is tricky without a non-blocking API\n raise NotImplementedError(\"{} cannot be pickled\".format(self.__class__.__name__))\n\n\nclass _SingleProcessDataLoaderIter(_BaseDataLoaderIter):\n def __init__(self, loader):\n super(_SingleProcessDataLoaderIter, self).__init__(loader)\n assert self._timeout == 0\n assert self._num_workers == 0\n\n self._dataset_fetcher = _DatasetKind.create_fetcher(\n self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last)\n\n def _next_data(self):\n index = self._next_index() # may raise StopIteration\n data = self._dataset_fetcher.fetch(index) # may raise StopIteration\n if self._pin_memory:\n data = _utils.pin_memory.pin_memory(data)\n return data\n\n\nclass _MultiProcessingDataLoaderIter(_BaseDataLoaderIter):\n r\"\"\"Iterates once over the DataLoader's dataset, as specified by the sampler\"\"\"\n\n # NOTE [ Data Loader Multiprocessing Shutdown Logic ]\n #\n # Preliminary:\n #\n # Our data model looks like this (queues are indicated with curly brackets):\n #\n # main process ||\n # | ||\n # {index_queue} ||\n # | ||\n # worker processes || DATA\n # | ||\n # {worker_result_queue} || FLOW\n # | ||\n # pin_memory_thread of main process || DIRECTION\n # | ||\n # {data_queue} ||\n # | ||\n # data output \\/\n #\n # P.S. `worker_result_queue` and `pin_memory_thread` part may be omitted if\n # `pin_memory=False`.\n #\n #\n # Terminating multiprocessing logic requires very careful design. In\n # particular, we need to make sure that\n #\n # 1. The iterator gracefully exits the workers when its last reference is\n # gone or it is depleted.\n #\n # In this case, the workers should be gracefully exited because the\n # main process may still need to continue to run, and we want cleaning\n # up code in the workers to be executed (e.g., releasing GPU memory).\n # Naturally, we implement the shutdown logic in `__del__` of\n # DataLoaderIterator.\n #\n # We delay the discussion on the logic in this case until later.\n #\n # 2. The iterator exits the workers when the loader process and/or worker\n # processes exit normally or with an error.\n #\n # We set all workers and `pin_memory_thread` to have `daemon=True`.\n #\n # You may ask, why can't we make the workers non-daemonic, and\n # gracefully exit using the same logic as we have in `__del__` when the\n # iterator gets deleted (see 1 above)?\n #\n # First of all, `__del__` is **not** guaranteed to be called when\n # interpreter exits. Even if it is called, by the time it executes,\n # many Python core library resources may already be freed, and even\n # simple things like acquiring an internal lock of a queue may hang.\n # Therefore, in this case, we actually need to prevent `__del__` from\n # being executed, and rely on the automatic termination of daemonic\n # children. Thus, we register an `atexit` hook that sets a global flag\n # `_utils.python_exit_status`. 
Since `atexit` hooks are executed in the\n # reverse order of registration, we are guaranteed that this flag is\n # set before library resources we use are freed. (Hooks freeing those\n # resources are registered at importing the Python core libraries at\n # the top of this file.) So in `__del__`, we check if\n # `_utils.python_exit_status` is set or `None` (freed), and perform\n # no-op if so.\n #\n # Another problem with `__del__` is also related to the library cleanup\n # calls. When a process ends, it shuts all its daemonic children\n # down with a SIGTERM (instead of joining them without a timeout).\n # Similarly for threads, but by a different mechanism. This fact,\n # together with a few implementation details of multiprocessing, forces\n # us to make workers daemonic. All of our problems arise when a\n # DataLoader is used in a subprocess, and are caused by multiprocessing\n # code which looks more or less like this:\n #\n # try:\n # your_function_using_a_dataloader()\n # finally:\n # multiprocessing.util._exit_function()\n #\n # The joining/termination mentioned above happens inside\n # `_exit_function()`. Now, if `your_function_using_a_dataloader()`\n # throws, the stack trace stored in the exception will prevent the\n # frame which uses `DataLoaderIter` from being freed. If the frame has any\n # reference to the `DataLoaderIter` (e.g., in a method of the iter),\n # its `__del__`, which starts the shutdown procedure, will not be\n # called. That, in turn, means that workers aren't notified. Attempting\n # to join in `_exit_function` will then result in a hang.\n #\n # For context, `_exit_function` is also registered as an `atexit` call.\n # So it is unclear to me (@ssnl) why this is needed in a finally block.\n # The code dates back to 2008 and there is no comment on the original\n # PEP 371 or patch https://bugs.python.org/issue3050 (containing both\n # the finally block and the `atexit` registration) that explains this.\n #\n # Another choice is to just shut down workers with logic in 1 above\n # whenever we see an error in `next`. This isn't ideal because\n # a. It prevents users from using try-except to resume data loading.\n # b. It doesn't prevent hanging if users have references to the\n # iterator.\n #\n # 3. All processes exit if any of them die unexpectedly by fatal signals.\n #\n # As shown above, the workers are set as daemonic children of the main\n # process. However, automatic cleaning-up of such child processes only\n # happens if the parent process exits gracefully (e.g., not via fatal\n # signals like SIGKILL). So we must ensure that each process will exit\n # even if the process that should send/receive data to/from it was\n # killed, i.e.,\n #\n # a. A process won't hang when getting from a queue.\n #\n # Even with carefully designed data dependencies (i.e., a `put()`\n # always corresponding to a `get()`), hanging on `get()` can still\n # happen when data in queue is corrupted (e.g., due to\n # `cancel_join_thread` or unexpected exit).\n #\n # For child exit, we set a timeout whenever we try to get data\n # from `data_queue`, and check the workers' status on each timeout\n # and error.\n # See `_MultiProcessingDataLoaderIter._get_data()` and\n # `_MultiProcessingDataLoaderIter._try_get_data()` for details.\n #\n # Additionally, for child exit on non-Windows platforms, we also\n # register a SIGCHLD handler (which is not supported on Windows) on\n # the main process, which checks if any of the workers fail in the\n # (Python) handler. 
This is more efficient and faster in detecting\n # worker failures, compared to only using the above mechanism.\n # See `DataLoader.cpp` and `_utils/signal_handling.py` for details.\n #\n # For `.get()` calls where the sender is not a worker, we\n # guard them with timeouts, and check the status of the sender\n # when timeout happens:\n # + in the workers, the `_utils.worker.ManagerWatchdog` class\n # checks the status of the main process.\n # + if `pin_memory=True`, when getting from `pin_memory_thread`,\n # check `pin_memory_thread` status periodically until `.get()`\n # returns or we see that `pin_memory_thread` died.\n #\n # b. A process won't hang when putting into a queue.\n #\n # We use `mp.Queue` which has a separate background thread to put\n # objects from an unbounded buffer array. The background thread is\n # daemonic and usually automatically joined when the process\n # exits.\n #\n # However, in case the receiver has ended abruptly while\n # reading from the pipe, the join will hang forever. Therefore,\n # for both `worker_result_queue` (worker -> main process/pin_memory_thread)\n # and each `index_queue` (main process -> worker), we use\n # `q.cancel_join_thread()` in the sender process before any `q.put` to\n # prevent this automatic join.\n #\n # Moreover, having called `cancel_join_thread` on all queues makes\n # implementing graceful shutdown logic in `__del__` much easier.\n # It won't need to get from any queue, which would also need to be\n # guarded by periodic status checks.\n #\n # Nonetheless, `cancel_join_thread` must only be called when the\n # queue is **not** going to be read from or written to by another\n # process, because it may hold onto a lock or leave corrupted data\n # in the queue, leading other readers/writers to hang.\n #\n # `pin_memory_thread`'s `data_queue` is a `queue.Queue` that does\n # a blocking `put` if the queue is full. So there is no above\n # problem, but we do need to wrap the `put` in a loop that breaks\n # not only upon success, but also when the main process stops\n # reading, i.e., is shutting down.\n #\n #\n # Now let's get back to 1:\n # how we gracefully exit the workers when the last reference to the\n # iterator is gone.\n #\n # To achieve this, we implement the following logic along with the design\n # choices mentioned above:\n #\n # `workers_done_event`:\n # A `multiprocessing.Event` shared among the main process and all worker\n # processes. This is used to signal the workers that the iterator is\n # shutting down. After it is set, they will not send processed data to\n # queues anymore, and only wait for the final `None` before exiting.\n # `done_event` isn't strictly needed. I.e., we can just check for `None`\n # from the input queue, but it allows us to skip wasting resources\n # processing data if we are already shutting down.\n #\n # `pin_memory_thread_done_event`:\n # A `threading.Event` for a similar purpose to that of\n # `workers_done_event`, but is for the `pin_memory_thread`. The reason\n # that separate events are needed is that `pin_memory_thread` reads from\n # the output queue of the workers. But the workers, upon seeing that\n # `workers_done_event` is set, only want to see the final `None`, and are\n # not required to flush all data in the output queue (e.g., it may call\n # `cancel_join_thread` on that queue if its `IterableDataset` iterator\n # happens to exhaust coincidentally, which is out of the control of the\n # main process). 
Thus, since we will exit `pin_memory_thread` before the\n # workers (see below), two separate events are used.\n #\n # NOTE: In short, the protocol is that the main process will set these\n # `done_event`s and then send the corresponding processes/threads a `None`,\n # and that they may exit at any time after receiving the `None`.\n #\n # NOTE: Using `None` as the final signal is valid, since normal data will\n # always be a 2-tuple with the 1st element being the index of the data\n # transferred (different from dataset index/key), and the 2nd being\n # either the dataset key or the data sample (depending on which part\n # of the data model the queue is at).\n #\n # [ worker processes ]\n # While loader process is alive:\n # Get from `index_queue`.\n # If got anything other than `None`,\n # Check `workers_done_event`.\n # If set, continue to next iteration,\n # i.e., keep getting until we see the `None`, then exit.\n # Otherwise, process data:\n # If fetching from an `IterableDataset` and the iterator\n # is exhausted, send an `_IterableDatasetStopIteration`\n # object to signal iteration end. The main process, upon\n # receiving such an object, will send `None` to this\n # worker and not use the corresponding `index_queue`\n # anymore.\n # If timed out,\n # Whether or not `workers_done_event` is set (we still need to\n # see the `None`), we must continue to the next iteration.\n # (outside loop)\n # If `workers_done_event` is set, (this can be False with `IterableDataset`)\n # `data_queue.cancel_join_thread()`. (Everything is ending here:\n # main process won't read from it;\n # other workers will also call\n # `cancel_join_thread`.)\n #\n # [ pin_memory_thread ]\n # # No need to check main thread. If this thread is alive, the main loader\n # # thread must be alive, because this thread is set as daemonic.\n # While `pin_memory_thread_done_event` is not set:\n # Get from `worker_result_queue`.\n # If timed out, continue to get in the next iteration.\n # Otherwise, process data.\n # While `pin_memory_thread_done_event` is not set:\n # Put processed data to `data_queue` (a `queue.Queue` with blocking put)\n # If timed out, continue to put in the next iteration.\n # Otherwise, break, i.e., continuing to the outer loop.\n #\n # NOTE: we don't check the status of the main thread because\n # 1. if the process is killed by fatal signal, `pin_memory_thread`\n # ends.\n # 2. in other cases, either the cleaning-up in __del__ or the\n # automatic exit of daemonic thread will take care of it.\n # This won't busy-wait either because `.get(timeout)` does not\n # busy-wait.\n #\n # [ main process ]\n # In the DataLoader Iter's `__del__`\n # b. Exit `pin_memory_thread`\n # i. Set `pin_memory_thread_done_event`.\n # ii. Put `None` in `worker_result_queue`.\n # iii. Join the `pin_memory_thread`.\n # iv. `worker_result_queue.cancel_join_thread()`.\n #\n # c. Exit the workers.\n # i. Set `workers_done_event`.\n # ii. Put `None` in each worker's `index_queue`.\n # iii. Join the workers.\n # iv. Call `.cancel_join_thread()` on each worker's `index_queue`.\n #\n # NOTE: (c) is better placed after (b) because it may leave corrupted\n # data in `worker_result_queue`, which `pin_memory_thread`\n # reads from, in which case the exit of `pin_memory_thread` can only\n # happen after timing out, which is slow. 
Nonetheless, the same thing\n # happens if a worker is killed by a signal at an unfortunate time,\n # but in other cases, we are better off having a non-corrupted\n # `worker_result_queue` for `pin_memory_thread`.\n #\n # NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b)\n # can be omitted.\n #\n # NB: The `done_event`s aren't strictly needed. E.g., we can just check for\n # `None` from `index_queue`, but it allows us to skip wasting resources\n # processing indices already in `index_queue` if we are already shutting\n # down.\n\n def __init__(self, loader):\n super(_MultiProcessingDataLoaderIter, self).__init__(loader)\n\n assert self._num_workers > 0\n assert self._prefetch_factor > 0\n\n if loader.multiprocessing_context is None:\n multiprocessing_context = multiprocessing\n else:\n multiprocessing_context = loader.multiprocessing_context\n\n self._worker_init_fn = loader.worker_init_fn\n self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))\n # No certainty which module multiprocessing_context is\n self._worker_result_queue = multiprocessing_context.Queue() # type: ignore\n self._worker_pids_set = False\n self._shutdown = False\n self._workers_done_event = multiprocessing_context.Event()\n\n self._index_queues = []\n self._workers = []\n for i in range(self._num_workers):\n # No certainty which module multiprocessing_context is\n index_queue = multiprocessing_context.Queue() # type: ignore\n # index_queue.cancel_join_thread()\n w = multiprocessing_context.Process(\n target=_utils.worker._worker_loop,\n args=(self._dataset_kind, self._dataset, index_queue,\n self._worker_result_queue, self._workers_done_event,\n self._auto_collation, self._collate_fn, self._drop_last,\n self._base_seed + i, self._worker_init_fn, i, self._num_workers,\n self._persistent_workers))\n w.daemon = True\n # NB: Process.start() actually takes some time as it needs to\n # start a process and pass the arguments over via a pipe.\n # Therefore, we only add a worker to the self._workers list after\n # it has started, so that we do not call .join() if the program dies\n # before it starts, and __del__ tries to join but will get:\n # AssertionError: can only join a started process.\n w.start()\n self._index_queues.append(index_queue)\n self._workers.append(w)\n\n if self._pin_memory:\n self._pin_memory_thread_done_event = threading.Event()\n\n # Queue is not type-annotated\n self._data_queue = queue.Queue() # type: ignore\n pin_memory_thread = threading.Thread(\n target=_utils.pin_memory._pin_memory_loop,\n args=(self._worker_result_queue, self._data_queue,\n torch.cuda.current_device(),\n self._pin_memory_thread_done_event))\n pin_memory_thread.daemon = True\n pin_memory_thread.start()\n # Similar to workers (see comment above), we only register\n # pin_memory_thread once it is started.\n self._pin_memory_thread = pin_memory_thread\n else:\n self._data_queue = self._worker_result_queue\n\n # .pid can be None only before process is spawned (not the case, so ignore)\n _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers)) # type: ignore\n _utils.signal_handling._set_SIGCHLD_handler()\n self._worker_pids_set = True\n self._reset(loader, first_iter=True)\n\n def _reset(self, loader, first_iter=False):\n super()._reset(loader, first_iter)\n self._send_idx = 0 # idx of the next task to be sent to workers\n self._rcvd_idx = 0 # idx of the next task to be returned in __next__\n # information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx).\n # map: 
task idx => - (worker_id,) if data isn't fetched (outstanding)\n # \\ (worker_id, data) if data is already fetched (out-of-order)\n self._task_info = {}\n self._tasks_outstanding = 0 # always equal to count(v for v in task_info.values() if len(v) == 1)\n # A list of booleans representing whether each worker still has work to\n # do, i.e., not having exhausted its iterable dataset object. It always\n # contains all `True`s if not using an iterable-style dataset\n # (i.e., if kind != Iterable).\n # Note that this indicates that a worker still has work to do *for this epoch*.\n # It does not mean that a worker is dead. In case of `_persistent_workers`, \n # the worker will be reset to available in the next epoch.\n self._workers_status = [True for i in range(self._num_workers)]\n # We resume the prefetching in case it was enabled\n if not first_iter:\n for idx in range(self._num_workers):\n self._index_queues[idx].put(_utils.worker._ResumeIteration())\n resume_iteration_cnt = self._num_workers\n while resume_iteration_cnt > 0:\n data = self._get_data()\n if isinstance(data, _utils.worker._ResumeIteration):\n resume_iteration_cnt -= 1\n # prime the prefetch loop\n for _ in range(self._prefetch_factor * self._num_workers):\n self._try_put_index()\n\n def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):\n # Tries to fetch data from `self._data_queue` once for a given timeout.\n # This can also be used as inner loop of fetching without timeout, with\n # the sender status as the loop condition.\n #\n # This raises a `RuntimeError` if any worker died unexpectedly. This error\n # can come from either the SIGCHLD handler in `_utils/signal_handling.py`\n # (only for non-Windows platforms), or the manual check below on errors\n # and timeouts.\n #\n # Returns a 2-tuple:\n # (bool: whether data was successfully fetched, any: data if successful else None)\n try:\n data = self._data_queue.get(timeout=timeout)\n return (True, data)\n except Exception as e:\n # At timeout and error, we manually check whether any worker has\n # failed. Note that this is the only mechanism for Windows to detect\n # worker failures.\n failed_workers = []\n for worker_id, w in enumerate(self._workers):\n if self._workers_status[worker_id] and not w.is_alive():\n failed_workers.append(w)\n self._mark_worker_as_unavailable(worker_id)\n if len(failed_workers) > 0:\n pids_str = ', '.join(str(w.pid) for w in failed_workers)\n raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) from e\n if isinstance(e, queue.Empty):\n return (False, None)\n import tempfile\n import errno\n try:\n # Raise an exception if we are this close to the FDs limit.\n # Apparently, trying to open only one file is not a sufficient\n # test.\n # See NOTE [ DataLoader on Linux and open files limit ]\n fds_limit_margin = 10\n fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]\n except OSError as e:\n if e.errno == errno.EMFILE:\n raise RuntimeError(\n \"Too many open files. Communication with the\"\n \" workers is no longer possible. Please increase the\"\n \" limit using `ulimit -n` in the shell or change the\"\n \" sharing strategy by calling\"\n \" `torch.multiprocessing.set_sharing_strategy('file_system')`\"\n \" at the beginning of your code\") from None\n raise\n\n# NOTE [ DataLoader on Linux and open files limit ]\n#\n# On Linux when DataLoader is used with multiprocessing we pass the data between\n# the root process and the workers through SHM files. 
We remove those files from\n# the filesystem as soon as they are created and keep them alive by\n# passing around their file descriptors through AF_UNIX sockets. (See\n# docs/source/multiprocessing.rst and 'Multiprocessing Technical Notes` in\n# the wiki (https://github.com/pytorch/pytorch/wiki).)\n#\n# This sometimes leads us to exceeding the open files limit. When that happens,\n# and the offending file descriptor is coming over a socket, the `socket` Python\n# package silently strips the file descriptor from the message, setting only the\n# `MSG_CTRUNC` flag (which might be a bit misleading since the manpage says that\n# it _indicates that some control data were discarded due to lack of space in\n# the buffer for ancillary data_). This might reflect the C implementation of\n# AF_UNIX sockets.\n#\n# This behaviour can be reproduced with the script and instructions at the\n# bottom of this note.\n#\n# When that happens, the standard Python `multiprocessing` (and not\n# `torch.multiprocessing`) raises a `RuntimeError: received 0 items of ancdata`\n#\n# Sometimes, instead of the FD being stripped, you may get an `OSError:\n# Too many open files`, both in the script below and in DataLoader. However,\n# this is rare and seems to be nondeterministic.\n#\n#\n# #!/usr/bin/env python3\n# import sys\n# import socket\n# import os\n# import array\n# import shutil\n# import socket\n#\n#\n# if len(sys.argv) != 4:\n# print(\"Usage: \", sys.argv[0], \" tmp_dirname iteration (send|recv)\")\n# sys.exit(1)\n#\n# if __name__ == '__main__':\n# dirname = sys.argv[1]\n# sock_path = dirname + \"/sock\"\n# iterations = int(sys.argv[2])\n# def dummy_path(i):\n# return dirname + \"/\" + str(i) + \".dummy\"\n#\n#\n# if sys.argv[3] == 'send':\n# while not os.path.exists(sock_path):\n# pass\n# client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n# client.connect(sock_path)\n# for i in range(iterations):\n# fd = os.open(dummy_path(i), os.O_WRONLY | os.O_CREAT)\n# ancdata = array.array('i', [fd])\n# msg = bytes([i % 256])\n# print(\"Sending fd \", fd, \" (iteration #\", i, \")\")\n# client.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, ancdata)])\n#\n#\n# else:\n# assert sys.argv[3] == 'recv'\n#\n# if os.path.exists(dirname):\n# raise Exception(\"Directory exists\")\n#\n# os.mkdir(dirname)\n#\n# print(\"Opening socket...\")\n# server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n# server.bind(sock_path)\n#\n# print(\"Listening...\")\n# for i in range(iterations):\n# a = array.array('i')\n# msg, ancdata, flags, addr = server.recvmsg(1, socket.CMSG_SPACE(a.itemsize))\n# assert(len(ancdata) == 1)\n# cmsg_level, cmsg_type, cmsg_data = ancdata[0]\n# a.frombytes(cmsg_data)\n# print(\"Received fd \", a[0], \" (iteration #\", i, \")\")\n#\n# shutil.rmtree(dirname)\n#\n# Steps to reproduce:\n#\n# 1. Run two shells and set lower file descriptor limit in the receiving one:\n# (shell1) ulimit -n 1020\n# (shell2) ulimit -n 1022\n#\n# 2. Run the script above with the `recv` option in the first shell\n# (shell1) ./test_socket.py sock_tmp 1017 recv\n#\n# 3. Run the script with the `send` option in the second shell:\n# (shell2) ./test_socket.py sock_tmp 1017 send\n\n def _get_data(self):\n # Fetches data from `self._data_queue`.\n #\n # We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds,\n # which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)`\n # in a loop. This is the only mechanism to detect worker failures for\n # Windows. 
For other platforms, a SIGCHLD handler is also used for\n # worker failure detection.\n #\n # If `pin_memory=True`, we also need to check if `pin_memory_thread` has\n # died at timeouts.\n if self._timeout > 0:\n success, data = self._try_get_data(self._timeout)\n if success:\n return data\n else:\n raise RuntimeError('DataLoader timed out after {} seconds'.format(self._timeout))\n elif self._pin_memory:\n while self._pin_memory_thread.is_alive():\n success, data = self._try_get_data()\n if success:\n return data\n else:\n # while condition is false, i.e., pin_memory_thread died.\n raise RuntimeError('Pin memory thread exited unexpectedly')\n # In this case, `self._data_queue` is a `queue.Queue`. But we don't\n # need to call `.task_done()` because we don't use `.join()`.\n else:\n while True:\n success, data = self._try_get_data()\n if success:\n return data\n\n def _next_data(self):\n while True:\n # If the worker responsible for `self._rcvd_idx` has already ended\n # and was unable to fulfill this task (due to exhausting an `IterableDataset`),\n # we try to advance `self._rcvd_idx` to find the next valid index.\n #\n # This part needs to run in the loop because both the `self._get_data()`\n # call and `_IterableDatasetStopIteration` check below can mark\n # extra worker(s) as dead.\n while self._rcvd_idx < self._send_idx:\n info = self._task_info[self._rcvd_idx]\n worker_id = info[0]\n if len(info) == 2 or self._workers_status[worker_id]: # has data or is still active\n break\n del self._task_info[self._rcvd_idx]\n self._rcvd_idx += 1\n else:\n # no valid `self._rcvd_idx` is found (i.e., didn't break)\n if not self._persistent_workers:\n self._shutdown_workers()\n raise StopIteration\n\n # Now `self._rcvd_idx` is the batch index we want to fetch\n\n # Check if the next sample has already been generated\n if len(self._task_info[self._rcvd_idx]) == 2:\n data = self._task_info.pop(self._rcvd_idx)[1]\n return self._process_data(data)\n\n assert not self._shutdown and self._tasks_outstanding > 0\n idx, data = self._get_data()\n self._tasks_outstanding -= 1\n if self._dataset_kind == _DatasetKind.Iterable:\n # Check for _IterableDatasetStopIteration\n if isinstance(data, _utils.worker._IterableDatasetStopIteration):\n if self._persistent_workers:\n self._workers_status[data.worker_id] = False\n else:\n self._mark_worker_as_unavailable(data.worker_id)\n self._try_put_index()\n continue\n\n if idx != self._rcvd_idx:\n # store out-of-order samples\n self._task_info[idx] += (data,)\n else:\n del self._task_info[idx]\n return self._process_data(data)\n\n def _try_put_index(self):\n assert self._tasks_outstanding < self._prefetch_factor * self._num_workers\n\n try:\n index = self._next_index()\n except StopIteration:\n return\n for _ in range(self._num_workers): # find the next active worker, if any\n worker_queue_idx = next(self._worker_queue_idx_cycle)\n if self._workers_status[worker_queue_idx]:\n break\n else:\n # not found (i.e., didn't break)\n return\n\n self._index_queues[worker_queue_idx].put((self._send_idx, index))\n self._task_info[self._send_idx] = (worker_queue_idx,)\n self._tasks_outstanding += 1\n self._send_idx += 1\n\n def _process_data(self, data):\n self._rcvd_idx += 1\n self._try_put_index()\n if isinstance(data, ExceptionWrapper):\n data.reraise()\n return data\n\n def _mark_worker_as_unavailable(self, worker_id, shutdown=False):\n # Mark a worker as having finished its work, e.g., due to\n # exhausting an `IterableDataset`. 
This should be used only when this\n # `_MultiProcessingDataLoaderIter` is going to continue running.\n\n assert self._workers_status[worker_id] or (self._persistent_workers and shutdown)\n\n # Signal termination to that specific worker.\n q = self._index_queues[worker_id]\n # Indicate that no more data will be put on this queue by the current\n # process.\n q.put(None)\n\n # Note that we don't actually join the worker here, nor do we remove the\n # worker's pid from the C side struct because (1) joining may be slow, and\n # (2) since we don't join, the worker may still raise an error, and we\n # prefer capturing those, rather than ignoring them, even though they\n # are raised after the worker has finished its job.\n # Joining is deferred to `_shutdown_workers`, which is called when\n # all workers finish their jobs (e.g., `IterableDataset` replicas) or\n # when this iterator is garbage collected.\n\n self._workers_status[worker_id] = False\n\n assert self._workers_done_event.is_set() == shutdown\n\n def _shutdown_workers(self):\n # Called when shutting down this `_MultiProcessingDataLoaderIter`.\n # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on\n # the logic of this function.\n python_exit_status = _utils.python_exit_status\n if python_exit_status is True or python_exit_status is None:\n # See (2) of the note. If Python is shutting down, do nothing.\n return\n # Normal exit when last reference is gone / iterator is depleted.\n # See (1) and the second half of the note.\n if not self._shutdown:\n self._shutdown = True\n try:\n # Exit `pin_memory_thread` first because exiting workers may leave\n # corrupted data in `worker_result_queue` which `pin_memory_thread`\n # reads from.\n if hasattr(self, '_pin_memory_thread'):\n # Use hasattr in case error happens before we set the attribute.\n self._pin_memory_thread_done_event.set()\n # Send something to pin_memory_thread in case it is waiting\n # so that it can wake up and check `pin_memory_thread_done_event`\n self._worker_result_queue.put((None, None))\n self._pin_memory_thread.join()\n self._worker_result_queue.cancel_join_thread()\n self._worker_result_queue.close()\n\n # Exit workers now.\n self._workers_done_event.set()\n for worker_id in range(len(self._workers)):\n # Get number of workers from `len(self._workers)` instead of\n # `self._num_workers` in case we error before starting all\n # workers.\n # If we are using `workers_status` with `persistent_workers`,\n # we have to shut the worker down because it is paused.\n if self._persistent_workers or self._workers_status[worker_id]:\n self._mark_worker_as_unavailable(worker_id, shutdown=True)\n for w in self._workers:\n w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)\n if w.is_alive():\n # Existing mechanisms try to make the workers exit\n # peacefully, but in case we unfortunately reach\n # here, which we shouldn't (e.g., pytorch/pytorch#39570),\n # we kill the worker.\n w.terminate()\n for q in self._index_queues:\n q.cancel_join_thread()\n q.close()\n finally:\n # Even though all this function does is putting into queues that\n # we have called `cancel_join_thread` on, weird things can\n # happen when a worker is killed by a signal, e.g., hanging in\n # `Event.set()`. 
So we need to guard this with SIGCHLD handler,\n # and remove pids from the C side data structure only at the\n # end.\n #\n # FIXME: Unfortunately, for Windows, we are missing a worker\n # error detection mechanism here in this function, as it\n # doesn't provide a SIGCHLD handler.\n if self._worker_pids_set:\n _utils.signal_handling._remove_worker_pids(id(self))\n self._worker_pids_set = False\n\n def __del__(self):\n self._shutdown_workers()\n" ]
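A minimal sketch of the sharding advice in NOTE [ IterableDataset and __len__ ] above: each `IterableDataset` replica configures itself differently in `__iter__` via the public `torch.utils.data.get_worker_info()` API, so multi-process loading does not yield duplicates. The `RangeDataset` class and its bounds are hypothetical illustrations, not part of the file above.

import torch
from torch.utils.data import DataLoader, IterableDataset, get_worker_info

class RangeDataset(IterableDataset):
    # Hypothetical iterable-style dataset over the half-open range [start, end).
    def __init__(self, start, end):
        self.start, self.end = start, end

    def __iter__(self):
        info = get_worker_info()
        if info is None:
            # Single-process loading: this replica yields everything.
            lo, hi = self.start, self.end
        else:
            # Multi-process loading: shard the range so worker replicas
            # do not overlap (otherwise each worker would replay the
            # whole dataset, the duplication the NOTE warns about).
            per_worker = -(-(self.end - self.start) // info.num_workers)  # ceil division
            lo = self.start + info.id * per_worker
            hi = min(lo + per_worker, self.end)
        return iter(range(lo, hi))

# Without the sharding above, num_workers=2 would yield every value twice.
loader = DataLoader(RangeDataset(0, 8), batch_size=4, num_workers=2)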
[ [ "torch.rand", "torch.ops.quantized.batch_norm2d", "torch.quantize_per_tensor", "torch.ops.quantized.batch_norm1d" ], [ "torch.empty", "torch.multiprocessing.get_all_start_methods", "torch.cuda.current_device", "torch._six.queue.Queue", "torch.multiprocessing.get_context", "torch.cuda.is_available", "torch._C._log_api_usage_once" ] ]
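A second sketch, tied to the sampler-defaulting branch earlier in the `DataLoader` constructor: with auto-batching enabled, the default sampler is wrapped in a `BatchSampler`, so an explicitly constructed `BatchSampler` produces the same batches. The toy dataset here is assumed for illustration and is not taken from either record.

import torch
from torch.utils.data import BatchSampler, DataLoader, SequentialSampler, TensorDataset

ds = TensorDataset(torch.arange(10))

# Auto-batching: DataLoader builds BatchSampler(SequentialSampler(ds), ...) itself.
auto = DataLoader(ds, batch_size=4, drop_last=False)

# Equivalent manual construction of the same index stream.
manual = DataLoader(
    ds, batch_sampler=BatchSampler(SequentialSampler(ds), batch_size=4, drop_last=False))

# Both loaders emit the identical sequence of batches.
assert [b[0].tolist() for b in auto] == [b[0].tolist() for b in manual]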
silverriver/Stylized_Dialog
[ "559dd97c4ec9c91e94deb048f789684ef3f1f9fa" ]
[ "TCFC/eval/bert_eval_acc.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).\"\"\"\n\n\nimport argparse\nimport glob\nimport json\nimport logging\nimport os\nimport random\nimport shutil\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AlbertConfig,\n AlbertForSequenceClassification,\n AlbertTokenizer,\n BertConfig,\n BertForSequenceClassification,\n BertTokenizer,\n DistilBertConfig,\n DistilBertForSequenceClassification,\n DistilBertTokenizer,\n FlaubertConfig,\n FlaubertForSequenceClassification,\n FlaubertTokenizer,\n RobertaConfig,\n RobertaForSequenceClassification,\n RobertaTokenizer,\n XLMConfig,\n XLMForSequenceClassification,\n XLMRobertaConfig,\n XLMRobertaForSequenceClassification,\n XLMRobertaTokenizer,\n XLMTokenizer,\n XLNetConfig,\n XLNetForSequenceClassification,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n)\nfrom transformers import glue_compute_metrics as compute_metrics\nfrom transformers import glue_convert_examples_to_features as convert_examples_to_features\nfrom transformers import glue_output_modes as output_modes\nfrom transformers import glue_processors as processors\n\n\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\nALL_MODELS = sum(\n (\n tuple(conf.pretrained_config_archive_map.keys())\n for conf in (\n BertConfig,\n XLNetConfig,\n XLMConfig,\n RobertaConfig,\n DistilBertConfig,\n AlbertConfig,\n XLMRobertaConfig,\n FlaubertConfig,\n )\n ),\n (),\n)\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertForSequenceClassification, BertTokenizer),\n \"xlnet\": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),\n \"xlm\": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),\n \"roberta\": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),\n \"albert\": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),\n \"xlmroberta\": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),\n \"flaubert\": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef softmax(x):\n x_row_max = x.max(axis=-1)\n x_row_max = x_row_max.reshape(list(x.shape)[:-1]+[1])\n x = x - x_row_max\n x_exp = np.exp(x)\n x_exp_row_sum = x_exp.sum(axis=-1).reshape(list(x.shape)[:-1]+[1])\n softmax = x_exp / 
x_exp_row_sum\n return softmax\n\n\ndef train(args, train_dataset, model, tokenizer):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n # set global_step to global_step of last saved checkpoint from model path\n try:\n global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n except ValueError:\n global_step = 0\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0],\n )\n set_seed(args) # Added here for reproductibility\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n logs = {}\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / args.logging_steps\n learning_rate_scalar = scheduler.get_lr()[0]\n logs[\"learning_rate\"] = learning_rate_scalar\n 
logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n for key, value in logs.items():\n tb_writer.add_scalar(key, value, global_step)\n print(json.dumps({**logs, **{\"step\": global_step}}))\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n eval_outputs_dirs = (args.output_dir, args.output_dir + \"-MM\") if args.task_name == \"mnli\" else (args.output_dir,)\n\n results = {}\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu eval\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n 
if args.output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif args.output_mode == \"regression\":\n preds = np.squeeze(preds)\n result = compute_metrics(eval_task, preds, out_label_ids)\n results.update(result)\n\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n return results\n\n\ndef pred_prob(args, model, tokenizer, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n eval_outputs_dirs = (args.output_dir, args.output_dir + \"-MM\") if args.task_name == \"mnli\" else (args.output_dir,)\n\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu eval\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n all_logits = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n logits = logits.detach().cpu().numpy()\n if all_logits is None:\n all_logits = logits\n else:\n all_logits = np.concatenate((all_logits, logits), 0)\n\n all_logits = softmax(all_logits)\n results = all_logits[:, 1].reshape(-1)\n\n return results\n\n\ndef load_and_cache_examples(args, task, tokenizer, evaluate=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n processor = processors[task]()\n output_mode = output_modes[task]\n # Load data features from cache or dataset file\n cached_features_file = os.path.join(\n args.data_dir,\n \"cached_{}_{}_{}_{}\".format(\n \"dev\" if evaluate else \"train\",\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n if task 
in [\"mnli\", \"mnli-mm\"] and args.model_type in [\"roberta\", \"xlmroberta\"]:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1]\n examples = (\n processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)\n )\n features = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,\n )\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n if output_mode == \"classification\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\n return dataset\n\n\ndef main(file_path):\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument('--eval_file_path', help='path of the eval file')\n parser.add_argument(\n \"--data_dir\",\n default=\"tmp\",\n type=str,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\",\n )\n parser.add_argument(\n \"--model_type\",\n default=\"bert\",\n type=str,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=\"bert-base-cased\",\n type=str,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS),\n )\n parser.add_argument(\n \"--task_name\",\n default=\"sst-2\",\n type=str,\n help=\"The name of the task to train selected in the list: \" + \", \".join(processors.keys()),\n )\n parser.add_argument(\n \"--output_dir\",\n default=\"../data/out_cased\",\n type=str,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"../data/cache\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\",\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\",\n )\n\n parser.add_argument(\n \"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\",\n )\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=2e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\",\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\",\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\",\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n args.no_cuda = True\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n 
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.ERROR\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Prepare GLUE task\n args.task_name = args.task_name.lower()\n if args.task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n processor = processors[args.task_name]()\n args.output_mode = output_modes[args.task_name]\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=args.task_name,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n logger.info(\"Evaluation parameters %s\", args)\n\n # Evaluation\n if args.local_rank in [-1, 0]:\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n\n data_folder = args.data_dir\n dev_file_path = os.path.join(data_folder, 'dev.tsv')\n\n preds = []\n with open(file_path, encoding='utf8') as f:\n d = f.read().strip().split('\\n')\n for s in d:\n 
j = json.loads(s)\n preds += j['pred_style0']\n preds = '\\n'.join([s + '\\t0' for s in preds]) + '\\n'\n\n if os.path.exists(data_folder):\n shutil.rmtree(data_folder)\n os.makedirs(data_folder)\n with open(dev_file_path, 'w', encoding='utf8') as f:\n f.write(preds)\n\n result = evaluate(args, model, tokenizer, prefix=prefix)\n acc0 = result['acc']\n\n preds = []\n with open(file_path, encoding='utf8') as f:\n d = f.read().strip().split('\\n')\n for s in d:\n j = json.loads(s)\n preds += j['pred_style1']\n preds = '\\n'.join([s + '\\t1' for s in preds]) + '\\n'\n\n if os.path.exists(data_folder):\n shutil.rmtree(data_folder)\n os.makedirs(data_folder)\n with open(dev_file_path, 'w', encoding='utf8') as f:\n f.write(preds)\n\n result = evaluate(args, model, tokenizer, prefix=prefix)\n acc1 = result['acc']\n\n print('BERT:', 's0', acc0 * 100, 's1', acc1 * 100, 'mean', (acc0 + acc1) / 2 * 100)\n\n" ]
[ [ "torch.utils.data.DataLoader", "torch.cuda.manual_seed_all", "torch.no_grad", "numpy.random.seed", "torch.cuda.is_available", "torch.distributed.init_process_group", "torch.save", "torch.cuda.device_count", "torch.nn.DataParallel", "torch.utils.data.RandomSampler", "torch.device", "torch.cuda.set_device", "torch.load", "torch.distributed.get_world_size", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.tensor", "numpy.argmax", "torch.distributed.barrier", "torch.nn.parallel.DistributedDataParallel", "torch.utils.data.TensorDataset", "torch.utils.data.distributed.DistributedSampler", "numpy.squeeze", "numpy.exp", "torch.utils.tensorboard.SummaryWriter", "numpy.concatenate" ] ]
earthinversion/Fnet_IRIS_data_automated_download
[ "09a6e0c992662feac95744935e038d1c68539fa1" ]
[ "IRIS_data_download/IRIS_download_support/obspy/clients/fdsn/mass_downloader/download_helpers.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nHelpers for the mass downloader.\n\nIntended to simplify and stabilize the logic of the mass downloader and make\nit understandable in the first place.\n\n:copyright:\n Lion Krischer ([email protected]), 2014-2015\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nimport collections\nimport copy\nimport fnmatch\nimport itertools\nimport sys\nfrom multiprocessing.pool import ThreadPool\nimport os\nimport time\nimport timeit\n\nif sys.version_info.major == 2:\n from itertools import ifilterfalse as filterfalse\nelse:\n from itertools import filterfalse\n\nimport numpy as np\n\nfrom lxml.etree import XMLSyntaxError\n\nimport obspy\nfrom obspy.core.util import Enum\n\nfrom . import utils\n\n# The current status of an entity.\nSTATUS = Enum([\"none\", \"needs_downloading\", \"downloaded\", \"ignore\", \"exists\",\n \"download_failed\", \"download_rejected\",\n \"download_partially_failed\"])\n\n\nclass _SlotsEqualityComparisionObject(object):\n \"\"\"\n Helper object with an equality comparision method simply comparing all\n slotted attributes.\n \"\"\"\n __slots__ = []\n\n def __eq__(self, other):\n if type(self) != type(other):\n return False\n return all([getattr(self, _i) == getattr(other, _i)\n for _i in self.__slots__])\n\n\nclass Station(_SlotsEqualityComparisionObject):\n \"\"\"\n Object representing a seismic station within the download helper classes.\n\n It knows the coordinates of the station to perform the filtering,\n its channels and the filename and status of the StationXML files.\n\n :param network: The network code.\n :type network: str\n :param station: The station code.\n :type station: str\n :param latitude: The latitude of the station.\n :type latitude: float\n :param longitude: The longitude of the station.\n :type longitude: float\n :param channels: The channels of the station.\n :type channels: list of :class:`~.Channel` objects\n :param stationxml_filename: The filename of the StationXML file.\n :type stationxml_filename: str\n :param stationxml_status: The current status of the station.\n :type stationxml_filename:\n :class:`~.STATUS`\n \"\"\"\n __slots__ = [\"network\", \"station\", \"latitude\", \"longitude\", \"channels\",\n \"_stationxml_filename\", \"want_station_information\",\n \"miss_station_information\", \"have_station_information\",\n \"stationxml_status\"]\n\n def __init__(self, network, station, latitude, longitude, channels,\n stationxml_filename=None, stationxml_status=None):\n # Station attributes.\n self.network = network\n self.station = station\n self.latitude = latitude\n self.longitude = longitude\n self.channels = channels\n # Station information settings.\n self.stationxml_filename = stationxml_filename\n self.stationxml_status = stationxml_status and STATUS.NONE\n\n # Internally keep track of which channels and time interval want\n # station information, which miss station information and which\n # already have some. want_station_information should always be the\n # union of miss and have.\n self.want_station_information = {}\n self.miss_station_information = {}\n self.have_station_information = {}\n\n @property\n def has_existing_or_downloaded_time_intervals(self):\n \"\"\"\n Returns true if any of the station's time intervals have status\n \"DOWNLOADED\" or \"EXISTS\". 
Otherwise it returns False meaning it does\n not have to be considered anymore.\n \"\"\"\n status = set()\n for chan in self.channels:\n for ti in chan.intervals:\n status.add(ti.status)\n if STATUS.EXISTS in status or STATUS.DOWNLOADED in status:\n return True\n return False\n\n @property\n def has_existing_time_intervals(self):\n \"\"\"\n Returns True if any of the station's time intervals already exist.\n \"\"\"\n for chan in self.channels:\n for ti in chan.intervals:\n if ti.status == STATUS.EXISTS:\n return True\n return False\n\n def remove_files(self, logger, reason):\n \"\"\"\n Delete all files under it. Only delete stuff that actually has been\n downloaded!\n \"\"\"\n for chan in self.channels:\n for ti in chan.intervals:\n if ti.status != STATUS.DOWNLOADED or not ti.filename:\n continue\n if os.path.exists(ti.filename):\n logger.info(\"Deleting MiniSEED file '%s'. Reason: %s\" % (\n ti.filename, reason))\n utils.safe_delete(ti.filename)\n\n if self.stationxml_status == STATUS.DOWNLOADED and \\\n self.stationxml_filename and \\\n os.path.exists(self.stationxml_filename):\n logger.info(\"Deleting StationXMl file '%s'. Reason: %s\" %\n (self.stationxml_filename, reason))\n utils.safe_delete(self.stationxml_filename)\n\n @property\n def stationxml_filename(self):\n return self._stationxml_filename\n\n @stationxml_filename.setter\n def stationxml_filename(self, value):\n \"\"\"\n Setter creating the directory for the file if it does not already\n exist.\n \"\"\"\n self._stationxml_filename = value\n if not value:\n return\n dirname = os.path.dirname(value)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n @property\n def temporal_bounds(self):\n \"\"\"\n Return the temporal bounds for the station.\n \"\"\"\n starttimes = []\n endtimes = []\n for channel in self.channels:\n s, e = channel.temporal_bounds\n starttimes.append(s)\n endtimes.append(e)\n return min(starttimes), max(endtimes)\n\n def __str__(self):\n channels = \"\\n\".join(str(i) for i in self.channels)\n channels = \"\\n\\t\".join(channels.splitlines())\n return (\n \"Station '{network}.{station}' [Lat: {lat:.2f}, Lng: {lng:.2f}]\\n\"\n \"\\t-> Filename: {filename} ({status})\\n\"\n \"\\t-> Wants station information for channels: {want}\\n\"\n \"\\t-> Has station information for channels: {has}\\n\"\n \"\\t-> Misses station information for channels: {miss}\\n\"\n \"\\t{channels}\"\n ).format(\n network=self.network,\n station=self.station,\n lat=self.latitude,\n lng=self.longitude,\n filename=self.stationxml_filename,\n status=\"exists\" if (self.stationxml_filename and os.path.exists(\n self.stationxml_filename)) else \"does not yet exist\",\n want=\", \".join([\"%s.%s\" % (_i[0], _i[1]) for _i in\n self.want_station_information.keys()]),\n has=\", \".join([\"%s.%s\" % (_i[0], _i[1]) for _i in\n self.have_station_information.keys()]),\n miss=\", \".join([\"%s.%s\" % (_i[0], _i[1]) for _i in\n self.miss_station_information.keys()]),\n channels=channels)\n\n def prepare_stationxml_download(self, stationxml_storage, logger):\n \"\"\"\n Figure out what to download.\n\n :param stationxml_storage:\n \"\"\"\n # Determine what channels actually want to have station information.\n # This will be a tuple of location code, channel code, starttime,\n # and endtime.\n self.want_station_information = {}\n for channel in self.channels:\n if channel.needs_station_file is False:\n continue\n self.want_station_information[\n (channel.location, channel.channel)] = channel.temporal_bounds\n\n # No channel has any data, thus 
nothing will happen.\n if not self.want_station_information:\n self.stationxml_status = STATUS.NONE\n return\n\n # Only those channels that now actually want station information\n # will be treated in the following.\n s, e = self.temporal_bounds\n storage = utils.get_stationxml_filename(\n stationxml_storage, self.network, self.station,\n list(self.want_station_information.keys()),\n starttime=s, endtime=e)\n\n # The simplest case. The function returns a string. Now two things\n # can happen.\n if isinstance(storage, (str, bytes)):\n filename = storage\n self.stationxml_filename = filename\n # 1. The file does not yet exist. Thus all channels must be\n # downloaded.\n if not os.path.exists(filename):\n self.miss_station_information = \\\n copy.deepcopy(self.want_station_information)\n self.have_station_information = {}\n self.stationxml_status = STATUS.NEEDS_DOWNLOADING\n return\n # 2. The file does exist. It will be parsed. If it contains ALL\n # necessary information, nothing will happen. Otherwise it will\n # be overwritten.\n else:\n info = utils.get_stationxml_contents(filename)\n for c_id, times in self.want_station_information.items():\n # Get the temporal range of information in the file.\n c_info = [_i for _i in info if\n _i.network == self.network and\n _i.station == self.station and\n _i.location == c_id[0] and\n _i.channel == c_id[1]]\n if not c_info:\n break\n starttime = min([_i.starttime for _i in c_info])\n endtime = max([_i.endtime for _i in c_info])\n if starttime > times[0] or endtime < times[1]:\n break\n # All good if no break is called.\n else:\n self.have_station_information = \\\n copy.deepcopy(self.want_station_information)\n self.miss_station_information = {}\n self.stationxml_status = STATUS.EXISTS\n return\n # Otherwise everything will be downloaded.\n self.miss_station_information = \\\n copy.deepcopy(self.want_station_information)\n self.have_station_information = {}\n self.stationxml_status = STATUS.NEEDS_DOWNLOADING\n return\n # The other possibility is that a dictionary is returned.\n else:\n # The types are already checked by the get_stationxml_filename()\n # function.\n missing_channels = storage[\"missing_channels\"]\n available_channels = storage[\"available_channels\"]\n\n # Get the channels wanting station information and filter them.\n channels_wanting_station_information = copy.deepcopy(\n self.want_station_information\n )\n\n # Figure out what channels are missing and will be downloaded.\n self.miss_station_information = {}\n for channel in missing_channels:\n if channel not in channels_wanting_station_information:\n continue\n self.miss_station_information[channel] = \\\n channels_wanting_station_information[channel]\n\n # Same thing but with the already available channels.\n self.have_station_information = {}\n for channel in available_channels:\n if channel not in channels_wanting_station_information:\n continue\n self.have_station_information[channel] = \\\n channels_wanting_station_information[channel]\n\n self.stationxml_filename = storage[\"filename\"]\n\n # Raise a warning if something is missing, but do not raise an\n # exception or halt the program at this point.\n have_channels = set(self.have_station_information.keys())\n miss_channels = set(self.miss_station_information.keys())\n want_channels = set(self.want_station_information.keys())\n if have_channels.union(miss_channels) != want_channels:\n logger.warning(\n \"The custom `stationxml_storage` did not return \"\n \"information about channels %s\" %\n 
str(want_channels.difference(have_channels.union(\n miss_channels))))\n\n if self.miss_station_information:\n self.stationxml_status = STATUS.NEEDS_DOWNLOADING\n elif not self.miss_station_information and \\\n self.have_station_information:\n self.stationxml_status = STATUS.EXISTS\n else:\n self.stationxml_status = STATUS.IGNORE\n\n def prepare_mseed_download(self, mseed_storage):\n \"\"\"\n Loop through all channels of the station and distribute filenames\n and the current status of the channel.\n\n A MiniSEED interval will be ignored, if the `mseed_storage` function\n returns `True`.\n Possible statuses after method execution are IGNORE, EXISTS, and\n NEEDS_DOWNLOADING.\n\n :param mseed_storage:\n \"\"\"\n for channel in self.channels:\n for interval in channel.intervals:\n interval.filename = utils.get_mseed_filename(\n mseed_storage, self.network, self.station,\n channel.location, channel.channel, interval.start,\n interval.end)\n if interval.filename is True:\n interval.status = STATUS.IGNORE\n elif os.path.exists(interval.filename):\n interval.status = STATUS.EXISTS\n else:\n if not os.path.exists(os.path.dirname(interval.filename)):\n os.makedirs(os.path.dirname(interval.filename))\n interval.status = STATUS.NEEDS_DOWNLOADING\n\n def sanitize_downloads(self, logger):\n \"\"\"\n Should be run after the MiniSEED and StationXML downloads finished.\n It will make sure that every MiniSEED file also has a corresponding\n StationXML file.\n\n It will delete MiniSEED files but never a StationXML file. The logic\n of the download helpers does not allow for a StationXML file with no\n data.\n \"\"\"\n from obspy.io.mseed.util import get_start_and_end_time\n # All or nothing for each channel.\n for id in self.miss_station_information.keys():\n logger.warning(\"Station information could not be downloaded for \"\n \"%s.%s.%s.%s. MiniSEED files outside of the \"\n \"station information period \"\n \"will be deleted!\" % (\n self.network, self.station, id[0], id[1]))\n channel = [_i for _i in self.channels if\n (_i.location, _i.channel) == id][0]\n for time_interval in channel.intervals:\n # Check that file exists before proceeding\n if not time_interval.filename or \\\n not os.path.isfile(time_interval.filename):\n continue\n # Check that the time_interval.start and end are correct!\n time_interval.start, time_interval.end = \\\n get_start_and_end_time(time_interval.filename)\n # Only delete downloaded things!\n if time_interval.status == STATUS.DOWNLOADED:\n # Only delete if the station data are actually missing\n # for this time\n miss_start, miss_end = self.miss_station_information[id]\n if miss_start <= time_interval.start <= miss_end and \\\n miss_start <= time_interval.end <= miss_end:\n utils.safe_delete(time_interval.filename)\n time_interval.status = STATUS.DOWNLOAD_REJECTED\n\n\nclass Channel(_SlotsEqualityComparisionObject):\n \"\"\"\n Object representing a Channel. Each time interval should end up in one\n MiniSEED file.\n \"\"\"\n __slots__ = [\"location\", \"channel\", \"intervals\"]\n\n def __init__(self, location, channel, intervals):\n self.location = location\n self.channel = channel\n self.intervals = intervals\n\n @property\n def needs_station_file(self):\n \"\"\"\n Determine if the channel requires any station information.\n\n As soon as the status of at least one interval is either\n ``DOWNLOADED`` or ``EXISTS`` the whole channel will be thought of as\n requiring station information. This does not yet mean that station\n information will be downloaded. 
That is decided at a later stage.\n \"\"\"\n status = set([_i.status for _i in self.intervals])\n if STATUS.DOWNLOADED in status or STATUS.EXISTS in status:\n return True\n return False\n\n @property\n def temporal_bounds(self):\n \"\"\"\n Returns a tuple of the minimum start time and the maximum end time.\n \"\"\"\n return (min([_i.start for _i in self.intervals]),\n max([_i.end for _i in self.intervals]))\n\n def __str__(self):\n return \"Channel '{location}.{channel}':\\n\\t{intervals}\".format(\n location=self.location, channel=self.channel,\n intervals=\"\\n\\t\".join([str(i) for i in self.intervals]))\n\n\nclass TimeInterval(_SlotsEqualityComparisionObject):\n \"\"\"\n Simple object representing a time interval of a channel.\n\n It knows the temporal bounds of the interval, the (desired) filename,\n and the current status of the interval.\n\n :param start: The start of the interval.\n :type start: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param end: The end of the interval.\n :type end: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param filename: The filename of the interval.\n :type filename: str\n :param status: The status of the time interval.\n :param status: :class:`~.STATUS`\n \"\"\"\n __slots__ = [\"start\", \"end\", \"filename\", \"status\"]\n\n def __init__(self, start, end, filename=None, status=None):\n self.start = start\n self.end = end\n self.filename = filename\n self.status = status if status is not None else STATUS.NONE\n\n def __repr__(self):\n return \"TimeInterval(start={start}, end={end}, filename={filename}, \" \\\n \"status='{status}')\".format(\n start=repr(self.start),\n end=repr(self.end),\n filename=\"'%s'\" % self.filename\n if self.filename is not None else \"None\",\n status=str(self.status))\n\n\nclass ClientDownloadHelper(object):\n \"\"\"\n :type client: :class:`obspy.fdsn.client.Client`\n :param client: An initialized FDSN client.\n :type client_name: str\n :param client_name: The name of the client. 
Only used for logging.\n :type restrictions: :class:`~.restrictions.Restrictions`\n :param restrictions: The non-domain related restrictions for the query.\n :type domain: :class:`~.domain.Domain` subclass\n :param domain: The domain definition.\n :param mseed_storage: The MiniSEED storage settings.\n :param stationxml_storage: The StationXML storage settings.\n :param logger: An active logger instance.\n \"\"\"\n def __init__(self, client, client_name, restrictions, domain,\n mseed_storage, stationxml_storage, logger):\n self.client = client\n self.client_name = client_name\n self.restrictions = restrictions\n self.domain = domain\n self.mseed_storage = mseed_storage\n self.stationxml_storage = stationxml_storage\n self.logger = logger\n self.stations = {}\n self.is_availability_reliable = None\n\n def __bool__(self):\n return bool(len(self))\n\n def __str__(self):\n avail_map = {\n None: \"Unknown reliability of availability information\",\n True: \"Reliable availability information\",\n False: \"Non-reliable availability information\"\n }\n reliability = avail_map[self.is_availability_reliable]\n return (\n \"ClientDownloadHelper object for client '{client}' ({url})\\n\"\n \"-> {reliability}\\n\"\n \"-> Manages {station_count} stations.\\n{stations}\").format(\n client=self.client_name,\n url=self.client.base_url,\n reliability=reliability,\n station_count=len(self),\n stations=\"\\n\".join([str(_i) for _i in self.stations.values()]))\n\n def __len__(self):\n return len(self.stations)\n\n def prepare_mseed_download(self):\n \"\"\"\n Prepare each Station for the MiniSEED downloading stage.\n\n This will distribute filenames and identify files that require\n downloading.\n \"\"\"\n for station in self.stations.values():\n station.prepare_mseed_download(mseed_storage=self.mseed_storage)\n\n def filter_stations_based_on_minimum_distance(\n self, existing_client_dl_helpers):\n \"\"\"\n Removes stations until all stations have a certain minimum distance to\n each other.\n\n Returns the rejected stations which is mainly useful for testing.\n\n :param existing_client_dl_helpers: Instances of already existing\n client download helpers.\n :type existing_client_dl_helpers: list of\n :class:`~.ClientDownloadHelper`\n \"\"\"\n if not self.restrictions.minimum_interstation_distance_in_m:\n # No rejected stations.\n return []\n\n # Create a sorted copy that will be used in the following. Make it\n # more deterministic by sorting the stations based on the id.\n stations = copy.copy(list(self.stations.values()))\n stations = sorted(stations, key=lambda x: (x.network, x.station))\n\n existing_stations = []\n for dlh in existing_client_dl_helpers:\n existing_stations.extend(list(dlh.stations.values()))\n\n remaining_stations = []\n rejected_stations = []\n\n # There are essentially two possibilities. 
If no station exists yet,\n # it will choose the largest subset of stations satisfying the\n # minimum inter-station distance constraint.\n if not existing_stations:\n # Build k-d-tree and query for the neighbours of each point within\n # the minimum distance.\n kd_tree = utils.SphericalNearestNeighbour(stations)\n nns = kd_tree.query_pairs(\n self.restrictions.minimum_interstation_distance_in_m)\n\n indexes_to_remove = []\n # Keep removing the station with the most pairs until no pairs are\n # left.\n while nns:\n most_common = collections.Counter(\n itertools.chain.from_iterable(nns)).most_common()[0][0]\n indexes_to_remove.append(most_common)\n nns = list(filterfalse(lambda x: most_common in x, nns))\n\n # Remove these indices this results in a set of stations we wish to\n # keep.\n new_remaining_stations = [_i[1] for _i in enumerate(stations)\n if _i[0] not in indexes_to_remove]\n new_rejected_stations = [_i[1] for _i in enumerate(stations)\n if _i[0] in indexes_to_remove]\n\n # Station objects are not hashable thus we have to go the long\n # route.\n for st in new_remaining_stations:\n if st not in remaining_stations:\n remaining_stations.append(st)\n\n for st in new_rejected_stations:\n if st not in rejected_stations:\n rejected_stations.append(st)\n\n # Otherwise it will add new stations approximating a Poisson disk\n # distribution.\n else:\n while stations:\n # kd-tree with all existing_stations\n existing_kd_tree = utils.SphericalNearestNeighbour(\n existing_stations)\n # Now we have to get the distance to the closest existing\n # station for all new stations.\n distances = np.ma.array(existing_kd_tree.query(stations)[0])\n if np.isinf(distances[0]):\n break\n distances.mask = False\n\n # Step one is to get rid of all stations that are closer\n # than the minimum distance to any existing station.\n remove = np.where(\n distances <\n self.restrictions.minimum_interstation_distance_in_m)[0]\n rejected_stations.extend([stations[_i] for _i in remove])\n\n keep = np.where(\n distances >=\n self.restrictions.minimum_interstation_distance_in_m)[0]\n distances.mask[remove] = True\n\n if len(keep):\n # Station with the largest distance to next closer station.\n largest = np.argmax(distances)\n remaining_stations.append(stations[largest])\n existing_stations.append(stations[largest])\n\n # Add all rejected stations here.\n stations = [stations[_i] for _i in keep if _i != largest]\n else:\n stations = []\n\n # Now actually delete the files and everything of the rejected\n # stations.\n for station in rejected_stations:\n station.remove_files(logger=self.logger,\n reason=\"Minimum distance filtering.\")\n self.stations = {}\n for station in remaining_stations:\n self.stations[(station.network, station.station)] = station\n\n # Return the rejected stations.\n return {(_i.network, _i.station): _i for _i in rejected_stations}\n\n def prepare_stationxml_download(self):\n \"\"\"\n Prepare each Station for the StationXML downloading stage.\n\n This will distribute filenames and identify files that require\n downloading.\n \"\"\"\n for station in self.stations.values():\n station.prepare_stationxml_download(\n stationxml_storage=self.stationxml_storage,\n logger=self.logger)\n\n def download_stationxml(self, threads=3):\n \"\"\"\n Actually download the StationXML files.\n\n :param threads: Limits the maximum number of threads for the client.\n \"\"\"\n\n def star_download_station(args):\n \"\"\"\n Maps arguments to the utils.download_stationxml() function.\n\n :param args: The to-be mapped 
arguments.\n \"\"\"\n try:\n ret_val = utils.download_stationxml(*args, logger=self.logger)\n except utils.ERRORS as e:\n self.logger.error(str(e))\n return None\n return ret_val\n\n # Build up everything we want to download.\n arguments = []\n for station in self.stations.values():\n if not station.miss_station_information:\n continue\n s, e = station.temporal_bounds\n if self.restrictions.station_starttime:\n s = self.restrictions.station_starttime\n if self.restrictions.station_endtime:\n e = self.restrictions.station_endtime\n bulk = [(station.network, station.station, channel.location,\n channel.channel, s, e) for channel in station.channels]\n arguments.append((self.client, self.client_name, bulk,\n station.stationxml_filename))\n\n if not arguments:\n self.logger.info(\"Client '%s' - No station information to \"\n \"download.\" % self.client_name)\n return\n\n # Download it.\n s_time = timeit.default_timer()\n pool = ThreadPool(min(threads, len(arguments)))\n results = pool.map(star_download_station, arguments)\n pool.close()\n e_time = timeit.default_timer()\n\n results = [_i for _i in results if _i is not None]\n\n # Check it.\n filecount = 0\n download_size = 0\n\n # Update the station structures. Loop over each returned file.\n for s_id, filename in results:\n filecount += 1\n station = self.stations[s_id]\n size = os.path.getsize(filename)\n download_size += size\n\n # Extract information about that file.\n try:\n info = utils.get_stationxml_contents(filename)\n # Sometimes some services choose to not return XML files - guard\n # against it and just delete the file. At subsequent runs the\n # mass downloader will attempt to download it again.\n except XMLSyntaxError:\n self.logger.info(\n \"Client '%s' - File %s is not an XML file - it will be \"\n \"deleted.\" % (self.client_name, filename))\n utils.safe_delete(filename)\n continue\n\n still_missing = {}\n # Make sure all missing information has been downloaded by\n # looping over each channel of the station that originally\n # requested to be downloaded.\n for c_id, times in station.miss_station_information.items():\n # Get the temporal range of information in the file.\n c_info = [_i for _i in info if\n _i.network == station.network and\n _i.station == station.station and\n _i.location == c_id[0] and\n _i.channel == c_id[1]]\n if not c_info:\n continue\n starttime = min([_i.starttime for _i in c_info])\n endtime = max([_i.endtime for _i in c_info])\n if starttime > times[0] or endtime < times[1]:\n # Cope with case that not full day of station info missing\n if starttime < times[1]:\n still_missing[c_id] = (times[0], starttime)\n station.have_station_information[c_id] = (starttime,\n times[1])\n elif endtime > times[0]:\n still_missing[c_id] = (endtime, times[1])\n station.have_station_information[c_id] = (times[0],\n endtime)\n else:\n still_missing[c_id] = times\n continue\n station.have_station_information[c_id] = times\n\n station.miss_station_information = still_missing\n if still_missing:\n station.stationxml_status = STATUS.DOWNLOAD_PARTIALLY_FAILED\n else:\n station.stationxml_status = STATUS.DOWNLOADED\n\n # Now loop over all stations and set the status of the ones that\n # still need downloading to download failed.\n for station in self.stations.values():\n if station.stationxml_status == STATUS.NEEDS_DOWNLOADING:\n station.stationxml_status = STATUS.DOWNLOAD_FAILED\n\n self.logger.info(\"Client '%s' - Downloaded %i station files [%.1f MB] \"\n \"in %.1f seconds [%.2f KB/sec].\" % (\n self.client_name, filecount,\n 
download_size / 1024.0 ** 2,\n e_time - s_time,\n (download_size / 1024.0) / (e_time - s_time)))\n\n def download_mseed(self, chunk_size_in_mb=25, threads_per_client=3):\n \"\"\"\n Actually download MiniSEED data.\n\n :param chunk_size_in_mb: Attempt to download data in chunks of this\n size.\n :param threads_per_client: Threads to launch per client. 3 seems to\n be a value in agreement with some data centers.\n \"\"\"\n # Estimate the download size to have equally sized chunks.\n channel_sampling_rate = {\n \"F\": 5000, \"G\": 5000, \"D\": 1000, \"C\": 1000, \"E\": 250, \"S\": 80,\n \"H\": 250, \"B\": 80, \"M\": 10, \"L\": 1, \"V\": 0.1, \"U\": 0.01,\n \"R\": 0.001, \"P\": 0.0001, \"T\": 0.00001, \"Q\": 0.000001, \"A\": 5000,\n \"O\": 5000}\n\n # Split into chunks of about equal size in terms of filesize.\n chunks = []\n chunks_curr = []\n curr_chunks_mb = 0\n\n # Don't request more than 50 chunks at once to not choke the servers.\n max_chunk_length = 50\n\n counter = collections.Counter()\n\n # Keep track of attempted downloads.\n for sta in self.stations.values():\n for cha in sta.channels:\n # The band code is used to estimate the sampling rate of the\n # data to be downloaded.\n band_code = cha.channel[0].upper()\n try:\n sr = channel_sampling_rate[band_code]\n except KeyError:\n # Generic sampling rate for exotic band codes.\n sr = 1.0\n\n for interval in cha.intervals:\n counter[interval.status] += 1\n # Only take those time intervals that actually require\n # some downloading.\n if interval.status != STATUS.NEEDS_DOWNLOADING:\n continue\n chunks_curr.append((\n sta.network, sta.station, cha.location, cha.channel,\n interval.start, interval.end, interval.filename))\n # Assume that each sample needs 4 byte, STEIM\n # compression reduces size to about a third.\n # chunk size is in MB\n duration = interval.end - interval.start\n curr_chunks_mb += \\\n sr * duration * 4.0 / 3.0 / 1024.0 / 1024.0\n if curr_chunks_mb >= chunk_size_in_mb or \\\n len(chunks_curr) >= max_chunk_length:\n chunks.append(chunks_curr)\n chunks_curr = []\n curr_chunks_mb = 0\n if chunks_curr:\n chunks.append(chunks_curr)\n\n keys = sorted(counter.keys())\n for key in keys:\n self.logger.info(\n \"Client '%s' - Status for %i time intervals/channels before \"\n \"downloading: %s\" % (self.client_name, counter[key],\n key.upper()))\n\n if not chunks:\n return\n\n def star_download_mseed(args):\n \"\"\"\n Star maps the arguments to the\n utils.download_and_split_mseed_bulk() function.\n\n :param args: The arguments to be passed.\n \"\"\"\n try:\n ret_val = utils.download_and_split_mseed_bulk(\n *args, logger=self.logger)\n except utils.ERRORS as e:\n msg = (\"Client '%s' - \" % args[1]) + str(e)\n if \"no data available\" in msg.lower():\n self.logger.info(msg.split(\"Detailed response\")[0].strip())\n else:\n self.logger.error(msg)\n return []\n return ret_val\n\n pool = ThreadPool(min(threads_per_client, len(chunks)))\n\n d_start = timeit.default_timer()\n pool.map(\n star_download_mseed,\n [(self.client, self.client_name, chunk) for chunk in chunks])\n pool.close()\n d_end = timeit.default_timer()\n\n self.logger.info(\"Client '%s' - Launching basic QC checks...\" %\n self.client_name)\n downloaded_bytes, discarded_bytes = self._check_downloaded_data()\n total_bytes = downloaded_bytes + discarded_bytes\n\n self.logger.info(\"Client '%s' - Downloaded %.1f MB [%.2f KB/sec] of \"\n \"data, %.1f MB of which were discarded afterwards.\" %\n (self.client_name, total_bytes / 1024.0 ** 2,\n total_bytes / 1024.0 / (d_end - 
d_start),\n discarded_bytes / 1024.0 ** 2))\n\n # Recount everything to be able to emit some nice statistics.\n counter = collections.Counter()\n for sta in self.stations.values():\n for chan in sta.channels:\n for interval in chan.intervals:\n counter[interval.status] += 1\n keys = sorted(counter.keys())\n for key in keys:\n self.logger.info(\n \"Client '%s' - Status for %i time intervals/channels after \"\n \"downloading: %s\" % (\n self.client_name, counter[key], key.upper()))\n\n self._remove_failed_and_ignored_stations()\n\n def _remove_failed_and_ignored_stations(self):\n \"\"\"\n Removes all stations that have no time interval with either exists\n or downloaded status.\n \"\"\"\n to_be_removed_keys = []\n for key, station in self.stations.items():\n if station.has_existing_or_downloaded_time_intervals is True:\n continue\n to_be_removed_keys.append(key)\n for key in to_be_removed_keys:\n del self.stations[key]\n\n def sanitize_downloads(self):\n \"\"\"\n Should be run after the MiniSEED and StationXML downloads finished.\n It will make sure that every MiniSEED file also has a corresponding\n StationXML file.\n \"\"\"\n for station in self.stations.values():\n station.sanitize_downloads(logger=self.logger)\n\n def _check_downloaded_data(self):\n \"\"\"\n Read the downloaded data, set the proper status flags and remove\n data that does not meet the QC criteria. It just checks the\n downloaded data for minimum length and gaps/overlaps.\n\n Returns the downloaded_bytes and the discarded_bytes.\n \"\"\"\n downloaded_bytes = 0\n discarded_bytes = 0\n for sta in self.stations.values():\n for cha in sta.channels:\n for interval in cha.intervals:\n # The status of the interval should not have changed if\n # it did not require downloading in the first place.\n if interval.status != STATUS.NEEDS_DOWNLOADING:\n continue\n\n # If the file does not exist, mark the time interval as\n # download failed.\n if not os.path.exists(interval.filename):\n interval.status = STATUS.DOWNLOAD_FAILED\n continue\n\n size = os.path.getsize(interval.filename)\n if size == 0:\n self.logger.warning(\"Zero byte file '%s'. Will be \"\n \"deleted.\" % interval.filename)\n utils.safe_delete(interval.filename)\n interval.status = STATUS.DOWNLOAD_FAILED\n continue\n\n # Guard against faulty files.\n try:\n st = obspy.read(interval.filename, headonly=True)\n except Exception as e:\n self.logger.warning(\n \"Could not read file '%s' due to: %s\\n\"\n \"Will be discarded.\" % (interval.filename, str(e)))\n utils.safe_delete(interval.filename)\n discarded_bytes += size\n interval.status = STATUS.DOWNLOAD_FAILED\n continue\n\n # Valid files with no data.\n if len(st) == 0:\n self.logger.warning(\n \"Empty file '%s'. Will be deleted.\" %\n interval.filename)\n utils.safe_delete(interval.filename)\n discarded_bytes += size\n interval.status = STATUS.DOWNLOAD_FAILED\n continue\n\n # If user did not want gappy files, remove them.\n if self.restrictions.reject_channels_with_gaps is True and\\\n len(st) > 1:\n self.logger.info(\n \"File '%s' has %i traces and thus contains \"\n \"gaps or overlaps. 
Will be deleted.\" % (\n interval.filename, len(st)))\n utils.safe_delete(interval.filename)\n discarded_bytes += size\n interval.status = STATUS.DOWNLOAD_REJECTED\n continue\n\n if self.restrictions.minimum_length:\n duration = sum([tr.stats.endtime - tr.stats.starttime\n for tr in st])\n expected_min_duration = \\\n self.restrictions.minimum_length * \\\n (interval.end - interval.start)\n if duration < expected_min_duration:\n self.logger.info(\n \"File '%s' has only %.2f seconds of data. \"\n \"%.2f are required. File will be deleted.\" %\n (interval.filename, duration,\n expected_min_duration))\n utils.safe_delete(interval.filename)\n discarded_bytes += size\n interval.status = STATUS.DOWNLOAD_REJECTED\n continue\n\n downloaded_bytes += size\n interval.status = STATUS.DOWNLOADED\n return downloaded_bytes, discarded_bytes\n\n def _parse_miniseed_filenames(self, filenames, restrictions):\n time_range = restrictions.minimum_length * (restrictions.endtime -\n restrictions.starttime)\n channel_availability = []\n for filename in filenames:\n st = obspy.read(filename, format=\"MSEED\", headonly=True)\n if restrictions.reject_channels_with_gaps and len(st) > 1:\n self.logger.warning(\"Channel %s has gap or overlap. Will be \"\n \"removed.\" % st[0].id)\n try:\n os.remove(filename)\n except OSError:\n pass\n continue\n elif len(st) == 0:\n self.logger.error(\"MiniSEED file with no data detected. \"\n \"Should not happen!\")\n continue\n tr = st[0]\n duration = tr.stats.endtime - tr.stats.starttime\n if restrictions.minimum_length and duration < time_range:\n self.logger.warning(\"Channel %s does not satisfy the minimum \"\n \"length requirement. %.2f seconds instead \"\n \"of the required %.2f seconds.\" % (\n tr.id, duration, time_range))\n try:\n os.remove(filename)\n except OSError:\n pass\n continue\n channel_availability.append(utils.ChannelAvailability(\n tr.stats.network, tr.stats.station, tr.stats.location,\n tr.stats.channel, tr.stats.starttime, tr.stats.endtime,\n filename))\n return channel_availability\n\n def discard_stations(self, existing_client_dl_helpers):\n \"\"\"\n Discard all stations part of any of the already existing client\n download helper instances. The station discarding happens purely\n based on station ids.\n\n :param existing_client_dl_helpers: Instances of already existing\n client download helpers. All stations part of this will not be\n downloaded anymore.\n :type existing_client_dl_helpers: list of\n :class:`~.ClientDownloadHelper`\n \"\"\"\n station_ids = []\n for helper in existing_client_dl_helpers:\n station_ids.extend(helper.stations.keys())\n\n for station_id in station_ids:\n try:\n del self.stations[station_id]\n except KeyError:\n pass\n\n def get_availability(self):\n \"\"\"\n Queries the current client for information on what stations are\n available given the spatial and temporal restrictions.\n \"\"\"\n # Check if stations needs to be filtered after downloading or if the\n # restrictions one can impose with the FDSN webservices queries are\n # enough. 
This depends on the domain definition.\n try:\n self.domain.is_in_domain(0, 0)\n needs_filtering = True\n except NotImplementedError:\n needs_filtering = False\n\n arguments = {\n \"network\": self.restrictions.network,\n \"station\": self.restrictions.station,\n \"location\": self.restrictions.location,\n \"channel\": self.restrictions.channel,\n \"starttime\": self.restrictions.starttime,\n \"endtime\": self.restrictions.endtime,\n # Request at the channel level.\n \"level\": \"channel\"\n }\n # Add the domain specific query parameters.\n arguments.update(self.domain.get_query_parameters())\n\n # Check the capabilities of the service and see what is the most\n # appropriate way of acquiring availability information. Some services\n # right now require manual overriding of what they claim to be\n # capable of.\n if \"matchtimeseries\" in self.client.services[\"station\"]:\n arguments[\"matchtimeseries\"] = True\n if \"format\" in self.client.services[\"station\"]:\n arguments[\"format\"] = \"text\"\n self.is_availability_reliable = True\n else:\n if \"format\" in self.client.services[\"station\"]:\n arguments[\"format\"] = \"text\"\n self.is_availability_reliable = False\n\n if self.is_availability_reliable:\n self.logger.info(\"Client '%s' - Requesting reliable \"\n \"availability.\" % self.client_name)\n else:\n self.logger.info(\n \"Client '%s' - Requesting unreliable availability.\" %\n self.client_name)\n\n try:\n start = time.time()\n inv = self.client.get_stations(**arguments)\n end = time.time()\n except utils.ERRORS as e:\n if \"no data available\" in str(e).lower():\n self.logger.info(\n \"Client '%s' - No data available for request.\" %\n self.client_name)\n return\n self.logger.error(\n \"Client '{0}' - Failed getting availability: %s\".format(\n self.client_name), str(e))\n return\n # This sometimes fires if a service returns some random stuff which\n # is not a valid station file.\n except Exception as e:\n self.logger.error(\n \"Client '{0}' - Failed getting availability due to \"\n \"unexpected exception: %s\".format(self.client_name), str(e))\n return\n\n self.logger.info(\"Client '%s' - Successfully requested availability \"\n \"(%.2f seconds)\" % (self.client_name, end - start))\n\n # Get the time intervals from the restrictions.\n intervals = [TimeInterval(start=_i[0], end=_i[1])\n for _i in self.restrictions]\n\n for network in inv:\n # Skip network if so desired.\n skip_network = False\n for pattern in self.restrictions.exclude_networks:\n if fnmatch.fnmatch(network.code, pattern):\n skip_network = True\n break\n if skip_network:\n continue\n\n for station in network:\n # Skip station if so desired.\n skip_station = False\n for pattern in self.restrictions.exclude_stations:\n if fnmatch.fnmatch(station.code, pattern):\n skip_station = True\n break\n if skip_station:\n continue\n\n # If an inventory is given, only keep stations part of the\n # inventory.\n if self.restrictions.limit_stations_to_inventory is not None \\\n and (network.code, station.code) not in \\\n self.restrictions.limit_stations_to_inventory:\n continue\n\n # Skip the station if it is not in the desired domain.\n if needs_filtering is True and \\\n not self.domain.is_in_domain(station.latitude,\n station.longitude):\n continue\n\n channels = []\n for channel in station.channels:\n # Remove channels that somehow slipped past the temporal\n # constraints due to weird behaviour from the data center.\n if (channel.start_date > self.restrictions.endtime) or \\\n (channel.end_date < 
self.restrictions.starttime):\n continue\n new_channel = Channel(\n location=channel.location_code, channel=channel.code,\n intervals=copy.deepcopy(intervals))\n # Multiple channel epochs would result in duplicate\n # channels which we don't want. Bit of a silly logic here\n # to get rid of them.\n if new_channel not in channels:\n channels.append(new_channel)\n\n if self.restrictions.channel is None:\n # Group by locations and apply the channel priority filter\n # to each.\n filtered_channels = []\n\n def get_loc(x):\n return x.location\n\n for location, _channels in itertools.groupby(\n sorted(channels, key=get_loc), get_loc):\n filtered_channels.extend(utils.filter_channel_priority(\n list(_channels), key=\"channel\",\n priorities=self.restrictions.channel_priorities))\n channels = filtered_channels\n\n if self.restrictions.location is None:\n # Filter to remove unwanted locations according to the\n # priority list.\n channels = utils.filter_channel_priority(\n channels, key=\"location\",\n priorities=self.restrictions.location_priorities)\n\n if not channels:\n continue\n\n self.stations[(network.code, station.code)] = Station(\n network=network.code,\n station=station.code,\n latitude=station.latitude,\n longitude=station.longitude,\n channels=channels)\n self.logger.info(\"Client '%s' - Found %i stations (%i channels).\" % (\n self.client_name, len(self.stations),\n sum([len(_i.channels) for _i in self.stations.values()])))\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(exclude_empty=True)\n" ]
[ [ "numpy.where", "numpy.isinf", "numpy.argmax" ] ]
tripzero/deepvoice3_pytorch
[ "90027d27dab2889d856f9db9ffaf39d4f70b3067" ]
[ "deepvoice3_pytorch/modules.py" ]
[ "# coding: utf-8\n\nimport torch\nfrom torch import nn\nimport math\nimport numpy as np\nfrom torch.nn import functional as F\n\n\ndef position_encoding_init(n_position, d_pos_vec, position_rate=1.0,\n sinusoidal=True):\n ''' Init the sinusoid position encoding table '''\n\n # keep dim 0 for padding token position encoding zero vector\n position_enc = np.array([\n [position_rate * pos / np.power(10000, 2 * (i // 2) / d_pos_vec) for i in range(d_pos_vec)]\n if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])\n\n position_enc = torch.from_numpy(position_enc).float()\n if sinusoidal:\n position_enc[1:, 0::2] = torch.sin(position_enc[1:, 0::2]) # dim 2i\n position_enc[1:, 1::2] = torch.cos(position_enc[1:, 1::2]) # dim 2i+1\n\n return position_enc\n\n\ndef sinusoidal_encode(x, w):\n y = w * x\n y[1:, 0::2] = torch.sin(y[1:, 0::2].clone())\n y[1:, 1::2] = torch.cos(y[1:, 1::2].clone())\n return y\n\n\nclass SinusoidalEncoding(nn.Embedding):\n\n def __init__(self, num_embeddings, embedding_dim,\n *args, **kwargs):\n super(SinusoidalEncoding, self).__init__(num_embeddings, embedding_dim,\n padding_idx=0,\n *args, **kwargs)\n self.weight.data = position_encoding_init(num_embeddings, embedding_dim,\n position_rate=1.0,\n sinusoidal=False)\n\n def forward(self, x, w=1.0):\n isscaler = np.isscalar(w)\n assert self.padding_idx is not None\n\n if isscaler or w.size(0) == 1:\n weight = sinusoidal_encode(self.weight, w)\n return F.embedding(\n x, weight, self.padding_idx, self.max_norm,\n self.norm_type, self.scale_grad_by_freq, self.sparse)\n else:\n # TODO: cannot simply apply for batch\n # better to implement efficient function\n pe = []\n for batch_idx, we in enumerate(w):\n weight = sinusoidal_encode(self.weight, we)\n pe.append(F.embedding(\n x[batch_idx], weight, self.padding_idx, self.max_norm,\n self.norm_type, self.scale_grad_by_freq, self.sparse))\n pe = torch.stack(pe)\n return pe\n\n\nclass GradMultiply(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x, scale):\n ctx.scale = scale\n res = x.new(x)\n ctx.mark_shared_storage((x, res))\n return res\n\n @staticmethod\n def backward(ctx, grad):\n return grad * ctx.scale, None\n\n\ndef Linear(in_features, out_features, dropout=0):\n \"\"\"Weight-normalized Linear layer (input: N x T x C)\"\"\"\n m = nn.Linear(in_features, out_features)\n m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))\n m.bias.data.zero_()\n return nn.utils.weight_norm(m)\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx, std=0.01):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n m.weight.data.normal_(0, std)\n return m\n\n\ndef Conv1d(in_channels, out_channels, kernel_size, dropout=0, std_mul=4.0, **kwargs):\n from .conv import Conv1d\n m = Conv1d(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((std_mul * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n m.weight.data.normal_(mean=0, std=std)\n m.bias.data.zero_()\n return nn.utils.weight_norm(m)\n\n\ndef ConvTranspose1d(in_channels, out_channels, kernel_size, dropout=0,\n std_mul=1.0, **kwargs):\n m = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((std_mul * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n m.weight.data.normal_(mean=0, std=std)\n m.bias.data.zero_()\n return nn.utils.weight_norm(m)\n\n\nclass Conv1dGLU(nn.Module):\n \"\"\"(Dilated) Conv1d + Gated linear unit + (optionally) speaker embedding\n \"\"\"\n\n def __init__(self, n_speakers, 
speaker_embed_dim,\n in_channels, out_channels, kernel_size,\n dropout, padding=None, dilation=1, causal=False, residual=False,\n *args, **kwargs):\n super(Conv1dGLU, self).__init__()\n self.dropout = dropout\n self.residual = residual\n if padding is None:\n # no future time stamps available\n if causal:\n padding = (kernel_size - 1) * dilation\n else:\n padding = (kernel_size - 1) // 2 * dilation\n self.causal = causal\n\n self.conv = Conv1d(in_channels, 2 * out_channels, kernel_size,\n dropout=dropout, padding=padding, dilation=dilation,\n *args, **kwargs)\n if n_speakers > 1:\n self.speaker_proj = Linear(speaker_embed_dim, out_channels)\n else:\n self.speaker_proj = None\n\n def forward(self, x, speaker_embed=None):\n return self._forward(x, speaker_embed, False)\n\n def incremental_forward(self, x, speaker_embed=None):\n return self._forward(x, speaker_embed, True)\n\n def _forward(self, x, speaker_embed, is_incremental):\n residual = x\n x = F.dropout(x, p=self.dropout, training=self.training)\n if is_incremental:\n splitdim = -1\n x = self.conv.incremental_forward(x)\n else:\n splitdim = 1\n x = self.conv(x)\n # remove future time steps\n x = x[:, :, :residual.size(-1)] if self.causal else x\n\n a, b = x.split(x.size(splitdim) // 2, dim=splitdim)\n if self.speaker_proj is not None:\n softsign = F.softsign(self.speaker_proj(speaker_embed))\n # Since conv layer assumes BCT, we need to transpose\n softsign = softsign if is_incremental else softsign.transpose(1, 2)\n a = a + softsign\n x = a * torch.sigmoid(b)\n return (x + residual) * math.sqrt(0.5) if self.residual else x\n\n def clear_buffer(self):\n self.conv.clear_buffer()\n\n\nclass HighwayConv1d(nn.Module):\n \"\"\"Weight normzlized Conv1d + Highway network (support incremental forward)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size=1, padding=None,\n dilation=1, causal=False, dropout=0, std_mul=None, glu=False):\n super(HighwayConv1d, self).__init__()\n if std_mul is None:\n std_mul = 4.0 if glu else 1.0\n if padding is None:\n # no future time stamps available\n if causal:\n padding = (kernel_size - 1) * dilation\n else:\n padding = (kernel_size - 1) // 2 * dilation\n self.causal = causal\n self.dropout = dropout\n self.glu = glu\n\n self.conv = Conv1d(in_channels, 2 * out_channels,\n kernel_size=kernel_size, padding=padding,\n dilation=dilation, dropout=dropout,\n std_mul=std_mul)\n\n def forward(self, x):\n return self._forward(x, False)\n\n def incremental_forward(self, x):\n return self._forward(x, True)\n\n def _forward(self, x, is_incremental):\n \"\"\"Forward\n\n Args:\n x: (B, in_channels, T)\n returns:\n (B, out_channels, T)\n \"\"\"\n\n residual = x\n x = F.dropout(x, p=self.dropout, training=self.training)\n if is_incremental:\n splitdim = -1\n x = self.conv.incremental_forward(x)\n else:\n splitdim = 1\n x = self.conv(x)\n # remove future time steps\n x = x[:, :, :residual.size(-1)] if self.causal else x\n\n if self.glu:\n x = F.glu(x, dim=splitdim)\n return (x + residual) * math.sqrt(0.5)\n else:\n a, b = x.split(x.size(splitdim) // 2, dim=splitdim)\n T = torch.sigmoid(b)\n return (T * a + (1 - T) * residual)\n\n def clear_buffer(self):\n self.conv.clear_buffer()\n\n\ndef get_mask_from_lengths(memory, memory_lengths):\n \"\"\"Get mask tensor from list of length\n Args:\n memory: (batch, max_time, dim)\n memory_lengths: array like\n \"\"\"\n mask = memory.data.new(memory.size(0), memory.size(1)).byte().zero_()\n for idx, l in enumerate(memory_lengths):\n mask[idx][:l] = 1\n return ~mask\n" ]
[ [ "torch.nn.ConvTranspose1d", "torch.stack", "torch.nn.Linear", "torch.nn.functional.embedding", "torch.cos", "torch.nn.functional.dropout", "numpy.zeros", "torch.nn.Embedding", "torch.sin", "torch.from_numpy", "torch.nn.functional.glu", "numpy.power", "torch.nn.utils.weight_norm", "torch.sigmoid", "numpy.isscalar" ] ]
boldsort/craftassist
[ "8058d115a250e30deb60d969b7b1a5fefd6e974c" ]
[ "python/base_agent/ttad/back_translation/modeling_gpt2.py" ]
[ "# coding=utf-8\n# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch OpenAI GPT-2 model.\"\"\"\n\n\nimport logging\nimport os\nimport warnings\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss\nfrom transformers.modeling_gpt2 import PreTrainedModel, GPT2Config\nfrom transformers.modeling_outputs import (\n BaseModelOutputWithPast,\n CausalLMOutputWithPast,\n GPT2DoubleHeadsModelOutput,\n)\nfrom transformers.activations import ACT2FN\nfrom transformers.modeling_utils import (\n Conv1D,\n prune_conv1d_layer,\n SequenceSummary,\n find_pruneable_heads_and_indices,\n)\n\nlogger = logging.getLogger(__name__)\n\n_CONFIG_FOR_DOC = \"GPT2Config\"\n_TOKENIZER_FOR_DOC = \"GPT2Tokenizer\"\n\nGPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"gpt2\",\n \"gpt2-medium\",\n \"gpt2-large\",\n \"gpt2-xl\",\n \"distilgpt2\",\n # See all GPT-2 models at https://huggingface.co/models?filter=gpt2\n]\n\n\ndef load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model\n \"\"\"\n try:\n import re\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(gpt2_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array.squeeze())\n\n for name, array in zip(names, arrays):\n name = name[6:] # skip \"model/\"\n name = name.split(\"/\")\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+\\d+\", m_name):\n scope_names = re.split(r\"(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"w\" or scope_names[0] == \"g\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"b\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"wpe\" or scope_names[0] == \"wte\":\n pointer = getattr(pointer, scope_names[0])\n pointer = getattr(pointer, \"weight\")\n else:\n pointer = getattr(pointer, scope_names[0])\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass Attention(nn.Module):\n def __init__(self, nx, n_ctx, config, scale=False):\n super().__init__()\n\n n_state = nx # in Attention: n_state=768 (nx=n_embd)\n # [switch nx => n_state from Block to Attention to keep identical to TF implem]\n assert n_state % config.n_head == 0\n self.register_buffer(\n \"bias\",\n torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx),\n )\n self.register_buffer(\"masked_bias\", torch.tensor(-1e4))\n self.n_head = config.n_head\n self.split_size = n_state\n self.scale = scale\n\n self.c_attn = Conv1D(n_state * 3, nx)\n # TODO: check config.hidden_size\n self.query = nn.Linear(n_state, nx)\n self.key = nn.Linear(n_state, nx)\n self.value = nn.Linear(n_state, nx)\n\n self.c_proj = Conv1D(n_state, nx)\n self.attn_dropout = nn.Dropout(config.attn_pdrop)\n self.resid_dropout = nn.Dropout(config.resid_pdrop)\n self.pruned_heads = set()\n self.config = config\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.n_head, self.split_size // self.n_head, self.pruned_heads\n )\n index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])\n\n # Prune conv1d layers\n self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)\n self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)\n\n # Update hyper params\n self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))\n self.n_head = self.n_head - len(heads)\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):\n w = torch.matmul(q, k)\n if self.scale:\n w = w / (float(v.size(-1)) ** 0.5)\n nd, ns = w.size(-2), w.size(-1)\n mask = self.bias[:, :, ns - nd : ns, :ns]\n w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))\n\n if attention_mask is not None:\n # Apply the attention mask\n w = w + 
attention_mask\n\n w = nn.Softmax(dim=-1)(w)\n w = self.attn_dropout(w)\n\n # Mask heads if we want to\n if head_mask is not None:\n w = w * head_mask\n\n outputs = [torch.matmul(w, v)]\n if output_attentions:\n outputs.append(w)\n return outputs\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def forward(\n self,\n x,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n use_cache=False,\n output_attentions=False,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n ):\n if self.config.is_decoder:\n assert encoder_hidden_states is not None\n key = self.key(encoder_hidden_states)\n value = self.value(encoder_hidden_states)\n query = self.query(x)\n else:\n x = self.c_attn(x)\n query, key, value = x.split(self.split_size, dim=2)\n query = self.split_heads(query)\n key = self.split_heads(key, k=True)\n value = self.split_heads(value)\n\n if layer_past is not None:\n past_key, past_value = (\n layer_past[0].transpose(-2, -1),\n layer_past[1],\n ) # transpose back cf below\n key = torch.cat((past_key, key), dim=-1)\n value = torch.cat((past_value, value), dim=-2)\n\n if use_cache is True:\n present = torch.stack(\n (key.transpose(-2, -1), value)\n ) # transpose to have same shapes for stacking\n else:\n present = (None,)\n if self.config.is_decoder:\n attn_outputs = self._attn(\n query, key, value, encoder_attention_mask, head_mask, output_attentions\n )\n else:\n attn_outputs = self._attn(\n query, key, value, attention_mask, head_mask, output_attentions\n )\n\n at = attn_outputs[0]\n at = self.merge_heads(at)\n at = self.c_proj(at)\n at = self.resid_dropout(at)\n outputs = [at, present] + attn_outputs[1:]\n return outputs # a, present, (attentions)\n\n\nclass MLP(nn.Module):\n def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)\n super().__init__()\n nx = config.n_embd\n self.c_fc = Conv1D(n_state, nx)\n self.c_proj = Conv1D(nx, n_state)\n self.act = ACT2FN[config.activation_function]\n self.dropout = nn.Dropout(config.resid_pdrop)\n\n def forward(self, x):\n h = self.act(self.c_fc(x))\n h2 = self.c_proj(h)\n return self.dropout(h2)\n\n\nclass Block(nn.Module):\n def __init__(self, n_ctx, config, scale=False):\n super().__init__()\n nx = config.n_embd\n inner_dim = config.n_inner if config.n_inner is not None else 4 * nx\n self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)\n self.attn = Attention(nx, n_ctx, config, scale)\n self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)\n self.mlp = MLP(inner_dim, config)\n self.config = config\n \"\"\"\n TODO: add another self attention layer?\n \"\"\"\n\n def forward(\n self,\n x,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n use_cache=False,\n output_attentions=False,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n ):\n output_attn = self.attn(\n self.ln_1(x),\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n encoder_hidden_states=encoder_hidden_states,\n 
encoder_attention_mask=encoder_attention_mask,\n )\n a = output_attn[0] # output_attn: a, present, (attentions)\n x = x + a\n m = self.mlp(self.ln_2(x))\n x = x + m\n\n outputs = [x] + output_attn[1:]\n return outputs # x, present, (attentions)\n\n\nclass GPT2PreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = GPT2Config\n load_tf_weights = load_tf_weights_in_gpt2\n base_model_prefix = \"transformer\"\n\n def __init__(self, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nclass GPT2Model(GPT2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n self.drop = nn.Dropout(config.embd_pdrop)\n self.h = nn.ModuleList(\n [Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)]\n )\n self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.wte = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.h[layer].attn.prune_heads(heads)\n\n def forward(\n self,\n input_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n use_cache=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n if \"past\" in kwargs:\n warnings.warn(\n \"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"past\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n\n output_attentions = (\n output_attentions if output_attentions is not None else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot specify both input_ids and inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have 
to specify either input_ids or inputs_embeds\")\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past_key_values is None:\n past_length = 0\n past_key_values = [None] * len(self.h)\n else:\n past_length = past_key_values[0][0].size(-2)\n if position_ids is None:\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n position_ids = torch.arange(\n past_length, input_shape[-1] + past_length, dtype=torch.long, device=device\n )\n position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n # Attention mask.\n if attention_mask is not None:\n assert batch_size > 0, \"batch_size has to be defined and > 0\"\n attention_mask = attention_mask.view(batch_size, -1)\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_mask = attention_mask.to(\n dtype=next(self.parameters()).dtype\n ) # fp16 compatibility\n attention_mask = (1.0 - attention_mask) * -10000.0\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # head_mask has shape n_layer x batch x n_heads x N x N\n head_mask = self.get_head_mask(head_mask, self.config.n_layer)\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape)\n\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n position_embeds = self.wpe(position_ids)\n if token_type_ids is not None:\n token_type_embeds = self.wte(token_type_ids)\n else:\n token_type_embeds = 0\n hidden_states = inputs_embeds + position_embeds + token_type_embeds\n hidden_states = self.drop(hidden_states)\n\n output_shape = input_shape + (hidden_states.size(-1),)\n\n presents = () if use_cache else None\n all_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)\n\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n use_cache=use_cache,\n output_attentions=output_attentions,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n )\n\n hidden_states, present = outputs[:2]\n if use_cache is True:\n presents = presents + (present,)\n\n if output_attentions:\n all_attentions = all_attentions + (outputs[2],)\n\n hidden_states = 
self.ln_f(hidden_states)\n hidden_states = hidden_states.view(*output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, presents, all_hidden_states, all_attentions]\n if v is not None\n )\n\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n )\n\n\nclass GPT2LMHeadModel(GPT2PreTrainedModel):\n authorized_missing_keys = [r\"h\\.\\d+\\.attn\\.masked_bias\", r\"lm_head\\.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.transformer = GPT2Model(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def prepare_inputs_for_generation(self, input_ids, past, **kwargs):\n # only last token for inputs_ids if past is defined in kwargs\n if past:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n\n return {\"input_ids\": input_ids, \"past_key_values\": past, \"use_cache\": kwargs[\"use_cache\"]}\n\n def forward(\n self,\n input_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n use_cache=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for language modeling.\n Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``\n Indices are selected in ``[-100, 0, ..., config.vocab_size]``\n All labels set to ``-100`` are ignored (masked), the loss is only\n computed for labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n if \"past\" in kwargs:\n warnings.warn(\n \"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"past\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n 
hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n\nclass GPT2DoubleHeadsModel(GPT2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n config.num_labels = 1\n self.transformer = GPT2Model(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n self.multiple_choice_head = SequenceSummary(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def forward(\n self,\n input_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n mc_token_ids=None,\n labels=None,\n mc_labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input):\n Index of the classification token in each input sequence.\n Selected in the range ``[0, input_ids.size(-1) - 1[``.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for language modeling.\n Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``\n Indices are selected in ``[-100, 0, ..., config.vocab_size]``\n All labels set to ``-100`` are ignored (masked), the loss is only\n computed for labels in ``[0, ..., config.vocab_size]``\n mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`, defaults to :obj:`None`):\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension\n of the input tensors. 
(see `input_ids` above)\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Return:\n\n Examples::\n\n >>> import torch\n >>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel\n\n >>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n >>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2', return_dict=True)\n\n >>> # Add a [CLS] to the vocabulary (we should train it also!)\n >>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})\n\n >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size\n\n >>> choices = [\"Hello, my dog is cute [CLS]\", \"Hello, my cat is cute [CLS]\"]\n >>> encoded_choices = [tokenizer.encode(s) for s in choices]\n >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]\n\n >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2\n >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1\n\n >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)\n >>> lm_logits = outputs.lm_logits\n >>> mc_logits = outputs.mc_logits\n\n \"\"\"\n if \"lm_labels\" in kwargs:\n warnings.warn(\n \"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"lm_labels\")\n if \"past\" in kwargs:\n warnings.warn(\n \"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"past\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)\n\n mc_loss = None\n if mc_labels is not None:\n loss_fct = CrossEntropyLoss()\n mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))\n lm_loss = None\n if labels is not None:\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n if not return_dict:\n output = (lm_logits, mc_logits) + transformer_outputs[1:]\n if mc_loss is not None:\n output = (mc_loss,) + output\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return GPT2DoubleHeadsModelOutput(\n lm_loss=lm_loss,\n mc_loss=mc_loss,\n lm_logits=lm_logits,\n mc_logits=mc_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n" ]
[ [ "torch.ones", "torch.nn.Linear", "tensorflow.train.list_variables", "torch.nn.Softmax", "torch.tensor", "torch.nn.Embedding", "torch.nn.CrossEntropyLoss", "torch.from_numpy", "torch.nn.LayerNorm", "torch.arange", "tensorflow.train.load_variable", "torch.cat", "torch.nn.Dropout", "torch.matmul" ] ]
louis2889184/sg2im
[ "6df2095bf58703c7d6d74bf47535a7cf45690bc0" ]
[ "scripts/pl_sequence_train.py" ]
[ "import os\nimport json\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom collections import OrderedDict\n\nfrom sg2im.utils import timeit, bool_flag, LossManager\nfrom sg2im.utils import int_tuple, float_tuple, str_tuple\nfrom sg2im.data.vg import SequenceTransformerVgSceneGraphDataset\n\nimport pytorch_lightning as pl\nfrom transformers import (\n BertTokenizerFast, \n BertTokenizer, \n EncoderDecoderModel, \n EncoderDecoderConfig, \n AutoModel,\n BertForSequenceClassification,\n)\n\nfrom pytorch_lightning.plugins import DDPPlugin\n\n\nVG_DIR = os.path.expanduser('datasets/vg')\nCOCO_DIR = os.path.expanduser('datasets/coco')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--test', action='store_true', default=False)\nparser.add_argument('--dataset', default='coco', choices=['vg', 'coco'])\nparser.add_argument('--scene_graphs_json', default='scene_graphs/figure_6_sheep.json')\nparser.add_argument('--load_checkpoint', default=\"\")\n\n# Optimization hyperparameters\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--num_iterations', default=1000000, type=int)\nparser.add_argument('--learning_rate', default=1e-5, type=float)\nparser.add_argument('--gpus', default=1, type=int)\n\n# Switch the generator to eval mode after this many iterations\nparser.add_argument('--eval_mode_after', default=100000, type=int)\n\n# Dataset options common to both VG and COCO\nparser.add_argument('--image_size', default='64,64', type=int_tuple)\nparser.add_argument('--num_train_samples', default=None, type=int)\nparser.add_argument('--num_val_samples', default=1024, type=int)\nparser.add_argument('--shuffle_val', default=True, type=bool_flag)\nparser.add_argument('--loader_num_workers', default=4, type=int)\nparser.add_argument('--include_relationships', default=True, type=bool_flag)\n\n# VG-specific options\nparser.add_argument('--vg_image_dir', default=os.path.join(VG_DIR, 'images'))\nparser.add_argument('--train_h5', default=os.path.join(VG_DIR, 'train.h5'))\nparser.add_argument('--val_h5', default=os.path.join(VG_DIR, 'val.h5'))\nparser.add_argument('--vocab_json', default=os.path.join(VG_DIR, 'vocab.json'))\nparser.add_argument('--max_objects_per_image', default=10, type=int)\nparser.add_argument('--vg_use_orphaned_objects', default=True, type=bool_flag)\n\n# COCO-specific options\nparser.add_argument('--coco_train_image_dir',\n default=os.path.join(COCO_DIR, 'images/train2017'))\nparser.add_argument('--coco_val_image_dir',\n default=os.path.join(COCO_DIR, 'images/val2017'))\nparser.add_argument('--coco_train_instances_json',\n default=os.path.join(COCO_DIR, 'annotations/instances_train2017.json'))\nparser.add_argument('--coco_train_stuff_json',\n default=os.path.join(COCO_DIR, 'annotations/stuff_train2017.json'))\nparser.add_argument('--coco_val_instances_json',\n default=os.path.join(COCO_DIR, 'annotations/instances_val2017.json'))\nparser.add_argument('--coco_val_stuff_json',\n default=os.path.join(COCO_DIR, 'annotations/stuff_val2017.json'))\nparser.add_argument('--instance_whitelist', default=None, type=str_tuple)\nparser.add_argument('--stuff_whitelist', default=None, type=str_tuple)\nparser.add_argument('--coco_include_other', default=False, type=bool_flag)\nparser.add_argument('--min_object_size', default=0.02, type=float)\nparser.add_argument('--min_objects_per_image', default=3, type=int)\nparser.add_argument('--coco_stuff_only', default=True, 
type=bool_flag)\nparser.add_argument('--max_lengths_for_image', default=1024, type=int)\n\n# Generator options\nparser.add_argument('--mask_size', default=16, type=int) # Set this to 0 to use no masks\nparser.add_argument('--embedding_dim', default=128, type=int)\nparser.add_argument('--gconv_dim', default=128, type=int)\nparser.add_argument('--gconv_hidden_dim', default=512, type=int)\nparser.add_argument('--gconv_num_layers', default=5, type=int)\nparser.add_argument('--mlp_normalization', default='none', type=str)\nparser.add_argument('--refinement_network_dims', default='1024,512,256,128,64', type=int_tuple)\nparser.add_argument('--normalization', default='batch')\nparser.add_argument('--activation', default='leakyrelu-0.2')\nparser.add_argument('--layout_noise_dim', default=32, type=int)\nparser.add_argument('--use_boxes_pred_after', default=-1, type=int)\n\n# Generator losses\nparser.add_argument('--mask_loss_weight', default=0, type=float)\nparser.add_argument('--l1_pixel_loss_weight', default=1.0, type=float)\nparser.add_argument('--bbox_pred_loss_weight', default=10, type=float)\nparser.add_argument('--predicate_pred_loss_weight', default=0, type=float) # DEPRECATED\n\n# Generic discriminator options\nparser.add_argument('--discriminator_loss_weight', default=0.01, type=float)\nparser.add_argument('--gan_loss_type', default='gan')\nparser.add_argument('--d_clip', default=None, type=float)\nparser.add_argument('--d_normalization', default='batch')\nparser.add_argument('--d_padding', default='valid')\nparser.add_argument('--d_activation', default='leakyrelu-0.2')\n\n# Object discriminator\nparser.add_argument('--d_obj_arch',\n default='C4-64-2,C4-128-2,C4-256-2')\nparser.add_argument('--crop_size', default=32, type=int)\nparser.add_argument('--d_obj_weight', default=1.0, type=float) # multiplied by d_loss_weight \nparser.add_argument('--ac_loss_weight', default=0.1, type=float)\n\n# Image discriminator\nparser.add_argument('--d_img_arch',\n default='C4-64-2,C4-128-2,C4-256-2')\nparser.add_argument('--d_img_weight', default=1.0, type=float) # multiplied by d_loss_weight\n\n# Output options\nparser.add_argument('--print_every', default=10, type=int)\nparser.add_argument('--timing', default=False, type=bool_flag)\nparser.add_argument('--checkpoint_every', default=10000, type=int)\nparser.add_argument('--output_dir', default=os.getcwd())\nparser.add_argument('--checkpoint_name', default='checkpoint')\nparser.add_argument('--checkpoint_start_from', default=None)\nparser.add_argument('--restore_from_checkpoint', default=False, type=bool_flag)\n\n\nclass VGDataModule(pl.LightningDataModule):\n\n def __init__(self, args, tokenizer, num_workers=8):\n super().__init__()\n self.args = args\n self.tokenizer = tokenizer\n self.num_workers = num_workers\n self.batch_size = args.batch_size\n\n def setup(self, stage=None):\n args = self.args\n with open(args.vocab_json, 'r') as f:\n vocab = json.load(f)\n dset_kwargs = {\n 'vocab': vocab,\n 'h5_path': args.train_h5,\n 'image_dir': args.vg_image_dir,\n 'image_size': args.image_size,\n 'max_samples': args.num_train_samples,\n 'max_objects': args.max_objects_per_image,\n 'use_orphaned_objects': args.vg_use_orphaned_objects,\n 'include_relationships': args.include_relationships,\n 'max_lengths_for_image': args.max_lengths_for_image\n }\n train_dset = SequenceTransformerVgSceneGraphDataset(\n **dset_kwargs, tokenizer=self.tokenizer\n )\n # iter_per_epoch = len(train_dset) // args.batch_size\n # print('There are %d iterations per epoch' % 
iter_per_epoch)\n\n dset_kwargs['h5_path'] = args.val_h5\n del dset_kwargs['max_samples']\n\n val_dset = SequenceTransformerVgSceneGraphDataset(\n **dset_kwargs, tokenizer=self.tokenizer\n )\n self.train_dset = train_dset\n self.val_dset = val_dset\n\n def train_dataloader(self):\n return DataLoader(\n self.train_dset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True\n )\n\n def val_dataloader(self):\n return DataLoader(self.val_dset, batch_size=self.batch_size, num_workers=self.num_workers)\n\n def test_dataloader(self):\n return DataLoader(self.val_dset, batch_size=self.batch_size, num_workers=self.num_workers)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, backbone):\n super().__init__()\n self.backbone = BertForSequenceClassification.from_pretrained(backbone)\n \n def forward(self, *args, **kwargs):\n outputs = self.backbone(*args, **kwargs)\n\n return outputs[\"loss\"]\n\n def apply_word_embeddings(self, inputs):\n \"\"\"\n Because Gumbel softmax outputs cannot be fed directly to a huggingface model,\n we have to compute the `inputs_embeds` manually.\n \"\"\"\n word_embeddings = self.backbone.bert.embeddings.word_embeddings\n\n return torch.matmul(inputs, word_embeddings.weight)\n\n\nclass Generator(nn.Module):\n def __init__(self, backbone):\n super().__init__()\n self.backbone = EncoderDecoderModel.from_encoder_decoder_pretrained(\n backbone, backbone, tie_encoder_decoder=True\n )\n\n def forward(self, *args, **kwargs):\n return self.backbone(*args, **kwargs)\n\n def forward_logits(self, *args, **kwargs):\n return self.backbone(*args, **kwargs)[\"logits\"]\n\n def forward_loss(self, *args, **kwargs):\n return self.backbone(*args, **kwargs)[\"loss\"]\n\n def apply_word_embeddings(self, inputs):\n \"\"\"\n Because Gumbel softmax outputs cannot be fed directly to a huggingface model,\n we have to compute the `inputs_embeds` manually.\n \"\"\"\n word_embeddings = self.backbone.encoder.embeddings.word_embeddings\n\n return torch.matmul(inputs, word_embeddings.weight)\n\n\nclass GAN(pl.LightningModule):\n\n def __init__(\n self,\n args,\n tokenizer,\n backbone=None,\n ):\n super().__init__()\n\n self.args = args\n\n self.validation_z = torch.randn(8, 100)\n self.tokenizer = tokenizer\n self.discriminator = Discriminator(backbone)\n self.generator = Generator(backbone)\n\n self.graph_special_token = \"[graph]\"\n self.image_special_token = \"[image]\"\n\n self.tau = 1\n\n self.image_token_id_list, self.text_token_id_list = self.retrieve_bad_image_text_tokens_ids()\n\n def retrieve_bad_image_text_tokens_ids(self):\n special_tokens_list = [\"[CLS]\", \"[SEP]\"]\n image_tokens_list = [f\"[itoken{i}]\" for i in range(512)]\n extra_image_tokens_list = [f\"[itoken{i}]\" for i in range(512, 32 * 32)]\n \n vocab = self.tokenizer.get_vocab()\n\n special_tokens_id_list = [vocab[token] for token in special_tokens_list]\n image_token_id_list = [vocab[token] for token in image_tokens_list]\n extra_image_tokens_id_list = [vocab[token] for token in extra_image_tokens_list]\n text_token_id_list = [v for k, v in vocab.items()]\n\n text_token_id_list = \\\n list(set(text_token_id_list) - set(image_token_id_list) - set(extra_image_tokens_id_list))\n\n return image_token_id_list + extra_image_tokens_id_list, text_token_id_list + extra_image_tokens_id_list\n\n def adversarial_loss(self, y_hat, y):\n return F.binary_cross_entropy_with_logits(y_hat, y)\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n # sample noise\n # z = torch.randn(imgs.shape[0], self.hparams.latent_dim)\n 
# z = z.type_as(imgs)\n\n generator_batch = {\n \"input_ids\": batch[\"sent_input/input_ids\"],\n \"attention_mask\": batch[\"sent_input/attention_mask\"],\n \"decoder_input_ids\": batch[\"code_output/input_ids\"],\n \"decoder_attention_mask\": batch[\"code_output/attention_mask\"],\n \"labels\": batch[\"code_output/input_ids\"].clone()\n }\n\n # exclude the loss for padding tokens\n generator_batch[\"labels\"][generator_batch[\"labels\"] == self.tokenizer.pad_token_id] = -100\n\n # train generator\n if optimizer_idx == 0:\n logits = self.generator.forward_logits(**generator_batch)\n\n predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)\n\n # log sampled images\n # sample_imgs = self.generated_imgs[:6]\n # grid = torchvision.utils.make_grid(sample_imgs)\n # self.logger.experiment.add_image('generated_images', grid, 0)\n\n # ground truth result (ie: all fake)\n # put on GPU because we created this tensor inside training_loop\n\n predictions_embedding = self.generator.apply_word_embeddings(predictions)\n\n fake_batch = {\n \"inputs_embeds\": predictions_embedding,\n \"attention_mask\": batch[\"code_output/attention_mask\"],\n \"decoder_input_ids\": batch[\"sent_output/input_ids\"],\n \"decoder_attention_mask\": batch[\"sent_output/attention_mask\"],\n \"labels\": batch[\"sent_output/input_ids\"].clone()\n }\n\n fake_batch[\"labels\"][fake_batch[\"labels\"] == self.tokenizer.pad_token_id] = -100\n\n ac_loss = self.generator.forward_loss(**fake_batch)\n\n predictions_embedding = self.discriminator.apply_word_embeddings(predictions)\n\n fake_dis_batch = {\n \"inputs_embeds\": predictions_embedding,\n \"attention_mask\": batch[\"code_output/attention_mask\"],\n \"labels\": torch.ones(predictions_embedding.shape[0]).type_as(predictions_embedding).long()\n }\n\n g_d_loss = self.discriminator(**fake_dis_batch)\n\n g_loss = g_d_loss + ac_loss\n # g_loss = ac_loss\n\n self.log('g_ac_loss', ac_loss, prog_bar=True)\n self.log('g_d_loss', g_d_loss, prog_bar=True)\n\n # return {\"loss\": g_loss}\n # train discriminator (inverse generator)\n # if optimizer_idx == 1:\n # Measure discriminator's ability to classify real from generated samples\n logits = self.generator.forward_logits(**generator_batch)\n\n predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)\n\n # don't compute the gradients of the generator\n predictions = predictions.detach()\n\n predictions_embedding = self.generator.apply_word_embeddings(predictions)\n\n fake_batch = {\n \"inputs_embeds\": predictions_embedding,\n \"attention_mask\": batch[\"code_output/attention_mask\"],\n \"decoder_input_ids\": batch[\"sent_output/input_ids\"],\n \"decoder_attention_mask\": batch[\"sent_output/attention_mask\"],\n \"labels\": batch[\"sent_output/input_ids\"].clone()\n }\n\n fake_batch[\"labels\"][fake_batch[\"labels\"] == self.tokenizer.pad_token_id] = -100\n\n fake_ac_loss = self.generator.forward_loss(**fake_batch)\n\n # For real data\n real_batch = {\n \"input_ids\": batch[\"code_output/input_ids\"],\n \"attention_mask\": batch[\"code_output/attention_mask\"],\n \"decoder_input_ids\": batch[\"sent_output/input_ids\"],\n \"decoder_attention_mask\": batch[\"sent_output/attention_mask\"],\n \"labels\": batch[\"sent_output/input_ids\"].clone()\n }\n\n real_batch[\"labels\"][real_batch[\"labels\"] == self.tokenizer.pad_token_id] = -100\n\n real_ac_loss = self.generator.forward_loss(**real_batch)\n\n ac_loss = (real_ac_loss + fake_ac_loss) / 2\n\n self.log('ac_loss', ac_loss, prog_bar=True)\n # return {\"loss\": 
ac_loss}\n return g_loss + ac_loss\n\n # train discriminator\n if optimizer_idx == 1:\n # Measure discriminator's ability to classify real from generated samples\n\n logits = self.generator.forward_logits(**generator_batch)\n\n # don't compute the gradients of the generator\n predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)\n\n predictions_embedding = self.discriminator.apply_word_embeddings(predictions)\n\n fake_dis_batch = {\n \"inputs_embeds\": predictions_embedding,\n \"attention_mask\": batch[\"code_output/attention_mask\"],\n \"labels\": torch.zeros(predictions.shape[0]).type_as(predictions).long()\n }\n\n fake_loss = self.discriminator(**fake_dis_batch)\n\n # fake = torch.zeros(fake_preds.shape)\n # fake = fake.type_as(fake_preds)\n\n # fake_loss = self.adversarial_loss(fake_preds, fake)\n\n real_dis_batch = {\n \"input_ids\": batch[\"code_output/input_ids\"],\n \"attention_mask\": batch[\"code_output/attention_mask\"],\n \"labels\": torch.ones(predictions.shape[0]).type_as(predictions).long()\n }\n\n real_loss = self.discriminator(**real_dis_batch)\n\n # real = torch.ones(real_preds.shape)\n # real = real.type_as(real_preds)\n\n # real_loss = self.adversarial_loss(real_preds, real)\n\n # discriminator loss is the average of these\n d_loss = (real_loss + fake_loss) / 2\n\n self.log('d_loss', d_loss, prog_bar=True)\n return d_loss\n\n def configure_optimizers(self):\n lr = self.args.learning_rate\n\n opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(0.5, 0.999))\n opt_d = torch.optim.Adam(\n self.discriminator.parameters(), \n lr=lr, \n betas=(0.5, 0.999)\n )\n return [opt_g, opt_d], []\n\n # def on_epoch_end(self):\n # z = self.validation_z.type_as(self.generator.model[0].weight)\n\n # # log sampled images\n # sample_imgs = self(z)\n # grid = torchvision.utils.make_grid(sample_imgs)\n # self.logger.experiment.add_image('generated_images', grid, self.current_epoch)\n\n def test_step(self, batch, batch_idx):\n pass\n\n def inference(self, scene_graphs_json):\n scene_graphs = self.read_scene_graphs(scene_graphs_json)\n\n image_tokens_generation = self.generator.backbone.generate(\n scene_graphs[\"input_ids\"], \n max_length=66, \n # num_beams=5, \n # no_repeat_ngram_size=2, \n # early_stopping=True,\n do_sample=True,\n top_p=0.92, \n top_k=0,\n decoder_start_token_id=self.generator.backbone.config.decoder.pad_token_id,\n bad_words_ids=[[ids] for ids in self.text_token_id_list],\n )\n\n print(image_tokens_generation)\n\n output = []\n\n for data in image_tokens_generation:\n output.append(self.tokenizer.decode(data, skip_special_tokens=True))\n print(output[-1])\n\n reconstructed_graph = self.generator.backbone.generate(\n image_tokens_generation, \n max_length=64, \n # num_beams=5, \n # no_repeat_ngram_size=2, \n # early_stopping=True,\n do_sample=True,\n top_p=0.92, \n top_k=0,\n decoder_start_token_id=self.generator.backbone.config.decoder.pad_token_id,\n bad_words_ids=[[ids]for ids in self.image_token_id_list],\n )\n\n for data in reconstructed_graph:\n print(self.tokenizer.decode(data, skip_special_tokens=True))\n\n \n if not os.path.exists(self.args.output_dir):\n os.makedirs(self.args.output_dir)\n itokens_output_file = os.path.join(self.args.output_dir, \"itokens_output.json\")\n\n with open(itokens_output_file, \"w\") as f:\n json.dump(output, f, indent=2)\n\n def read_scene_graphs(self, scene_graphs_json):\n with open(scene_graphs_json, 'r') as f:\n scene_graphs = json.load(f)\n\n if isinstance(scene_graphs, dict):\n # We just got a 
single scene graph, so promote it to a list\n scene_graphs = [scene_graphs]\n\n objs, triples, obj_to_img = [], [], []\n obj_offset = 0\n sents_list = []\n for i, sg in enumerate(scene_graphs):\n # Insert dummy __image__ object and __in_image__ relationships\n sents = []\n for s, p, o in sg['relationships']:\n sent = f\"{sg['objects'][s]} {p} {sg['objects'][o]}.\"\n sents.append(sent)\n\n sent = \" \".join(sents)\n sent = f\"{self.graph_special_token} {sent} {self.image_special_token}\"\n\n sents_list.append(sent)\n\n print(sent)\n \n sent_tensor = self.tokenizer(\n sents_list, \n return_tensors=\"pt\", \n padding=\"max_length\", \n max_length=64, \n truncation=True,\n add_special_tokens=False\n )\n\n device = next(self.parameters()).device\n sent_tensor = {k: v.to(device) for k, v in sent_tensor.items()}\n\n return sent_tensor\n\n\ndef main(args):\n backbone = \"bert-base-uncased-itokens\"\n tokenizer = BertTokenizerFast.from_pretrained(backbone)\n\n # encoder_decoder_config = EncoderDecoderConfig.from_pretrained(\"bert-base-uncased-itokens\")\n # model = EncoderDecoderModel.from_pretrained(\n # \"bert-base-uncased-itokens\", config=encoder_decoder_config\n # )\n\n # model = EncoderDecoderModel.from_encoder_decoder_pretrained(\n # \"bert-base-uncased-itokens\", \"bert-base-uncased-itokens\", tie_encoder_decoder=True\n # )\n\n # generator = Generator(model)\n\n # discriminator = Discriminator(\n # AutoModel.from_pretrained(\"bert-base-uncased-itokens\")\n # )\n\n if args.test:\n model = GAN.load_from_checkpoint(\n args.load_checkpoint,\n args=args, \n tokenizer=tokenizer, \n backbone=backbone\n )\n model.cuda()\n model.eval()\n\n model.inference(args.scene_graphs_json)\n \n return\n \n # train\n if args.gpus > 1:\n dm = VGDataModule(args, tokenizer, 2)\n else:\n dm = VGDataModule(args, tokenizer)\n\n if args.load_checkpoint != \"\":\n model = GAN.load_from_checkpoint(\n args.load_checkpoint, \n args=args, \n tokenizer=tokenizer, \n backbone=backbone\n )\n else:\n model = GAN(args, tokenizer, backbone)\n\n training_args = {\n \"gpus\": args.gpus,\n \"fast_dev_run\": False,\n \"max_steps\": args.num_iterations,\n \"precision\": 32,\n \"gradient_clip_val\": 1,\n }\n\n if args.gpus > 1:\n additional_args = {\n \"accelerator\": \"ddp\",\n \"plugins\": [DDPPlugin(find_unused_parameters=True)]\n # \"plugins\": [my_ddp]\n }\n\n training_args.update(additional_args)\n\n trainer = pl.Trainer(**training_args)\n trainer.fit(model, dm)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n main(args)" ]
[ [ "torch.utils.data.DataLoader", "torch.ones", "torch.randn", "torch.zeros", "torch.nn.functional.gumbel_softmax", "torch.matmul", "torch.nn.functional.binary_cross_entropy_with_logits" ] ]
mpharrigan/OpenFermion
[ "ae5bbaed60faa019fae9d47d6e578933874e074d" ]
[ "src/openfermion/utils/_grid.py" ]
[ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport itertools\nimport numpy\nimport scipy\nimport scipy.linalg\n\n\n# Exceptions.\nclass OrbitalSpecificationError(Exception):\n pass\n\n\nclass Grid:\n \"\"\"\n A multi-dimension grid of points with an assigned length scale.\n\n This grid acts as a helper class for parallelpiped super cells. It\n tracks a mapping from indices to grid points and stores the associated\n reciprocal lattice with respect to the original real-space lattice.\n This enables calculations with non-trivial unit cells.\n\n Attributes:\n dimensions (int): Number of spatial dimensions the grid occupys\n length (tuple of ints): d-length tuple specifying number of points\n along each dimension.\n shifts (list of ints): Integer shifts in position to center grid.\n scale (ndarray): Vectors defining the super cell being simulated,\n vectors are stored as columns in the matrix.\n volume (float): Total volume of the supercell parallelpiped.\n num_points (int): Total number of points in the grid.\n reciprocal_scale (ndarray): Vectors defining the reciprocal lattice.\n The vectors are stored as the columns in the matrix.\n \"\"\"\n\n def __init__(self, dimensions, length, scale):\n \"\"\"\n Args:\n dimensions (int): The number of dimensions the grid lives in.\n length (int or tuple): The number of points along each grid axis\n that will be taken in both reciprocal and real space.\n If tuple, it is read for each dimension, otherwise assumed\n uniform.\n scale (float or ndarray): The total length of each grid dimension.\n If a float is passed, the uniform cubic unit cell is assumed.\n For an ndarray, dimensions independent vectors of the correct\n dimension must be passed. 
We assume column vectors define\n the supercell vectors.\n \"\"\"\n if not isinstance(dimensions, int) or dimensions <= 0:\n raise ValueError(\n 'dimensions must be a positive int but was {} {}'.format(\n type(dimensions), repr(dimensions)))\n if ((not isinstance(length, int) or length < 0) and\n (not isinstance(length, tuple)) and\n (not isinstance(length, list))):\n raise ValueError(\n 'length must be a non-negative int or tuple '\n 'but was {} {}'.format(\n type(length), repr(length)))\n if ((not isinstance(scale, float) or not scale > 0) and\n (not isinstance(scale, numpy.ndarray))):\n raise ValueError(\n 'scale must be a positive float or ndarray but was '\n '{} {}'.format(\n type(scale), repr(scale)))\n\n self.dimensions = dimensions\n\n # If single integer, assume uniform\n if isinstance(length, int):\n self.length = (length, ) * dimensions\n else:\n self.length = length\n\n self.shifts = [self.length[i] // 2 for i in range(dimensions)]\n\n # If single float, construct cubic unit cell\n if isinstance(scale, float):\n self.scale = numpy.diag([scale] * self.dimensions)\n else:\n self.scale = scale\n\n # Compute the volume of the super cell\n self.volume = numpy.abs(scipy.linalg.det(self.scale))\n\n # Compute total number of points\n self.num_points = numpy.prod(self.length)\n\n # Compute the reciprocal lattice basis\n self.reciprocal_scale = 2 * numpy.pi * scipy.linalg.inv(self.scale).T\n\n def volume_scale(self):\n \"\"\"\n Returns:\n float: The volume of a length-scale hypercube within the grid.\n \"\"\"\n return self.volume\n\n def all_points_indices(self):\n \"\"\"\n Returns:\n iterable[tuple[int]]:\n The index-coordinate tuple of each point in the grid.\n \"\"\"\n return itertools.product(*[range(self.length[i])\n for i in range(self.dimensions)])\n\n def position_vector(self, position_indices):\n \"\"\"Given grid point coordinate, return position vector with dimensions.\n\n Args:\n position_indices (int|iterable[int]):\n List or tuple of integers giving grid point coordinate.\n Allowed values are ints in [0, grid_length).\n\n Returns:\n position_vector (numpy.ndarray[float])\n \"\"\"\n # Raise exceptions.\n if isinstance(position_indices, int):\n position_indices = [position_indices]\n if not all(0 <= e < self.length[i]\n for i, e in enumerate(position_indices)):\n raise OrbitalSpecificationError(\n 'Position indices must be integers in [0, grid_length).')\n\n # Compute position vector\n vector = sum([(float(n - self.shifts[i]) /\n self.length[i]) * self.scale[:, i]\n for i, n in enumerate(position_indices)])\n return vector\n\n def momentum_vector(self, momentum_indices, periodic=True):\n \"\"\"Given grid point coordinate, return momentum vector with dimensions.\n\n Args:\n momentum_indices (list): integers giving momentum\n indices. 
Allowed values are ints in [0, grid_length).\n periodic (bool): Wrap the momentum indices according to periodicity\n\n Returns:\n momentum_vector: A numpy array giving the momentum vector with\n dimensions.\n \"\"\"\n # Raise exceptions.\n if isinstance(momentum_indices, int):\n momentum_indices = [momentum_indices]\n if (not all(0 <= e < self.length[i]\n for i, e in enumerate(momentum_indices))):\n raise OrbitalSpecificationError(\n 'Momentum indices must be integers in [0, grid_length).')\n\n # Compute momentum vector.\n momentum_ints = self.index_to_momentum_ints(momentum_indices)\n vector = self.momentum_ints_to_value(momentum_ints, periodic)\n\n return vector\n\n def index_to_momentum_ints(self, index):\n \"\"\"\n Args:\n index (tuple): d-dimensional tuple specifying index in the grid\n Returns:\n Integer momentum vector\n \"\"\"\n # Set baseline for grid between [-N//2, N//2]\n momentum_int = [index[i] - self.shifts[i]\n for i in range(self.dimensions)]\n\n return numpy.array(momentum_int, dtype=int)\n\n def momentum_ints_to_index(self, momentum_ints):\n \"\"\"\n Args:\n momentum_ints (tuple): d-dimensional tuple of momentum integers\n Returns:\n d-dimensional tuples of indices\n \"\"\"\n\n indices = momentum_ints\n\n # Shift to indices\n indices = [n + self.shifts[i] for i, n in enumerate(indices)]\n\n # Wrap dimensions\n indices = [n % self.length[i] for i, n in enumerate(indices)]\n\n return indices\n\n def momentum_ints_to_value(self, momentum_ints, periodic=True):\n \"\"\"\n Args:\n momentum_ints (tuple): d-dimensional tuple of momentum integers\n periodic (bool): Alias the momentum\n Returns:\n ndarray containing the momentum vector.\n\n \"\"\"\n # Alias the higher momentum modes\n if periodic:\n momentum_ints = self.index_to_momentum_ints(\n self.momentum_ints_to_index(momentum_ints))\n\n momentum_vector = sum([n * self.reciprocal_scale[:, i]\n for i, n in enumerate(momentum_ints)])\n return momentum_vector\n\n def orbital_id(self, grid_coordinates, spin=None):\n \"\"\"Return the tensor factor of an orbital with given coordinates and spin.\n\n Args:\n grid_coordinates: List or tuple of ints giving coordinates of grid\n element. 
Acceptable to provide an int(instead of tuple or list)\n for 1D case.\n spin (bool): 0 means spin down and 1 means spin up.\n If None, assume spinless model.\n\n Returns:\n tensor_factor (int):\n tensor factor associated with provided orbital label.\n \"\"\"\n # Initialize.\n if isinstance(grid_coordinates, int):\n grid_coordinates = [grid_coordinates]\n\n # Loop through dimensions of coordinate tuple.\n tensor_factor = 0\n for dimension, grid_coordinate in enumerate(grid_coordinates):\n\n # Make sure coordinate is an integer in the correct bounds.\n if (isinstance(grid_coordinate, int) and\n grid_coordinate < self.length[dimension]):\n tensor_factor += (grid_coordinate *\n int(numpy.product(self.length[:dimension])))\n else:\n # Raise for invalid model.\n raise OrbitalSpecificationError(\n 'Invalid orbital coordinates provided.')\n\n # Account for spin and return.\n if spin is None:\n return tensor_factor\n else:\n tensor_factor *= 2\n tensor_factor += spin\n return tensor_factor\n\n def grid_indices(self, qubit_id, spinless):\n \"\"\"This function is the inverse of orbital_id.\n\n Args:\n qubit_id (int): The tensor factor to map to grid indices.\n spinless (bool): Whether to use the spinless model or not.\n\n Returns:\n grid_indices (numpy.ndarray[int]):\n The location of the qubit on the grid.\n \"\"\"\n if not (numpy.product(self.length) * (2 - spinless) > qubit_id >= 0):\n raise OrbitalSpecificationError('Invalid qubit_id provided.')\n\n # Remove spin degree of freedom if it exists.\n orbital_id = qubit_id\n\n if not spinless:\n orbital_id //= 2\n\n # Get grid indices.\n grid_indices = []\n for dimension in range(self.dimensions):\n remainder = (orbital_id %\n int(numpy.product(self.length[:dimension + 1])))\n grid_index = (remainder //\n int(numpy.product(self.length[:dimension])))\n grid_indices += [grid_index]\n return grid_indices\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return (self.dimensions == other.dimensions and\n (self.scale == other.scale).all() and\n self.length == other.length)\n\n def __ne__(self, other):\n return not self == other\n" ]
[ [ "numpy.diag", "scipy.linalg.inv", "numpy.product", "numpy.prod", "numpy.array", "scipy.linalg.det" ] ]
profxj/ginga
[ "a5f447b760ac38dafa52181b3f99156545a6f2e7", "a5f447b760ac38dafa52181b3f99156545a6f2e7" ]
[ "ginga/canvas/transform.py", "ginga/qtw/CanvasRenderQt.py" ]
[ "#\n# transform.py -- coordinate transforms for Ginga\n#\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\nimport numpy as np\n\nfrom ginga import trcalc\nfrom ginga.misc import Bunch\n\n__all__ = ['TransformError', 'BaseTransform', 'ComposedTransform',\n 'InvertedTransform', 'PassThruTransform',\n 'WindowNativeTransform', 'CartesianWindowTransform',\n 'CartesianNativeTransform',\n 'RotationTransform', 'ScaleTransform',\n 'DataCartesianTransform', 'OffsetDataTransform',\n 'WCSDataTransform', 'get_catalog'\n ]\n\n\nclass TransformError(Exception):\n pass\n\n\nclass BaseTransform(object):\n\n def __init__(self):\n super(BaseTransform, self).__init__()\n\n def to_(self, x, y):\n raise TransformError(\"subclass should override this method\")\n\n def from_(self, tx, ty):\n raise TransformError(\"subclass should override this method\")\n\n def __add__(self, trans):\n return ComposedTransform(self, trans)\n\n def invert(self):\n return InvertedTransform(self)\n\n\nclass ComposedTransform(BaseTransform):\n \"\"\"\n A transform that composes two other transforms to make a new one.\n \"\"\"\n\n def __init__(self, tform1, tform2):\n super(ComposedTransform, self).__init__()\n self.tform1 = tform1\n self.tform2 = tform2\n\n def to_(self, pts, **kwargs):\n return self.tform2.to_(self.tform1.to_(pts, **kwargs))\n\n def from_(self, pts, **kwargs):\n return self.tform1.from_(self.tform2.from_(pts), **kwargs)\n\n\nclass InvertedTransform(BaseTransform):\n \"\"\"\n A transform that inverts another transform.\n \"\"\"\n\n def __init__(self, tform):\n super(InvertedTransform, self).__init__()\n self.tform = tform\n\n def to_(self, pts, **kwargs):\n return self.tform.from_(pts, **kwargs)\n\n def from_(self, pts, **kwargs):\n return self.tform.to_(pts, **kwargs)\n\n\nclass PassThruTransform(BaseTransform):\n \"\"\"\n A transform that essentially acts as a no-op.\n \"\"\"\n\n def __init__(self, viewer):\n super(PassThruTransform, self).__init__()\n\n def to_(self, pts, **kwargs):\n return pts\n\n def from_(self, pts, **kwargs):\n return pts\n\n\nclass WindowNativeTransform(BaseTransform):\n \"\"\"\n A transform from a typical window standard coordinate space with the\n upper left at (0, 0) to the viewer back end native pixel space.\n \"\"\"\n\n def __init__(self, viewer):\n super(WindowNativeTransform, self).__init__()\n self.viewer = viewer\n\n def to_(self, win_pts):\n if self.viewer.origin_upper:\n return win_pts\n\n win_pts = np.asarray(win_pts)\n has_z = (win_pts.shape[-1] > 2)\n\n # invert Y coord for backends that have the origin in the lower left\n win_wd, win_ht = self.viewer.get_window_size()\n\n # win_x, win_y = cvs_x, win_ht - cvs_y\n mpy_pt = [1.0, -1.0]\n if has_z:\n mpy_pt.append(1.0)\n\n add_pt = [0.0, win_ht]\n if has_z:\n add_pt.append(0.0)\n\n ntv_pts = np.add(np.multiply(win_pts, mpy_pt), add_pt)\n\n return ntv_pts\n\n def from_(self, ntv_pts):\n return self.to_(ntv_pts)\n\n\nclass WindowPercentageTransform(BaseTransform):\n \"\"\"\n A transform from standard window coordinates of a viewer\n to percentage coordinates.\n \"\"\"\n\n def __init__(self, viewer, as_int=True):\n super(WindowPercentageTransform, self).__init__()\n self.viewer = viewer\n self.as_int = as_int\n\n def to_(self, win_pts):\n win_pts = np.asarray(win_pts, dtype=np.float)\n has_z = (win_pts.shape[-1] > 2)\n\n max_pt = list(self.viewer.get_window_size())\n if has_z:\n max_pt.append(0.0)\n\n pct_pts = np.divide(win_pts, max_pt)\n return pct_pts\n\n def 
from_(self, pct_pts):\n \"\"\"Reverse of :meth:`to_`.\"\"\"\n pct_pts = np.asarray(pct_pts, dtype=np.float)\n has_z = (pct_pts.shape[-1] > 2)\n\n max_pt = list(self.viewer.get_window_size())\n if has_z:\n max_pt.append(0.0)\n\n win_pts = np.multiply(pct_pts, max_pt)\n\n # round to pixel units, if asked\n if self.as_int:\n win_pts = np.rint(win_pts).astype(np.int, copy=False)\n\n return win_pts\n\n\nclass CartesianWindowTransform(BaseTransform):\n \"\"\"\n A transform from cartesian coordinates to standard window coordinates\n of a viewer.\n \"\"\"\n\n def __init__(self, viewer, as_int=True):\n super(CartesianWindowTransform, self).__init__()\n self.viewer = viewer\n self.as_int = as_int\n\n def to_(self, off_pts):\n # add center pixel to convert from X/Y coordinate space to\n # window graphics space\n off_pts = np.asarray(off_pts, dtype=np.float)\n has_z = (off_pts.shape[-1] > 2)\n\n ctr_pt = list(self.viewer.get_center())\n if has_z:\n ctr_pt.append(0.0)\n\n # win_x = off_x + ctr_x\n # win_y = ctr_y - off_y\n mpy_pt = [1.0, -1.0]\n if has_z:\n mpy_pt.append(1.0)\n\n win_pts = np.add(np.multiply(off_pts, mpy_pt), ctr_pt)\n\n # round to pixel units, if asked\n if self.as_int:\n win_pts = np.rint(win_pts).astype(np.int, copy=False)\n\n return win_pts\n\n def from_(self, win_pts):\n \"\"\"Reverse of :meth:`to_`.\"\"\"\n # make relative to center pixel to convert from window\n # graphics space to standard X/Y coordinate space\n win_pts = np.asarray(win_pts, dtype=np.float)\n has_z = (win_pts.shape[-1] > 2)\n\n ctr_pt = list(self.viewer.get_center())\n if has_z:\n ctr_pt.append(0.0)\n\n mpy_pt = [1.0, -1.0]\n if has_z:\n mpy_pt.append(1.0)\n\n # off_x = win_x - ctr_x\n # = win_x + -ctr_x\n # off_y = ctr_y - win_y\n # = -win_y + ctr_y\n ctr_pt[0] = -ctr_pt[0]\n off_pts = np.add(np.multiply(win_pts, mpy_pt), ctr_pt)\n\n return off_pts\n\n\nclass CartesianNativeTransform(BaseTransform):\n \"\"\"\n A transform from cartesian coordinates to the native pixel coordinates\n of a viewer.\n \"\"\"\n\n def __init__(self, viewer, as_int=True):\n super(CartesianNativeTransform, self).__init__()\n self.viewer = viewer\n self.as_int = as_int\n\n def to_(self, off_pts):\n # add center pixel to convert from X/Y coordinate space to\n # back end graphics space\n off_pts = np.asarray(off_pts, dtype=np.float)\n has_z = (off_pts.shape[-1] > 2)\n\n ctr_pt = list(self.viewer.get_center())\n if has_z:\n ctr_pt.append(0.0)\n\n if self.viewer.origin_upper:\n mpy_pt = [1.0, -1.0]\n else:\n mpy_pt = [1.0, 1.0]\n\n if has_z:\n mpy_pt.append(1.0)\n\n win_pts = np.add(np.multiply(off_pts, mpy_pt), ctr_pt)\n\n # round to pixel units, if asked\n if self.as_int:\n win_pts = np.rint(win_pts).astype(np.int, copy=False)\n\n return win_pts\n\n def from_(self, win_pts):\n \"\"\"Reverse of :meth:`to_`.\"\"\"\n # make relative to center pixel to convert from back end\n # graphics space to standard X/Y coordinate space\n win_pts = np.asarray(win_pts, dtype=np.float)\n has_z = (win_pts.shape[-1] > 2)\n\n ctr_pt = list(self.viewer.get_center())\n if has_z:\n ctr_pt.append(0.0)\n\n ctr_pt[0] = -ctr_pt[0]\n if self.viewer.origin_upper:\n mpy_pt = [1.0, -1.0]\n else:\n ctr_pt[1] = -ctr_pt[1]\n mpy_pt = [1.0, 1.0]\n\n if has_z:\n mpy_pt.append(1.0)\n\n off_pts = np.add(np.multiply(win_pts, mpy_pt), ctr_pt)\n\n return off_pts\n\n\nclass RotationTransform(BaseTransform):\n \"\"\"\n A transform in cartesian coordinates based on the flip/swap setting and\n rotation setting of a viewer.\n \"\"\"\n\n def __init__(self, viewer):\n 
super(RotationTransform, self).__init__()\n self.viewer = viewer\n\n def to_(self, off_pts):\n off_pts = np.asarray(off_pts, dtype=np.float)\n has_z = (off_pts.shape[-1] > 2)\n\n t_ = self.viewer.t_\n\n # flip\n flip_pt = [1.0, 1.0]\n if t_['flip_x']:\n flip_pt[0] = -1.0\n if t_['flip_y']:\n flip_pt[1] = -1.0\n if has_z:\n # no flip_z at the moment\n flip_pt.append(1.0)\n\n off_pts = np.multiply(off_pts, flip_pt)\n\n # swap\n if t_['swap_xy']:\n p = list(off_pts.T)\n off_pts = np.asarray([p[1], p[0]] + list(p[2:])).T\n\n # rotate\n if t_['rot_deg'] != 0:\n thetas = [t_['rot_deg']]\n offset = [0.0, 0.0]\n if has_z:\n offset.append(0.0)\n off_pts = trcalc.rotate_coord(off_pts, thetas, offset)\n\n return off_pts\n\n def from_(self, off_pts):\n \"\"\"Reverse of :meth:`to_`.\"\"\"\n off_pts = np.asarray(off_pts, dtype=np.float)\n has_z = (off_pts.shape[-1] > 2)\n\n t_ = self.viewer.t_\n\n # rotate\n if t_['rot_deg'] != 0:\n thetas = [- t_['rot_deg']]\n offset = [0.0, 0.0]\n if has_z:\n offset.append(0.0)\n off_pts = trcalc.rotate_coord(off_pts, thetas, offset)\n\n # swap\n if t_['swap_xy']:\n p = list(off_pts.T)\n off_pts = np.asarray([p[1], p[0]] + list(p[2:])).T\n\n # flip\n flip_pt = [1.0, 1.0]\n if t_['flip_x']:\n flip_pt[0] = -1.0\n if t_['flip_y']:\n flip_pt[1] = -1.0\n if has_z:\n # no flip_z at the moment\n flip_pt.append(1.0)\n\n off_pts = np.multiply(off_pts, flip_pt)\n\n return off_pts\n\n\nclass ScaleTransform(BaseTransform):\n \"\"\"\n A transform in cartesian coordinates based on the scale of a viewer.\n \"\"\"\n\n def __init__(self, viewer):\n super(ScaleTransform, self).__init__()\n self.viewer = viewer\n\n def to_(self, off_pts):\n \"\"\"Reverse of :meth:`from_`.\"\"\"\n off_pts = np.asarray(off_pts, dtype=np.float)\n has_z = (off_pts.shape[-1] > 2)\n\n # scale according to current settings\n scale_pt = [self.viewer._org_scale_x, self.viewer._org_scale_y]\n if has_z:\n scale_pt.append(self.viewer._org_scale_z)\n\n off_pts = np.multiply(off_pts, scale_pt)\n return off_pts\n\n def from_(self, off_pts):\n off_pts = np.asarray(off_pts, dtype=np.float)\n has_z = (off_pts.shape[-1] > 2)\n\n scale_pt = [1.0 / self.viewer._org_scale_x,\n 1.0 / self.viewer._org_scale_y]\n if has_z:\n scale_pt.append(1.0 / self.viewer._org_scale_z)\n\n # Reverse scaling\n off_pts = np.multiply(off_pts, scale_pt)\n return off_pts\n\n\nclass DataCartesianTransform(BaseTransform):\n \"\"\"\n A transform from data coordinates to cartesian coordinates based on\n a viewer's pan position.\n \"\"\"\n\n def __init__(self, viewer, use_center=True):\n super(DataCartesianTransform, self).__init__()\n self.viewer = viewer\n # If use_center is True, then the coordinates are mapped such that the\n # pixel is centered on the square when the image is zoomed in past\n # 1X. 
This is the specification of the FITS image standard,\n # that the pixel is centered on the integer row/column.\n self.use_center = use_center\n\n def to_(self, data_pts):\n \"\"\"Reverse of :meth:`from_`.\"\"\"\n data_pts = np.asarray(data_pts, dtype=np.float)\n has_z = (data_pts.shape[-1] > 2)\n\n if self.use_center:\n data_pts = data_pts - self.viewer.data_off\n\n # subtract data indexes at center reference pixel\n ref_pt = [self.viewer._org_x, self.viewer._org_y]\n if has_z:\n ref_pt.append(self.viewer._org_z)\n\n off_pts = np.subtract(data_pts, ref_pt)\n return off_pts\n\n def from_(self, off_pts):\n off_pts = np.asarray(off_pts, dtype=np.float)\n has_z = (off_pts.shape[-1] > 2)\n\n # Add data index at center to offset\n # subtract data indexes at center reference pixel\n ref_pt = [self.viewer._org_x, self.viewer._org_y]\n if has_z:\n ref_pt.append(self.viewer._org_z)\n\n data_pts = np.add(off_pts, ref_pt)\n\n if self.use_center:\n data_pts = data_pts + self.viewer.data_off\n\n return data_pts\n\n\nclass OffsetDataTransform(BaseTransform):\n \"\"\"\n A transform whose coordinate space is offsets from a point in\n data space.\n \"\"\"\n\n def __init__(self, pt):\n super(OffsetDataTransform, self).__init__()\n self.pt = pt\n\n def to_(self, delta_pts):\n delta_x, delta_y = np.asarray(delta_pts, dtype=np.float).T\n ref_x, ref_y = self.pt[:2]\n res_x, res_y = ref_x + delta_x, ref_y + delta_y\n return np.asarray((res_x, res_y)).T\n\n def from_(self, data_pts):\n data_x, data_y = np.asarray(data_pts, dtype=np.float).T\n ref_x, ref_y = self.pt[:2]\n res_x, res_y = data_x - ref_x, data_y - ref_y\n return np.asarray((res_x, res_y)).T\n\n\nclass WCSDataTransform(BaseTransform):\n \"\"\"\n A transform whose coordinate space is based on the WCS of the primary\n image loaded in a viewer.\n \"\"\"\n\n def __init__(self, viewer):\n super(WCSDataTransform, self).__init__()\n self.viewer = viewer\n\n def to_(self, wcs_pts):\n wcs_pts = np.asarray(wcs_pts)\n\n # hack to work around passing singleton pt vs. array of pts\n unpack = False\n if len(wcs_pts.shape) < 2:\n # passed a single coordinate\n wcs_pts = np.asarray([wcs_pts])\n unpack = True\n\n image = self.viewer.get_image()\n if image is None:\n raise TransformError(\"No image, no WCS\")\n wcs = image.wcs\n if wcs is None:\n raise TransformError(\"No valid WCS found in image\")\n\n naxispath = image.naxispath\n\n res = wcs.wcspt_to_datapt(wcs_pts, naxispath=naxispath)\n if unpack:\n return res[0]\n return res\n\n def from_(self, data_pts):\n data_pts = np.asarray(data_pts)\n\n # hack to work around passing singleton pt vs. array of pts\n unpack = False\n if len(data_pts.shape) < 2:\n # passed a single coordinate\n data_pts = np.asarray([data_pts])\n unpack = True\n\n image = self.viewer.get_image()\n if image is None:\n raise TransformError(\"No image, no WCS\")\n wcs = image.wcs\n if wcs is None:\n raise TransformError(\"No valid WCS found in image\")\n\n naxispath = image.naxispath\n\n res = wcs.datapt_to_wcspt(data_pts, naxispath=naxispath)\n if unpack:\n return res[0]\n return res\n\n\ndef get_catalog():\n \"\"\"Returns a catalog of available transforms. 
These are used to\n build chains for rendering with different back ends.\n \"\"\"\n tforms = {}\n for name, value in list(globals().items()):\n if name.endswith('Transform'):\n tforms[name] = value\n\n return Bunch.Bunch(tforms, caseless=True)\n\n#END\n", "#\n# CanvasRenderQt.py -- for rendering into a ImageViewQt widget\n#\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\nimport numpy as np\n\nfrom ginga.qtw.QtHelp import (QtCore, QPen, QPolygon, QColor,\n QPainterPath, QImage, QPixmap, get_font,\n get_painter)\n\nfrom ginga import colors\nfrom ginga.canvas import render\n# force registration of all canvas types\nimport ginga.canvas.types.all # noqa\n\n\nclass RenderContext(render.RenderContextBase):\n\n def __init__(self, renderer, viewer, surface):\n render.RenderContextBase.__init__(self, renderer, viewer)\n\n self.cr = get_painter(surface)\n\n def __get_color(self, color, alpha):\n clr = QColor()\n if isinstance(color, tuple):\n clr.setRgbF(color[0], color[1], color[2], alpha)\n else:\n r, g, b = colors.lookup_color(color)\n clr.setRgbF(r, g, b, alpha)\n return clr\n\n def set_line_from_shape(self, shape):\n pen = QPen()\n pen.setWidthF(getattr(shape, 'linewidth', 1.0))\n\n if hasattr(shape, 'linestyle'):\n if shape.linestyle == 'dash':\n pen.setDashPattern([3.0, 4.0, 6.0, 4.0])\n pen.setDashOffset(5.0)\n\n alpha = getattr(shape, 'alpha', 1.0)\n color = self.__get_color(shape.color, alpha)\n pen.setColor(color)\n self.cr.setPen(pen)\n\n def set_fill_from_shape(self, shape):\n fill = getattr(shape, 'fill', False)\n if fill:\n if hasattr(shape, 'fillcolor') and shape.fillcolor:\n color = shape.fillcolor\n else:\n color = shape.color\n\n if color is None:\n self.cr.setBrush(QtCore.Qt.NoBrush)\n else:\n alpha = getattr(shape, 'alpha', None)\n fillalpha = getattr(shape, 'fillalpha', alpha)\n color = self.__get_color(color, fillalpha)\n self.cr.setBrush(color)\n else:\n self.cr.setBrush(QtCore.Qt.NoBrush)\n\n def set_font_from_shape(self, shape):\n if hasattr(shape, 'font'):\n if (hasattr(shape, 'fontsize') and shape.fontsize is not None and\n not getattr(shape, 'fontscale', False)):\n fontsize = shape.fontsize\n else:\n fontsize = shape.scale_font(self.viewer)\n fontsize = self.scale_fontsize(fontsize)\n font = get_font(shape.font, fontsize)\n self.cr.setFont(font)\n\n def initialize_from_shape(self, shape, line=True, fill=True, font=True):\n if line:\n self.set_line_from_shape(shape)\n if fill:\n self.set_fill_from_shape(shape)\n if font:\n self.set_font_from_shape(shape)\n\n def set_line(self, color, alpha=1.0, linewidth=1, style='solid'):\n clr = self.__get_color(color, alpha)\n pen = self.cr.pen()\n pen.setColor(clr)\n pen.setWidthF(float(linewidth))\n if style == 'dash':\n pen.setDashPattern([3.0, 4.0, 6.0, 4.0])\n pen.setDashOffset(5.0)\n self.cr.setPen(pen)\n\n def set_fill(self, color, alpha=1.0):\n if color is None:\n self.cr.setBrush(QtCore.Qt.NoBrush)\n else:\n color = self.__get_color(color, alpha)\n self.cr.setBrush(color)\n\n def set_font(self, fontname, fontsize, color='black', alpha=1.0):\n self.set_line(color, alpha=alpha)\n fontsize = self.scale_fontsize(fontsize)\n font = get_font(fontname, fontsize)\n self.cr.setFont(font)\n\n def text_extents(self, text):\n fm = self.cr.fontMetrics()\n width = fm.width(text)\n height = fm.height()\n return width, height\n\n ##### DRAWING OPERATIONS #####\n\n def draw_text(self, cx, cy, text, rot_deg=0.0):\n self.cr.save()\n self.cr.translate(cx, cy)\n 
self.cr.rotate(-rot_deg)\n\n self.cr.drawText(0, 0, text)\n\n self.cr.restore()\n\n def draw_polygon(self, cpoints):\n qpoints = [QtCore.QPoint(p[0], p[1]) for p in cpoints]\n p = cpoints[0]\n qpoints.append(QtCore.QPoint(p[0], p[1]))\n qpoly = QPolygon(qpoints)\n\n self.cr.drawPolygon(qpoly)\n\n def draw_circle(self, cx, cy, cradius):\n # this is necessary to work around a bug in Qt--radius of 0\n # causes a crash\n cradius = max(cradius, 0.000001)\n pt = QtCore.QPointF(cx, cy)\n self.cr.drawEllipse(pt, float(cradius), float(cradius))\n\n def draw_bezier_curve(self, cp):\n path = QPainterPath()\n path.moveTo(cp[0][0], cp[0][1])\n path.cubicTo(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])\n self.cr.drawPath(path)\n\n def draw_ellipse_bezier(self, cp):\n # draw 4 bezier curves to make the ellipse\n path = QPainterPath()\n path.moveTo(cp[0][0], cp[0][1])\n path.cubicTo(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])\n path.cubicTo(cp[4][0], cp[4][1], cp[5][0], cp[5][1], cp[6][0], cp[6][1])\n path.cubicTo(cp[7][0], cp[7][1], cp[8][0], cp[8][1], cp[9][0], cp[9][1])\n path.cubicTo(cp[10][0], cp[10][1], cp[11][0], cp[11][1], cp[12][0], cp[12][1])\n self.cr.drawPath(path)\n\n def draw_line(self, cx1, cy1, cx2, cy2):\n self.cr.pen().setCapStyle(QtCore.Qt.RoundCap)\n self.cr.drawLine(cx1, cy1, cx2, cy2)\n\n def draw_path(self, cp):\n self.cr.pen().setCapStyle(QtCore.Qt.RoundCap)\n pts = [QtCore.QLineF(QtCore.QPointF(cp[i][0], cp[i][1]),\n QtCore.QPointF(cp[i + 1][0], cp[i + 1][1]))\n for i in range(len(cp) - 1)]\n self.cr.drawLines(pts)\n\n\nclass CanvasRenderer(render.RendererBase):\n\n def __init__(self, viewer, surface_type='qimage'):\n render.RendererBase.__init__(self, viewer)\n\n self.kind = 'qt'\n # Qt needs this to be in BGRA\n self.rgb_order = 'BGRA'\n self.qimg_fmt = QImage.Format_RGB32\n self.surface_type = surface_type\n # the offscreen drawing surface\n self.surface = None\n\n def resize(self, dims):\n \"\"\"Resize our drawing area to encompass a space defined by the\n given dimensions.\n \"\"\"\n width, height = dims[:2]\n self.logger.debug(\"renderer reconfigured to %dx%d\" % (\n width, height))\n if self.surface_type == 'qpixmap':\n self.surface = QPixmap(width, height)\n else:\n self.surface = QImage(width, height, self.qimg_fmt)\n\n # fill surface with background color;\n # this reduces unwanted garbage in the resizing window\n painter = get_painter(self.surface)\n size = self.surface.size()\n sf_wd, sf_ht = size.width(), size.height()\n bg = self.viewer.img_bg\n bgclr = self._get_color(*bg)\n painter.fillRect(QtCore.QRect(0, 0, sf_wd, sf_ht), bgclr)\n\n def _get_qimage(self, rgb_data):\n ht, wd, channels = rgb_data.shape\n\n result = QImage(rgb_data.data, wd, ht, self.qimg_fmt)\n # Need to hang on to a reference to the array\n result.ndarray = rgb_data\n return result\n\n def _get_color(self, r, g, b):\n # TODO: combine with the method from the RenderContext?\n n = 255.0\n clr = QColor(int(r * n), int(g * n), int(b * n))\n return clr\n\n def render_image(self, rgbobj, dst_x, dst_y):\n \"\"\"Render the image represented by (rgbobj) at dst_x, dst_y\n in the pixel space.\n *** internal method-- do not use ***\n \"\"\"\n self.logger.debug(\"redraw surface=%s\" % (self.surface))\n if self.surface is None:\n return\n self.logger.debug(\"drawing to surface\")\n\n # Prepare array for rendering\n # TODO: what are options for high bit depth under Qt?\n data = rgbobj.get_array(self.rgb_order, dtype=np.uint8)\n (height, width) = data.shape[:2]\n\n daht, dawd, depth 
= data.shape\n self.logger.debug(\"data shape is %dx%dx%d\" % (dawd, daht, depth))\n\n # Get qimage for copying pixel data\n qimage = self._get_qimage(data)\n drawable = self.surface\n\n painter = get_painter(drawable)\n #painter.setWorldMatrixEnabled(True)\n\n # fill surface with background color\n size = drawable.size()\n sf_wd, sf_ht = size.width(), size.height()\n bg = self.viewer.img_bg\n bgclr = self._get_color(*bg)\n painter.fillRect(QtCore.QRect(0, 0, sf_wd, sf_ht), bgclr)\n\n # draw image data from buffer to offscreen pixmap\n painter.drawImage(QtCore.QRect(dst_x, dst_y, width, height),\n qimage,\n QtCore.QRect(0, 0, width, height))\n\n def get_surface_as_array(self, order=None):\n if self.surface_type == 'qpixmap':\n qimg = self.surface.toImage()\n else:\n qimg = self.surface\n #qimg = qimg.convertToFormat(QImage.Format_RGBA32)\n\n width, height = qimg.width(), qimg.height()\n\n if hasattr(qimg, 'bits'):\n # PyQt\n ptr = qimg.bits()\n ptr.setsize(qimg.byteCount())\n else:\n # PySide\n ptr = qimg.constBits()\n\n arr = np.array(ptr).reshape(height, width, 4)\n\n # adjust according to viewer's needed order\n return self.reorder(order, arr)\n\n def setup_cr(self, shape):\n cr = RenderContext(self, self.viewer, self.surface)\n cr.initialize_from_shape(shape, font=False)\n return cr\n\n def get_dimensions(self, shape):\n cr = self.setup_cr(shape)\n cr.set_font_from_shape(shape)\n return cr.text_extents(shape.text)\n\n\n#END\n" ]
[ [ "numpy.rint", "numpy.multiply", "numpy.divide", "numpy.subtract", "numpy.asarray", "numpy.add" ], [ "numpy.array" ] ]
vsriv90/mechanical_engineering
[ "c922cdce1a595e9acb6a87cf415fb3685caf51a3" ]
[ "Beams/Cantilever Beam - End Loaded.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Cantilever beams - End Loaded\n\n# ![Cantilever%20-%20End%20Loaded.jpeg](attachment:Cantilever%20-%20End%20Loaded.jpeg)\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sn # to draw plots\n# import plotly.express as px\n# import csv\n# import sympy\n\nfrom PIL import Image\n\n# SIMPLY SUPPORTED BEAM - Single Load: PARAMETERS\n\nf = 700 # Value of load\nl = 100 # Total length of beam\n\nUnitF = str('N') # To show units of force\nUnitL = str(\"mm\") # To show units of length\n\n\n# In[16]:\n\n\nx = [0,l] \ny = [0,0] \n\n# plotting the points on X axis\nplt.plot(x, y,label = \"Beam\", color='green', linewidth = 10,marker='o', markerfacecolor='blue', markersize=15) \nplt.legend() # giving a legend to my graph\n\n##################### ENTERING LOADS TO GRAPH #####################\n# fig,ax = plt.subplots(1)\n\nAvX = [0,0] # x-axis values\nAvY = [f/2,-f/2] # corresponding y-axis values\nplt.plot(AvX, AvY, linestyle='--',marker='s',markersize=10, color='grey') # To create the reaction line \nplt.text(1,-f,str(round(f,2))+UnitF) # To show the values on the points\n \nLoadX = [l,l] \nLoadY = [0,f] \n \nplt.plot(LoadX, LoadY, linestyle='--',marker='v',markersize=10) # To create the force line \nplt.text(l,f+1,str(round(f,2))+UnitF) # (Coordiante x, Coordinate y,the value in text+Unit)\n\nSupportX = [0]\nSupportY = [0]\nplt.plot(SupportX, SupportX,marker='s',markersize=25) # To create the force line \nplt.text(-2,20,\"RIGID\") # (Coordiante x, Coordinate y,the text)\n\n#################### End of load entering ###########################\n\n# AXIS LEGENDS \n\nplt.xlabel('Beam length in '+ UnitL)\nplt.title('Loads on beam') # giving a title to graph \n\nplt.ylim(top=1.5*f) # to set maximum y-axis value\nplt.ylim(bottom=-1.5*f) # to set minimum y-axis value\n\nplt.gca().axes.get_yaxis().set_visible(False) # Make y-aixs values visible or invisible\nplt.show() # function to show the plot \n\n# ---------------------------------------------------------------------\n# SHEAR FORCE DIAGRAM\n\nShearX = [0,0,l,l] \nShearY = [0,f,f,0] \nplt.plot(ShearX, ShearY, linestyle='--', marker='o') #plotting the points on X axis\n\n# To show the values on the points\nplt.text(0,0,str(0)+UnitF) # (Coordiante x, Coordinate y,the value in text+Unit) # point 1\nplt.text(0,f,str(round(f,2))+UnitF) # point 2, with integers rounded off\nplt.text(l,f,str(round(f,2))+UnitF) # point 3\nplt.text(l,0,str(0)+UnitF) # point 4\n\n# Plotting the 0 line\nZeroLX = [0,l] \nZeroLY = [0,0] \nplt.plot(ZeroLX, ZeroLY, color='black') # plotting the line 2 points \n\n# OVERALL DETAILS FOR THE GRAPH\nplt.xlabel('Position along the beam')\nplt.title('Shear/Transverse Force Diagram') # giving a title to graph \nplt.gca().axes.get_yaxis().set_visible(False) # Make y-aixs values visible or invisible\nplt.show() # function to show the plot \n\n# ---------------------------------------------------------------------\n# BENDING MOMENT DIAGRAM\n\nMomX = [0,0,l] \nMomY = [0,f*l,0] \nplt.plot(MomX, MomY, linestyle=':', marker='o', label=\"Moment direction = Counter-clockwise\") # plotting the points on X axis\nplt.legend() #legend to show direction of moment\n\nplt.text(0,0,str((0))) # (Coordiante x, Coordinate y,the value in text+Unit) # point 1\nplt.text(0,f*l,str(round((f*l),2))+UnitF+UnitL) # To SHOW the Moment value at the point # point 2\nplt.text(l,-10,str((0))) # point 3\n\nplt.plot(ZeroLX, ZeroLY, color='black')\n\n# OVERALL DETAILS FOR THE 
GRAPH\nplt.xlabel('Position along the beam')\nplt.title('Bending Moment Diagram') # giving a title to graph \nplt.gca().axes.get_yaxis().set_visible(False) # Make y-aixs values visible or invisible \nplt.show() # function to show the plot \n\n\n# https://www.geeksforgeeks.org/graph-plotting-in-python-set-1/\n\n# In[ ]:\n\n\n\n\n\n# In[49]:\n\n\n# help(plt.plot)\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.text", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
aetros/aetros-cli
[ "a2a1f38d6af1660e1e2680c7d413ec2aef45faab" ]
[ "aetros/utils/image.py" ]
[ "# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.\n# BSD 3-clause license\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport math\nfrom six.moves import range\n\n# Find the best implementation available\nfrom aetros.utils.pilutil import imresize\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nimport numpy as np\nimport PIL.Image\n\n# Library defaults:\n# PIL.Image:\n# size -- (width, height)\n# np.array:\n# shape -- (height, width, channels)\n# range -- [0-255]\n# dtype -- uint8\n# channels -- RGB\n# caffe.datum:\n# datum.data type -- bytes (uint8)\n# datum.float_data type -- float32\n# when decoding images, channels are BGR\n# DIGITS:\n# image_dims -- (height, width, channels)\n\n# List of supported file extensions\n# Use like \"if filename.endswith(SUPPORTED_EXTENSIONS)\"\nSUPPORTED_EXTENSIONS = ('.png', '.jpg', '.jpeg', '.bmp', '.ppm')\n\n\ndef upscale(image, ratio):\n \"\"\"\n return upscaled image array\n Arguments:\n image -- a (H,W,C) numpy.ndarray\n ratio -- scaling factor (>1)\n \"\"\"\n if not isinstance(image, np.ndarray):\n raise ValueError('Expected ndarray')\n if ratio < 1:\n raise ValueError('Ratio must be greater than 1 (ratio=%f)' % ratio)\n width = int(math.floor(image.shape[1] * ratio))\n height = int(math.floor(image.shape[0] * ratio))\n channels = image.shape[2]\n out = np.ndarray((height, width, channels), dtype=np.uint8)\n for x, y in np.ndindex((width, height)):\n out[y, x] = image[int(math.floor(y / ratio)), int(math.floor(x / ratio))]\n return out\n\n\ndef resize_image(image, height, width,\n channels=None,\n resize_mode=None\n ):\n \"\"\"\n Resizes an image and returns it as a np.array\n Arguments:\n image -- a PIL.Image or numpy.ndarray\n height -- height of new image\n width -- width of new image\n Keyword Arguments:\n channels -- channels of new image (stays unchanged if not specified)\n resize_mode -- can be crop, squash, fill or half_crop\n \"\"\"\n if resize_mode is None:\n resize_mode = 'squash'\n if resize_mode not in ['crop', 'squash', 'fill', 'half_crop']:\n raise ValueError('resize_mode \"%s\" not supported' % resize_mode)\n\n if channels not in [None, 1, 3]:\n raise ValueError('unsupported number of channels: %s' % channels)\n\n if isinstance(image, PIL.Image.Image):\n # Convert image mode (channels)\n if channels is None:\n image_mode = image.mode\n if image_mode == 'L':\n channels = 1\n elif image_mode == 'RGB':\n channels = 3\n else:\n raise ValueError('unknown image mode \"%s\"' % image_mode)\n elif channels == 1:\n # 8-bit pixels, black and white\n image_mode = 'L'\n elif channels == 3:\n # 3x8-bit pixels, true color\n image_mode = 'RGB'\n if image.mode != image_mode:\n image = image.convert(image_mode)\n image = np.array(image)\n elif isinstance(image, np.ndarray):\n if image.dtype != np.uint8:\n image = image.astype(np.uint8)\n if image.ndim == 3 and image.shape[2] == 1:\n image = image.reshape(image.shape[:2])\n if channels is None:\n if image.ndim == 2:\n channels = 1\n elif image.ndim == 3 and image.shape[2] == 3:\n channels = 3\n else:\n raise ValueError('invalid image shape: %s' % (image.shape,))\n elif channels == 1:\n if image.ndim != 2:\n if image.ndim == 3 and image.shape[2] == 3:\n # color to grayscale\n image = np.dot(image, [0.299, 0.587, 0.114]).astype(np.uint8)\n else:\n raise ValueError('invalid image shape: %s' % (image.shape,))\n elif channels == 3:\n if image.ndim == 2:\n # grayscale to color\n image = np.repeat(image, 
3).reshape(image.shape + (3,))\n elif image.shape[2] != 3:\n raise ValueError('invalid image shape: %s' % (image.shape,))\n else:\n raise ValueError('resize_image() expected a PIL.Image.Image or a numpy.ndarray')\n\n # No need to resize\n if image.shape[0] == height and image.shape[1] == width:\n return image\n\n # Resize\n interp = 'bilinear'\n\n width_ratio = float(image.shape[1]) / width\n height_ratio = float(image.shape[0]) / height\n if resize_mode == 'squash' or width_ratio == height_ratio:\n return imresize(image, (height, width), interp=interp)\n elif resize_mode == 'crop':\n # resize to smallest of ratios (relatively larger image), keeping aspect ratio\n if width_ratio > height_ratio:\n resize_height = height\n resize_width = int(round(image.shape[1] / height_ratio))\n else:\n resize_width = width\n resize_height = int(round(image.shape[0] / width_ratio))\n image = imresize(image, (resize_height, resize_width), interp=interp)\n\n # chop off ends of dimension that is still too long\n if width_ratio > height_ratio:\n start = int(round((resize_width - width) / 2.0))\n return image[:, start:start + width]\n else:\n start = int(round((resize_height - height) / 2.0))\n return image[start:start + height, :]\n else:\n if resize_mode == 'fill':\n # resize to biggest of ratios (relatively smaller image), keeping aspect ratio\n if width_ratio > height_ratio:\n resize_width = width\n resize_height = int(round(image.shape[0] / width_ratio))\n if (height - resize_height) % 2 == 1:\n resize_height += 1\n else:\n resize_height = height\n resize_width = int(round(image.shape[1] / height_ratio))\n if (width - resize_width) % 2 == 1:\n resize_width += 1\n image = imresize(image, (resize_height, resize_width), interp=interp)\n elif resize_mode == 'half_crop':\n # resize to average ratio keeping aspect ratio\n new_ratio = (width_ratio + height_ratio) / 2.0\n resize_width = int(round(image.shape[1] / new_ratio))\n resize_height = int(round(image.shape[0] / new_ratio))\n if width_ratio > height_ratio and (height - resize_height) % 2 == 1:\n resize_height += 1\n elif width_ratio < height_ratio and (width - resize_width) % 2 == 1:\n resize_width += 1\n image = imresize(image, (resize_height, resize_width), interp=interp)\n # chop off ends of dimension that is still too long\n if width_ratio > height_ratio:\n start = int(round((resize_width - width) / 2.0))\n image = image[:, start:start + width]\n else:\n start = int(round((resize_height - height) / 2.0))\n image = image[start:start + height, :]\n else:\n raise Exception('unrecognized resize_mode \"%s\"' % resize_mode)\n\n # fill ends of dimension that is too short with random noise\n if width_ratio > height_ratio:\n padding = (height - resize_height) // 2 # floor division: the shape passed to np.random.randint must be int under Python 3\n noise_size = (padding, width)\n if channels > 1:\n noise_size += (channels,)\n noise = np.random.randint(0, 255, noise_size).astype('uint8')\n image = np.concatenate((noise, image, noise), axis=0)\n else:\n padding = (width - resize_width) // 2 # floor division, as above\n noise_size = (height, padding)\n if channels > 1:\n noise_size += (channels,)\n noise = np.random.randint(0, 255, noise_size).astype('uint8')\n image = np.concatenate((noise, image, noise), axis=1)\n\n return image\n\n\ndef embed_image_html(image):\n \"\"\"\n Returns an image embedded in HTML base64 format\n (Based on Caffe's web_demo)\n Arguments:\n image -- a PIL.Image or np.ndarray\n \"\"\"\n if image is None:\n return None\n elif isinstance(image, PIL.Image.Image):\n pass\n elif isinstance(image, np.ndarray):\n image = PIL.Image.fromarray(image)\n else:\n 
raise ValueError('image must be a PIL.Image or a np.ndarray')\n\n # Read format from the image\n fmt = image.format\n if not fmt:\n # default to JPEG\n fmt = 'jpeg'\n else:\n fmt = fmt.lower()\n\n import base64\n from io import BytesIO\n\n # bytes buffer + base64 module; bytes.encode('base64') existed only on Python 2\n string_buf = BytesIO()\n image.save(string_buf, format=fmt)\n data = base64.b64encode(string_buf.getvalue()).decode('ascii')\n return 'data:image/%s;base64,%s' % (fmt, data)\n\n\ndef add_bboxes_to_image(image, bboxes, color='red', width=1):\n \"\"\"\n Draw rectangles on the image for the bounding boxes\n Returns a PIL.Image\n Arguments:\n image -- input image\n bboxes -- bounding boxes in the [((l, t), (r, b)), ...] format\n Keyword arguments:\n color -- color to draw the rectangles\n width -- line width of the rectangles\n Example:\n image = Image.open(filename)\n add_bboxes_to_image(image, bboxes[filename], width=2, color='#FF7700')\n image.show()\n \"\"\"\n def expanded_bbox(bbox, n):\n \"\"\"\n Grow the bounding box by n pixels\n \"\"\"\n l = min(bbox[0][0], bbox[1][0])\n r = max(bbox[0][0], bbox[1][0])\n t = min(bbox[0][1], bbox[1][1])\n b = max(bbox[0][1], bbox[1][1])\n return ((l - n, t - n), (r + n, b + n))\n\n from PIL import Image, ImageDraw\n draw = ImageDraw.Draw(image)\n for bbox in bboxes:\n for n in range(width):\n draw.rectangle(expanded_bbox(bbox, n), outline=color)\n\n return image\n\n\ndef get_layer_vis_square(data,\n allow_heatmap=True,\n normalize=True,\n min_img_dim=100,\n max_width=1200,\n channel_order='RGB',\n colormap='jet',\n ):\n \"\"\"\n Returns a vis_square for the given layer data\n Arguments:\n data -- a np.ndarray\n Keyword arguments:\n allow_heatmap -- if True, convert single channel images to heatmaps\n normalize -- whether to normalize the data when visualizing\n max_width -- maximum width for the vis_square\n \"\"\"\n if channel_order not in ['RGB', 'BGR']:\n raise ValueError('Unsupported channel_order %s' % channel_order)\n if data.ndim == 1:\n # interpret as 1x1 grayscale images\n # (N, 1, 1)\n data = data[:, np.newaxis, np.newaxis]\n elif data.ndim == 2:\n # interpret as 1x1 grayscale images\n # (N, 1, 1)\n data = data.reshape((data.shape[0] * data.shape[1], 1, 1))\n elif data.ndim == 3:\n if data.shape[0] == 3:\n # interpret as a color image\n # (1, H, W,3)\n if channel_order == 'BGR':\n data = data[[2, 1, 0], ...] 
# BGR to RGB (see issue #59)\n data = data.transpose(1, 2, 0)\n data = data[np.newaxis, ...]\n else:\n # interpret as grayscale images\n # (N, H, W)\n pass\n elif data.ndim == 4:\n if data.shape[0] == 3:\n # interpret as HxW color images\n # (N, H, W, 3)\n data = data.transpose(1, 2, 3, 0)\n if channel_order == 'BGR':\n data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)\n elif data.shape[1] == 3:\n # interpret as HxW color images\n # (N, H, W, 3)\n data = data.transpose(0, 2, 3, 1)\n if channel_order == 'BGR':\n data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)\n else:\n # interpret as HxW grayscale images\n # (N, H, W)\n data = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))\n else:\n raise RuntimeError('unrecognized data shape: %s' % (data.shape,))\n\n return get_layer_vis_square_raw(data,\n allow_heatmap,\n normalize,\n min_img_dim,\n max_width,\n colormap,\n )\n\ndef get_image_tales(images, colormap='jet', min_img_dim=100, max_width=1000):\n\n padsize = 1\n # convert to float since we're going to do some math\n images = images.astype('float32')\n\n images -= images.min()\n if images.max() > 0:\n images /= images.max()\n images *= 255\n\n if images.ndim == 3:\n # they're grayscale - convert to a colormap\n redmap, greenmap, bluemap = get_color_map(colormap)\n\n red = np.interp(images * (len(redmap) - 1) / 255.0, range(len(redmap)), redmap)\n green = np.interp(images * (len(greenmap) - 1) / 255.0, range(len(greenmap)), greenmap)\n blue = np.interp(images * (len(bluemap) - 1) / 255.0, range(len(bluemap)), bluemap)\n\n # Slap the channels back together\n images = np.concatenate(\n (red[..., np.newaxis], green[..., np.newaxis], blue[..., np.newaxis]), axis=3)\n images = np.minimum(images, 255)\n images = np.maximum(images, 0)\n\n # convert back to uint8\n images = images.astype('uint8')\n\n # Compute the output image matrix dimensions\n n = int(np.ceil(np.sqrt(images.shape[0])))\n ny = n\n nx = n\n length = images.shape[0]\n if n * (n - 1) >= length:\n nx = n - 1\n\n # Add padding between the images\n padding = ((0, nx * ny - length), (0, padsize), (0, padsize)) + ((0, 0),) * (images.ndim - 3)\n padded = np.pad(images, padding, mode='constant', constant_values=0)\n\n # Tile the images beside each other\n tiles = padded.reshape(\n (ny, nx) + padded.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, padded.ndim + 1)))\n tiles = tiles.reshape((ny * tiles.shape[1], nx * tiles.shape[3]) + tiles.shape[4:])\n\n return tiles\n\ndef get_layer_vis_square_raw(data,\n allow_heatmap=True,\n normalize=True,\n min_img_dim=100,\n max_width=1200,\n colormap='jet',\n ):\n # chop off data so that it will fit within max_width\n padsize = 0\n width = data.shape[2]\n if width > max_width:\n data = data[:1, :max_width, :max_width]\n else:\n if width > 1:\n padsize = 1\n width += 1\n n = max(max_width // width, 1)\n n *= n\n data = data[:n]\n\n if not allow_heatmap and data.ndim == 3:\n data = data[..., np.newaxis]\n\n vis = vis_square(data,\n padsize=padsize,\n normalize=normalize,\n colormap=colormap\n )\n\n # find minimum dimension and upscale if necessary\n _min = sorted(vis.shape[:2])[0]\n if _min < min_img_dim:\n # upscale image\n ratio = min_img_dim / float(_min)\n vis = upscale(vis, ratio)\n return vis\n\n\ndef vis_square(images,\n padsize=1,\n normalize=False,\n colormap='jet',\n ):\n \"\"\"\n Visualize each image in a grid of size approx sqrt(n) by sqrt(n)\n Returns a np.array image\n (Based on Caffe's filter_visualization notebook)\n Arguments:\n images 
-- an array of shape (N, H, W) or (N, H, W, C)\n if C is not set, a heatmap is computed for the result\n Keyword arguments:\n padsize -- how many pixels go inbetween the tiles\n normalize -- if true, scales (min, max) across all images out to (0, 1)\n colormap -- a string representing one of the supported colormaps\n \"\"\"\n assert 3 <= images.ndim <= 4, 'images.ndim must be 3 or 4'\n # convert to float since we're going to do some math\n images = images.astype('float32')\n if normalize:\n images -= images.min()\n if images.max() > 0:\n images /= images.max()\n images *= 255\n\n if images.ndim == 3:\n # they're grayscale - convert to a colormap\n redmap, greenmap, bluemap = get_color_map(colormap)\n\n red = np.interp(images * (len(redmap) - 1) / 255.0, range(len(redmap)), redmap)\n green = np.interp(images * (len(greenmap) - 1) / 255.0, range(len(greenmap)), greenmap)\n blue = np.interp(images * (len(bluemap) - 1) / 255.0, range(len(bluemap)), bluemap)\n\n # Slap the channels back together\n images = np.concatenate(\n (red[..., np.newaxis], green[..., np.newaxis], blue[..., np.newaxis]), axis=3)\n images = np.minimum(images, 255)\n images = np.maximum(images, 0)\n\n # convert back to uint8\n images = images.astype('uint8')\n\n # Compute the output image matrix dimensions\n n = int(np.ceil(np.sqrt(images.shape[0])))\n ny = n\n nx = n\n length = images.shape[0]\n if n * (n - 1) >= length:\n nx = n - 1\n\n # Add padding between the images\n padding = ((0, nx * ny - length), (0, padsize), (0, padsize)) + ((0, 0),) * (images.ndim - 3)\n padded = np.pad(images, padding, mode='constant', constant_values=255)\n\n # Tile the images beside each other\n tiles = padded.reshape(\n (ny, nx) + padded.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, padded.ndim + 1)))\n tiles = tiles.reshape((ny * tiles.shape[1], nx * tiles.shape[3]) + tiles.shape[4:])\n\n if tiles.shape[-1] == 1:\n # grayscale to color\n tiles = np.dstack([tiles.squeeze()] * 3)\n\n return tiles\n\n\ndef get_color_map(name):\n \"\"\"\n Return a colormap as (redmap, greenmap, bluemap)\n Arguments:\n name -- the name of the colormap. 
If unrecognized, will default to 'jet'.\n \"\"\"\n redmap = [0]\n greenmap = [0]\n bluemap = [0]\n if name == 'white':\n # essentially a noop\n redmap = [0, 1]\n greenmap = [0, 1]\n bluemap = [0, 1]\n elif name == 'simple':\n redmap = [0, 1, 1, 1]\n greenmap = [0, 0, 1, 1]\n bluemap = [0, 0, 0, 1]\n elif name == 'hot':\n redmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952,\n 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n greenmap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603163, 0.0714285714285714, 0.1111111111111112, 0.1507936507936507, 0.1904761904761905, 0.23015873015873, 0.2698412698412698, 0.3095238095238093, 0.3492063492063491, 0.3888888888888888, 0.4285714285714284,\n 0.4682539682539679, 0.5079365079365079, 0.5476190476190477, 0.5873015873015872, 0.6269841269841268, 0.6666666666666665, 0.7063492063492065, 0.746031746031746, 0.7857142857142856, 0.8253968253968254, 0.8650793650793651, 0.9047619047619047, 0.9444444444444442, 0.984126984126984, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904745, 0.1269841269841265,\n 0.2063492063492056, 0.2857142857142856, 0.3650793650793656, 0.4444444444444446, 0.5238095238095237, 0.6031746031746028, 0.6825396825396828, 0.7619047619047619, 0.8412698412698409, 0.92063492063492, 1]\n elif name == 'rainbow':\n redmap = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9365079365079367, 0.8571428571428572, 0.7777777777777777, 0.6984126984126986, 0.6190476190476191, 0.53968253968254, 0.4603174603174605, 0.3809523809523814, 0.3015873015873018, 0.2222222222222223, 0.1428571428571432,\n 0.06349206349206415, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603208, 0.08465608465608465, 0.1375661375661377, 0.1904761904761907, 0.2433862433862437, 0.2962962962962963, 0.3492063492063493, 0.4021164021164023, 0.4550264550264553, 0.5079365079365079, 0.5608465608465609, 0.6137566137566139, 0.666666666666667]\n greenmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936,\n 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9841269841269842, 0.9047619047619047, 0.8253968253968256, 0.7460317460317465, 0.666666666666667, 0.587301587301587, 0.5079365079365079, 0.4285714285714288, 0.3492063492063493, 0.2698412698412698, 0.1904761904761907, 0.1111111111111116, 0.03174603174603208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01587301587301582, 0.09523809523809534, 0.1746031746031744, 0.2539682539682535,\n 0.333333333333333, 0.412698412698413, 0.4920634920634921, 0.5714285714285712, 0.6507936507936507, 0.7301587301587302, 0.8095238095238093, 0.8888888888888884, 0.9682539682539679, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n elif name == 'winter':\n greenmap = [0, 1]\n bluemap = [1, 0.5]\n else:\n if name != 'jet':\n print('Warning: colormap \"%s\" not supported. Using jet instead.' % name)\n redmap = [0, 0, 0, 0, 0.5, 1, 1, 1, 0.5]\n greenmap = [0, 0, 0.5, 1, 1, 1, 0.5, 0, 0]\n bluemap = [0.5, 1, 1, 1, 0.5, 0, 0, 0, 0]\n return 255.0 * np.array(redmap), 255.0 * np.array(greenmap), 255.0 * np.array(bluemap)\n" ]
[ [ "numpy.sqrt", "numpy.dot", "numpy.maximum", "numpy.pad", "numpy.repeat", "numpy.ndarray", "numpy.array", "numpy.concatenate", "numpy.random.randint", "numpy.ndindex", "numpy.minimum" ] ]
lkeab/detectron2
[ "d4d2948aed6c0c73558da10f8647661f61470e37" ]
[ "configs/Misc/torchvision_imagenet_R_50.py" ]
[ "\"\"\"\nAn example config file to train a ImageNet classifier with detectron2.\nModel and dataloader both come from torchvision.\nThis shows how to use detectron2 as a general engine for any new models and tasks.\nTo run, use the following command:\n\npython tools/lazyconfig_train_net.py --config-file configs/Misc/torchvision_imagenet_R_50.py \\\n --num-gpus 8 dataloader.train.dataset.root=/path/to/imagenet/\n\"\"\"\n\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom omegaconf import OmegaConf\nimport torchvision\nfrom torchvision.transforms import transforms as T\nfrom torchvision.models.resnet import ResNet, Bottleneck\nfrom fvcore.common.param_scheduler import MultiStepParamScheduler\n\nfrom detectron2.solver import WarmupParamScheduler\nfrom detectron2.solver.build import get_default_optimizer_params\nfrom detectron2.config import LazyCall as L\nfrom detectron2.model_zoo import get_config\nfrom detectron2.data.samplers import TrainingSampler, InferenceSampler\nfrom detectron2.evaluation import DatasetEvaluator\nfrom detectron2.utils import comm\n\n\ndef build_data_loader(dataset, batch_size, num_workers, training=True):\n return torch.utils.data.DataLoader(\n dataset,\n sampler=(TrainingSampler if training else InferenceSampler)(len(dataset)),\n batch_size=batch_size,\n num_workers=num_workers,\n pin_memory=True,\n )\n\n\nclass ClassificationNet(nn.Module):\n def __init__(self, model: nn.Module):\n super().__init__()\n self.model = model\n\n @property\n def device(self):\n return list(self.model.parameters())[0].device\n\n def forward(self, inputs):\n image, label = inputs\n pred = self.model(image.to(self.device))\n if self.training:\n label = label.to(self.device)\n return F.cross_entropy(pred, label)\n else:\n return pred\n\n\nclass ClassificationAcc(DatasetEvaluator):\n def reset(self):\n self.corr = self.total = 0\n\n def process(self, inputs, outputs):\n image, label = inputs\n self.corr += (outputs.argmax(dim=1).cpu() == label.cpu()).sum().item()\n self.total += len(label)\n\n def evaluate(self):\n all_corr_total = comm.all_gather([self.corr, self.total])\n corr = sum(x[0] for x in all_corr_total)\n total = sum(x[1] for x in all_corr_total)\n return {\"accuracy\": corr / total}\n\n\ndataloader = OmegaConf.create()\ndataloader.train = L(build_data_loader)(\n dataset=L(torchvision.datasets.ImageNet)(\n root=\"/path/to/imagenet\",\n split=\"train\",\n transform=L(T.Compose)(\n transforms=[\n L(T.RandomResizedCrop)(size=224),\n L(T.RandomHorizontalFlip)(),\n T.ToTensor(),\n L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n ]\n ),\n ),\n batch_size=256 // 8,\n num_workers=4,\n training=True,\n)\n\ndataloader.test = L(build_data_loader)(\n dataset=L(torchvision.datasets.ImageNet)(\n root=\"${...train.dataset.root}\",\n split=\"val\",\n transform=L(T.Compose)(\n transforms=[\n L(T.Resize)(size=256),\n L(T.CenterCrop)(size=224),\n T.ToTensor(),\n L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n ]\n ),\n ),\n batch_size=256 // 8,\n num_workers=4,\n training=False,\n)\n\ndataloader.evaluator = L(ClassificationAcc)()\n\nmodel = L(ClassificationNet)(\n model=(ResNet)(block=Bottleneck, layers=[3, 4, 6, 3], zero_init_residual=True)\n)\n\n\noptimizer = L(torch.optim.SGD)(\n params=L(get_default_optimizer_params)(),\n lr=0.1,\n momentum=0.9,\n weight_decay=1e-4,\n)\n\nlr_multiplier = L(WarmupParamScheduler)(\n scheduler=L(MultiStepParamScheduler)(\n values=[1.0, 0.1, 0.01, 0.001], milestones=[30, 60, 90, 100]\n 
),\n warmup_length=1 / 100,\n warmup_factor=0.1,\n)\n\n\ntrain = get_config(\"common/train.py\").train\ntrain.init_checkpoint = None\ntrain.max_iter = 100 * 1281167 // 256\n" ]
[ [ "torch.nn.functional.cross_entropy" ] ]
miraclestatus/mllearning
[ "f5db6642e8c05488b133ee627e5f63c92e45ff6e" ]
[ "ml/myscript/Logisticegression.py" ]
[ "import numpy as np\nfrom .metrics import accuracy_score\nclass Logisticegression():\n def __init__(self):\n # 系数\n self.coef_ = None\n # 截距\n self.intercept_ = None\n # 向量\n self._theta = None\n def _sigmoid(self, t):\n return 1./(1. + np.exp(-t))\n\n def fit(self, X_train, y_train, eta=0.01, n_iters=1e4):\n \"\"\"根据训练数据集X_train, y_train, 使用梯度下降法训练Linear Regression模型\"\"\"\n assert X_train.shape[0] == y_train.shape[0], \\\n \"the size of X_train must be equal to the size of y_train\"\n\n def J(theta, X_b, y):\n y_hat = self._sigmoid(X_b.dot(theta))\n try:\n return - np.sum(y*np.log(y_hat) + (1-y)*np.log(1-y_hat)) / len(y)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n return X_b.T.dot(self._sigmoid(X_b.dot(theta))-y) /len(y)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):\n\n theta = initial_theta\n cur_iter = 0\n\n while cur_iter < n_iters:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):\n break\n\n cur_iter += 1\n\n return theta\n\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)\n\n self.intercept_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n return self\n def predict_proba(self,X_predict):\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n return self._sigmoid(X_b.dot(self._theta))\n def predict(self, X_predict):\n proba = self.predict_proba(X_predict)\n return np.array(proba >= 0.5, dtype='int')\n def score(self, X_test, y_test):\n y_predict = self.predict(X_test)\n return accuracy_score(y_test, y_predict)\n def __repr__(self):\n return \"Logisticegression()\"" ]
[ [ "numpy.array", "numpy.exp", "numpy.log", "numpy.zeros" ] ]
civodlu/trw
[ "b9a1cf045f61d6df9c65c014ef63b4048972dcdc" ]
[ "tests/test_transforms_resize_modulo_pad_crop.py" ]
[ "import unittest\nimport trw\nimport torch\nimport numpy as np\n\n\nclass TestTransformsResizeModuloPadCrop(unittest.TestCase):\n def test_crop_mode_torch(self):\n batch = {\n 'images': torch.rand([2, 3, 64, 64], dtype=torch.float32)\n }\n\n tfm = trw.transforms.TransformResizeModuloCropPad(60)\n transformed = tfm(batch)\n assert transformed['images'].shape == (2, 3, 60, 60)\n\n def test_crop_mode_torch_multiples(self):\n # test with multiple of `multiples_of` shape\n batch = {\n 'images': torch.rand([2, 3, 64, 64], dtype=torch.float32)\n }\n\n tfm = trw.transforms.TransformResizeModuloCropPad(10)\n transformed = tfm(batch)\n assert transformed['images'].shape == (2, 3, 60, 60)\n\n def test_crop_mode_torch_different_shape(self):\n batch = {\n 'images': torch.rand([2, 3, 64, 64], dtype=torch.float32),\n 'images2': torch.rand([2, 1, 64, 64], dtype=torch.float32)\n }\n batch['images'][0, 0, 32, 32] = 42.0\n batch['images2'][0, 0, 32, 32] = 42.0\n\n tfm = trw.transforms.TransformResizeModuloCropPad(60)\n transformed = tfm(batch)\n\n # make sure we can handle different shapes of the same dimension\n assert transformed['images'].shape == (2, 3, 60, 60)\n assert transformed['images2'].shape == (2, 1, 60, 60)\n\n # make sure the crop/pad are the same for the different images\n indices = np.where(batch['images'].numpy() == 42)\n assert (batch['images2'][indices] == 42.0).all()\n\n def test_pad_mode_torch(self):\n batch = {\n 'images': torch.rand([2, 3, 65, 65], dtype=torch.float32)\n }\n\n tfm = trw.transforms.TransformResizeModuloCropPad(32, mode='pad')\n transformed = tfm(batch)\n assert transformed['images'].shape == (2, 3, 96, 96)\n" ]
[ [ "torch.rand" ] ]
djhoese/verde
[ "ad14acf94717ee5c6672559f40576f65989753a5" ]
[ "verde/tests/test_scipy.py" ]
[ "\"\"\"\nTest the scipy based interpolator.\n\"\"\"\nimport warnings\n\nimport pytest\nimport pandas as pd\nimport numpy as np\nimport numpy.testing as npt\n\nfrom ..scipygridder import ScipyGridder\nfrom ..coordinates import grid_coordinates\nfrom ..datasets.synthetic import CheckerBoard\n\n\ndef test_scipy_gridder_same_points():\n \"See if the gridder recovers known points.\"\n region = (1000, 5000, -8000, -7000)\n synth = CheckerBoard(region=region)\n data = synth.scatter(size=1000, random_state=0)\n coords = (data.easting, data.northing)\n # The interpolation should be perfect on top of the data points\n for method in [\"nearest\", \"linear\", \"cubic\"]:\n grd = ScipyGridder(method=method)\n grd.fit(coords, data.scalars)\n predicted = grd.predict(coords)\n npt.assert_allclose(predicted, data.scalars)\n npt.assert_allclose(grd.score(coords, data.scalars), 1)\n\n\ndef test_scipy_gridder():\n \"See if the gridder recovers known points.\"\n synth = CheckerBoard(region=(1000, 5000, -8000, -6000))\n data = synth.scatter(size=20000, random_state=0)\n coords = (data.easting, data.northing)\n pt_coords = (3000, -7000)\n true_data = synth.predict(pt_coords)\n # nearest will never be too close to the truth\n grd = ScipyGridder(\"cubic\").fit(coords, data.scalars)\n npt.assert_almost_equal(grd.predict(pt_coords), true_data, decimal=2)\n grd = ScipyGridder(\"linear\").fit(coords, data.scalars)\n npt.assert_almost_equal(grd.predict(pt_coords), true_data, decimal=1)\n\n\ndef test_scipy_gridder_region():\n \"See if the region is gotten from the data is correct.\"\n region = (1000, 5000, -8000, -6000)\n synth = CheckerBoard(region=region)\n # Test using xarray objects\n grid = synth.grid()\n coords = grid_coordinates(region, grid.scalars.shape)\n grd = ScipyGridder().fit(coords, grid.scalars)\n npt.assert_allclose(grd.region_, region)\n # Test using pandas objects\n data = pd.DataFrame(\n {\n \"easting\": coords[0].ravel(),\n \"northing\": coords[1].ravel(),\n \"scalars\": grid.scalars.values.ravel(),\n }\n )\n grd = ScipyGridder().fit((data.easting, data.northing), data.scalars)\n npt.assert_allclose(grd.region_, region)\n\n\ndef test_scipy_gridder_extra_args():\n \"Passing in extra arguments to scipy\"\n data = CheckerBoard().scatter(random_state=100)\n coords = (data.easting, data.northing)\n grd = ScipyGridder(method=\"linear\", extra_args=dict(rescale=True))\n grd.fit(coords, data.scalars)\n predicted = grd.predict(coords)\n npt.assert_allclose(predicted, data.scalars)\n\n\ndef test_scipy_gridder_fails():\n \"fit should fail for invalid method name\"\n data = CheckerBoard().scatter(random_state=0)\n grd = ScipyGridder(method=\"some invalid method name\")\n with pytest.raises(ValueError):\n grd.fit((data.easting, data.northing), data.scalars)\n\n\ndef test_scipy_gridder_warns():\n \"Check that a warning is issued when using weights.\"\n data = CheckerBoard().scatter(random_state=100)\n weights = np.ones_like(data.scalars)\n grd = ScipyGridder()\n msg = \"ScipyGridder does not support weights and they will be ignored.\"\n with warnings.catch_warnings(record=True) as warn:\n grd.fit((data.easting, data.northing), data.scalars, weights=weights)\n assert len(warn) == 1\n assert issubclass(warn[-1].category, UserWarning)\n assert str(warn[-1].message) == msg\n" ]
[ [ "numpy.ones_like", "numpy.testing.assert_allclose" ] ]
liushulinle/CRACSpell
[ "e0b495ed8424be7fdbd7fc3ef8c2919ab195b0e4" ]
[ "src/run_evaluation.py" ]
[ "import sys, os\nimport numpy as np\nimport tensorflow as tf\nfrom bert_tagging import DataProcessor, BertTagging\nimport modeling\nimport optimization\nimport time\nfrom tagging_eval import score_f\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nDEBUG = False\ndef evaluate(FLAGS, label_list=None):\n gpuid = FLAGS.gpuid\n max_sen_len = FLAGS.max_sen_len\n test_file = FLAGS.test_path\n out_dir = FLAGS.output_dir\n model_dir = FLAGS.model_dir\n batch_size = FLAGS.batch_size\n bert_config_path = './conf/bert_config.json'\n vocob_file = './conf/vocab.txt'\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpuid \n\n # data processor\n data_processor = DataProcessor(test_file, max_sen_len, vocob_file, out_dir, label_list=None, is_training=False)\n test_num = data_processor.num_examples\n test_data = data_processor.build_data_generator(batch_size)\n iterator = test_data.make_one_shot_iterator()\n input_ids, input_mask, segment_ids, lmask, label_ids, masked_sample = iterator.get_next()\n\n #load model\n model = BertTagging(bert_config_path, num_class=len(data_processor.get_label_list()), max_sen_len=max_sen_len)\n\n (pred_loss, pred_result, gold_result, gold_mask, r_loss) = model.create_model(input_ids, input_mask, segment_ids, lmask, label_ids, batch_size=batch_size, masked_sample=masked_sample, is_training=False)\n tf_config = tf.ConfigProto(log_device_placement=False)\n tf_config.gpu_options.allow_growth = True\n sess = tf.Session(config=tf_config)\n ckpt = tf.train.get_checkpoint_state(model_dir)\n saver = tf.train.Saver()\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n\n label_list = data_processor.label_list\n ans = []\n all_inputs, all_golds, all_preds = [], [], []\n all_py_inputs, all_py_golds, all_py_preds = [], [], []\n all_fusion_preds = []\n all_inputs_sent, all_golds_sent, all_preds_sent = [], [], []\n for step in range(test_num // batch_size): \n inputs, loss_value, preds, golds, gmask = sess.run([input_ids, pred_loss, pred_result, gold_result, gold_mask])\n\n\n for k in range(batch_size):\n gsent, psent, isent = [], [], []\n for j in range(max_sen_len):\n if gmask[k][j] == 0: continue\n all_golds.append(golds[k][j])\n all_preds.append(preds[k][j])\n all_inputs.append(inputs[k][j])\n gsent.append(label_list[golds[k][j]])\n psent.append(label_list[preds[k][j]])\n isent.append(label_list[inputs[k][j]])\n all_golds_sent.append(gsent)\n all_preds_sent.append(psent)\n all_inputs_sent.append(isent) \n if DEBUG and step > 5: break\n fout = open('%s/pred_sent.txt' % out_dir, 'w', encoding='utf-8')\n fout.writelines('## input/gold/pred TAB ... ...\\n') \n for k in range(len(all_inputs_sent)):\n for j in range(len(all_inputs_sent[k])):\n ic = all_inputs_sent[k][j]\n pc = all_preds_sent[k][j]\n gc = all_golds_sent[k][j]\n fout.writelines('%s/%s/%s\\t' % (ic, gc, pc))\n fout.writelines('\\n')\n fout.close()\n \n all_golds = [label_list[k] for k in all_golds]\n all_preds = [label_list[k] for k in all_preds]\n all_inputs = [label_list[k] for k in all_inputs]\n \n print ('ALL LEN:%d' % len(all_preds))\n print('zi result:') \n p, r, f = score_f((all_inputs, all_golds, all_preds), only_check=False, out_dir=out_dir)\n return f\n\nif __name__ == '__main__':\n\n flags = tf.flags\n ## Required parameters\n flags.DEFINE_string(\"gpuid\", '0', \"The gpu NO. 
\")\n\n ## Optional\n flags.DEFINE_string(\"test_path\", '', \"train path \")\n flags.DEFINE_string(\"output_dir\", '', \"out dir \")\n flags.DEFINE_string(\"model_dir\", '', \"out dir \")\n flags.DEFINE_integer(\"batch_size\", '1', \"out dir \")\n flags.DEFINE_integer(\"max_sen_len\", 64, 'max_sen_len')\n\n\n flags.mark_flag_as_required('gpuid')\n flags.mark_flag_as_required('test_path')\n flags.mark_flag_as_required('output_dir')\n flags.mark_flag_as_required('max_sen_len')\n\n FLAGS = flags.FLAGS\n print ('Confings:')\n print ('\\tgpuid=', FLAGS.gpuid)\n print ('\\ttest_path=', FLAGS.test_path)\n print ('\\toutput_dir=', FLAGS.output_dir)\n evaluate(FLAGS, FLAGS.test_path)\n\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.logging.set_verbosity", "tensorflow.train.Saver", "tensorflow.Session", "tensorflow.ConfigProto" ] ]