repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
kklkodi/empirikushazi | [
"8780a9c4e3e4603e636da2fccbe6f2d8a5afad4b"
] | [
"hw_demo_estimation/data_manipulation.py"
] | [
"import pandas as pd\n\n\ndef compute_directed_edges(edges_w_features):\n \"\"\"\n Converts the undirected edge information into a directed format, by duplicating each edge and flipping the node\n attributes to make it point in the opposite direction. This makes computation from the viewpoint of each node\n simpler.\n :param edges_w_features:\n :return:\n \"\"\"\n opposite = edges_w_features.copy()\n # flipping the attributes of the endpoints\n opposite[[\"smaller_id\", \"greater_id\", \"AGE_x\", \"AGE_y\", \"gender_x\", \"gender_y\"]] = \\\n opposite[[\"greater_id\", \"smaller_id\", \"AGE_y\", \"AGE_x\", \"gender_y\", \"gender_x\"]]\n directed = pd.concat([edges_w_features, opposite], ignore_index=True)\n return directed\n\n\ndef add_nbrs_by_gender(nodes, directed_edges):\n \"\"\"\n Adds one column for each gender to the nodes table, which contain the number of neighbors of the given gender\n for each ndoe. Unknown-gender neighbors are not counted into either gender.\n :param nodes: Node feature data as DataFrame\n :param directed_edges: Edge data as DataFrame\n :return: the nodes DataFrame with the columns 0_nbrs and 1_nbrs added to it\n \"\"\"\n w_nbrs = nodes.copy()\n w_nbrs = w_nbrs.set_index(\"user_id\")\n nbrs = compute_nbrs_with_gender(directed_edges, 0.0)\n w_nbrs = w_nbrs.merge(nbrs, on=\"user_id\")\n nbrs = compute_nbrs_with_gender(directed_edges, 1.0)\n w_nbrs = w_nbrs.merge(nbrs, on=\"user_id\")\n return w_nbrs\n\n\ndef compute_nbrs_with_gender(directed_edges, gender):\n \"\"\"\n Counts the number of neighbors with the given gender for each node.\n :param directed_edges: directed edge information as a DataFrame\n :param gender: which gender the counted neighbors should have\n :return: A table containing a single column with the number of filtered neighbors.\n \"\"\"\n nbrs = directed_edges[directed_edges[\"gender_y\"] == gender].groupby(\"smaller_id\").count()[\"greater_id\"].to_frame()\n nbrs = nbrs.rename_axis(\"user_id\").rename(columns={\"greater_id\": (\"%d_nbrs\" % gender)})\n return nbrs"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Jianningli/MIA | [
"bf9b90b9972cd43f885f676c926a984bf38b743a"
] | [
"source/EncoderDecoder_boundaryloss.py"
] | [
"from __future__ import print_function, division\r\n\r\nimport os\r\nimport numpy as np\r\nfrom keras.layers import BatchNormalization, Activation\r\nfrom keras.layers import Input, Dense, Flatten, Dropout\r\nfrom keras.layers.advanced_activations import LeakyReLU\r\nfrom keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\r\nfrom keras.models import Sequential, Model\r\nfrom keras.models import load_model\r\nfrom keras.optimizers import Adam\r\nfrom sklearn.metrics import hamming_loss\r\nfrom utils import mkdirs\r\nfrom glob import glob\r\nimport random\r\nimport nrrd\r\nfrom scipy.ndimage import zoom\r\nfrom keras import backend as K\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom scipy.ndimage import distance_transform_edt as distance\r\n\r\n\r\n\r\n''' Boundary loss function adapted from https://github.com/LIVIAETS/boundary-loss.\r\n Credit goes to the original authors\r\n'''\r\n\r\n\r\n\r\ndef surface_loss_keras(y_true, y_pred):\r\n multipled = y_pred * y_true\r\n return K.mean(multipled)\r\n\r\n\r\n\r\ndef build_generator():\r\n\r\n model = Sequential()\r\n\r\n # Encoder\r\n model.add(Conv3D(32, kernel_size=5, strides=2, input_shape=vol_shape, padding=\"same\"))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(BatchNormalization(momentum=0.8))\r\n model.add(Conv3D(64, kernel_size=5, strides=2, padding=\"same\"))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(BatchNormalization(momentum=0.8))\r\n model.add(Conv3D(128, kernel_size=5, strides=2, padding=\"same\"))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(BatchNormalization(momentum=0.8))\r\n model.add(Conv3D(512, kernel_size=1, strides=2, padding=\"same\"))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(Dropout(0.5))\r\n\r\n # Decoder\r\n model.add(UpSampling3D())\r\n model.add(Deconv3D(256, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('relu'))\r\n model.add(BatchNormalization(momentum=0.8))\r\n model.add(Deconv3D(128, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('relu'))\r\n model.add(BatchNormalization(momentum=0.8))\r\n\r\n model.add(UpSampling3D())\r\n model.add(Deconv3D(64, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('relu'))\r\n model.add(BatchNormalization(momentum=0.8))\r\n\r\n model.add(UpSampling3D())\r\n model.add(Deconv3D(channels, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('tanh'))\r\n model.add(BatchNormalization(momentum=0.8))\r\n\r\n model.add(UpSampling3D())\r\n model.add(Deconv3D(channels, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('tanh'))\r\n\r\n\r\n model.summary()\r\n\r\n\r\n return model\r\n\r\n\r\n\r\ndef resizing(label):\r\n a,b,c=label.shape\r\n resized_data = zoom(label,(128/a,128/b,64/c),order=2, mode='constant') \r\n return resized_data\r\n\r\ndef resizing_up(label):\r\n resized_data = zoom(label,(4,4,2),order=2, mode='constant') \r\n return resized_data\r\n\r\n\r\ndef save_model(MODEL_DIR):\r\n def save(model, model_name):\r\n model_path = os.path.join(MODEL_DIR, \"%s.h5\" % model_name)\r\n model.save(model_path)\r\n save(generator, \"boundaryloss\")\r\n\r\n\r\ndef calc_dist_map(seg):\r\n res = np.zeros_like(seg)\r\n posmask = seg.astype(np.bool)\r\n\r\n if posmask.any():\r\n negmask = ~posmask\r\n res = distance(negmask) * negmask - (distance(posmask) - 1) * posmask\r\n\r\n return res\r\n\r\n\r\ndef train(generator,MODEL_DIR, epochs, batch_size=16, sample_interval=50):\r\n\r\n\r\n ipt=np.load('ipt_85_128_128_64.npy')\r\n gt=np.load('gt_denoised.npy')\r\n\r\n for epoch in range(epochs):\r\n 
print(epoch)\r\n idx = np.random.randint(0, ipt.shape[0], batch_size)\r\n masked_vols=ipt[idx]\r\n missing_parts=gt[idx]\r\n \r\n gt_dist_transform=np.array([calc_dist_map(y) for y in missing_parts]).astype(np.float32)\r\n \r\n print('masked_vols:',masked_vols.shape)\r\n print('missing_parts:',missing_parts.shape)\r\n print('gt_dist_transform:',gt_dist_transform.shape)\r\n # Train Generator\r\n g_loss = generator.train_on_batch(masked_vols, gt_dist_transform)\r\n print(g_loss)\r\n if epoch % sample_interval == 0:\r\n save_model(MODEL_DIR)\r\n\r\n\r\n\r\n\r\ndef evaluate(testdir,test_results_dir):\r\n print('evaluating the model...')\r\n\r\n test_list=glob('{}/*.nrrd'.format(testdir))\r\n for i in range(len(test_list)):\r\n data,h=nrrd.read(test_list[i])\r\n data=data[:,:,data.shape[2]-128:data.shape[2]]\r\n data=resizing(data)\r\n data=np.expand_dims(np.expand_dims(data,axis=0),axis=4)\r\n gen_missing = generator.predict(data)\r\n\r\n gen_missing=(gen_missing>0)\r\n gen_missing=gen_missing+1-1\r\n gen_missing_up=resizing_up(gen_missing[0,:,:,:,0])\r\n filename1=test_results_dir+test_list[i][-10:-5]+'.nrrd'\r\n nrrd.write(filename1,gen_missing[0,:,:,:,0],h)\r\n filename2=test_results_dir+'resized/'+test_list[i][-10:-5]+'.nrrd'\r\n nrrd.write(filename2,gen_missing_up,h)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n vol_rows = 128\r\n vol_cols = 128\r\n vol_height = 64\r\n mask_height = 128\r\n mask_width = 128\r\n mask_length = 64\r\n channels = 1\r\n num_classes = 2\r\n vol_shape = (vol_rows, vol_cols, vol_height, channels)\r\n missing_shape = (mask_height, mask_width, mask_length, channels)\r\n test_dir=\"../defective_skull_test\"\r\n test_results_dir=\"../results_ae_boundary/\"\r\n MODEL_DIR = '../boundarylosss'\r\n mkdirs(MODEL_DIR)\r\n try:\r\n generator = load_model('../boundaryloss.h5',custom_objects={'surface_loss_keras': surface_loss_keras})\r\n print(\"Loaded checkpoints\")\r\n except:\r\n generator = build_generator()\r\n print(\"No checkpoints found\")\r\n\r\n masked_vol = Input(shape=vol_shape)\r\n optimizer = Adam(0.0002, 0.5)\r\n generator.compile(loss=surface_loss_keras,optimizer=optimizer)\r\n train(generator,MODEL_DIR,epochs=3000, batch_size=4, sample_interval=200)\r\n #evaluate(test_dir,test_results_dir)\r\n\r\n\r\n"
] | [
[
"numpy.expand_dims",
"scipy.ndimage.zoom",
"scipy.ndimage.distance_transform_edt",
"numpy.zeros_like",
"numpy.load",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
johnhendrick/adventofcode2021 | [
"04d884d65eebc0178ddb79b1ff2a5c088c349e5e",
"04d884d65eebc0178ddb79b1ff2a5c088c349e5e"
] | [
"adventcode/day7.py",
"adventcode/day1.py"
] | [
"import numpy as np\nfrom utils import read_file\n\nfile_path = './input/day7.txt'\n\n\ndef parse_file(file_content):\n return [int(x) for x in file_content.split(',')]\n\n\ndef fuel(steps):\n return steps*(steps+1)/2\n\n\ndata = parse_file(read_file(file_path))\n\nnp_data = np.array(data)\nmedian = np.median(np_data)\n\ndistance_sum = np.abs(np_data - median).sum()\nprint(distance_sum)\n\n# part 2\nsimulation = []\npossible_mid = list(range(min(np_data), max(np_data)+1))\n\nfor mid in possible_mid:\n fuel_req = fuel(np.abs(np_data-mid)).sum()\n simulation.append(fuel_req)\n\nprint('part 2 ', min(simulation))\n",
"import pandas as pd\n\nurl = \"https://adventofcode.com/2021/day/1/input\"\n\ndata = pd.read_csv('./input/input.txt', names=['input'], sep='\\n')\ndata_list = data.input.tolist()\n\n\ndef increase_count(data_list):\n \"\"\"\n Args:\n data_list (list): input list\n\n Returns:\n int: number of increment\n \"\"\"\n count = 0\n for i, ele in enumerate(data_list):\n if i < len(data_list)-1:\n if ele < data_list[i+1]:\n count += 1\n else:\n pass\n return count\n\n\ncount = increase_count(data_list)\nprint(count)\n\ndata['roll3'] = data.input.rolling(window=3).sum()\n# part 2\nprint(increase_count(data.roll3.to_list()))\n"
] | [
[
"numpy.median",
"numpy.array",
"numpy.abs"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
cjluo/money-monkey | [
"b43e3c6df4221d14d78e1f6c8487ec2308286be1"
] | [
"model_presenter.py"
] | [
"import matplotlib\n# Do not use X for plotting\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import DateFormatter\nfrom matplotlib.ticker import FormatStrFormatter\nfrom tempfile import NamedTemporaryFile\n\n\ndef plot_to_file(symbol, timestamp, close, score):\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n ax1.plot(timestamp, close, color='r', marker='.', label=\"close\")\n ax2.plot(timestamp, score, color='b', marker='.', label=\"score\")\n plt.title(\"%s: score %0.2f\" % (symbol, score[-1]))\n\n fig.autofmt_xdate()\n ax1.xaxis.set_major_formatter(DateFormatter(\"%H:%M\"))\n ax1.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))\n\n h1, l1 = ax1.get_legend_handles_labels()\n h2, l2 = ax2.get_legend_handles_labels()\n ax1.legend(h1 + h2, l1 + l2)\n\n jpg_file = NamedTemporaryFile(delete=False, suffix='.jpg')\n jpg_file.close()\n\n fig.set_dpi(100)\n fig.set_size_inches(10, 4)\n fig.set_tight_layout(True)\n\n fig.savefig(jpg_file.name, quality=50)\n plt.close(fig)\n return jpg_file.name\n"
] | [
[
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.title",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.close",
"matplotlib.ticker.FormatStrFormatter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TheRensselaerIDEA/ExplainableAI | [
"ea7a24c660120b61dc3d346ac0a0bc845c5eb0cf"
] | [
"build/lib/explainableAI/metrics/TAUOPTIMAL.py"
] | [
"import numpy as np\ndef optimalTau(probabilities, ylabels):\n \n \"\"\" Finds the Optimal tau based on the F1 score\"\"\"\n \n #STEP 1 SORT PROBABILITIES AND LABELS\n sortedIndexes = np.argsort( probabilities )\n probabilities1 = probabilities[ sortedIndexes ]\n ylabels1 = ylabels[ sortedIndexes ]\n \n #INITIALIZE THRESHOLD TO BE 0\n #SO EVERY POINT IS PREDICTED AS CLASS 1\n \n # initialPrediction = np.ones( probabilities1.shape[0] ) #matrix with all 1's - INITIAL PREDICTION\n \n TP = len( np.where( ylabels1 == 1)[0] ) #AT THE BEGGINING THE TRUE POSITIVES ARE THE SAME \n #AS THE POSITIVE LABELS OF THE DATASET\n \n FN = 0 #AT THE BEGGINING WE HAVE 0 POSITIVE POINTS CLASSIFIED AS NEGATIVE\n #XIAO HERE YOU WILL PUT ylabels == -1\n FP = len( np.where( ylabels1 == -1)[0] )\n \n precision = TP/(TP + FP)\n recall = TP/ (TP + FN)\n \n# print(precision, recall, TP, FN, FP)\n# return\n f1 = ( 2*precision*recall )/( precision + recall ) \n \n threshold = probabilities1.min()-0.1\n prob_F1 = [[threshold, f1]]\n \n for i, probability in enumerate( probabilities1 ):\n \n #print( \" Iteration: {}\".format(i))\n \n \n if ylabels1[i] == 1:\n \n TP -= 1\n FN += 1\n \n if ylabels1[i] == -1: #FOR XIAO HERE -1\n FP -= 1\n \n if (TP + FP == 0):\n \n precision = 0\n \n else:\n precision = TP/(TP + FP)\n \n recall = TP/ (TP + FN)\n \n if (precision + recall) == 0:\n \n f1new = 0\n \n else:\n \n f1new = ( 2*precision*recall )/( precision + recall ) \n \n prob_F1.append( [probability, f1new] ) #thresholds with F1 scores if you want to draw a graph\n \n if f1new >= f1 :\n threshold = probability\n f1 = f1new\n prec = precision\n rec = recall\n \n \n return threshold, f1, np.array(prob_F1), prec, rec\n"
] | [
[
"numpy.argsort",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tranbaohieu/SAFL_pytorch | [
"4d582974e40031fd595c663489f631dff1abbb5a"
] | [
"demo.py"
] | [
"from __future__ import absolute_import\nimport sys\nsys.path.append('./')\nimport timeit\nimport argparse\nimport os\nimport os.path as osp\nimport numpy as np\nimport math\nimport time\nfrom PIL import Image, ImageFile\n\nimport torch\nfrom torch import nn, optim\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom config import get_args\nfrom lib import datasets, evaluation_metrics, models\nfrom lib.models.model_builder import ModelBuilder\nfrom lib.datasets.dataset import LmdbDataset, AlignCollate\nfrom lib.loss import SequenceCrossEntropyLoss\nfrom lib.trainers import Trainer\nfrom lib.evaluators import Evaluator\nfrom lib.utils.logging import Logger, TFLogger\nfrom lib.utils.serialization import load_checkpoint, save_checkpoint\nfrom lib.utils.osutils import make_symlink_if_not_exists\nfrom lib.evaluation_metrics.metrics import get_str_list\nfrom lib.utils.labelmaps import get_vocabulary, labels2strs\n\nglobal_args = get_args(sys.argv[1:])\n\ndef image_process(image_path, imgH=32, imgW=100, keep_ratio=False, min_ratio=1):\n img = Image.open(image_path).convert('RGB')\n\n if keep_ratio:\n w, h = img.size\n ratio = w / float(h)\n imgW = int(np.floor(ratio * imgH))\n imgW = max(imgH * min_ratio, imgW)\n\n img = img.resize((imgW, imgH), Image.BILINEAR)\n img = transforms.ToTensor()(img)\n img.sub_(0.5).div_(0.5)\n\n return img\n\nclass DataInfo(object):\n \"\"\"\n Save the info about the dataset.\n This a code snippet from dataset.py\n \"\"\"\n def __init__(self, voc_type):\n super(DataInfo, self).__init__()\n self.voc_type = voc_type\n\n assert voc_type in ['LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS']\n self.EOS = 'EOS'\n self.PADDING = 'PADDING'\n self.UNKNOWN = 'UNKNOWN'\n self.voc = get_vocabulary(voc_type, EOS=self.EOS, PADDING=self.PADDING, UNKNOWN=self.UNKNOWN)\n self.char2id = dict(zip(self.voc, range(len(self.voc))))\n self.id2char = dict(zip(range(len(self.voc)), self.voc))\n\n self.rec_num_classes = len(self.voc)\n\n\ndef main(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n cudnn.benchmark = True\n torch.backends.cudnn.deterministic = True\n\n args.cuda = args.cuda and torch.cuda.is_available()\n # args.cuda = False\n if args.cuda:\n print('using cuda.')\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n else:\n torch.set_default_tensor_type('torch.FloatTensor')\n \n # Create data loaders\n if args.height is None or args.width is None:\n args.height, args.width = (32, 100)\n\n dataset_info = DataInfo(args.voc_type)\n\n # Create model\n model = ModelBuilder(arch=args.arch, rec_num_classes=dataset_info.rec_num_classes,\n sDim=args.decoder_sdim, attDim=args.attDim, max_len_labels=args.max_len,\n eos=dataset_info.char2id[dataset_info.EOS], STN_ON=args.STN_ON, encoder_block=4, decoder_block=4)\n\n # Load from checkpoint\n if args.resume:\n checkpoint = load_checkpoint(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n\n if args.cuda:\n device = torch.device(\"cuda\")\n model = model.to(device)\n model = nn.DataParallel(model)\n\n #Save model \n torch.save(model, \"model.pth\")\n # Evaluation\n model.eval()\n img = image_process(args.image_path)\n with torch.no_grad():\n img = img.to(device)\n input_dict = {}\n input_dict['images'] = img.unsqueeze(0)\n # TODO: testing should be more clean.\n # to be compatible with the lmdb-based testing, need to construct some meaningless variables.\n rec_targets = 
torch.IntTensor(1, args.max_len).fill_(1)\n rec_targets[:,args.max_len-1] = dataset_info.char2id[dataset_info.EOS]\n input_dict['rec_targets'] = rec_targets\n input_dict['rec_lengths'] = [args.max_len]\n start = timeit.timeit()\n output_dict = model(input_dict)\n end = timeit.timeit()\n pred_rec = output_dict['output']['pred_rec']\n import cv2\n from matplotlib import cm\n import matplotlib.pyplot as plt\n rec_im = output_dict['output']['rectified_images'].squeeze().transpose(2, 0)\n rec_im = rec_im.transpose(1, 0)\n rec_im = (rec_im*0.5 + 0.5)*255\n rec_im = rec_im.cpu().detach().numpy()\n print(rec_im.shape)\n # new_im = Image.fromarray(rec_im)\n\n # plt.imsave(\"rec_im.png\", rec_im)\n # print(rec_im*255)\n cv2.imwrite(\"rec.png\", rec_im)\n pred_str, _ = get_str_list(pred_rec, input_dict['rec_targets'], dataset=dataset_info)\n print('Recognition result: {0}'.format(pred_str[0]))\n print('{:f}'.format(end-start))\n\n\nif __name__ == '__main__':\n # parse the config\n args = get_args(sys.argv[1:])\n main(args)"
] | [
[
"torch.set_default_tensor_type",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.IntTensor",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"numpy.floor",
"torch.nn.DataParallel",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
erthalion/ansible-ycsb | [
"86916fbc4128629df77090b49819a9a4c4d15ea4"
] | [
"parse.py"
] | [
"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport glob\nimport json\nimport logging\nimport itertools\nimport toolz\nimport statistics\n\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text', usetex=True)\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import spline\n\nfrom parser import Parser\nfrom base import (FailedExperiment, MetricNotFound, RESULT_PATTERN,\n MetricData, PlotNames)\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef compare_result_file(file_name):\n match = RESULT_PATTERN.search(file_name)\n if match:\n return int(match.group(1))\n else:\n logger.error(f\"Cannot find threads in file {file_name}\")\n\n\ndef thread_info(threads, db, stage, metric):\n PATH = \"workload?_threads_{}_*/*/{}_{}\"\n\n def _get_metric(data):\n return getattr(Parser, metric)(data)\n\n def get_metric(file_name, data):\n try:\n return _get_metric(data)\n except FailedExperiment:\n logger.error(f\"Experiment for {db} with {threads} threads \" +\n f\"from {file_name} is failed\")\n\n return 0\n\n except MetricNotFound:\n logger.error(\n f\"Experiment for {db} with {threads} \" +\n f\"threads from {file_name} does not have metric {metric}\"\n )\n\n return 0\n except Exception as ex:\n print(f\"Got an Exception {ex} parsing {file_name}\")\n\n def get_median_metric(thread, file_names):\n data_list = [\n (file_name, json.loads(open(file_name).read()))\n for file_name in file_names\n ]\n\n metrics = [get_metric(*row) for row in data_list]\n metrics = list(filter(toolz.identity, metrics))\n val = statistics.mean(metrics)\n logger.debug(\"Metrics for thread {thread} : {metrics}\")\n logger.debug(\"Median for thread {thread} : {val}\")\n return val\n\n data_files = sorted(glob.glob(PATH.format(threads, stage, db)), key=compare_result_file)\n data_files_by_threads = toolz.groupby(compare_result_file, data_files)\n return [\n get_median_metric(thread, file_names)\n for thread, file_names in data_files_by_threads.items()\n ]\n\n\ndef main(db, stage, metric, threads=None):\n if threads is None:\n threads = \"*\"\n\n if db:\n metric_values = thread_info(threads, db, stage, metric)\n return [MetricData(metric, metric_values, db)]\n else:\n pg_metric_values = thread_info(threads, Databases.PG.value, stage, metric)\n mysql_metric_values = thread_info(threads, Databases.MYSQL.value, stage, metric)\n mongodb_metric_values = thread_info(threads, Databases.MONGO.value, stage, metric)\n return [\n MetricData(metric, pg_metric_values, Databases.PG.value),\n MetricData(metric, mysql_metric_values, Databases.MYSQL.value),\n MetricData(metric, mongodb_metric_values, Databases.MONGODB.value),\n ]\n\n\ndef print_metrics(metrics):\n for metric in metrics:\n print(f\"{metric.name} {metric.db} {metric.values}\")\n\n\ndef get_metric_option(metric):\n return \"_\".join(metric.name.split(\"_\")[2:])\n\n\ndef plot_metrics(metrics):\n plt, ax = prepare_plot(PlotNames.get(get_metric_option(metrics[0]), \"\"))\n\n for metric in metrics:\n ox, values = interpolate_metric(metric)\n plot_values(ax, ox, values, metric.db)\n\n ax.legend(shadow=True)\n plt.savefig(f\"{metric.db}_{metric.name}.png\")\n\n\ndef interpolate_metric(metric):\n interpolated_x = np.linspace(1, 100, 100)\n original_x = [1] + list(range(10, 110, 10))\n return (interpolated_x, spline(original_x, metric.values, interpolated_x))\n\n\ndef prepare_plot(plot_name):\n ax = plt.subplot()\n ax.set_facecolor(\"#eaeaf2\")\n ax.grid(color='#ffffff', linestyle='-')\n 
plt.title(plot_name)\n return plt, ax\n\n\ndef plot_values(ax, ox, oy, db):\n ax.plot(ox, oy, '#8172b2', label=db, linewidth=2)\n\n\nif __name__ == \"__main__\":\n\n args = iter(sys.argv[1:])\n db = next(args, None)\n stage = next(args, None)\n metric = next(args, None)\n threads = next(args, None)\n plot = bool(os.environ.get(\"PLOT\", 0))\n\n if os.environ.get(\"DEBUG\"):\n logger.setLevel(os.environ.get(\"LOG_LEVEL\", logging.INFO))\n\n if plot:\n plot_metrics(main(db, stage, metric, threads))\n else:\n print_metrics(main(db, stage, metric, threads))\n"
] | [
[
"matplotlib.pyplot.title",
"numpy.linspace",
"scipy.interpolate.spline",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplot",
"matplotlib.rc"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.19",
"0.18",
"1.2",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
CnBDM-Su/LV_XNN | [
"52f1ab2041d734a4b35066a7d2ffef1a3da7d792"
] | [
"scripts/benchmark/xgb_test.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 19 10:59:51 2020\n\n@author: suyu\n\"\"\"\nfrom sklearn.model_selection import train_test_split,GridSearchCV,PredefinedSplit\nfrom sklearn.metrics import make_scorer,mean_squared_error,roc_auc_score,mean_absolute_error,log_loss\nfrom xgboost import XGBClassifier,XGBRegressor\nimport numpy as np\nimport pandas as pd\nimport sys\nsys.path.append('../')\nfrom gammli.DataReader import data_initialize\n\ndef xgb(wc, data, meta_info_ori, task_type=\"Regression\", random_state=0):\n \n train, test = train_test_split(data, test_size=0.2, random_state=0)\n tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t= data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=True)\n \n x = np.concatenate([tr_x,val_x])\n y = np.concatenate([tr_y,val_y])\n \n val_fold = np.ones(x.shape[0])\n val_fold[:tr_x.shape[0]] = -1\n if task_type == \"Regression\":\n\n base = XGBRegressor(n_estimators=100, random_state=random_state)\n grid = GridSearchCV(base, param_grid={\"max_depth\": (3, 4, 5, 6, 7, 8)},\n scoring={\"mse\": make_scorer(mean_squared_error, greater_is_better=False)},\n cv=PredefinedSplit(val_fold), refit=False, n_jobs=-1, error_score=np.nan)\n grid.fit(x, y.ravel())\n model = grid.estimator.set_params(**grid.cv_results_[\"params\"][np.where((grid.cv_results_[\"rank_test_mse\"] == 1))[0][0]])\n cold_mae = []\n cold_rmse = []\n warm_mae = []\n warm_rmse = []\n for times in range(10):\n \n train, test = train_test_split(data, test_size=0.2, random_state=times)\n tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t = data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=False)\n \n model.fit(tr_x, tr_y.ravel())\n pred_test = model.predict(te_x).reshape([-1, 1])\n pred_test = sy.inverse_transform(pred_test.reshape(-1,1))\n te_y = sy_t.inverse_transform(te_y.reshape(-1,1))\n \n if wc == 'warm':\n if len([(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')])!=1:\n warm_y = te_y[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]\n warm_pred = pred_test[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]\n else:\n warm_y = te_y\n warm_pred= pred_test\n warm_mae.append(mean_absolute_error(warm_y,warm_pred))\n warm_rmse.append(mean_squared_error(warm_y,warm_pred)**0.5)\n \n if wc == 'cold':\n try:\n [(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')] != [True]\n print('no cold samples')\n continue\n except:\n cold_y = te_y[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]\n cold_pred = pred_test[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]\n cold_mae.append(mean_absolute_error(cold_y,cold_pred))\n cold_rmse.append(mean_squared_error(cold_y,cold_pred)**0.5)\n\n if wc == 'warm':\n \n i_result = np.array(['xgboost',np.mean(warm_mae),np.mean(warm_rmse),np.std(warm_mae),np.std(warm_rmse)]).reshape(1,-1)\n result = pd.DataFrame(i_result,columns=['model','warm_mae','warm_rmse','std_warm_mae','std_warm_rmse'])\n\n if wc == 'cold': \n i_result = np.array(['xgboost',np.mean(cold_mae),np.mean(cold_rmse),np.std(cold_mae),np.std(cold_rmse)]).reshape(1,-1)\n result = pd.DataFrame(i_result,columns=['model','cold_mae','cold_rmse','std_cold_mae','std_cold_rmse',])\n \n\n return result\n\n\n elif task_type == \"Classification\":\n\n base = XGBClassifier(n_estimators=100, random_state=random_state)\n grid = GridSearchCV(base, param_grid={\"max_depth\": (3, 4, 5, 6, 7, 8)},\n scoring={\"auc\": 
make_scorer(roc_auc_score, needs_proba=True)},\n cv=PredefinedSplit(val_fold), refit=False, n_jobs=-1, error_score=np.nan)\n grid.fit(x, y.ravel())\n model = grid.estimator.set_params(**grid.cv_results_[\"params\"][np.where((grid.cv_results_[\"rank_test_auc\"] == 1))[0][0]])\n \n cold_auc = []\n cold_logloss = []\n warm_auc = []\n warm_logloss = []\n for times in range(10):\n \n train, test = train_test_split(data, test_size=0.2, random_state=times)\n tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info , sy, sy_t= data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=False)\n\n model.fit(tr_x, tr_y.ravel())\n pred_test = model.predict_proba(te_x)[:,-1].reshape([-1, 1])\n \n if wc == 'warm':\n if len([(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')])!=1:\n warm_y = te_y[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]\n warm_pred = pred_test[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]\n else:\n warm_y = te_y\n warm_pred= pred_test\n warm_auc.append(roc_auc_score(warm_y,warm_pred))\n warm_logloss.append(log_loss(warm_y,warm_pred)) \n \n if wc == 'cold':\n \n try:\n [(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')] != [True]\n print('no cold samples')\n continue\n except:\n cold_y = te_y[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]\n cold_pred = pred_test[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]\n cold_auc.append(roc_auc_score(cold_y,cold_pred))\n cold_logloss.append(log_loss(cold_y,cold_pred))\n\n if wc == 'warm':\n i_result = np.array(['xgboost',np.mean(warm_auc),np.mean(warm_logloss),np.std(warm_auc),np.std(warm_logloss)]).reshape(1,-1)\n result = pd.DataFrame(i_result,columns=['model','warm_auc','warm_logloss','std_warm_auc','std_warm_logloss'])\n\n if wc == 'cold':\n i_result = np.array(['xgboost',np.mean(cold_auc),np.mean(cold_logloss),np.std(cold_auc),np.std(cold_logloss)]).reshape(1,-1)\n result = pd.DataFrame(i_result,columns=['model','cold_auc','cold_logloss','std_cold_auc','std_cold_logloss'])\n \n\n return result"
] | [
[
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.mean_absolute_error",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.model_selection.PredefinedSplit",
"numpy.concatenate",
"numpy.ones",
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.log_loss",
"numpy.mean",
"sklearn.metrics.make_scorer",
"numpy.std",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Cyber-Neuron/inception_v3 | [
"d3f566ccfc17f4779900a9f2d81dd593b3100df5"
] | [
"inception/inception/inception_train.py"
] | [
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A library to train Inception using multiple GPU's with synchronous updates.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nfrom datetime import datetime\nimport os.path\nimport re\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom inception import image_processing\nfrom inception import inception_model as inception\nfrom inception.slim import slim\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',\n \"\"\"Directory where to write event logs \"\"\"\n \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 10000000,\n \"\"\"Number of batches to run.\"\"\")\ntf.app.flags.DEFINE_string('subset', 'train',\n \"\"\"Either 'train' or 'validation'.\"\"\")\n\n# Flags governing the hardware employed for running TensorFlow.\ntf.app.flags.DEFINE_integer('num_gpus', 1,\n \"\"\"How many GPUs to use.\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n \"\"\"Whether to log device placement.\"\"\")\n\n# Flags governing the type of training.\ntf.app.flags.DEFINE_boolean('fine_tune', False,\n \"\"\"If set, randomly initialize the final layer \"\"\"\n \"\"\"of weights in order to train the network on a \"\"\"\n \"\"\"new task.\"\"\")\ntf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',\n \"\"\"If specified, restore this pretrained model \"\"\"\n \"\"\"before beginning any training.\"\"\")\n\n# **IMPORTANT**\n# Please note that this learning rate schedule is heavily dependent on the\n# hardware architecture, batch size and any changes to the model architecture\n# specification. Selecting a finely tuned learning rate schedule is an\n# empirical process that requires some experimentation. Please see README.md\n# more guidance and discussion.\n#\n# With 8 Tesla K40's and a batch size = 256, the following setup achieves\n# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs).\n# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997.\ntf.app.flags.DEFINE_float('initial_learning_rate', 0.1,\n \"\"\"Initial learning rate.\"\"\")\ntf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,\n \"\"\"Epochs after which learning rate decays.\"\"\")\ntf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,\n \"\"\"Learning rate decay factor.\"\"\")\n\n# Constants dictating the learning rate schedule.\nRMSPROP_DECAY = 0.9 # Decay term for RMSProp.\nRMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.\nRMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.\n\n\ndef _tower_loss(images, labels, num_classes, scope):\n \"\"\"Calculate the total loss on a single tower running the ImageNet model.\n\n We perform 'batch splitting'. This means that we cut up a batch across\n multiple GPU's. 
For instance, if the batch size = 32 and num_gpus = 2,\n then each tower will operate on an batch of 16 images.\n\n Args:\n images: Images. 4D tensor of size [batch_size, FLAGS.image_size,\n FLAGS.image_size, 3].\n labels: 1-D integer Tensor of [batch_size].\n num_classes: number of classes\n scope: unique prefix string identifying the ImageNet tower, e.g.\n 'tower_0'.\n\n Returns:\n Tensor of shape [] containing the total loss for a batch of data\n \"\"\"\n # When fine-tuning a model, we do not restore the logits but instead we\n # randomly initialize the logits. The number of classes in the output of the\n # logit is the number of classes in specified Dataset.\n restore_logits = not FLAGS.fine_tune\n\n # Build inference Graph.\n logits = inception.inference(images, num_classes, for_training=True,\n restore_logits=restore_logits,\n scope=scope)\n\n # Build the portion of the Graph calculating the losses. Note that we will\n # assemble the total_loss using a custom function below.\n split_batch_size = images.get_shape().as_list()[0]\n inception.loss(logits, labels, batch_size=split_batch_size)\n\n # Assemble all of the losses for the current tower only.\n losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)\n\n # Calculate the total loss for the current tower.\n regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n total_loss = tf.add_n(losses + regularization_losses, name='total_loss')\n\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summmary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. This helps the clarity of presentation on TensorBoard.\n loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.scalar_summary(loss_name +' (raw)', l)\n tf.scalar_summary(loss_name, loss_averages.average(l))\n\n with tf.control_dependencies([loss_averages_op]):\n total_loss = tf.identity(total_loss)\n return total_loss\n\n\ndef _average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n\n Note that this function provides a synchronization point across all towers.\n\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(0, grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\n\ndef train(dataset):\n \"\"\"Train on dataset for a number of steps.\"\"\"\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n # Create a variable to count the number of train() calls. This equals the\n # number of batches processed * FLAGS.num_gpus.\n global_step = tf.get_variable(\n 'global_step', [],\n initializer=tf.constant_initializer(0), trainable=False)\n\n # Calculate the learning rate schedule.\n num_batches_per_epoch = (dataset.num_examples_per_epoch() /\n FLAGS.batch_size)\n decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,\n global_step,\n decay_steps,\n FLAGS.learning_rate_decay_factor,\n staircase=True)\n\n # Create an optimizer that performs gradient descent.\n opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,\n momentum=RMSPROP_MOMENTUM,\n epsilon=RMSPROP_EPSILON)\n\n # Get images and labels for ImageNet and split the batch across GPUs.\n assert FLAGS.batch_size % FLAGS.num_gpus == 0, (\n 'Batch size must be divisible by number of GPUs')\n split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)\n\n # Override the number of preprocessing threads to account for the increased\n # number of GPU towers.\n num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus\n images, labels = image_processing.distorted_inputs(\n dataset,\n num_preprocess_threads=num_preprocess_threads)\n\n input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))\n\n # Number of classes in the Dataset label set plus 1.\n # Label 0 is reserved for an (unused) background class.\n num_classes = dataset.num_classes() + 1\n \n # Split the batch of images and labels for towers.\n images_splits = tf.split(0, FLAGS.num_gpus, images)\n labels_splits = tf.split(0, FLAGS.num_gpus, labels)\n\n # Calculate the gradients for each model tower.\n tower_grads = []\n for i in xrange(FLAGS.num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:\n # Force all Variables to reside on the CPU.\n with slim.arg_scope([slim.variables.variable], device='/cpu:0'):\n # Calculate the loss for one tower of the ImageNet model. This\n # function constructs the entire ImageNet model but shares the\n # variables across all towers.\n loss = _tower_loss(images_splits[i], labels_splits[i], num_classes,\n scope)\n\n # Reuse variables for the next tower.\n tf.get_variable_scope().reuse_variables()\n\n # Retain the summaries from the final tower.\n summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)\n\n # Retain the Batch Normalization updates operations only from the\n # final tower. Ideally, we should grab the updates from all towers\n # but these stats accumulate extremely fast so we can ignore the\n # other stats from the other towers without significant detriment.\n batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,\n scope)\n\n # Calculate the gradients for the batch of data on this ImageNet\n # tower.\n grads = opt.compute_gradients(loss)\n\n # Keep track of the gradients across all towers.\n tower_grads.append(grads)\n\n # We must calculate the mean of each gradient. 
Note that this is the\n # synchronization point across all towers.\n grads = _average_gradients(tower_grads)\n\n # Add a summaries for the input processing and global_step.\n summaries.extend(input_summaries)\n\n # Add a summary to track the learning rate.\n summaries.append(tf.scalar_summary('learning_rate', lr))\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n summaries.append(\n tf.histogram_summary(var.op.name + '/gradients', grad))\n\n # Apply the gradients to adjust the shared variables.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n summaries.append(tf.histogram_summary(var.op.name, var))\n\n # Track the moving averages of all trainable variables.\n # Note that we maintain a \"double-average\" of the BatchNormalization\n # global statistics. This is more complicated then need be but we employ\n # this for backward-compatibility with our previous models.\n variable_averages = tf.train.ExponentialMovingAverage(\n inception.MOVING_AVERAGE_DECAY, global_step)\n\n # Another possiblility is to use tf.slim.get_variables().\n variables_to_average = (tf.trainable_variables() +\n tf.moving_average_variables())\n variables_averages_op = variable_averages.apply(variables_to_average)\n\n # Group all updates to into a single train op.\n batchnorm_updates_op = tf.group(*batchnorm_updates)\n train_op = tf.group(apply_gradient_op, variables_averages_op,\n batchnorm_updates_op)\n\n # Create a saver.\n saver = tf.train.Saver(tf.all_variables())\n\n # Build the summary operation from the last tower summaries.\n summary_op = tf.merge_summary(summaries)\n\n # Build an initialization operation to run below.\n init = tf.initialize_all_variables()\n\n # Start running operations on the Graph. allow_soft_placement must be set to\n # True to build towers on GPU, as some of the ops do not have GPU\n # implementations.\n sess = tf.Session(config=tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=FLAGS.log_device_placement))\n sess.run(init)\n\n if FLAGS.pretrained_model_checkpoint_path:\n assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)\n variables_to_restore = tf.get_collection(\n slim.variables.VARIABLES_TO_RESTORE)\n restorer = tf.train.Saver(variables_to_restore)\n restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)\n print('%s: Pre-trained model restored from %s' %\n (datetime.now(), FLAGS.pretrained_model_checkpoint_path))\n\n # Start the queue runners.\n tf.train.start_queue_runners(sess=sess)\n\n summary_writer = tf.train.SummaryWriter(\n FLAGS.train_dir,\n graph_def=sess.graph.as_graph_def(add_shapes=True))\n\n for step in xrange(FLAGS.max_steps):\n start_time = time.time()\n _, loss_value = sess.run([train_op, loss])\n duration = time.time() - start_time\n\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 10 == 0:\n examples_per_sec = FLAGS.batch_size / float(duration)\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n print(format_str % (datetime.now(), step, loss_value,\n examples_per_sec, duration))\n\n if step % 100 == 0:\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Save the model checkpoint periodically.\n if step % 500 == 0 or (step + 1) == FLAGS.max_steps:\n checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n"
] | [
[
"tensorflow.device",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.gfile.Exists",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.moving_average_variables",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.group",
"tensorflow.add_n",
"tensorflow.merge_summary",
"tensorflow.all_variables",
"tensorflow.Graph",
"tensorflow.get_collection",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.train.exponential_decay",
"tensorflow.ConfigProto",
"tensorflow.initialize_all_variables",
"tensorflow.name_scope",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"tensorflow.train.RMSPropOptimizer",
"numpy.isnan",
"tensorflow.identity",
"tensorflow.split",
"tensorflow.reduce_mean",
"tensorflow.train.start_queue_runners",
"tensorflow.scalar_summary",
"tensorflow.expand_dims",
"tensorflow.constant_initializer",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.histogram_summary",
"tensorflow.get_variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"0.12"
]
}
] |
rhoadesScholar/daisy | [
"78cdd2ed0d67647a6602fb53cc952214450f3753"
] | [
"examples/visualize.py"
] | [
"#!/usr/bin/env python\n\nfrom funlib.show.neuroglancer import add_layer\nimport argparse\nimport daisy\nimport glob\nimport neuroglancer\nimport os\nimport webbrowser\nimport numpy as np\nimport zarr\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--file',\n '-f',\n type=str,\n action='append',\n help=\"The path to the container to show\")\nparser.add_argument(\n '--datasets',\n '-d',\n type=str,\n nargs='+',\n action='append',\n help=\"The datasets in the container to show\")\nparser.add_argument(\n '--graphs',\n '-g',\n type=str,\n nargs='+',\n action='append',\n help=\"The graphs in the container to show\")\nparser.add_argument(\n '--no-browser',\n '-n',\n type=bool,\n nargs='?',\n default=False,\n const=True,\n help=\"If set, do not open a browser, just print a URL\")\n\nargs = parser.parse_args()\n\nneuroglancer.set_server_bind_address('0.0.0.0')\nviewer = neuroglancer.Viewer()\n\ndef to_slice(slice_str):\n\n values = [int(x) for x in slice_str.split(':')]\n if len(values) == 1:\n return values[0]\n\n return slice(*values)\n\ndef parse_ds_name(ds):\n\n tokens = ds.split('[')\n\n if len(tokens) == 1:\n return ds, None\n\n ds, slices = tokens\n slices = list(map(to_slice, slices.rstrip(']').split(',')))\n\n return ds, slices\n\nclass Project:\n\n def __init__(self, array, dim, value):\n self.array = array\n self.dim = dim\n self.value = value\n self.shape = array.shape[:self.dim] + array.shape[self.dim + 1:]\n self.dtype = array.dtype\n\n def __getitem__(self, key):\n slices = key[:self.dim] + (self.value,) + key[self.dim:]\n ret = self.array[slices]\n return ret\n\ndef slice_dataset(a, slices):\n\n dims = a.roi.dims\n\n for d, s in list(enumerate(slices))[::-1]:\n\n if isinstance(s, slice):\n raise NotImplementedError(\"Slicing not yet implemented!\")\n else:\n index = (s - a.roi.get_begin()[d])//a.voxel_size[d]\n a.data = Project(a.data, d, index)\n a.roi = daisy.Roi(\n a.roi.get_begin()[:d] + a.roi.get_begin()[d + 1:],\n a.roi.get_shape()[:d] + a.roi.get_shape()[d + 1:])\n a.voxel_size = a.voxel_size[:d] + a.voxel_size[d + 1:]\n\n return a\n\ndef open_dataset(f, ds):\n original_ds = ds\n ds, slices = parse_ds_name(ds)\n slices_str = original_ds[len(ds):]\n\n try:\n dataset_as = []\n if all(key.startswith(\"s\") for key in zarr.open(f)[ds].keys()):\n raise AttributeError(\"This group is a multiscale array!\")\n for key in zarr.open(f)[ds].keys():\n dataset_as.extend(open_dataset(f, f\"{ds}/{key}{slices_str}\"))\n return dataset_as\n except AttributeError as e:\n # dataset is an array, not a group\n pass\n\n print(\"ds :\", ds)\n print(\"slices:\", slices)\n try:\n zarr.open(f)[ds].keys()\n is_multiscale = True\n except:\n is_multiscale = False\n\n if not is_multiscale:\n a = daisy.open_ds(f, ds)\n\n if slices is not None:\n a = slice_dataset(a, slices)\n\n if a.roi.dims == 2:\n print(\"ROI is 2D, recruiting next channel to z dimension\")\n a.roi = daisy.Roi((0,) + a.roi.get_begin(), (a.shape[-3],) + a.roi.get_shape())\n a.voxel_size = daisy.Coordinate((1,) + a.voxel_size)\n\n if a.roi.dims == 4:\n print(\"ROI is 4D, stripping first dimension and treat as channels\")\n a.roi = daisy.Roi(a.roi.get_begin()[1:], a.roi.get_shape()[1:])\n a.voxel_size = daisy.Coordinate(a.voxel_size[1:])\n\n if a.data.dtype == np.int64 or a.data.dtype == np.int16:\n print(\"Converting dtype in memory...\")\n a.data = a.data[:].astype(np.uint64)\n\n return [(a, ds)]\n else:\n return [([daisy.open_ds(f, f\"{ds}/{key}\") for key in zarr.open(f)[ds].keys()], ds)]\n\nfor f, datasets in 
zip(args.file, args.datasets):\n\n arrays = []\n for ds in datasets:\n try:\n\n print(\"Adding %s, %s\" % (f, ds))\n dataset_as = open_dataset(f, ds)\n\n except Exception as e:\n\n print(type(e), e)\n print(\"Didn't work, checking if this is multi-res...\")\n\n scales = glob.glob(os.path.join(f, ds, 's*'))\n if len(scales) == 0:\n print(f\"Couldn't read {ds}, skipping...\")\n raise e\n print(\"Found scales %s\" % ([\n os.path.relpath(s, f)\n for s in scales\n ],))\n a = [\n open_dataset(f, os.path.relpath(scale_ds, f))\n for scale_ds in scales\n ]\n for a in dataset_as:\n arrays.append(a)\n\n with viewer.txn() as s:\n for array, dataset in arrays:\n add_layer(s, array, dataset)\n\nif args.graphs:\n for f, graphs in zip(args.file, args.graphs):\n\n for graph in graphs:\n\n graph_annotations = []\n try:\n ids = daisy.open_ds(f, graph + '-ids').data\n loc = daisy.open_ds(f, graph + '-locations').data\n except:\n loc = daisy.open_ds(f, graph).data\n ids = None\n dims = loc.shape[-1]\n loc = loc[:].reshape((-1, dims))\n if ids is None:\n ids = range(len(loc))\n for i, l in zip(ids, loc):\n if dims == 2:\n l = np.concatenate([[0], l])\n graph_annotations.append(\n neuroglancer.EllipsoidAnnotation(\n center=l[::-1],\n radii=(5, 5, 5),\n id=i))\n graph_layer = neuroglancer.AnnotationLayer(\n annotations=graph_annotations,\n voxel_size=(1, 1, 1))\n\n with viewer.txn() as s:\n s.layers.append(name='graph', layer=graph_layer)\n\nurl = str(viewer)\nprint(url)\nif os.environ.get(\"DISPLAY\") and not args.no_browser:\n webbrowser.open_new(url)\n\nprint(\"Press ENTER to quit\")\ninput()"
] | [
[
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aelnouby/summerschool2015 | [
"c96da4af353fc1b0c1a7e3a08863c6de89072b19"
] | [
"convnets/logistic_sgd.py"
] | [
"\"\"\"\nThis tutorial introduces logistic regression using Theano and stochastic\ngradient descent.\n\nLogistic regression is a probabilistic, linear classifier. It is parametrized\nby a weight matrix :math:`W` and a bias vector :math:`b`. Classification is\ndone by projecting data points onto a set of hyperplanes, the distance to\nwhich is used to determine a class membership probability.\n\nMathematically, this can be written as:\n\n.. math::\n P(Y=i|x, W,b) &= softmax_i(W x + b) \\\\\n &= \\frac {e^{W_i x + b_i}} {\\sum_j e^{W_j x + b_j}}\n\n\nThe output of the model or prediction is then done by taking the argmax of\nthe vector whose i'th element is P(Y=i|x).\n\n.. math::\n\n y_{pred} = argmax_i P(Y=i|x,W,b)\n\n\nThis tutorial presents a stochastic gradient descent optimization method\nsuitable for large datasets.\n\n\nReferences:\n\n - textbooks: \"Pattern Recognition and Machine Learning\" -\n Christopher M. Bishop, section 4.3.2\n\n\"\"\"\nfrom __future__ import print_function\nimport gzip\nimport os\nimport sys\nimport time\n\nimport numpy\nimport six\nfrom six.moves import cPickle, xrange\n\nimport theano\nimport theano.tensor as T\n\n__docformat__ = 'restructedtext en'\n\n\nclass LogisticRegression(object):\n \"\"\"Multi-class Logistic Regression Class\n\n The logistic regression is fully described by a weight matrix :math:`W`\n and bias vector :math:`b`. Classification is done by projecting data\n points onto a set of hyperplanes, the distance to which is used to\n determine a class membership probability.\n \"\"\"\n\n def __init__(self, input, n_in, n_out):\n \"\"\" Initialize the parameters of the logistic regression\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in\n which the labels lie\n\n \"\"\"\n # start-snippet-1\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\n self.W = theano.shared(\n value=numpy.zeros(\n (n_in, n_out),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n # initialize the baises b as a vector of n_out 0s\n self.b = theano.shared(\n value=numpy.zeros(\n (n_out,),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # symbolic expression for computing the matrix of class-membership\n # probabilities\n # Where:\n # W is a matrix where column-k represent the separation hyper plain for\n # class-k\n # x is a matrix where row-j represents input training sample-j\n # b is a vector where element-k represent the free parameter of hyper\n # plain-k\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\n\n # symbolic description of how to compute prediction as class whose\n # probability is maximal\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\n # end-snippet-1\n\n # parameters of the model\n self.params = [self.W, self.b]\n\n def negative_log_likelihood(self, y):\n \"\"\"Return the mean of the negative log-likelihood of the prediction\n of this model under a given target distribution.\n\n .. 
math::\n\n \\frac{1}{|\\mathcal{D}|} \\mathcal{L} (\\theta=\\{W,b\\}, \\mathcal{D}) =\n \\frac{1}{|\\mathcal{D}|} \\sum_{i=0}^{|\\mathcal{D}|}\n \\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\\\\n \\ell (\\theta=\\{W,b\\}, \\mathcal{D})\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n\n Note: we use the mean instead of the sum so that\n the learning rate is less dependent on the batch size\n \"\"\"\n # start-snippet-2\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\n # end-snippet-2\n\n def errors(self, y):\n \"\"\"Return a float representing the number of errors in the minibatch\n over the total number of examples of the minibatch ; zero one\n loss over the size of the minibatch\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n \"\"\"\n\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.neq operator returns a vector of 0s and 1s, where 1\n # represents a mistake in prediction\n return T.mean(T.neq(self.y_pred, y))\n else:\n raise NotImplementedError()\n\n\ndef load_data(dataset):\n ''' Loads the dataset\n\n :type dataset: string\n :param dataset: the path to the dataset (here MNIST)\n '''\n\n #############\n # LOAD DATA #\n #############\n\n # Download the MNIST dataset if it is not present\n data_dir, data_file = os.path.split(dataset)\n if data_dir == \"\" and not os.path.isfile(dataset):\n # Check if dataset is in the data directory.\n if \"__file__\" in globals():\n new_path = os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"data\",\n )\n if os.path.isdir(new_path):\n new_path = os.path.join(new_path, dataset)\n if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':\n dataset = new_path\n\n if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':\n from six.moves.urllib.request import urlretrieve\n origin = (\n 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'\n )\n print('Downloading data from %s' % origin)\n urlretrieve(origin, dataset)\n\n print('... loading data')\n\n # Load the dataset\n f = gzip.open(dataset, 'rb')\n if six.PY3:\n train_set, valid_set, test_set = cPickle.load(f, encoding='latin1')\n else:\n train_set, valid_set, test_set = cPickle.load(f)\n f.close()\n # train_set, valid_set, test_set format: tuple(input, target)\n # input is an numpy.ndarray of 2 dimensions (a matrix)\n # witch row's correspond to an example. target is a\n # numpy.ndarray of 1 dimensions (vector)) that have the same length as\n # the number of rows in the input. 
It should give the target\n # to the example with the same index in the input.\n\n def shared_dataset(data_xy, borrow=True):\n \"\"\" Function that loads the dataset into shared variables\n\n The reason we store our dataset in shared variables is to allow\n Theano to copy it into the GPU memory (when code is run on GPU).\n Since copying data into the GPU is slow, copying a minibatch every time\n one is needed (the default behaviour if the data is not in a shared\n variable) would lead to a large decrease in performance.\n \"\"\"\n data_x, data_y = data_xy\n shared_x = theano.shared(numpy.asarray(data_x,\n dtype=theano.config.floatX),\n borrow=borrow)\n shared_y = theano.shared(numpy.asarray(data_y,\n dtype=theano.config.floatX),\n borrow=borrow)\n # When storing data on the GPU it has to be stored as floats\n # therefore we will store the labels as ``floatX`` as well\n # (``shared_y`` does exactly that). But during our computations\n # we need them as ints (we use labels as index, and if they are\n # floats it doesn't make sense) therefore instead of returning\n # ``shared_y`` we will have to cast it to int. This little hack\n # lets us get around this issue\n return shared_x, T.cast(shared_y, 'int32')\n\n test_set_x, test_set_y = shared_dataset(test_set)\n valid_set_x, valid_set_y = shared_dataset(valid_set)\n train_set_x, train_set_y = shared_dataset(train_set)\n\n rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),\n (test_set_x, test_set_y)]\n return rval\n\n\ndef sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,\n dataset='mnist.pkl.gz',\n batch_size=600):\n \"\"\"\n Demonstrate stochastic gradient descent optimization of a log-linear\n model\n\n This is demonstrated on MNIST.\n\n :type learning_rate: float\n :param learning_rate: learning rate used (factor for the stochastic\n gradient)\n\n :type n_epochs: int\n :param n_epochs: maximal number of epochs to run the optimizer\n\n :type dataset: string\n :param dataset: the path of the MNIST dataset file from\n http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz\n\n \"\"\"\n datasets = load_data(dataset)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print('... 
building the model')\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n\n # generate symbolic variables for input (x and y represent a\n # minibatch)\n x = T.matrix('x') # data, presented as rasterized images\n y = T.ivector('y') # labels, presented as 1D vector of [int] labels\n\n # construct the logistic regression class\n # Each MNIST image has size 28*28\n classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)\n\n # the cost we minimize during training is the negative log likelihood of\n # the model in symbolic format\n cost = classifier.negative_log_likelihood(y)\n\n # compiling a Theano function that computes the mistakes that are made by\n # the model on a minibatch\n test_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n validate_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n # compute the gradient of cost with respect to theta = (W,b)\n g_W = T.grad(cost=cost, wrt=classifier.W)\n g_b = T.grad(cost=cost, wrt=classifier.b)\n\n # start-snippet-3\n # specify how to update the parameters of the model as a list of\n # (variable, update expression) pairs.\n updates = [(classifier.W, classifier.W - learning_rate * g_W),\n (classifier.b, classifier.b - learning_rate * g_b)]\n\n # compiling a Theano function `train_model` that returns the cost, but at\n # the same time updates the parameters of the model based on the rules\n # defined in `updates`\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n # end-snippet-3\n\n ###############\n # TRAIN MODEL #\n ###############\n print('... 
training the model')\n # early-stopping parameters\n patience = 5000 # look at this many examples regardless\n patience_increase = 2 # wait this much longer when a new best is found\n\n # a relative improvement of this much is considered significant\n improvement_threshold = 0.995\n\n # go through this many minibatches before checking the network on\n # the validation set; in this case we check every epoch\n validation_frequency = min(n_train_batches, patience / 2)\n\n best_validation_loss = numpy.inf\n test_score = 0.\n start_time = time.clock()\n\n done_looping = False\n epoch = 0\n while (epoch < n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in xrange(n_train_batches):\n\n train_model(minibatch_index)\n # iteration number\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute zero-one loss on validation set\n validation_losses = [validate_model(i)\n for i in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n\n print(\n 'epoch %i, minibatch %i/%i, validation error %f %%' %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n # if we got the best validation score until now\n if this_validation_loss < best_validation_loss:\n # improve patience if loss improvement is good enough\n if this_validation_loss < best_validation_loss * \\\n improvement_threshold:\n patience = max(patience, iter * patience_increase)\n\n best_validation_loss = this_validation_loss\n # test it on the test set\n\n test_losses = [test_model(i)\n for i in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print(\n (\n ' epoch %i, minibatch %i/%i, test error of'\n ' best model %f %%'\n ) %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n test_score * 100.\n )\n )\n\n if patience <= iter:\n done_looping = True\n break\n\n end_time = time.clock()\n print(\n (\n 'Optimization complete with best validation score of %f %%,'\n ' with test performance %f %%'\n )\n % (best_validation_loss * 100., test_score * 100.)\n )\n print('The code ran for %d epochs, with %f epochs/sec' % (\n epoch, 1. * epoch / (end_time - start_time)))\n\n print('The code ran for %.1fs' % ((end_time - start_time)),\n file=sys.stderr)\n\n # Call Python GC to make sure the GPU memory is freed. That way,\n # we are sure next call will have enough memory.\n import gc\n for i in range(4):\n gc.collect()\n"
] | [
[
"numpy.asarray",
"numpy.zeros",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fxdupe/graphmatchingtools | [
"4503a04c4a0822315535e6ab3cd698417859908d",
"4503a04c4a0822315535e6ab3cd698417859908d"
] | [
"graph_matching_tools/algorithms/multiway/hippi.py",
"tests/algorithms/multiway/test_kmeans.py"
] | [
"\"\"\"\nHiPPI algorithm as described in ICCV 2019 paper\n\n.. moduleauthor:: François-Xavier Dupé\n\"\"\"\nimport numpy as np\n\nimport graph_matching_tools.algorithms.multiway.utils as utils\n\n\ndef hippi_multiway_matching(s, sizes, knode, u_dim, iterations=100, tolerance=1e-6, init=None):\n \"\"\"\n HiPPI method for multi-graph matching based on a power method\n :param np.ndarray s: the bulk matrix with the adjacency matrices on the diagonal\n :param list sizes: the number of nodes of the different graphs (in order)\n :param np.ndarray knode: the node affinity matrix\n :param int u_dim: the dimension of the universe of nodes\n :param int iterations: the maximal number of iterations\n :param float tolerance: the tolerance for convergence\n :param np.ndarray init: the initialization, random if None\n :return: the universe of node projection for all the nodes\n \"\"\"\n if init is None:\n u = np.ones((s.shape[0], u_dim)) / u_dim + 1e-3 * np.random.randn(s.shape[0], u_dim)\n else:\n u = init\n\n w = knode.T @ s @ knode\n vi = w @ u @ u.T @ w @ u\n fu = np.trace(u.T @ vi)\n\n for i in range(iterations):\n u = utils.u_projector(vi, sizes)\n vi = w @ u @ u.T @ w @ u\n n_fu = np.trace(u.T @ vi)\n if np.abs(n_fu - fu) < tolerance:\n break\n fu = n_fu\n\n return u\n",
"from unittest import TestCase\n\nimport numpy as np\nimport networkx as nx\n\nimport graph_matching_tools.algorithms.multiway.kmeans as kmeans\nimport graph_matching_tools.algorithms.kernels.gaussian as kern\nimport graph_matching_tools.algorithms.kernels.utils as utils\n\n\nclass TestKMeans(TestCase):\n\n def test_get_permutation_with_kmeans(self):\n node_kernel = kern.create_gaussian_node_kernel(0.1, \"weight\")\n\n graph1 = nx.Graph()\n graph1.add_node(0, weight=1.0)\n graph1.add_node(1, weight=2.0)\n graph1.add_edge(0, 1, weight=1.0)\n\n graph2 = nx.Graph()\n graph2.add_node(0, weight=2.0)\n graph2.add_node(1, weight=1.0)\n graph2.add_edge(0, 1, weight=1.0)\n\n graphs = [graph1, graph2]\n sizes = [2, 2]\n\n knode = utils.create_full_node_affinity_matrix(graphs, node_kernel)\n res = kmeans.get_permutation_with_kmeans(2, knode)\n\n truth = np.array([[1., 0., 0., 1.],\n [0., 1., 1., 0.],\n [0., 1., 1., 0.],\n [1., 0., 0., 1.]])\n self.assertEqual(np.linalg.norm(res - truth) < 1e-3, True)\n"
] | [
[
"numpy.abs",
"numpy.random.randn",
"numpy.trace",
"numpy.ones"
],
[
"numpy.array",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
summerRainn/DeepLearningNotes | [
"6657694d5e22e73969e47699b4e31a28385d0f19",
"6657694d5e22e73969e47699b4e31a28385d0f19",
"6657694d5e22e73969e47699b4e31a28385d0f19",
"6657694d5e22e73969e47699b4e31a28385d0f19",
"6657694d5e22e73969e47699b4e31a28385d0f19",
"6657694d5e22e73969e47699b4e31a28385d0f19",
"6657694d5e22e73969e47699b4e31a28385d0f19"
] | [
"Note-6 A3CNet/Note-6.2.1 代码阅读顺序/sonnet/python/modules/base_test.py",
"Note-6 A3CNet/Note-6.4 HS300指数增强/sonnet/python/ops/nest_test.py",
"Note-6 A3CNet/Note-6.4 HS300指数增强/agent/access.py",
"Torch-1 DDPG/Torch-3 DDPG GPU 重构和可视化/agent/noise.py",
"Note-6 A3CNet/Note 6 simple ACNet/Agent_adjust.py",
"Torch-2 DQN/Torch-5 DQN/agent/agent.py",
"Note-6 A3CNet/Note-6.4 HS300指数增强/sonnet/python/modules/rnn_core_test.py"
] | [
"# Copyright 2017 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Tests for sonnet.python.modules.base.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport pickle\nimport numpy as np\nimport six\nfrom sonnet.python.modules import base\nimport tensorflow as tf\n\nlogging = tf.logging\n\n\nclass ModuleWithClassKeys(base.AbstractModule):\n \"\"\"Dummy module that defines some keys as class attributes.\"\"\"\n POSSIBLE_INITIALIZER_KEYS = {\"foo\", \"bar\"}\n\n\nclass ModuleWithNoInitializerKeys(base.AbstractModule):\n \"\"\"Dummy module without any intiailizer keys.\"\"\"\n pass\n\n\nclass ModuleWithCustomInitializerKeys(base.AbstractModule):\n \"\"\"Dummy module that overrides get_possible_initializer_keys.\"\"\"\n\n @classmethod\n def get_possible_initializer_keys(cls, custom_key):\n return {\"foo\"} if custom_key else {\"bar\"}\n\n\nclass IdentityModule(base.AbstractModule):\n \"\"\"Sonnet module that builds a single `tf.identity` op.\"\"\"\n\n def _build(self, inputs):\n return tf.identity(inputs)\n\n\nclass NoInitIdentityModule(base.AbstractModule):\n \"\"\"Sonnet module that inherits `base.AbstractModule.__init__`.\"\"\"\n\n def _build(self, inputs):\n return tf.identity(inputs)\n\n\nclass NoSuperInitIdentityModule(base.AbstractModule):\n \"\"\"Sonnet module that doesn't call `base.AbstractModule.__init__`.\"\"\"\n\n def __init__(self):\n pass # Don't call superclass initializer.\n\n def _build(self, inputs):\n return tf.identity(inputs)\n\n\nclass SimpleModule(base.AbstractModule):\n \"\"\"Simple module with variables created in constructor and build.\"\"\"\n\n def __init__(self, custom_getter=None, name=\"simple_module\"):\n\n super(SimpleModule, self).__init__(custom_getter=custom_getter,\n name=name)\n\n with self._enter_variable_scope():\n self._b = tf.get_variable(\"b\", dtype=tf.float32, shape=[10, 10])\n\n def _build(self, inputs):\n self._w = tf.get_variable(\"w\", dtype=tf.float32, shape=[10, 10])\n\n return self._w * inputs + self._b\n\n\nclass ComplexModule(base.AbstractModule):\n \"\"\"Complex module consisting of two sub modules.\"\"\"\n\n def __init__(self, custom_getter=None, name=\"complex_module\"):\n\n super(ComplexModule, self).__init__(custom_getter=custom_getter,\n name=name)\n\n with self._enter_variable_scope():\n self._a = SimpleModule(name=\"linear_1\")\n\n def _build(self, inputs):\n self._b = SimpleModule(name=\"linear_2\")\n\n return self._b(self._a(inputs)) # pylint: disable=not-callable\n\n\nclass AbstractModuleTest(tf.test.TestCase):\n\n def testInitializerKeys(self):\n keys = ModuleWithClassKeys.get_possible_initializer_keys()\n self.assertEqual(keys, {\"foo\", \"bar\"})\n keys = ModuleWithNoInitializerKeys.get_possible_initializer_keys()\n self.assertEqual(keys, set())\n msg = (\"missing 1 required positional argument\" if six.PY3\n 
else \"takes exactly 2 arguments\")\n self.assertRaisesRegexp(\n TypeError, msg,\n ModuleWithCustomInitializerKeys.get_possible_initializer_keys)\n keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(True)\n self.assertEqual(keys, {\"foo\"})\n keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(False)\n self.assertEqual(keys, {\"bar\"})\n\n def testMultipleGraphs(self):\n id_mod = IdentityModule(name=\"identity\")\n # gpylint incorrectly thinks IdentityModule is not callable, so disable.\n # pylint: disable=not-callable\n with tf.Graph().as_default() as graph:\n id_mod(tf.placeholder(dtype=tf.float32, shape=[42]))\n self.assertEqual(id_mod._graph, graph)\n\n with tf.Graph().as_default():\n with self.assertRaisesRegexp(base.DifferentGraphError,\n \"Cannot connect module\"):\n id_mod(tf.placeholder(dtype=tf.float32, shape=[42]))\n # pylint: enable=not-callable\n\n def testNameScopeRecording(self):\n id_mod = IdentityModule(name=\"foo\")\n\n # Connect inside different name scope contexts, check that each is recorded.\n # pylint: disable=not-callable\n id_mod(tf.placeholder(dtype=tf.float32, shape=[22]))\n self.assertIn(id_mod.name_scopes, ((\"foo\",), (\"foo_1\",)))\n with tf.name_scope(\"blah\"):\n id_mod(tf.placeholder(dtype=tf.float32, shape=[23]))\n self.assertIn(id_mod.name_scopes,\n ((\"foo\", \"blah/foo\"), (\"foo_1\", \"blah/foo\")))\n with tf.name_scope(\"baz\"):\n id_mod(tf.placeholder(dtype=tf.float32, shape=[24]))\n # pylint: enable=not-callable\n self.assertIn(id_mod.name_scopes,\n ((\"foo\", \"blah/foo\", \"baz/foo\"),\n (\"foo_1\", \"blah/foo\", \"baz/foo\")))\n\n def testSubgraphsRecording(self):\n id_mod = IdentityModule(name=\"foo\")\n\n with self.assertRaisesRegexp(base.NotConnectedError,\n \"not instantiated yet\"):\n id_mod.last_connected_subgraph()\n\n # pylint: disable=not-callable\n inputs = tf.placeholder(dtype=tf.float32, shape=[21])\n outputs = id_mod(inputs)\n with tf.name_scope(\"blah\"):\n blah_inputs = tf.placeholder(dtype=tf.float32, shape=[22])\n blah_outputs = id_mod(blah_inputs)\n with tf.name_scope(\"baz\"):\n baz_inputs = tf.placeholder(dtype=tf.float32, shape=[23])\n baz_outputs = id_mod(baz_inputs)\n # pylint: enable=not-callable\n subgraphs = id_mod.connected_subgraphs\n self.assertEqual(id_mod.last_connected_subgraph.name_scope, \"baz/foo\")\n self.assertIs(id_mod.last_connected_subgraph, subgraphs[2])\n self.assertIs(subgraphs[0].builder, id_mod)\n self.assertIn(subgraphs[0].name_scope, (\"foo\", \"foo_1\"))\n self.assertEqual(subgraphs[1].name_scope, \"blah/foo\")\n self.assertEqual(subgraphs[2].name_scope, \"baz/foo\")\n self.assertIs(subgraphs[0].inputs.args[0], inputs)\n self.assertIs(subgraphs[1].inputs.args[0], blah_inputs)\n self.assertIs(subgraphs[2].inputs.args[0], baz_inputs)\n self.assertIs(subgraphs[0].outputs, outputs)\n self.assertIs(subgraphs[1].outputs, blah_outputs)\n self.assertIs(subgraphs[2].outputs, baz_outputs)\n\n def testInitNoNamedArgs(self):\n \"\"\"Tests if calling __init__ without named args raises a ValueError.\"\"\"\n with self.assertRaises(ValueError):\n NoInitIdentityModule(\"foobar\")\n\n def testInitInvalidTypeArgs(self):\n \"\"\"Tests if calling __init__ without a string name raises a TypeError.\"\"\"\n with self.assertRaises(TypeError):\n NoInitIdentityModule(name=123)\n\n def testInitNoArgs(self):\n \"\"\"Tests if calling __init__ with no args uses correct defaults.\"\"\"\n module = NoInitIdentityModule()\n self.assertEqual(module.module_name, \"no_init_identity_module\")\n\n def 
testInitNoSuper(self):\n \"\"\"Tests if a __call__ with no __init__ raises an error.\"\"\"\n module = NoSuperInitIdentityModule()\n with self.assertRaises(base.NotInitializedError):\n module(tf.constant([1])) # pylint: disable=not-callable\n\n def testPicklingNotSupported(self):\n module = IdentityModule()\n with self.assertRaisesRegexp(base.NotSupportedError,\n \"cannot be serialized\"):\n # Writing the object to a string will fail.\n pickle.dumps(module)\n\n def testCustomGetter(self):\n\n connection_count = {\"x\": 0}\n def custom_getter(getter, name, *args, **kwargs):\n connection_count[\"x\"] += 1\n return getter(name, *args, **kwargs)\n\n inputs = tf.placeholder(tf.float32, [10, 10])\n\n with tf.variable_scope(\"scope\"):\n module = SimpleModule(name=\"mod1\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(0, connection_count[\"x\"])\n\n module = SimpleModule(custom_getter=custom_getter, name=\"mod2\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(2, connection_count[\"x\"]) # w & b\n\n module = SimpleModule(custom_getter={\"w\": custom_getter}, name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(3, connection_count[\"x\"]) # w\n\n module = SimpleModule(custom_getter={\"w.*\": custom_getter}, name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(4, connection_count[\"x\"]) # w\n\n module = SimpleModule(custom_getter={\".*\": custom_getter}, name=\"mod4\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(6, connection_count[\"x\"]) # w & b\n\n err = r\"More than one custom_getter matched scope/mod5/w \\(w\\):.*\"\n with self.assertRaisesRegexp(KeyError, err):\n module = SimpleModule(\n custom_getter={\".*\": custom_getter, \"w.*\": custom_getter},\n name=\"mod5\")\n module(inputs) # pylint: disable=not-callable\n\n err = \"Given custom_getter is not callable.\"\n with self.assertRaisesRegexp(TypeError, err):\n module = SimpleModule(custom_getter=0, name=\"mod6\")\n with self.assertRaisesRegexp(TypeError, err):\n module = SimpleModule(custom_getter={\"w\": 0}, name=\"mod7\")\n\n def testCustomGetterNested(self):\n\n def custom_getter(getter, name, *args, **kwargs):\n kwargs[\"trainable\"] = False\n return getter(name, *args, **kwargs)\n\n inputs = tf.placeholder(tf.float32, [10, 10])\n\n with tf.variable_scope(\"scope\"):\n module = ComplexModule(name=\"mod1\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(4, len(tf.trainable_variables()))\n\n module = ComplexModule(custom_getter=custom_getter, name=\"mod2\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(4, len(tf.trainable_variables())) # All variables.\n\n module = ComplexModule(custom_getter={\".*/w\": custom_getter},\n name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n trainable_names = [v.op.name for v in tf.trainable_variables()]\n self.assertEqual(6, len(trainable_names)) # linear_1/w and linear_2/w.\n self.assertIn(\"scope/mod3/linear_1/b\", trainable_names)\n self.assertIn(\"scope/mod3/linear_2/b\", trainable_names)\n\n module = ComplexModule(custom_getter={\".*/b\": custom_getter}, name=\"mod4\")\n module(inputs) # pylint: disable=not-callable\n trainable_names = [v.op.name for v in tf.trainable_variables()]\n self.assertEqual(8, len(trainable_names)) # linear_1/b and linear_2/b.\n self.assertIn(\"scope/mod4/linear_1/w\", trainable_names)\n self.assertIn(\"scope/mod4/linear_2/w\", trainable_names)\n\n module = ComplexModule(custom_getter={\".*\": 
custom_getter}, name=\"mod5\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(8, len(tf.trainable_variables())) # All variables.\n\n module = ComplexModule(custom_getter={\"w\": custom_getter}, name=\"mod6\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(12, len(tf.trainable_variables())) # No variables.\n\n\ndef _make_model_with_params(inputs, output_size):\n weight_shape = [inputs.get_shape().as_list()[-1], output_size]\n weight = tf.get_variable(\"w\", shape=weight_shape, dtype=inputs.dtype)\n return tf.matmul(inputs, weight)\n\n\nclass ModuleTest(tf.test.TestCase):\n\n def testFunctionType(self):\n with self.assertRaises(TypeError) as cm:\n base.Module(build=\"not_a_function\")\n\n self.assertEqual(str(cm.exception), \"Input 'build' must be callable.\")\n\n def testSharing(self):\n batch_size = 3\n in_size = 4\n inputs1 = tf.placeholder(tf.float32, shape=[batch_size, in_size])\n inputs2 = tf.placeholder(tf.float32, shape=[batch_size, in_size])\n\n build = functools.partial(_make_model_with_params, output_size=10)\n model = base.Module(build)\n self.assertEqual(model.scope_name, \"make_model_with_params\")\n outputs1 = model(inputs1)\n outputs2 = model(inputs2)\n input_data = np.random.rand(batch_size, in_size)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n outputs1, outputs2 = sess.run(\n [outputs1, outputs2],\n feed_dict={inputs1: input_data,\n inputs2: input_data})\n self.assertAllClose(outputs1, outputs2)\n\n def testCustomGetter(self):\n def simple_module_build(inputs):\n w = tf.get_variable(\"w\", dtype=tf.float32, shape=[10, 10])\n b = tf.get_variable(\"b\", dtype=tf.float32, shape=[10, 10])\n return w * inputs + b\n\n connection_count = {\"x\": 0}\n\n def custom_getter(getter, name, *args, **kwargs):\n connection_count[\"x\"] += 1\n return getter(name, *args, **kwargs)\n\n create_module = functools.partial(base.Module, build=simple_module_build)\n\n inputs = tf.placeholder(tf.float32, [10, 10])\n\n with tf.variable_scope(\"scope\"):\n module = create_module(name=\"mod1\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(0, connection_count[\"x\"])\n\n module = create_module(custom_getter=custom_getter, name=\"mod2\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(2, connection_count[\"x\"]) # w & b\n\n module = create_module(custom_getter={\"w\": custom_getter}, name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(3, connection_count[\"x\"]) # w\n\n module = create_module(custom_getter={\"w.*\": custom_getter}, name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(4, connection_count[\"x\"]) # w\n\n module = create_module(custom_getter={\".*\": custom_getter}, name=\"mod4\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(6, connection_count[\"x\"]) # w & b\n\n err = r\"More than one custom_getter matched scope/mod5/w \\(w\\):.*\"\n with self.assertRaisesRegexp(KeyError, err):\n module = create_module(\n custom_getter={\".*\": custom_getter, \"w.*\": custom_getter},\n name=\"mod5\")\n module(inputs) # pylint: disable=not-callable\n\n err = \"Given custom_getter is not callable.\"\n with self.assertRaisesRegexp(TypeError, err):\n module = create_module(custom_getter=0, name=\"mod6\")\n with self.assertRaisesRegexp(TypeError, err):\n module = create_module(custom_getter={\"w\": 0}, name=\"mod7\")\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2017 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Tests for sonnet.python.ops.nest.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\n# Dependency imports\n\nimport numpy as np\nimport six\nfrom sonnet.python.ops import nest\nimport tensorflow as tf\n\ntypekw = \"class\" if six.PY3 else \"type\"\n\n\nclass NestTest(tf.test.TestCase):\n\n def testAssertShallowStructure(self):\n inp_ab = [\"a\", \"b\"]\n inp_abc = [\"a\", \"b\", \"c\"]\n with self.assertRaises(ValueError) as cm:\n nest.assert_shallow_structure(inp_abc, inp_ab)\n self.assertEqual(str(cm.exception),\n \"The two structures don't have the same sequence length. \"\n \"Input structure has length 2, while shallow structure \"\n \"has length 3.\")\n\n inp_ab1 = [(1, 1), (2, 2)]\n inp_ab2 = [[1, 1], [2, 2]]\n with self.assertRaises(TypeError) as cm:\n nest.assert_shallow_structure(inp_ab2, inp_ab1)\n self.assertEqual(str(cm.exception),\n \"The two structures don't have the same sequence type. \"\n \"Input structure has type <{0} 'tuple'>, while shallow \"\n \"structure has type <{0} 'list'>.\".format(typekw))\n\n def testFlattenUpTo(self):\n # Normal application (Example 1).\n input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]\n shallow_tree = [[True, True], [False, True]]\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])\n self.assertEqual(flattened_shallow_tree, [True, True, False, True])\n\n # Normal application (Example 2).\n input_tree = [[(\"a\", 1), [(\"b\", 2), [(\"c\", 3), [(\"d\", 4)]]]]]\n shallow_tree = [[\"level_1\", [\"level_2\", [\"level_3\", [\"level_4\"]]]]]\n input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,\n input_tree)\n input_tree_flattened = nest.flatten(input_tree)\n self.assertEqual(input_tree_flattened_as_shallow_tree,\n [(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 4)])\n self.assertEqual(input_tree_flattened, [\"a\", 1, \"b\", 2, \"c\", 3, \"d\", 4])\n\n ## Shallow non-list edge-case.\n # Using iterable elements.\n input_tree = [\"input_tree\"]\n shallow_tree = \"shallow_tree\"\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n input_tree = [\"input_tree_0\", \"input_tree_1\"]\n shallow_tree = \"shallow_tree\"\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n # Using 
non-iterable elements.\n input_tree = [0]\n shallow_tree = 9\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n input_tree = [0, 1]\n shallow_tree = 9\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n ## Both non-list edge-case.\n # Using iterable elements.\n input_tree = \"input_tree\"\n shallow_tree = \"shallow_tree\"\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n # Using non-iterable elements.\n input_tree = 0\n shallow_tree = 0\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n ## Input non-list edge-case.\n # Using iterable elements.\n input_tree = \"input_tree\"\n shallow_tree = [\"shallow_tree\"]\n with self.assertRaises(TypeError) as cm:\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(str(cm.exception),\n \"If shallow structure is a sequence, input must also be \"\n \"a sequence. Input has type: <{} 'str'>.\".format(typekw))\n self.assertEqual(flattened_shallow_tree, shallow_tree)\n\n input_tree = \"input_tree\"\n shallow_tree = [\"shallow_tree_9\", \"shallow_tree_8\"]\n with self.assertRaises(TypeError) as cm:\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(str(cm.exception),\n \"If shallow structure is a sequence, input must also be \"\n \"a sequence. Input has type: <{} 'str'>.\".format(typekw))\n self.assertEqual(flattened_shallow_tree, shallow_tree)\n\n # Using non-iterable elements.\n input_tree = 0\n shallow_tree = [9]\n with self.assertRaises(TypeError) as cm:\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(str(cm.exception),\n \"If shallow structure is a sequence, input must also be \"\n \"a sequence. Input has type: <{} 'int'>.\".format(typekw))\n self.assertEqual(flattened_shallow_tree, shallow_tree)\n\n input_tree = 0\n shallow_tree = [9, 8]\n with self.assertRaises(TypeError) as cm:\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(str(cm.exception),\n \"If shallow structure is a sequence, input must also be \"\n \"a sequence. 
Input has type: <{} 'int'>.\".format(typekw))\n self.assertEqual(flattened_shallow_tree, shallow_tree)\n\n def testMapUpTo(self):\n # Example 1.\n ab_tuple = collections.namedtuple(\"ab_tuple\", \"a, b\")\n op_tuple = collections.namedtuple(\"op_tuple\", \"add, mul\")\n inp_val = ab_tuple(a=2, b=3)\n inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))\n out = nest.map_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,\n inp_val, inp_ops)\n self.assertEqual(out.a, 6)\n self.assertEqual(out.b, 15)\n\n # Example 2.\n data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]\n name_list = [\"evens\", [\"odds\", \"primes\"]]\n out = nest.map_up_to(name_list,\n lambda name, sec: \"first_{}_{}\".format(len(sec), name),\n name_list, data_list)\n self.assertEqual(out, [\"first_4_evens\", [\"first_5_odds\", \"first_3_primes\"]])\n\n def testStringRepeat(self):\n ab_tuple = collections.namedtuple(\"ab_tuple\", \"a, b\")\n inp_a = ab_tuple(a=\"foo\", b=(\"bar\", \"baz\"))\n inp_b = ab_tuple(a=2, b=(1, 3))\n out = nest.map(lambda string, repeats: string * repeats, inp_a, inp_b)\n self.assertEqual(out.a, \"foofoo\")\n self.assertEqual(out.b[0], \"bar\")\n self.assertEqual(out.b[1], \"bazbazbaz\")\n\n def testMapSingleCollection(self):\n ab_tuple = collections.namedtuple(\"ab_tuple\", \"a, b\")\n nt = ab_tuple(a=(\"something\", \"something_else\"),\n b=\"yet another thing\")\n rev_nt = nest.map(lambda x: x[::-1], nt)\n\n # Check the output is the correct structure, and all strings are reversed.\n nest.assert_same_structure(nt, rev_nt)\n self.assertEqual(nt.a[0][::-1], rev_nt.a[0])\n self.assertEqual(nt.a[1][::-1], rev_nt.a[1])\n self.assertEqual(nt.b[::-1], rev_nt.b)\n\n def testMapOverTwoTuples(self):\n inp_a = (tf.placeholder(tf.float32, shape=[3, 4]),\n tf.placeholder(tf.float32, shape=[3, 7]))\n inp_b = (tf.placeholder(tf.float32, shape=[3, 4]),\n tf.placeholder(tf.float32, shape=[3, 7]))\n\n output = nest.map(lambda x1, x2: x1 + x2, inp_a, inp_b)\n\n nest.assert_same_structure(output, inp_a)\n self.assertShapeEqual(np.zeros((3, 4)), output[0])\n self.assertShapeEqual(np.zeros((3, 7)), output[1])\n\n feed_dict = {\n inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),\n inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))\n }\n\n with self.test_session() as sess:\n output_np = sess.run(output, feed_dict=feed_dict)\n self.assertAllClose(output_np[0],\n feed_dict[inp_a][0] + feed_dict[inp_b][0])\n self.assertAllClose(output_np[1],\n feed_dict[inp_a][1] + feed_dict[inp_b][1])\n\n def testStructureMustBeSame(self):\n inp_a = (3, 4)\n inp_b = (42, 42, 44)\n err = \"The two structures don't have the same number of elements.\"\n with self.assertRaisesRegexp(ValueError, err):\n nest.map(lambda a, b: a + b, inp_a, inp_b)\n\n def testMultiNest(self):\n inp_a = (3, (4, 5))\n inp_b = (42, (42, 44))\n output = nest.map(lambda a, b: a + b, inp_a, inp_b)\n self.assertEqual((45, (46, 49)), output)\n\n def testNoSequences(self):\n with self.assertRaisesRegexp(ValueError,\n \"Must provide at least one structure\"):\n nest.map(lambda x: x)\n\n def testEmptySequences(self):\n f = lambda x: x + 1\n empty_nt = collections.namedtuple(\"empty_nt\", \"\")\n\n self.assertEqual((), nest.map(f, ()))\n self.assertEqual([], nest.map(f, []))\n self.assertEqual(empty_nt(), nest.map(f, empty_nt()))\n\n # This is checking actual equality of types, empty list != empty tuple\n self.assertNotEqual((), nest.map(f, []))\n\n def testFlattenAndPackIterable(self):\n # A nice messy mix of tuples, lists, dicts, and 
`OrderedDict`s.\n named_tuple = collections.namedtuple(\"A\", (\"b\", \"c\"))\n mess = [\n \"z\",\n named_tuple(3, 4),\n {\n \"c\": [\n 1,\n collections.OrderedDict([\n (\"b\", 3),\n (\"a\", 2),\n ]),\n ],\n \"b\": 5\n },\n 17\n ]\n\n flattened = nest.flatten_iterable(mess)\n self.assertEqual(flattened, [\"z\", 3, 4, 5, 1, 3, 2, 17])\n\n structure_of_mess = [\n 14,\n named_tuple(\"a\", True),\n {\n \"c\": [\n 0,\n collections.OrderedDict([\n (\"b\", 9),\n (\"a\", 8),\n ]),\n ],\n \"b\": 3\n },\n \"hi everybody\",\n ]\n\n unflattened = nest.pack_iterable_as(structure_of_mess, flattened)\n self.assertEqual(unflattened, mess)\n\n def testFlattenIterable_numpyIsNotFlattened(self):\n structure = np.array([1, 2, 3])\n flattened = nest.flatten_iterable(structure)\n self.assertEqual(len(flattened), 1)\n\n def testFlattenIterable_stringIsNotFlattened(self):\n structure = \"lots of letters\"\n flattened = nest.flatten_iterable(structure)\n self.assertEqual(len(flattened), 1)\n\n def testFlatternIterable_scalarStructure(self):\n # Tests can call flatten_iterable with single \"scalar\" object.\n structure = \"hello\"\n flattened = nest.flatten_iterable(structure)\n unflattened = nest.pack_iterable_as(\"goodbye\", flattened)\n self.assertEqual(structure, unflattened)\n\n def testPackIterableAs_notIterableError(self):\n with self.assertRaisesRegexp(TypeError,\n \"flat_iterable must be an iterable\"):\n nest.pack_iterable_as(\"hi\", \"bye\")\n\n def testPackIterableAs_scalarStructureError(self):\n with self.assertRaisesRegexp(\n ValueError, r\"Structure is a scalar but len\\(flat_iterable\\) == 2 > 1\"):\n nest.pack_iterable_as(\"hi\", [\"bye\", \"twice\"])\n\n def testPackIterableAs_wrongLengthsError(self):\n with self.assertRaisesRegexp(\n ValueError,\n \"Structure had 2 elements, but flat_iterable had 3 elements.\"):\n nest.pack_iterable_as([\"hello\", \"world\"],\n [\"and\", \"goodbye\", \"again\"])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n\n\n",
"import tensorflow as tf\nfrom agent.forward import ActorCriticNet\nfrom params import *\n\n\nclass Access(object):\n def __init__(self, inputs_shape, action_size):\n with tf.variable_scope('Access'):\n # placeholder\n self.inputs = tf.placeholder(\n tf.float32, [None] + inputs_shape, 'inputs')\n\n # neural network interface\n inputs = tf.expand_dims(self.inputs, axis=-1)\n self.net = ActorCriticNet()\n self.policy, self.value = \\\n self.net(inputs, action_size)\n\n # global optimizer\n self.optimizer = tf.train.RMSPropOptimizer(\n LEARNING_RATE, DECAY_RATE, name='optimizer')\n\n # saver\n var_list = list(self.get_trainable())\n self.saver = tf.train.Saver(var_list=var_list)\n\n def get_trainable(self):\n return list(self.net.get_variables())\n\n def save(self, sess, path):\n self.saver.save(sess, path)\n\n def restore(self, sess, path):\n var_list = list(self.get_trainable()[0] + self.get_trainable()[1])\n saver = tf.train.Saver(var_list=var_list)\n saver.restore(sess, path)",
"\"\"\"\n@author: Young\n@license: (C) Copyright 2013-2017\n@contact: [email protected]\n@file: noise.py\n@time: 2018/1/18 21:47\n\"\"\"\nimport numpy as np\n\n\nclass Noise(object):\n def __init__(self, action_size, mu=0, theta=0.15, sigma=0.2):\n self.action_size = action_size\n self.mu = mu\n self.theta = theta\n self.sigma = sigma\n self.X = np.ones(self.action_size) * self.mu\n\n def reset(self):\n self.X = np.ones(self.action_size) * self.mu\n\n def sample(self):\n dx = self.theta * (self.mu - self.X)\n dx = dx + self.sigma * np.random.randn(len(self.X))\n self.X += dx\n return self.X\n\n def __call__(self):\n return self.sample()\n",
"import numpy as np\nimport gym\nfrom ACNet_adjust import ACNet\n\n_EPSILON = 1e-6\nnp.random.seed(1)\nMAX_EPISODES = 1000\nGAME = 'CartPole-v0'\n\n\nclass Agent(object):\n def __init__(self, env_name):\n self.env = gym.make(env_name)\n self.state_size = self.env.observation_space.shape[0]\n self.action_size = self.env.action_space.n\n self.AC = ACNet(self.state_size, self.action_size)\n self.episode = 1\n self.accumulate_reward_list = []\n self.accumulate_reward = 0\n self.T = 0\n self.clear()\n\n def clear(self):\n self.states = []\n self.actions = []\n self.rewards = []\n self.R = []\n\n def train(self):\n states = np.vstack(self.states)\n actions = np.squeeze(np.vstack(self.actions), axis=1)\n R = np.squeeze(np.vstack(self.R), axis=1)\n self.AC.train_actor(states, actions, R)\n self.AC.train_critic(states, R)\n\n\n def run_episode(self, t_max=10):\n state = self.env.reset()\n t = 0 # initialize thread step counter t <- 1\n while True:\n action = self.AC.predict_action(np.expand_dims(state, axis=0))\n next_state, reward, done, info = self.env.step(action)\n\n self.accumulate_reward += reward\n self.states.append(state)\n self.actions.append(action)\n self.rewards.append(reward)\n\n state = next_state\n t += 1\n if done:\n self.accumulate_reward = self.accumulate_reward * 0.9 + reward * 0.1\n R = 0\n self.update_bellman(R)\n self.train()\n break\n elif t%t_max==0:\n R = self.AC.predict_value(np.expand_dims(next_state, axis=0))\n self.update_bellman(R)\n self.train()\n\n def update_bellman(self, R):\n for i in range(len(self.rewards), 0, -1):\n self.R.append(self.rewards[i-1] + 0.9 * R)\n R = self.AC.predict_value([self.states[i-1]])\n self.R = np.flip(self.R, axis=0)\n\n def run(self, MAX_EPISODES, t_max):\n while self.episode < MAX_EPISODES:\n self.run_episode(t_max)\n self.episode += 1\n self.accumulate_reward_list.append(self.accumulate_reward)\n print('done')\n\n def run_adjust(self, MAX_EPISODES, t_max):\n while self.episode < MAX_EPISODES:\n self.adjust_parameters(t_max)\n self.episode += 1\n self.accumulate_reward_list.append(self.accumulate_reward)\n print('done')\n\n def get_loss(self):\n states = np.vstack(self.states)\n actions = np.squeeze(np.vstack(self.actions), axis=1)\n targets = np.squeeze(np.vstack(self.R), axis=1)\n outputs = self.AC.get_loss(states, actions, targets)\n self.clear()\n return outputs\n\n def adjust_parameters(self, t_max=10):\n state = self.env.reset()\n t = 0 # initialize thread step counter t <- 1\n while True:\n self.T += 1\n action = self.AC.predict_action(np.expand_dims(state, axis=0))\n next_state, reward, done, info = self.env.step(action)\n\n self.accumulate_reward += reward\n self.states.append(state)\n self.actions.append(action)\n self.rewards.append(reward)\n\n state = next_state\n t += 1\n if done:\n self.accumulate_reward = self.accumulate_reward * 0.9 + reward * 0.1\n R = 0\n self.update_bellman(R)\n self.train()\n print(self.T, self.get_loss())\n break\n\n elif t%t_max==0:\n R = self.AC.predict_value(np.expand_dims(next_state, axis=0))\n self.update_bellman(R)\n self.train()\n print(self.T, self.get_loss())",
"from copy import deepcopy\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn.functional import softmax\nfrom torch.autograd import Variable\nfrom agent.forward import DQN\nfrom agent.access import Access\n\n\n# Ensure values are greater than epsilon to avoid numerical instability.\n_EPSILON = 1e-6\n\n\nclass Agent(object):\n def __init__(self, image_shape, output_size,\n capacity=int(1e6), learning_rate=1e-3):\n self.output_size = output_size\n self.access = Access(capacity)\n self.value_net = DQN(image_shape, output_size)\n self.target_net = deepcopy(self.value_net)\n # 自动使用gpu\n self.gpu = torch.cuda.is_available()\n if self.gpu:\n self.value_net.cuda()\n self.target_net.cuda()\n\n self.optimizer = torch.optim.Adam(\n self.value_net.parameters(), lr=learning_rate)\n self.loss_func = nn.MSELoss()\n\n def get_deterministic_policy(self, x):\n x = Variable(torch.from_numpy(x.astype(np.float32)))\n if not self.gpu:\n out = self.value_net(x).data.numpy()\n return np.argmax(out, axis=1)\n else:\n x = x.cuda()\n out = self.value_net(x)\n out = out.cpu().data.numpy()\n return np.argmax(out, axis=1)\n\n def get_stochastic_policy(self, x):\n x = Variable(torch.from_numpy(x.astype(np.float32)))\n if not self.gpu:\n out = softmax(self.value_net(x), 1)\n out = out.data.numpy()\n return np.random.choice(self.output_size, 1, p=out[0])[0]\n else:\n x = x.cuda()\n out = softmax(self.value_net(x), 1)\n out = out.cpu().data.numpy()\n return np.random.choice(self.output_size, 1, p=out[0])[0]\n\n def get_epsilon_policy(self, x, epsilon=0.9):\n if np.random.uniform() > epsilon:\n return np.random.randint(self.output_size)\n else:\n return self.get_stochastic_policy(x)\n\n def optimize(self, batch_size=64, gamma=.9):\n batch = self.sample(batch_size)\n if self.gpu:\n state, action, reward, done, next_state = \\\n [Variable(torch.from_numpy(np.float32(i))).cuda() for i in batch]\n action = action.type(torch.LongTensor).cuda()\n else:\n state, action, reward, done, next_state = \\\n [Variable(torch.from_numpy(np.float32(i))) for i in batch]\n action = action.type(torch.LongTensor)\n\n value = self.value_net(state).gather(1, action.unsqueeze(1))\n next_value = self.target_net(next_state).detach()\n next_value = next_value.max(1)[0].view([-1, 1])\n value = value.squeeze(1)\n next_value = next_value.squeeze(1)\n target = done * reward + (1 - done) * (reward + gamma * next_value)\n loss = self.loss_func(value, target)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n def _update_target(self):\n # update target network parameters\n for t, s in zip(self.target_net.parameters(), self.value_net.parameters()):\n t.data.copy_(s.data)\n\n def append(self, *args):\n self.access.append(*args)\n\n def sample(self, batch_size=128):\n return self.access.sample(batch_size)\n\n\n\n",
"# Copyright 2017 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Tests for Recurrent cores in sonnet.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport mock\nimport numpy as np\nimport sonnet as snt\nfrom sonnet.testing import parameterized\nimport tensorflow as tf\n\nfrom tensorflow.python.util import nest\n\nBATCH_SIZE = 5\nMASK_TUPLE = (True, (False, True))\n\n_state_size_tuple = (3, (4, 5))\n_state_size_element = 6\n\n\n# Use patch to instantiate RNNCore\[email protected](snt.RNNCore, __abstractmethods__=set())\nclass RNNCoreTest(tf.test.TestCase, parameterized.ParameterizedTestCase):\n\n @parameterized.Parameters(\n (False, False, _state_size_tuple),\n (False, True, _state_size_tuple),\n (True, False, _state_size_tuple),\n (True, True, _state_size_tuple),\n (False, False, _state_size_element),\n (False, True, _state_size_element),\n (True, False, _state_size_element),\n (True, True, _state_size_element))\n def testInitialStateTuple(self, trainable, use_custom_initial_value,\n state_size):\n batch_size = 6\n\n # Set the attribute to the class since it we can't set properties of\n # abstract classes\n snt.RNNCore.state_size = state_size\n flat_state_size = nest.flatten(state_size)\n core = snt.RNNCore(name=\"dummy_core\")\n if use_custom_initial_value:\n flat_initializer = [tf.constant_initializer(2)] * len(flat_state_size)\n trainable_initializers = nest.pack_sequence_as(\n structure=state_size, flat_sequence=flat_initializer)\n else:\n trainable_initializers = None\n initial_state = core.initial_state(\n batch_size, dtype=tf.float32, trainable=trainable,\n trainable_initializers=trainable_initializers)\n\n nest.assert_same_structure(initial_state, state_size)\n flat_initial_state = nest.flatten(initial_state)\n\n for state, size in zip(flat_initial_state, flat_state_size):\n self.assertEqual(state.get_shape(), [batch_size, size])\n\n with self.test_session() as sess:\n tf.global_variables_initializer().run()\n flat_initial_state_value = sess.run(flat_initial_state)\n for value, size in zip(flat_initial_state_value, flat_state_size):\n expected_initial_state = np.empty([batch_size, size])\n if not trainable:\n expected_initial_state.fill(0)\n elif use_custom_initial_value:\n expected_initial_state.fill(2)\n else:\n value_row = value[0]\n expected_initial_state = np.tile(value_row, (batch_size, 1))\n self.assertAllClose(value, expected_initial_state)\n\n @parameterized.Parameters(\n (False, _state_size_tuple),\n (True, _state_size_tuple),\n (False, _state_size_element),\n (True, _state_size_element))\n def testRegularizers(self, trainable, state_size):\n batch_size = 6\n\n # Set the attribute to the class since it we can't set properties of\n # abstract classes\n snt.RNNCore.state_size = state_size\n flat_state_size = nest.flatten(state_size)\n core = 
snt.RNNCore(name=\"dummy_core\")\n flat_regularizer = ([tf.contrib.layers.l1_regularizer(scale=0.5)] *\n len(flat_state_size))\n trainable_regularizers = nest.pack_sequence_as(\n structure=state_size, flat_sequence=flat_regularizer)\n\n core.initial_state(batch_size, dtype=tf.float32, trainable=trainable,\n trainable_regularizers=trainable_regularizers)\n\n graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n if not trainable:\n self.assertFalse(graph_regularizers)\n else:\n for i in range(len(flat_state_size)):\n self.assertRegexpMatches(\n graph_regularizers[i].name, \".*l1_regularizer.*\")\n\n\nclass TrainableInitialState(tf.test.TestCase,\n parameterized.ParameterizedTestCase):\n\n @parameterized.Parameters((True, MASK_TUPLE), (True, None), (False, False),\n (False, None))\n def testInitialStateComputation(self, tuple_state, mask):\n if tuple_state:\n initial_state = (tf.fill([BATCH_SIZE, 6], 2),\n (tf.fill([BATCH_SIZE, 7], 3),\n tf.fill([BATCH_SIZE, 8], 4)))\n else:\n initial_state = tf.fill([BATCH_SIZE, 9], 10)\n\n trainable_state_module = snt.TrainableInitialState(initial_state, mask=mask)\n trainable_state = trainable_state_module()\n nest.assert_same_structure(initial_state, trainable_state)\n flat_initial_state = nest.flatten(initial_state)\n flat_trainable_state = nest.flatten(trainable_state)\n if mask is not None:\n flat_mask = nest.flatten(mask)\n else:\n flat_mask = (True,) * len(flat_initial_state)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # Check all variables are initialized correctly and return a state that\n # has the same as it is provided.\n for trainable_state, initial_state in zip(flat_trainable_state,\n flat_initial_state):\n self.assertAllEqual(sess.run(trainable_state), sess.run(initial_state))\n\n # Change the value of all the trainable variables to ones.\n for variable in tf.trainable_variables():\n sess.run(tf.assign(variable, tf.ones_like(variable)))\n\n # Check that the values of the initial_states have changed if and only if\n # they are trainable.\n for trainable_state, initial_state, mask in zip(flat_trainable_state,\n flat_initial_state,\n flat_mask):\n trainable_state_value = sess.run(trainable_state)\n initial_state_value = sess.run(initial_state)\n if mask:\n expected_value = np.ones_like(initial_state_value)\n else:\n expected_value = initial_state_value\n\n self.assertAllEqual(trainable_state_value, expected_value)\n\n def testBadArguments(self):\n initial_state = (tf.random_normal([BATCH_SIZE, 6]),\n (tf.random_normal([BATCH_SIZE, 7]),\n tf.random_normal([BATCH_SIZE, 8])))\n with self.assertRaises(TypeError):\n snt.TrainableInitialState(initial_state, mask=(True, (False, \"foo\")))\n\n snt.TrainableInitialState(initial_state, mask=(True, (False, True)))()\n with self.test_session() as sess:\n with self.assertRaises(tf.errors.InvalidArgumentError):\n # Check that the class checks that the elements of initial_state have\n # identical rows.\n sess.run(tf.global_variables_initializer())\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.matmul",
"tensorflow.constant",
"tensorflow.Graph",
"tensorflow.identity",
"tensorflow.test.main",
"tensorflow.placeholder",
"tensorflow.trainable_variables",
"tensorflow.global_variables_initializer",
"numpy.random.rand",
"tensorflow.name_scope",
"tensorflow.variable_scope"
],
[
"tensorflow.placeholder",
"tensorflow.test.main",
"numpy.random.randn",
"numpy.array",
"numpy.zeros"
],
[
"tensorflow.train.RMSPropOptimizer",
"tensorflow.expand_dims",
"tensorflow.placeholder",
"tensorflow.variable_scope",
"tensorflow.train.Saver"
],
[
"numpy.ones"
],
[
"numpy.vstack",
"numpy.expand_dims",
"numpy.flip",
"numpy.random.seed"
],
[
"numpy.random.choice",
"numpy.argmax",
"torch.cuda.is_available",
"numpy.float32",
"numpy.random.uniform",
"torch.nn.MSELoss",
"numpy.random.randint"
],
[
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.fill",
"numpy.ones_like",
"tensorflow.get_collection",
"tensorflow.contrib.layers.l1_regularizer",
"tensorflow.ones_like",
"tensorflow.test.main",
"numpy.empty",
"numpy.tile",
"tensorflow.constant_initializer",
"tensorflow.global_variables_initializer",
"tensorflow.trainable_variables",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.util.nest.flatten",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
h77h7/tvm-04.26 | [
"1bd8e6b921f392ae29b7672159326d94d40d6922",
"7f567264ae1bc1e4bc24a2eeb5b18425997dce22",
"7f567264ae1bc1e4bc24a2eeb5b18425997dce22",
"1bd8e6b921f392ae29b7672159326d94d40d6922"
] | [
"tutorials/language/reduction.py",
"tests/python/integration/test_ewise.py",
"tests/python/relay/test_op_level4.py",
"apps/topi_recipe/gemm/gemm_int8.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nReduction\n=========\n**Author**: `Tianqi Chen <https://tqchen.github.io>`_\n\nThis is an introduction material on how to do reduction in TVM.\nAssociative reduction operators like sum/max/min are typical\nconstruction blocks of linear algebra operations.\n\nIn this tutorial, we will demonstrate how to do reduction in TVM.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport tvm\nimport tvm.testing\nfrom tvm import te\nimport numpy as np\n\n######################################################################\n# Describe Sum of Rows\n# --------------------\n# Assume we want to compute sum of rows as our example.\n# In numpy semantics this can be written as :code:`B = numpy.sum(A, axis=1)`\n#\n# The following lines describe the row sum operation.\n# To create a reduction formula, we declare a reduction axis using\n# :any:`te.reduce_axis`. :any:`te.reduce_axis` takes in the range of reductions.\n# :any:`te.sum` takes in the expression to be reduced as well as the reduction\n# axis and compute the sum of value over all k in the declared range.\n#\n# The equivalent C code is as follows:\n#\n# .. code-block:: c\n#\n# for (int i = 0; i < n; ++i) {\n# B[i] = 0;\n# for (int k = 0; k < m; ++k) {\n# B[i] = B[i] + A[i][k];\n# }\n# }\n#\nn = te.var(\"n\")\nm = te.var(\"m\")\nA = te.placeholder((n, m), name=\"A\")\nk = te.reduce_axis((0, m), \"k\")\nB = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name=\"B\")\n\n######################################################################\n# Schedule the Reduction\n# ----------------------\n# There are several ways to schedule a reduction.\n# Before doing anything, let us print out the IR code of default schedule.\n#\ns = te.create_schedule(B.op)\nprint(tvm.lower(s, [A, B], simple_mode=True))\n\n######################################################################\n# You can find that the IR code is quite like the C code.\n# The reduction axis is similar to a normal axis, it can be splitted.\n#\n# In the following code we split both the row axis of B as well\n# axis by different factors. 
The result is a nested reduction.\n#\nko, ki = s[B].split(B.op.reduce_axis[0], factor=16)\nxo, xi = s[B].split(B.op.axis[0], factor=32)\nprint(tvm.lower(s, [A, B], simple_mode=True))\n\n######################################################################\n# If we are building a GPU kernel, we can bind the rows of B to GPU threads.\ns[B].bind(xo, te.thread_axis(\"blockIdx.x\"))\ns[B].bind(xi, te.thread_axis(\"threadIdx.x\"))\nprint(tvm.lower(s, [A, B], simple_mode=True))\n\n######################################################################\n# Reduction Factoring and Parallelization\n# ---------------------------------------\n# One problem of building a reduction is that we cannot simply\n# parallelize over the reduction axis. We need to divide the computation\n# of the reduction, store the local reduction result in a temporal array\n# before doing a reduction over the temp array.\n#\n# The rfactor primitive does such rewrite of the computation.\n# In the following schedule, the result of B is written to a temporary\n# result B.rf. The factored dimension becomes the first dimension of B.rf.\n#\ns = te.create_schedule(B.op)\nko, ki = s[B].split(B.op.reduce_axis[0], factor=16)\nBF = s.rfactor(B, ki)\nprint(tvm.lower(s, [A, B], simple_mode=True))\n\n######################################################################\n# The scheduled operator of B also get rewritten to be sum over\n# the first axis of reduced result of B.f\n#\nprint(s[B].op.body)\n\n######################################################################\n# Cross Thread Reduction\n# ----------------------\n# We can now parallelize over the factored axis.\n# Here the reduction axis of B is marked to be a thread.\n# TVM allows reduction axis to be marked as thread if it is the only\n# axis in reduction and cross thread reduction is possible in the device.\n#\n# This is indeed the case after the factoring.\n# We can directly compute BF at the reduction axis as well.\n# The final generated kernel will divide the rows by blockIdx.x and threadIdx.y\n# columns by threadIdx.x and finally do a cross thread reduction over threadIdx.x\n#\nxo, xi = s[B].split(s[B].op.axis[0], factor=32)\ns[B].bind(xo, te.thread_axis(\"blockIdx.x\"))\ns[B].bind(xi, te.thread_axis(\"threadIdx.y\"))\ntx = te.thread_axis(\"threadIdx.x\")\ns[B].bind(s[B].op.reduce_axis[0], tx)\ns[BF].compute_at(s[B], s[B].op.reduce_axis[0])\ns[B].set_store_predicate(tx.var.equal(0))\nfcuda = tvm.build(s, [A, B], \"cuda\")\nprint(fcuda.imported_modules[0].get_source())\n\n######################################################################\n# Verify the correctness of result kernel by comparing it to numpy.\n#\nnn = 128\ndev = tvm.gpu(0)\na = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev)\nb = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)\nfcuda(a, b)\ntvm.testing.assert_allclose(b.asnumpy(), np.sum(a.asnumpy(), axis=1), rtol=1e-4)\n\n######################################################################\n# Describe Convolution via 2D Reduction\n# -------------------------------------\n# In TVM, we can describe convolution via 2D reduction in a simple way.\n# Here is an example for 2D convolution with filter size = [3, 3] and strides = [1, 1].\n#\nn = te.var(\"n\")\nInput = te.placeholder((n, n), name=\"Input\")\nFilter = te.placeholder((3, 3), name=\"Filter\")\ndi = te.reduce_axis((0, 3), name=\"di\")\ndj = te.reduce_axis((0, 3), name=\"dj\")\nOutput = te.compute(\n (n - 2, n - 2),\n lambda i, j: te.sum(Input[i + di, j + dj] * Filter[di, dj], 
axis=[di, dj]),\n name=\"Output\",\n)\ns = te.create_schedule(Output.op)\nprint(tvm.lower(s, [Input, Filter, Output], simple_mode=True))\n\n######################################################################\n# .. _general-reduction:\n#\n# Define General Commutative Reduction Operation\n# ----------------------------------------------\n# Besides the built-in reduction operations like :any:`te.sum`,\n# :any:`tvm.te.min` and :any:`tvm.te.max`, you can also define your\n# commutative reduction operation by :any:`te.comm_reducer`.\n#\n\nn = te.var(\"n\")\nm = te.var(\"m\")\nproduct = te.comm_reducer(lambda x, y: x * y, lambda t: tvm.tir.const(1, dtype=t), name=\"product\")\nA = te.placeholder((n, m), name=\"A\")\nk = te.reduce_axis((0, m), name=\"k\")\nB = te.compute((n,), lambda i: product(A[i, k], axis=k), name=\"B\")\n\n######################################################################\n# .. note::\n#\n# Sometimes we would like to perform reduction that involves multiple\n# values like :code:`argmax`, which can be done by tuple inputs.\n# See :ref:`reduction-with-tuple-inputs` for more detail.\n\n######################################################################\n# Summary\n# -------\n# This tutorial provides a walk through of reduction schedule.\n#\n# - Describe reduction with reduce_axis.\n# - Use rfactor to factor out axis if we need parallelism.\n# - Define new reduction operation by :any:`te.comm_reducer`\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nfrom tvm import te\nfrom tvm.contrib import nvcc\nimport numpy as np\nimport time\nimport tvm.testing\n\n\[email protected]_gpu\ndef test_exp():\n # graph\n n = tvm.runtime.convert(1024)\n A = te.placeholder((n,), name=\"A\")\n B = te.compute(A.shape, lambda *i: te.exp(A(*i)), name=\"B\")\n s = te.create_schedule(B.op)\n # create iter var and assign them tags.\n num_thread = 8\n bx, tx = s[B].split(B.op.axis[0], factor=num_thread)\n s[B].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[B].bind(tx, te.thread_axis(\"threadIdx.x\"))\n\n # one line to build the function.\n def check_device(device, host=\"stackvm\"):\n if not tvm.testing.device_enabled(host):\n return\n dev = tvm.device(device, 0)\n fexp = tvm.build(s, [A, B], device, host, name=\"myexp\")\n dev = tvm.device(device, 0)\n # launch the kernel.\n n = 1024\n a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)\n fexp(a, b)\n tvm.testing.assert_allclose(b.asnumpy(), np.exp(a.asnumpy()), rtol=1e-5)\n\n check_device(\"opencl -device=intel_graphics\")\n check_device(\"cuda\", \"llvm\")\n check_device(\"vulkan\")\n\n\[email protected]_gpu\ndef test_fmod():\n # graph\n def run(dtype):\n n = te.size_var(\"n\")\n A = te.placeholder((n,), name=\"A\", dtype=dtype)\n B = te.placeholder((n,), name=\"B\", dtype=dtype)\n C = te.compute(A.shape, lambda *i: te.fmod(A(*i), B(*i)), name=\"C\")\n s = te.create_schedule(C.op)\n # create iter var and assign them tags.\n num_thread = 8\n bx, tx = s[C].split(C.op.axis[0], factor=num_thread)\n\n def check_device(device):\n dev = tvm.device(device, 0)\n if not tvm.testing.device_enabled(device):\n print(\"skip because %s is not enabled..\" % device)\n return\n target = tvm.target.Target(device)\n if \"cpu\" not in target.keys:\n s[C].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[C].bind(tx, te.thread_axis(\"threadIdx.x\"))\n fmod = tvm.build(s, [A, B, C], device, name=\"myfmod\")\n\n # launch the kernel.\n n = 1024\n a_np = (np.random.uniform(size=n) * 256).astype(A.dtype)\n b_np = (np.random.uniform(size=n) * 256).astype(B.dtype)\n\n # \"fix\" the values in a and b to avoid the result being too small\n b_np += (b_np < 2.0) * 2\n a_np[np.abs(np.fmod(a_np, b_np)) < 1] += 1\n\n a = tvm.nd.array(a_np, dev)\n b = tvm.nd.array(b_np, dev)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)\n ftimer = fmod.time_evaluator(fmod.entry_name, dev, number=1)\n tcost = ftimer(a, b, c).mean\n # fmod(a, b, c)\n np.testing.assert_allclose(c.asnumpy(), np.mod(a.asnumpy(), b.asnumpy()), rtol=1e-5)\n\n check_device(\"cuda\")\n check_device(\"opencl -device=intel_graphics\")\n check_device(\"metal\")\n\n run(\"float32\")\n\n\[email protected]_gpu\ndef 
test_multiple_cache_write():\n # graph\n n = tvm.runtime.convert(1024)\n A0 = te.placeholder((n,), name=\"A0\", dtype=\"float32\")\n A1 = te.placeholder((n,), name=\"A1\", dtype=\"float32\")\n B0, B1 = te.compute((n,), lambda *i: (A0(*i) + A1(*i), A0(*i) * A1(*i)), name=\"B\")\n C = te.compute((n,), lambda *i: B0(*i) + B1(*i), name=\"C\")\n s = te.create_schedule(C.op)\n # create iter var and assign them tags.\n num_thread = 8\n B0_cache, B1_cache = s.cache_write([B0, B1], \"local\")\n bx, tx = s[C].split(C.op.axis[0], factor=num_thread)\n s[B0].compute_at(s[C], bx)\n s[B0_cache].compute_at(s[C], bx)\n s[C].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[C].bind(tx, te.thread_axis(\"threadIdx.x\"))\n # one line to build the function.\n def check_device(device, host=\"stackvm\"):\n if not tvm.testing.device_enabled(host):\n return\n dev = tvm.device(device, 0)\n if not tvm.testing.device_enabled(device):\n return\n func = tvm.build(s, [A0, A1, C], device, host, name=\"multiple_cache_write\")\n dev = tvm.device(device, 0)\n # launch the kernel.\n n = 1024\n a0 = tvm.nd.array(np.random.uniform(size=n).astype(A0.dtype), dev)\n a1 = tvm.nd.array(np.random.uniform(size=n).astype(A1.dtype), dev)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)\n func(a0, a1, c)\n tvm.testing.assert_allclose(\n c.asnumpy(), a0.asnumpy() + a1.asnumpy() + (a0.asnumpy() * a1.asnumpy()), rtol=1e-5\n )\n\n check_device(\"cuda\", \"llvm\")\n check_device(\"vulkan\")\n check_device(\"opencl\")\n\n\ndef test_log_pow_llvm():\n # graph\n n = te.size_var(\"n\")\n A = te.placeholder((n,), name=\"A\")\n B = te.compute(A.shape, lambda *i: te.power(te.log(A(*i)), 2.0), name=\"B\")\n s = te.create_schedule(B.op)\n # create iter var and assign them tags.\n bx, tx = s[B].split(B.op.axis[0], factor=32)\n # one line to build the function.\n if not tvm.testing.device_enabled(\"llvm\"):\n return\n\n flog = tvm.build(s, [A, B], \"llvm\", name=\"mylog\")\n dev = tvm.cpu(0)\n # launch the kernel.\n n = 1028\n a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)\n repeat = 10\n ftimer = flog.time_evaluator(flog.entry_name, dev, number=1, repeat=repeat)\n res = ftimer(a, b)\n assert len(res.results) == repeat\n tvm.testing.assert_allclose(b.asnumpy(), np.power(np.log(a.asnumpy()), 2.0), rtol=1e-5)\n\n\[email protected]_gpu\ndef test_popcount():\n def run(dtype):\n # graph\n n = tvm.runtime.convert(1024)\n A = te.placeholder((n,), name=\"A\", dtype=dtype)\n B = te.compute(A.shape, lambda *i: tvm.tir.popcount(A(*i)), name=\"B\")\n s = te.create_schedule(B.op)\n # simple schedule\n num_thread = 8\n bx, tx = s[B].split(B.op.axis[0], factor=num_thread)\n\n def check_device(device):\n dev = tvm.device(device, 0)\n if not tvm.testing.device_enabled(device):\n print(\"skip because %s is not enabled..\" % device)\n return\n target = tvm.target.Target(device)\n if \"cpu\" not in target.keys:\n s[B].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[B].bind(tx, te.thread_axis(\"threadIdx.x\"))\n func = tvm.build(s, [A, B], device)\n # launch the kernel.\n n = 1024\n a = tvm.nd.array(np.random.randint(low=0, high=1000, size=n, dtype=A.dtype), dev)\n b = tvm.nd.array(np.zeros(shape=n, dtype=B.dtype), dev)\n func(a, b)\n tvm.testing.assert_allclose(\n b.asnumpy(), list(map(lambda x: bin(x).count(\"1\"), a.asnumpy())), rtol=1e-5\n )\n\n check_device(\"llvm\")\n check_device(\"cuda\")\n check_device(\"opencl\")\n if dtype == \"uint32\":\n check_device(\"metal\")\n check_device(\"vulkan\")\n\n 
run(\"uint32\")\n run(\"uint64\")\n\n\[email protected]_gpu\ndef test_add():\n def run(dtype):\n # graph\n n = te.size_var(\"n\")\n A = te.placeholder((n,), name=\"A\", dtype=dtype)\n B = te.placeholder((n,), name=\"B\", dtype=dtype)\n bias = te.var(\"bias\", dtype=dtype)\n scale = te.var(\"scale\", dtype=dtype)\n C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name=\"C\")\n # schedule\n s = te.create_schedule(C.op)\n # create iter var and assign them tags.\n num_thread = 16\n bx, x = s[C].split(C.op.axis[0], factor=num_thread * 4)\n tx, x = s[C].split(x, nparts=num_thread)\n _, x = s[C].split(x, factor=4)\n s[C].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[C].bind(tx, te.thread_axis(\"threadIdx.x\"))\n s[C].vectorize(x)\n\n # one line to build the function.\n def check_device(device):\n dev = tvm.device(device, 0)\n if not tvm.testing.device_enabled(device):\n print(\"skip because %s is not enabled..\" % device)\n return\n fadd = tvm.build(s, [A, B, C], device, name=\"myadd\")\n\n # launch the kernel.\n n = 1024\n a = tvm.nd.array((np.random.uniform(size=n) * 256).astype(A.dtype), dev)\n b = tvm.nd.array((np.random.uniform(size=n) * 256).astype(B.dtype), dev)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)\n ftimer = fadd.time_evaluator(fadd.entry_name, dev, number=1)\n tcost = ftimer(a, b, c).mean\n tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy(), rtol=1e-6)\n\n check_device(\"opencl\")\n check_device(\"cuda\")\n if dtype == \"float32\":\n check_device(\"metal\")\n check_device(\"vulkan\")\n\n run(\"float32\")\n run(\"int32\")\n run(\"int64\")\n run(\"uint64\")\n\n\[email protected]_gpu\ndef try_warp_memory():\n \"\"\"skip this in default test because it require higher arch\"\"\"\n m = 128\n A = te.placeholder((m,), name=\"A\")\n B = te.compute((m,), lambda i: A[i] + 3, name=\"B\")\n warp_size = 32\n s = te.create_schedule(B.op)\n AA = s.cache_read(A, \"warp\", [B])\n xo, xi = s[B].split(B.op.axis[0], warp_size * 2)\n xi0, xi1 = s[B].split(xi, factor=warp_size)\n tx = te.thread_axis(\"threadIdx.x\")\n s[B].bind(xi1, tx)\n s[B].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[AA].compute_at(s[B], xo)\n xo, xi = s[AA].split(s[AA].op.axis[0], warp_size)\n s[AA].bind(xi, tx)\n\n @tvm.register_func\n def tvm_callback_cuda_compile(code):\n ptx = nvcc.compile_cuda(code, target=\"ptx\")\n return ptx\n\n # one line to build the function.\n def check_device(device):\n dev = tvm.device(device, 0)\n if not tvm.testing.device_enabled(device):\n print(\"skip because %s is not enabled..\" % device)\n return\n f = tvm.build(s, [A, B], device)\n a = tvm.nd.array((np.random.uniform(size=m) * 256).astype(A.dtype), dev)\n b = tvm.nd.array(np.zeros(m, dtype=B.dtype), dev)\n f(a, b)\n tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 3, rtol=1e-6)\n\n check_device(\"cuda\")\n\n\nif __name__ == \"__main__\":\n test_exp()\n try_warp_memory()\n test_multiple_cache_write()\n test_add()\n test_log_pow_llvm()\n test_popcount()\n test_fmod()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nfrom tvm import te\nimport numpy as np\nfrom tvm import relay\nfrom tvm.relay import transform\nfrom tvm.relay.testing import run_infer_type\nimport tvm.topi.testing\nimport tvm.testing\n\n\[email protected]_gpu\ndef test_binary_op():\n def check_binary_op(opfunc, ref):\n n = te.size_var(\"n\")\n t1 = relay.TensorType((5, n, 5))\n t2 = relay.TensorType((n, 1))\n x = relay.var(\"x\", t1)\n y = relay.var(\"y\", t2)\n z = opfunc(x, y)\n # test printer\n assert (\"{}(%x, %y)\".format(z.op.name)) in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == t1\n\n if ref is not None:\n t1 = relay.TensorType((5, 10, 5))\n t2 = relay.TensorType((5, 10, 5))\n x = relay.var(\"x\", t1)\n y = relay.var(\"y\", t2)\n z = opfunc(x, y)\n x_data = np.random.rand(5, 10, 5).astype(t1.dtype)\n y_data = np.random.rand(5, 10, 5).astype(t2.dtype)\n ref_res = ref(x_data, y_data)\n func = relay.Function([x, y], z)\n\n for target, dev in tvm.testing.enabled_targets():\n intrp = relay.create_executor(\"graph\", device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)\n\n for opfunc, ref in [(relay.power, np.power)]:\n check_binary_op(opfunc, ref)\n\n\[email protected]_gpu\ndef test_cmp_type():\n for op, ref in (\n (relay.greater, np.greater),\n (relay.greater_equal, np.greater_equal),\n (relay.less, np.less),\n (relay.less_equal, np.less_equal),\n (relay.equal, np.equal),\n (relay.not_equal, np.not_equal),\n ):\n x = relay.var(\"x\", relay.TensorType((10, 4), \"float32\"))\n y = relay.var(\"y\", relay.TensorType((5, 10, 1), \"float32\"))\n z = op(x, y)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((5, 10, 4), \"bool\")\n\n if ref is not None:\n x_shape = (10, 4)\n y_shape = (5, 10, 1)\n t1 = relay.TensorType(x_shape)\n t2 = relay.TensorType(y_shape)\n x = relay.var(\"x\", t1)\n y = relay.var(\"y\", t2)\n z = op(x, y)\n x_data = np.random.rand(*x_shape).astype(t1.dtype)\n y_data = np.random.rand(*y_shape).astype(t2.dtype)\n ref_res = ref(x_data, y_data)\n func = relay.Function([x, y], z)\n\n for target, dev in tvm.testing.enabled_targets():\n intrp = relay.create_executor(\"graph\", device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)\n\n\[email protected]_gpu\ndef test_binary_int_broadcast_1():\n for op, ref in [(relay.right_shift, np.right_shift), (relay.left_shift, np.left_shift)]:\n x = relay.var(\"x\", relay.TensorType((10, 4), \"int32\"))\n y = relay.var(\"y\", relay.TensorType((5, 10, 1), \"int32\"))\n z = op(x, y)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((5, 10, 4), \"int32\")\n\n if ref is not None:\n 
x_shape = (10, 4)\n y_shape = (5, 10, 1)\n t1 = relay.TensorType(x_shape, \"int32\")\n t2 = relay.TensorType(y_shape, \"int32\")\n x_data = np.random.randint(1, 10000, size=(x_shape)).astype(t1.dtype)\n y_data = np.random.randint(1, 31, size=(y_shape)).astype(t2.dtype)\n func = relay.Function([x, y], z)\n ref_res = ref(x_data, y_data)\n\n for target, dev in tvm.testing.enabled_targets():\n intrp = relay.create_executor(\"graph\", device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)\n\n\[email protected]_gpu\ndef test_binary_int_broadcast_2():\n for op, ref in [(relay.maximum, np.maximum), (relay.minimum, np.minimum), (relay.mod, np.mod)]:\n x = relay.var(\"x\", relay.TensorType((10, 4), \"int32\"))\n y = relay.var(\"y\", relay.TensorType((5, 10, 1), \"int32\"))\n z = op(x, y)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((5, 10, 4), \"int32\")\n\n if ref is not None:\n x_shape = (10, 4)\n y_shape = (5, 10, 1)\n t1 = relay.TensorType(x_shape, \"int32\")\n t2 = relay.TensorType(y_shape, \"int32\")\n x_data = np.random.randint(1, 10000, size=(x_shape)).astype(t1.dtype)\n y_data = np.random.randint(1, 10000, size=(y_shape)).astype(t2.dtype)\n func = relay.Function([x, y], z)\n ref_res = ref(x_data, y_data)\n\n for target, dev in tvm.testing.enabled_targets():\n intrp = relay.create_executor(\"graph\", device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)\n\n\[email protected]_gpu\ndef test_where():\n def run(func, inputs, ref_res):\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, device=dev, target=target)\n op_res = intrp.evaluate(func)(*inputs)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n def verify(x_np, y_np, cond_np):\n ref_res = np.where(cond_np, x_np, y_np)\n\n args = []\n args_np = []\n vs = []\n\n cond = relay.var(\"cond\", relay.TensorType(cond_np.shape, \"bool\"))\n\n args.append(cond)\n args_np.append(cond_np)\n\n for v_name, v_np in [(\"x\", x_np), (\"y\", y_np)]:\n if len(v_np.shape) == 0:\n v = relay.const(v_np.item())\n else:\n v = relay.var(v_name, relay.TensorType(v_np.shape, dtype))\n args.append(v)\n args_np.append(v_np)\n vs.append(v)\n\n z = relay.where(cond, vs[0], vs[1])\n\n func = relay.Function(args, z)\n\n run(func, args_np, ref_res)\n\n dtype = \"float32\"\n\n x_np = np.random.uniform(size=(3, 4)).astype(dtype)\n y_np = np.random.uniform(size=(3, 4)).astype(dtype)\n cond_np = np.random.uniform(low=-1, high=1, size=(3, 4)) > 0\n\n verify(x_np, y_np, cond_np)\n\n x_np = np.array(1.0, dtype)\n y_np = np.array(-1.0, dtype)\n cond_np = np.array([1, 0, 1], dtype=np.bool)\n\n verify(x_np, y_np, cond_np)\n\n x_np = np.arange(10).astype(dtype)\n y_np = 10 * x_np\n cond_np = x_np < 5\n\n verify(x_np, y_np, cond_np)\n\n x_np = np.array([[1, 2], [3, 4]], dtype)\n y_np = np.array([[5, 6], [7, 8]], dtype)\n cond_np = np.array([[1], [0]], dtype=np.bool)\n\n verify(x_np, y_np, cond_np)\n verify(x_np, y_np, cond_np.T)\n\n x_np = np.random.randn(1, 12, 8, 8).astype(dtype)\n y_np = np.array(-1.0, dtype)\n cond_np = np.random.randn(1, 1, 8, 8) > 0\n\n verify(x_np, y_np, cond_np)\n\n x_np, y_np = np.ogrid[:3, :4]\n cond_np = np.where(x_np < y_np, x_np, 10 + y_np).astype(np.bool)\n\n verify(x_np.astype(dtype), y_np.astype(dtype), cond_np)\n\n\ndef verify_reduce(funcs, data, axis, keepdims, exclude, output, 
dtype=\"float32\"):\n test_func = funcs[0]\n ref_func = funcs[1]\n dtype = \"bool\" if ref_func in [np.all, np.any] else dtype\n\n x = relay.var(\"x\", relay.TensorType(data, dtype))\n if test_func == relay.logsumexp:\n z = test_func(x, axis, keepdims)\n else:\n z = test_func(x, axis, keepdims, exclude)\n zz = run_infer_type(z)\n if axis:\n assert \"axis=\" in z.astext()\n if keepdims:\n assert \"keepdims=\" in z.astext()\n if exclude:\n assert \"exclude=\" in z.astext()\n out_type = \"int32\" if test_func in [relay.argmin, relay.argmax] else dtype\n assert zz.checked_type == relay.ty.TensorType(output, out_type)\n\n if all(isinstance(v, tvm.tir.Var) == 1 for v in data):\n return\n\n func = relay.Function([x], z)\n x_data = (\n np.random.choice([True, False], size=data)\n if ref_func in [np.all]\n else np.random.uniform(size=data).astype(dtype)\n )\n\n if ref_func in [np.sum]:\n ref_res = ref_func(x_data + 0, axis=axis, dtype=dtype, keepdims=keepdims)\n elif ref_func in [np.max, np.min, np.mean, np.prod]:\n ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims)\n else: # argmin/argmax\n if axis and not isinstance(axis, int) and len(axis) > 1:\n return\n ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims)\n\n for target, dev in tvm.testing.enabled_targets():\n intrp1 = relay.create_executor(\"graph\", device=dev, target=target)\n intrp2 = relay.create_executor(\"debug\", device=dev, target=target)\n op_res1 = intrp1.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\n\[email protected]_gpu\ndef test_reduce_functions():\n def _with_keepdims(func):\n def _wrapper(data, axis=None, keepdims=False):\n if not keepdims:\n return func(data, axis=axis)\n else:\n if axis is not None:\n axis = axis if isinstance(axis, int) else axis[0]\n out_shape = list(data.shape)\n out_shape[axis] = 1\n else:\n out_shape = [1 for _ in range(len(data.shape))]\n return func(data, axis=axis).reshape(out_shape)\n\n return _wrapper\n\n def _np_log_sum_exp(x, axis, keepdims=False):\n max_x = np.max(x, axis=axis, keepdims=True)\n x = np.log(np.sum(np.exp(x - max_x), axis=axis, keepdims=True))\n x = x + max_x\n if not keepdims:\n x = np.squeeze(x, axis=axis)\n return x\n\n def _unbiased_relay_wrapper(f):\n def _unbiased_func(x, axis=None, keepdims=False, exclude=False):\n return f(x, axis=axis, keepdims=keepdims, exclude=exclude, unbiased=True)\n\n return _unbiased_func\n\n def _unbiased_np_wrapper(f):\n def _unbiased_func(a, axis=None, dtype=None, keepdims=None):\n return f(a, axis=axis, dtype=dtype, ddof=1, keepdims=keepdims)\n\n return _unbiased_func\n\n d1, d2, d3, d4 = te.var(\"d1\"), te.var(\"d2\"), te.var(\"d3\"), te.var(\"d4\")\n for func in [\n [relay.sum, np.sum],\n [relay.max, np.max],\n [relay.min, np.min],\n [relay.mean, np.mean],\n [relay.variance, np.var],\n [_unbiased_relay_wrapper(relay.variance), _unbiased_np_wrapper(np.var)],\n [relay.std, np.std],\n [_unbiased_relay_wrapper(relay.std), _unbiased_np_wrapper(np.std)],\n [relay.prod, np.prod],\n [relay.all, np.all],\n [relay.any, np.any],\n [relay.logsumexp, _np_log_sum_exp],\n [relay.argmin, _with_keepdims(np.argmin)],\n [relay.argmax, _with_keepdims(np.argmax)],\n ]:\n verify_reduce(func, (d1, d2, d3, d4), None, False, False, ())\n verify_reduce(func, (d1, d2, d3, d4), 2, True, False, (d1, d2, 1, d4))\n verify_reduce(func, (d1, d2, d3, d4), 0, True, False, (1, d2, d3, d4))\n 
verify_reduce(func, (d1, d2, d3), 1, True, False, (d1, 1, d3))\n verify_reduce(func, (d1, d2, d3), 0, True, False, (1, d2, d3))\n verify_reduce(func, (d1, d2, d3), None, True, False, (1, 1, 1))\n verify_reduce(func, (d1, d2, d3), (0, 1), True, False, (1, 1, d3))\n verify_reduce(func, (2, 3, 4), 1, True, False, (2, 1, 4))\n verify_reduce(func, (2, 3, 4), (1,), True, False, (2, 1, 4))\n verify_reduce(func, (2, 3, 4), -1, True, False, (2, 3, 1))\n verify_reduce(func, (2, 3, 4), (0, 1, 2), False, False, ())\n verify_reduce(func, (4, 4, 3), None, False, False, ())\n verify_reduce(func, (4, 4, 3), (0, 2), False, False, (4,))\n verify_reduce(func, (128, 24, 128), (0, 1), False, False, (128,))\n verify_reduce(func, (128, 24, 128), (0, 2), False, False, (24,))\n verify_reduce(func, (128, 24, 128), (0, 1), True, False, (1, 1, 128))\n verify_reduce(func, (128, 24, 128), (0, 2), True, False, (1, 24, 1))\n\n\ndef verify_mean_var_std(funcs, shape, axis, keepdims):\n test_func = funcs[0]\n ref_func = funcs[1]\n dtype = \"float32\"\n\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n z = test_func(x, axis, keepdims)\n func = relay.Function([x], z.astuple())\n x_data = np.random.uniform(size=shape).astype(dtype)\n ref_mean = np.mean(x_data, axis=axis, dtype=dtype, keepdims=keepdims)\n ref_res = ref_func(x_data, axis=axis, dtype=dtype, keepdims=keepdims)\n\n for target, dev in tvm.testing.enabled_targets():\n intrp1 = relay.create_executor(\"graph\", device=dev, target=target)\n intrp2 = relay.create_executor(\"debug\", device=dev, target=target)\n op_res1 = intrp1.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res1[0].asnumpy(), ref_mean, rtol=1e-5)\n tvm.testing.assert_allclose(op_res1[1].asnumpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res2[0].asnumpy(), ref_mean, rtol=1e-5)\n tvm.testing.assert_allclose(op_res2[1].asnumpy(), ref_res, rtol=1e-5)\n\n\[email protected]_gpu\ndef test_mean_var_std():\n for func in [[relay.mean_variance, np.var], [relay.mean_std, np.std]]:\n verify_mean_var_std(func, (2, 3, 4), 1, True)\n verify_mean_var_std(func, (2, 3, 4), (1,), True)\n verify_mean_var_std(func, (2, 3, 4), -1, True)\n verify_mean_var_std(func, (2, 3, 4), (0, 1, 2), False)\n verify_mean_var_std(func, (4, 4, 3), None, False)\n verify_mean_var_std(func, (4, 4, 3), (0, 2), False)\n verify_mean_var_std(func, (128, 24, 128), (0, 1), False)\n verify_mean_var_std(func, (128, 24, 128), (0, 2), False)\n verify_mean_var_std(func, (128, 24, 128), (0, 1), True)\n verify_mean_var_std(func, (128, 24, 128), (0, 2), True)\n\n\[email protected]_gpu\ndef test_strided_slice():\n def verify(dshape, begin, end, strides, output, slice_mode=\"end\", test_ref=True, dtype=\"int32\"):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n ndim = len(dshape)\n begin = begin if begin else [0] * ndim\n end = end if end else list(dshape)\n\n # target numpy result\n x_data = np.random.uniform(size=dshape).astype(\"float32\")\n ref_res = tvm.topi.testing.strided_slice_python(x_data, begin, end, strides, slice_mode)\n\n if strides:\n z = relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=slice_mode)\n else:\n z = relay.strided_slice(x, begin=begin, end=end, slice_mode=slice_mode)\n func = relay.Function([x], z)\n\n func = run_infer_type(func)\n text = func.astext()\n assert \"begin=\" in text\n assert \"end=\" in text\n\n if output:\n assert func.body.checked_type == relay.ty.TensorType(output, \"float32\")\n\n if not test_ref:\n return\n 
for target, dev in tvm.testing.enabled_targets():\n intrp = relay.create_executor(\"graph\", device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)\n\n verify((1, 3, 10, 10), [0, 0, 0, 0], [-1, 3, 10, 10], [1], (0, 3, 10, 10), dtype=\"int64\")\n verify(\n (1, 224, 224, 3),\n [0, 20, 20, 0],\n [1, 140, 140, 3],\n [1, 1, 1, 1],\n (1, 120, 120, 3),\n dtype=\"int64\",\n )\n verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3), dtype=\"int16\")\n verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))\n verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))\n verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))\n verify((3, 4, 3), [1, 1, 0], [4, 4], None, (2, 3, 3))\n verify((3, 4, 3), [1, 1], [4, 4, 3], None, (2, 3, 3))\n verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))\n verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))\n # Test backwards slicing.\n verify((3, 4, 3), [-1, -1, -1], [-5, -5, -5], [-1, -1, -1], (3, 4, 3))\n # Test slice mode.\n verify(\n (3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], (2, 4, 3), slice_mode=\"size\", test_ref=False\n )\n verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], (2, 2, 3), slice_mode=\"size\", test_ref=True)\n\n\[email protected]_gpu\ndef test_dyn_strided_slice():\n def verify(dshape, begin, end, strides, output, slice_mode=\"end\", test_ref=True, dtype=\"int32\"):\n ndim = len(dshape)\n begin = begin if begin else [0] * ndim\n end = end if end else list(dshape)\n\n # target numpy result\n x_data = np.random.uniform(size=dshape).astype(\"float32\")\n ref_res = tvm.topi.testing.strided_slice_python(x_data, begin, end, strides, slice_mode)\n\n x = relay.var(\"x\", relay.TensorType((relay.Any(),) * ndim, \"float32\"))\n if strides:\n z = relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=slice_mode)\n else:\n z = relay.strided_slice(x, begin=begin, end=end, slice_mode=slice_mode)\n func = relay.Function([x], z)\n\n func = run_infer_type(func)\n text = func.astext()\n assert \"begin=\" in text\n assert \"end=\" in text\n\n if not test_ref:\n return\n for target, dev in tvm.testing.enabled_targets():\n mod = tvm.ir.IRModule.from_expr(func)\n intrp = relay.create_executor(\"vm\", mod=mod, device=dev, target=target)\n op_res = intrp.evaluate()(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)\n\n verify(\n (1, 224, 224, 3),\n [0, 20, 20, 0],\n [1, 140, 140, 3],\n [1, 1, 1, 1],\n (1, 120, 120, 3),\n dtype=\"int64\",\n )\n verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3), dtype=\"int16\")\n verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))\n verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))\n verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))\n verify((3, 4, 3), [1, 1, 0], [4, 4, 4], None, (2, 3, 3))\n verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))\n # TODO(mbrookhart): fix static strided_slice with dynamic input and negative begin\n # verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))\n # verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))\n verify(\n (3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], (2, 4, 3), slice_mode=\"size\", test_ref=False\n )\n verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], (2, 2, 3), slice_mode=\"size\", test_ref=True)\n\n\[email protected]_gpu\ndef test_strided_set():\n def verify(dshape, begin, end, strides, vshape, test_ref=True):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n v = 
relay.var(\"v\", relay.TensorType(vshape, \"float32\"))\n begin_c = relay.const(begin, dtype=\"int32\")\n end_c = relay.const(end, dtype=\"int32\")\n if strides:\n strides_c = relay.const(strides, dtype=\"int32\")\n z = relay.strided_set(x, v, begin=begin_c, end=end_c, strides=strides_c)\n else:\n z = relay.strided_set(x, v, begin=begin_c, end=end_c)\n func = relay.Function([x, v], z)\n func = run_infer_type(func)\n text = func.astext()\n assert \"strided_set\" in text\n print(text)\n assert func.body.checked_type == relay.ty.TensorType(dshape, \"float32\")\n if not test_ref:\n return\n x_data = np.random.uniform(size=dshape).astype(\"float32\")\n v_data = np.random.uniform(size=vshape).astype(\"float32\")\n ref_res = tvm.topi.testing.strided_set_python(x_data, v_data, begin, end, strides)\n for target, dev in tvm.testing.enabled_targets():\n intrp = relay.create_executor(\"graph\", device=dev, target=target)\n op_res = intrp.evaluate(func)(x_data, v_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)\n\n verify((3, 4, 16), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))\n verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))\n verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3))\n verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))\n verify((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2], (1, 2, 2))\n verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))\n verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))\n verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))\n verify((3, 4, 3), [1, 1, 0], [4, 4], None, (2, 3, 3))\n verify((3, 4, 3), [1, 1], [4, 4, 3], None, (2, 3, 3))\n\n\nif __name__ == \"__main__\":\n test_strided_slice()\n test_strided_set()\n test_binary_op()\n test_cmp_type()\n test_binary_int_broadcast_1()\n test_binary_int_broadcast_2()\n test_where()\n test_reduce_functions()\n test_mean_var_std()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"Example code to perform int8 GEMM\"\nimport logging\nimport sys\nimport numpy as np\nimport tvm\nfrom tvm import te\nfrom tvm import autotvm\nfrom tvm.topi.cuda.tensor_intrin import dp4a\n\nDO_TUNING = True\nPRETUNED_INDEX = 75333\n\nintrin_dp4a = dp4a(\"local\", \"local\", \"local\")\n\n\[email protected]\ndef gemm_int8(n, m, l):\n A = te.placeholder((n, l), name=\"A\", dtype=\"int8\")\n B = te.placeholder((m, l), name=\"B\", dtype=\"int8\")\n\n k = te.reduce_axis((0, l), name=\"k\")\n C = te.compute(\n (n, m),\n lambda i, j: te.sum(A[i, k].astype(\"int32\") * B[j, k].astype(\"int32\"), axis=k),\n name=\"C\",\n )\n\n cfg = autotvm.get_config()\n s = te.create_schedule(C.op)\n y, x = C.op.axis\n\n AA = s.cache_read(A, \"shared\", [C])\n BB = s.cache_read(B, \"shared\", [C])\n AL = s.cache_read(AA, \"local\", [C])\n BL = s.cache_read(BB, \"local\", [C])\n CC = s.cache_write(C, \"local\")\n\n k = CC.op.reduce_axis[0]\n\n cfg.define_split(\n \"tile_k\",\n cfg.axis(k),\n num_outputs=3,\n filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],\n )\n\n ko, kt, ki = cfg[\"tile_k\"].apply(s, CC, k)\n\n s[CC].tensorize(ki, intrin_dp4a)\n\n block_x = te.thread_axis(\"blockIdx.x\")\n block_y = te.thread_axis(\"blockIdx.y\")\n thread_x = te.thread_axis(\"threadIdx.x\")\n thread_y = te.thread_axis(\"threadIdx.y\")\n\n def block_size_filter(entity):\n return (\n entity.size[0] * 2 >= entity.size[1] * 2\n and entity.size[1] <= 16\n and entity.size[3] <= 4\n )\n\n cfg.define_split(\"tile_y\", cfg.axis(y), num_outputs=4, filter=block_size_filter)\n cfg.define_split(\"tile_x\", cfg.axis(x), num_outputs=4, filter=block_size_filter)\n by, tyz, ty, yi = cfg[\"tile_y\"].apply(s, C, y)\n bx, txz, tx, xi = cfg[\"tile_x\"].apply(s, C, x)\n\n s[C].bind(by, block_y)\n s[C].bind(bx, block_x)\n s[C].bind(tyz, te.thread_axis(\"vthread\"))\n s[C].bind(txz, te.thread_axis(\"vthread\"))\n s[C].bind(ty, thread_y)\n s[C].bind(tx, thread_x)\n s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)\n\n s[CC].compute_at(s[C], tx)\n\n yo, xo = CC.op.axis\n s[CC].reorder(ko, kt, yo, xo, ki)\n s[CC].unroll(kt)\n\n for stage in [AL, BL]:\n s[stage].compute_at(s[CC], kt)\n _, xi = s[stage].split(stage.op.axis[1], factor=4)\n s[stage].vectorize(xi)\n s[stage].double_buffer()\n\n cfg.define_knob(\"storage_align\", [16, 48])\n for stage in [AA, BB]:\n s[stage].storage_align(s[stage].op.axis[0], cfg[\"storage_align\"].val, 0)\n s[stage].compute_at(s[CC], ko)\n\n fused = s[stage].fuse(*s[stage].op.axis)\n ty, tx = s[stage].split(fused, nparts=cfg[\"tile_y\"].size[2])\n tx, xi = s[stage].split(tx, nparts=cfg[\"tile_x\"].size[2])\n _, xi = s[stage].split(xi, factor=16)\n\n s[stage].bind(ty, thread_y)\n s[stage].bind(tx, thread_x)\n 
s[stage].vectorize(xi)\n\n cfg.define_knob(\"auto_unroll_max_step\", [512, 1500])\n s[C].pragma(by, \"auto_unroll_max_step\", cfg[\"auto_unroll_max_step\"].val)\n s[C].pragma(by, \"unroll_explicit\", False)\n\n cfg.add_flop(n * m * l * 2)\n return s, [A, B, C]\n\n\nif __name__ == \"__main__\":\n N = 2048\n n = m = l = N\n\n logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)\n task = autotvm.task.create(gemm_int8, args=(n, m, l), target=\"cuda\")\n print(task.config_space)\n\n measure_option = autotvm.measure_option(\n builder=autotvm.LocalBuilder(),\n runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),\n )\n\n log_name = \"gemm_int8.log\"\n if DO_TUNING:\n tuner = autotvm.tuner.XGBTuner(task)\n tuner.tune(\n n_trial=1000,\n measure_option=measure_option,\n callbacks=[autotvm.callback.log_to_file(log_name)],\n )\n\n dispatch_context = autotvm.apply_history_best(log_name)\n best_config = dispatch_context.query(task.target, task.workload)\n print(\"\\nBest config:\")\n print(best_config)\n else:\n config = task.config_space.get(PRETUNED_INDEX)\n dispatch_context = autotvm.task.ApplyConfig(config)\n print(\"Using pretuned config:\")\n print(config)\n\n with dispatch_context:\n with tvm.target.Target(\"cuda\"):\n s, arg_bufs = gemm_int8(n, m, l)\n f = tvm.build(s, arg_bufs, \"cuda\", name=\"gemm_int8\")\n\n dev = tvm.device(\"cuda\", 0)\n\n a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype=\"int8\")\n b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype=\"int8\")\n\n a = tvm.nd.array(a_np, dev)\n b = tvm.nd.array(b_np, dev)\n c = tvm.nd.array(np.zeros((n, m), dtype=\"int32\"), dev)\n f(a, b, c)\n\n tvm.testing.assert_allclose(\n c.asnumpy(), np.dot(a_np.astype(\"int32\"), b_np.T.astype(\"int32\")), rtol=1e-5\n )\n\n num_ops = 2 * l * m * n\n num_runs = 1000\n timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)\n t = timer_f(a, b, c).mean\n GOPS = num_ops / (t * 1e3) / 1e6\n print(\"average time cost of %d runs = %g ms, %g GOPS.\" % (num_runs, t * 1e3, GOPS))\n"
] | [
[
"numpy.random.uniform",
"numpy.zeros"
],
[
"numpy.random.uniform",
"numpy.fmod",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.random.choice",
"numpy.arange",
"numpy.squeeze",
"numpy.max",
"numpy.mean",
"numpy.random.randn",
"numpy.random.rand",
"numpy.exp",
"numpy.random.uniform",
"numpy.array",
"numpy.where",
"numpy.random.randint"
],
[
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AkshatSh/BinarizedNMT | [
"7fa15149fdfcad6b1fd0956157c3730f3dcd781f"
] | [
"translation/models/AttentionQRNN.py"
] | [
"import sys\nsys.path.append(\"..\")\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport random\nimport argparse\n\ntry:\n from torchqrnn import QRNN\nexcept:\n # to stop python 3.7.x breaking\n QRNN = None\n\nfrom models.EncoderDecoder import (\n EncoderModel,\n DecoderModel,\n EncoderDecoderModel,\n DecoderOutputType,\n)\n\nfrom models.components.attention import (\n AttentionModule\n)\n\nfrom vocab import Vocabulary\n\nfrom constants import (\n UNKNOWN_TOKEN,\n PAD_TOKEN,\n)\n\nclass EncoderQRNN(EncoderModel):\n def __init__(\n self,\n src_vocab: Vocabulary,\n hidden_size: int,\n num_layers: int,\n dropout: float,\n ):\n super(EncoderQRNN, self).__init__()\n self.input_size = len(src_vocab)\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.dropout = dropout\n\n self.embedding = nn.Embedding(\n len(src_vocab),\n hidden_size,\n )\n self.lstm = QRNN(\n input_size=hidden_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n )\n \n def forward(\n self,\n src_tokens: torch.Tensor,\n src_lengths: torch.Tensor,\n hidden: torch.Tensor = None,\n ) -> torch.Tensor:\n embedded = self.embedding(src_tokens)\n # print(embedded.shape)\n #packed = nn.utils.rnn.pack_padded_sequence(embedded, src_lengths, batch_first=True)\n #packed = packed.t()\n embedded = embedded.transpose(0, 1)\n outputs, hidden = self.lstm(embedded, hidden)\n outputs = outputs.transpose(0, 1)\n #outputs, outputs_length = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)\n\n # sum up bidirectional outputs to keep hidden size the same\n #outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:]\n # print('output: ', outputs.shape)\n return outputs, hidden\n\nclass AttentionDecoderQRNN(DecoderModel):\n def __init__(\n self,\n trg_vocab: Vocabulary,\n hidden_size: int,\n num_layers: int,\n dropout: float,\n teacher_student_ratio: float,\n ):\n super(AttentionDecoderQRNN, self).__init__()\n\n self.hidden_size = hidden_size\n self.output_size = len(trg_vocab)\n self.num_layers = num_layers\n self.dropout = dropout\n self.teacher_student_ratio = teacher_student_ratio\n self.trg_vocab = trg_vocab\n\n # layers\n self.embedding = nn.Embedding(\n len(trg_vocab),\n hidden_size,\n )\n\n self.dropout = nn.Dropout(dropout)\n\n self.attn = AttentionModule('general', hidden_size)\n\n self.lstm = QRNN(\n input_size=hidden_size * 2,\n hidden_size=hidden_size,\n num_layers=num_layers,\n )\n\n self.out = nn.Linear(\n hidden_size,\n len(trg_vocab),\n )\n \n def forward(\n self,\n prev_tokens: torch.Tensor,\n encoder_out: tuple,\n ) -> torch.Tensor:\n encoder_outputs, last_hidden = encoder_out\n batch_size, seq_len = prev_tokens.shape\n if random.random() <= self.teacher_student_ratio:\n return self.teacher_forward(\n last_hidden,\n encoder_outputs,\n prev_tokens,\n )\n else:\n return self.student_forward(\n last_hidden,\n encoder_outputs,\n seq_len,\n )\n \n def forward_eval(\n self,\n prev_tokens: torch.Tensor,\n encoder_out: tuple,\n intermediate: torch.Tensor,\n ) -> torch.Tensor:\n encoder_outputs, last_hidden = encoder_out\n return self.teacher_forward(\n last_hidden if intermediate is None else intermediate,\n encoder_outputs,\n prev_tokens,\n )\n\n def teacher_forward(\n self,\n final_hidden: torch.Tensor,\n encoder_outputs: torch.Tensor,\n prev_tokens: torch.Tensor,\n ) -> torch.Tensor:\n batch_size, seq_len = prev_tokens.shape\n final_hidden = final_hidden[:self.num_layers]\n final_encoder_hidden = final_hidden\n\n # embedded_prev_tokens: (batch, seq_len, 
trg_vocab)\n embedded_prev_tokens = self.embedding(prev_tokens)\n embedded_prev_tokens = self.dropout(embedded_prev_tokens)\n\n decoder_outputs = []\n last_hidden = final_hidden\n \n for i in range(seq_len):\n attn_weights = self.attn(last_hidden[-1], encoder_outputs)\n\n # encoder_outputs: (batch, seq_len, dim)\n # attn_weights = (batch, seq_len)\n context = attn_weights.transpose(1,2).bmm(encoder_outputs)\n #print(encoder_outputs.shape)\n\n #print(embedded_prev_tokens.shape, context.shape)\n lstm_input = torch.cat((embedded_prev_tokens[:, i:i+1, :], context), dim=2)\n lstm_input = lstm_input.transpose(0, 1)\n output, last_hidden = self.lstm(lstm_input, last_hidden)\n output = output.transpose(0, 1)\n decoder_outputs.append(output)\n decoder_outputs = torch.cat(decoder_outputs, dim=1)\n out = self.out(decoder_outputs)\n return out, last_hidden \n \n def student_forward(\n self,\n final_hidden: torch.Tensor,\n encoder_outputs: torch.Tensor,\n seq_len: int,\n ) -> torch.Tensor:\n batch_size = encoder_outputs.shape[0]\n final_hidden = final_hidden[:self.num_layers]\n device = final_hidden.device\n\n prev_output = torch.zeros((batch_size, 1)).long().to(device)\n prev_output[:, 0] = self.trg_vocab.stoi['<sos>']\n final_encoder_hidden = final_hidden\n\n decoder_outputs = []\n last_hidden = final_hidden\n \n for i in range(seq_len):\n attn_weights = self.attn(last_hidden[-1], encoder_outputs)\n\n # encoder_outputs: (batch, seq_len, dim)\n # attn_weights = (batch, seq_len)\n context = attn_weights.transpose(1,2).bmm(encoder_outputs)\n\n embedded_prev_tokens = self.embedding(prev_output)\n embedded_prev_tokens = self.dropout(embedded_prev_tokens)\n\n lstm_input = torch.cat((embedded_prev_tokens, context), dim=2)\n output, last_hidden = self.lstm(lstm_input, last_hidden)\n output = self.out(output)\n decoder_outputs.append(output)\n topi = output.data.max(2)[1]\n prev_output = topi\n decoder_outputs = torch.cat(decoder_outputs, dim=1)\n return decoder_outputs, last_hidden \n\ndef build_model(\n src_vocab: Vocabulary,\n trg_vocab: Vocabulary,\n encoder_embed_dim: int,\n encoder_hidden_dim: int,\n encoder_dropout: float,\n encoder_num_layers: int,\n decoder_embed_dim: int,\n decoder_hidden_dim: int,\n decoder_dropout: float,\n decoder_num_layers: int,\n teacher_student_ratio: float,\n) -> nn.Module:\n encoder = EncoderQRNN(\n src_vocab=src_vocab,\n hidden_size=encoder_hidden_dim,\n num_layers=encoder_num_layers,\n dropout=encoder_dropout,\n )\n\n decoder = AttentionDecoderQRNN(\n trg_vocab=trg_vocab,\n hidden_size=decoder_hidden_dim,\n num_layers=decoder_num_layers,\n dropout=decoder_dropout,\n teacher_student_ratio=teacher_student_ratio,\n )\n\n return EncoderDecoderModel(\n encoder,\n decoder,\n src_vocab,\n trg_vocab,\n )\n\ndef add_args(parser: argparse.ArgumentParser) -> None:\n parser.add_argument('--encoder_embed_dim', type=int, default=512, help='Embedding dimension for the encoder')\n parser.add_argument('--encoder_hidden_dim', type=int, default=512, help='The hidden (feature size) for the encoder')\n parser.add_argument('--encoder_dropout', type=float, default=0.2, help='the encoder dropout to apply')\n parser.add_argument('--decoder_embed_dim', type=int, default=512, help='the decoder embedding dimension')\n parser.add_argument('--decoder_hidden_dim', type=int, default=512, help='the hidden (feature size) for the decoder')\n parser.add_argument('--decoder_dropout', type=float, default=0.2, help='the decoder dropout')\n parser.add_argument('--encoder_layers', type=int, default=4, help='the 
number of layers in the encoder')\n parser.add_argument('--decoder_layers', type=int, default=4, help='the number of layers in the decoder')\n parser.add_argument('--teacher_student_ratio', type=float, default=1.0, help='the ratio of teacher to student to use')\n"
] | [
[
"torch.nn.Dropout",
"torch.zeros",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
imjoseangel/100DaysOfCode | [
"bff90569033e2b02a56e893bd45727125962aeb3",
"bff90569033e2b02a56e893bd45727125962aeb3",
"bff90569033e2b02a56e893bd45727125962aeb3",
"bff90569033e2b02a56e893bd45727125962aeb3",
"bff90569033e2b02a56e893bd45727125962aeb3",
"bff90569033e2b02a56e893bd45727125962aeb3",
"bff90569033e2b02a56e893bd45727125962aeb3",
"bff90569033e2b02a56e893bd45727125962aeb3"
] | [
"python/mltraining/cross-validation/leave-p-out.py",
"python/mltraining/cross-validation/k-fold.py",
"python/datawrangling/datawrangling.py",
"python/mltraining/supervised/naive_bayes/bell_curve.py",
"python/mlcookbook/simuds_regression.py",
"python/interactive-data/lebron-vs-durant.py",
"python/mltraining/logistic_regression/logistic_ex1.py",
"python/200problems/conditionandloop/fibonacci.py"
] | [
"# Example of LOOCV and LPOCV splitting\n\nimport numpy\nfrom sklearn.model_selection import LeaveOneOut, LeavePOut\n\n# Configurable constants\nP_VAL = 2\n\n\ndef print_result(split_data):\n \"\"\"\n Prints the result of either a LPOCV or LOOCV operation\n Args:\n split_data: The resulting (train, test) split data\n \"\"\"\n for train, test in split_data:\n output_train = ''\n output_test = ''\n\n dash = [\"-\"] * (len(train) + len(test))\n\n # Build our output for display from the resulting split\n for i in train:\n output_train = \"{}({}: {}) \".format(output_train, i, data[i])\n\n for i in test:\n dash[i] = \"T\"\n output_test = \"{}({}: {}) \".format(output_test, i, data[i])\n\n print(\"[ {} ]\".format(\" \".join(dash)))\n print(\"Train: {}\".format(output_train))\n print(\"Test: {}\\n\".format(output_test))\n\n\n# Create some data to split with\ndata = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n\n# Our two methods\nloocv = LeaveOneOut()\nlpocv = LeavePOut(p=P_VAL)\n\nsplit_loocv = loocv.split(data)\nsplit_lpocv = lpocv.split(data)\n\nprint(\"\"\"\\\nThe Leave-P-Out method works by using every combination of P points as test\ndata.\nThe following output shows the result of splitting some sample data by\nLeave-One-Out and Leave-P-Out methods.\nA bar displaying the current train-test split as well as the actual data\npoints are displayed for each split.\nIn the bar, \"-\" is a training point and \"T\" is a test point.\n\"\"\")\n\nprint(\"Data:\\n{}\\n\".format(data))\n\nprint(\"Leave-One-Out:\\n\")\nprint_result(split_loocv)\n\nprint(\"Leave-P-Out (where p = {}):\\n\".format(P_VAL))\nprint_result(split_lpocv)\n",
"# An example of K-Fold Cross Validation split\n\nimport numpy\nfrom sklearn.model_selection import KFold\n\n# Configurable constants\nNUM_SPLITS = 3\n\n# Create some data to perform K-Fold CV on\ndata = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])\n\n# Perform a K-Fold split and print results\nkfold = KFold(n_splits=NUM_SPLITS)\nsplit_data = kfold.split(data)\n\nprint(\"\"\"\\\nThe K-Fold method works by splitting off 'folds' of test data until every\npoint has been used for testing.\nThe following output shows the result of splitting some sample data.\nA bar displaying the current train-test split as well as the actual data\npoints are displayed for each split.\nIn the bar, \"-\" is a training point and \"T\" is a test point.\n\"\"\")\n\nprint(\"Data:\\n{}\\n\".format(data))\nprint('K-Fold split (with n_splits = {}):\\n'.format(NUM_SPLITS))\n\nfor train, test in split_data:\n output_train = ''\n output_test = ''\n\n bar = [\"-\"] * (len(train) + len(test))\n\n # Build our output for display from the resulting split\n for i in train:\n output_train = \"{}({}: {}) \".format(output_train, i, data[i])\n\n for i in test:\n bar[i] = \"T\"\n output_test = \"{}({}: {}) \".format(output_test, i, data[i])\n\n print(\"[ {} ]\".format(\" \".join(bar)))\n print(\"Train: {}\".format(output_train))\n print(\"Test: {}\\n\".format(output_test))\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport os\nimport pandas as pd\nimport numpy as np\n\n# Get File Directory\nWORK_DIR = os.path.dirname((os.path.realpath(__file__)))\n\n# Loading the json data as python dictionary\ndf = pd.read_csv(WORK_DIR + \"/data/titanic.csv\")\nhead = df.head()\ndropna = df.dropna()\n\nover30 = df[df['Age'] > 30]\nfemale = df[df['Sex'] == 'female']\nover30female = df[(df['Age'] > 30) & (df['Sex'] == 'female')]\n\nprint(over30female)\n\nbysex = df.groupby('Sex').Survived.value_counts()\n\nprint(bysex)\n\n# Create an array of 200 elements at the interval of 1 sec.\ndata = pd.date_range('1/1/2016', periods=150, freq='s')\n\n# Let's create timeseries data by assigning random values to\n# integer to each values in data\n\ntime_series = pd.Series(np.random.randint(0, 500, len(data)), index=data)\nprint(time_series.head())\nprint(\"\\n\")\n\n# Resample: bin 1 sec raws to minutes and summing the corresponding values\ntime_series = time_series.resample('1Min').sum()\nprint(time_series.head())\nprint(\"\\n\")\n\n# Time zone conversion: Let's assume original timeseries was\n# in UTC and we want to convert to US/Eastern\n\ntime_series_utc = time_series.tz_localize('UTC')\ntime_series_utc.tz_convert('US/Eastern')\n\nresult = df[(df['Age'] > 30) & (df['Sex'] == 'female')]\nresult.to_excel('result.xlsx')\n",
"import numpy as np\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Create a bell curve plot using numpy and stats\nx = np.linspace(norm.ppf(0.01), norm.ppf(0.99), 100)\nsns.set_style(\"darkgrid\")\nplt.plot(x, norm.pdf(x))\n\n# Remove ticks from the plot\nplt.xticks([])\nplt.yticks([])\n\nplt.tight_layout()\nplt.show()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\n# Load Libraries\nfrom sklearn.datasets import make_regression\nimport matplotlib.pyplot as plt\n\n# Generate feature, matrix, target vectors and true coefficients\nfeatures, target, coefficients = make_regression(n_samples=100,\n n_features=3,\n n_informative=3,\n n_targets=1,\n noise=0.0,\n coef=True,\n random_state=1)\n\n# View feature matrix and target vector\nprint('Feature Matrix\\n', features[:3])\nprint('Feature Vector\\n', target[:3])\n\n# Use Style\nplt.style.use('fivethirtyeight')\nplt.title('Make Regression')\n\n# View plot\nplt.scatter(features[:, 0], features[:, 1], c=target)\nplt.show()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Bokeh Visualization Template\n\nThis template is a general outline for turning your data into a\nvisualization using Bokeh.\n\"\"\"\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport os\nimport pandas as pd\n\n# Bokeh Libraries\nfrom bokeh.plotting import figure, show\nfrom bokeh.io import output_file\nfrom bokeh.models import ColumnDataSource, CDSView, GroupFilter\nfrom bokeh.layouts import row\n\n# Get File Directory\nWORK_DIR = os.path.dirname((os.path.realpath(__file__)))\n\n# Read the csv files\nplayer_stats = pd.read_csv(WORK_DIR + '/data/2017-18_playerBoxScore.csv',\n parse_dates=['gmDate'])\nteam_stats = pd.read_csv(WORK_DIR + '/data/2017-18_teamBoxScore.csv',\n parse_dates=['gmDate'])\nstandings = pd.read_csv(WORK_DIR + '/data/2017-18_standings.csv',\n parse_dates=['stDate'])\n\n# Output inline in the notebook\noutput_file('lebron-vs-durant.html', title='LeBron James vs. Kevin Durant')\n\n# Store the data in a ColumnDataSource\nplayer_gm_stats = ColumnDataSource(player_stats)\n\n# Create a view for each player\nlebron_filters = [\n GroupFilter(column_name='playFNm', group='LeBron'),\n GroupFilter(column_name='playLNm', group='James')\n]\nlebron_view = CDSView(source=player_gm_stats, filters=lebron_filters)\n\ndurant_filters = [\n GroupFilter(column_name='playFNm', group='Kevin'),\n GroupFilter(column_name='playLNm', group='Durant')\n]\ndurant_view = CDSView(source=player_gm_stats, filters=durant_filters)\n\n# Consolidate the common keyword arguments in dicts\ncommon_figure_kwargs = {\n 'plot_width': 400,\n 'x_axis_label': 'Points',\n 'toolbar_location': None,\n}\ncommon_circle_kwargs = {\n 'x': 'playPTS',\n 'y': 'playTRB',\n 'source': player_gm_stats,\n 'size': 12,\n 'alpha': 0.7,\n}\ncommon_lebron_kwargs = {\n 'view': lebron_view,\n 'color': '#002859',\n 'legend': 'LeBron James'\n}\ncommon_durant_kwargs = {\n 'view': durant_view,\n 'color': '#FFC324',\n 'legend': 'Kevin Durant'\n}\n\n# Create the two figures and draw the data\nhide_fig = figure(**common_figure_kwargs,\n title='Click Legend to HIDE Data',\n y_axis_label='Rebounds')\nhide_fig.circle(**common_circle_kwargs, **common_lebron_kwargs)\nhide_fig.circle(**common_circle_kwargs, **common_durant_kwargs)\n\nmute_fig = figure(**common_figure_kwargs, title='Click Legend to MUTE Data')\nmute_fig.circle(**common_circle_kwargs,\n **common_lebron_kwargs,\n muted_alpha=0.1)\nmute_fig.circle(**common_circle_kwargs,\n **common_durant_kwargs,\n muted_alpha=0.1)\n\n# Add interactivity to the legend\nhide_fig.legend.click_policy = 'hide'\nmute_fig.legend.click_policy = 'mute'\n\n# Visualize\nshow(row(hide_fig, mute_fig))\n",
"import random\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\n\n\n# defines the classification for the training data.\ndef true_classifier(classification):\n if classification >= 700:\n return 1\n return 0\n\n\n# Generate a random dataset which includes random scores from 0 to 1000.\nx = np.array([random.randint(0, 1000) for i in range(0, 1000)])\n\n# The model will expect a 2D array, so we must reshape\n# For the model, the 2D array must have rows equal to the number of samples,\n# and columns equal to the number of features.\n# For this example, we have 1000 samples and 1 feature.\nx = x.reshape((-1, 1))\n\n# For each point, y is a pass/fail for the grade. The simple threshold is\n# arbitrary,\n# and can be changed as you would like. Classes are 1 for success and 0 for\n# failure\ny = [true_classifier(x[i][0]) for i in range(0, 1000)]\n\n# Again, we need a numpy array, so we convert.\ny = np.array(y)\n\n# Our goal will be to train a logistic regression model to do pass/fail to the\n# same threshold.\nmodel = LogisticRegression(solver='liblinear')\n\n# The fit method actually fits the model to our training data\nmodel = model.fit(x, y)\n\n# Create 100 random samples to try against our model as test data\nsamples = [random.randint(0, 1000) for i in range(0, 100)]\n# Once again, we need a 2d Numpy array\nsamples = np.array(samples)\nsamples = samples.reshape(-1, 1)\n\n# Now we use our model against the samples. output is the probability,\n# and _class is the class.\n_class = model.predict(samples)\nproba = model.predict_proba(samples)\n\nnum_accurate = 0\n\n# Finally, output the results, formatted for nicer viewing.\n# The format is [<sample value>]: Class <class number>, probability\n# [ <probability for class 0> <probability for class 1>]\n# So, the probability array is the probability of failure, followed by the\n# probability of passing.\n# In an example run, [7]: Class 0, probability\n# [ 9.99966694e-01 3.33062825e-05]\n# Means that for value 7, the class is 0 (failure) and the probability of\n# failure is 99.9%\nfor i in range(0, 100):\n if (true_classifier(samples[i])) == (_class[i] == 1):\n num_accurate = num_accurate + 1\n print(\"\" + str(samples[i]) + \": Class \" + str(_class[i]) +\n \", probability \" + str(proba[i]))\n# skip a line to separate overall result from sample output\nprint(\"\")\nprint(str(num_accurate) + \" out of 100 correct.\")\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals, annotations)\n\n# Write a Python program to get the Fibonacci series between 0 to 50\n\nimport matplotlib.pyplot as plt\n\n\ndef fibonacciinput(maxnumber):\n fibonacci = []\n minnumber = 0\n sumnumber = 1\n while sumnumber <= maxnumber:\n fibonacci.append(sumnumber)\n minnumber, sumnumber = sumnumber, minnumber + sumnumber\n return fibonacci\n\n\ndef main():\n fibonacci = fibonacciinput(1000)\n print(fibonacci)\n # Use Style\n plt.style.use('fivethirtyeight')\n plt.title('Fibonacci')\n\n plt.plot(fibonacci)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"sklearn.model_selection.LeavePOut",
"sklearn.model_selection.LeaveOneOut",
"numpy.array"
],
[
"numpy.array",
"sklearn.model_selection.KFold"
],
[
"pandas.read_csv",
"pandas.date_range"
],
[
"scipy.stats.norm.ppf",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"scipy.stats.norm.pdf",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"sklearn.datasets.make_regression",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use"
],
[
"pandas.read_csv"
],
[
"numpy.array",
"sklearn.linear_model.LogisticRegression"
],
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.title"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jacob-Barhak/holoviews | [
"5df0269595ca7befca202f9d05522c68983dc974",
"5df0269595ca7befca202f9d05522c68983dc974"
] | [
"holoviews/tests/plotting/bokeh/testelementplot.py",
"holoviews/tests/plotting/matplotlib/testrasterplot.py"
] | [
"from unittest import SkipTest\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom bokeh.core.properties import value\nfrom holoviews.core import Dimension, DynamicMap, NdOverlay, HoloMap\nfrom holoviews.element import Curve, Image, Scatter, Labels\nfrom holoviews.streams import Stream, PointDraw\nfrom holoviews.plotting.util import process_cmap\nfrom holoviews.util import render\n\nfrom .testplot import TestBokehPlot, bokeh_renderer\nfrom ...utils import LoggingComparisonTestCase\n\ntry:\n from bokeh.document import Document\n from bokeh.models import tools\n from bokeh.models import (FuncTickFormatter, PrintfTickFormatter,\n NumeralTickFormatter, LogTicker)\nexcept:\n pass\n\n\n\nclass TestElementPlot(LoggingComparisonTestCase, TestBokehPlot):\n\n def test_element_show_frame_disabled(self):\n curve = Curve(range(10)).opts(plot=dict(show_frame=False))\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.outline_line_alpha, 0)\n\n def test_element_xaxis_top(self):\n curve = Curve(range(10)).options(xaxis='top')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertTrue(xaxis in plot.state.above)\n\n def test_element_xaxis_bare(self):\n curve = Curve(range(10)).options(xaxis='bare')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.minor_tick_line_color, None)\n self.assertEqual(xaxis.major_tick_line_color, None)\n self.assertTrue(xaxis in plot.state.below)\n\n def test_element_xaxis_bottom_bare(self):\n curve = Curve(range(10)).options(xaxis='bottom-bare')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.minor_tick_line_color, None)\n self.assertEqual(xaxis.major_tick_line_color, None)\n self.assertTrue(xaxis in plot.state.below)\n\n def test_element_xaxis_top_bare(self):\n curve = Curve(range(10)).options(xaxis='top-bare')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.minor_tick_line_color, None)\n self.assertEqual(xaxis.major_tick_line_color, None)\n self.assertTrue(xaxis in plot.state.above)\n\n def test_element_yaxis_right(self):\n curve = Curve(range(10)).options(yaxis='right')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertTrue(yaxis in plot.state.right)\n\n def test_element_yaxis_bare(self):\n curve = Curve(range(10)).options(yaxis='bare')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertEqual(yaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.minor_tick_line_color, None)\n self.assertEqual(yaxis.major_tick_line_color, None)\n self.assertTrue(yaxis in plot.state.left)\n\n def test_element_yaxis_left_bare(self):\n curve = Curve(range(10)).options(yaxis='left-bare')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertEqual(yaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.minor_tick_line_color, None)\n 
self.assertEqual(yaxis.major_tick_line_color, None)\n self.assertTrue(yaxis in plot.state.left)\n\n def test_element_yaxis_right_bare(self):\n curve = Curve(range(10)).options(yaxis='right-bare')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertEqual(yaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.minor_tick_line_color, None)\n self.assertEqual(yaxis.major_tick_line_color, None)\n self.assertTrue(yaxis in plot.state.right)\n\n def test_element_title_format(self):\n title_str = ('Label: {label}, group: {group}, '\n 'dims: {dimensions}, type: {type}')\n e = Scatter(\n [],\n label='the_label',\n group='the_group',\n ).opts(title=title_str)\n title = 'Label: the_label, group: the_group, dims: , type: Scatter'\n self.assertEqual(render(e).title.text, title)\n\n def test_element_xformatter_string(self):\n curve = Curve(range(10)).options(xformatter='%d')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertIsInstance(xaxis.formatter, PrintfTickFormatter)\n self.assertEqual(xaxis.formatter.format, '%d')\n\n def test_element_yformatter_string(self):\n curve = Curve(range(10)).options(yformatter='%d')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertIsInstance(yaxis.formatter, PrintfTickFormatter)\n self.assertEqual(yaxis.formatter.format, '%d')\n\n def test_element_xformatter_function(self):\n try:\n import pscript # noqa\n except:\n raise SkipTest('Test requires pscript')\n def formatter(value):\n return str(value) + ' %'\n curve = Curve(range(10)).options(xformatter=formatter)\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertIsInstance(xaxis.formatter, FuncTickFormatter)\n\n def test_element_yformatter_function(self):\n try:\n import pscript # noqa\n except:\n raise SkipTest('Test requires pscript')\n def formatter(value):\n return str(value) + ' %'\n curve = Curve(range(10)).options(yformatter=formatter)\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertIsInstance(yaxis.formatter, FuncTickFormatter)\n\n def test_element_xformatter_instance(self):\n formatter = NumeralTickFormatter()\n curve = Curve(range(10)).options(xformatter=formatter)\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertIs(xaxis.formatter, formatter)\n\n def test_element_yformatter_instance(self):\n formatter = NumeralTickFormatter()\n curve = Curve(range(10)).options(yformatter=formatter)\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertIs(yaxis.formatter, formatter)\n\n def test_empty_element_visibility(self):\n curve = Curve([])\n plot = bokeh_renderer.get_plot(curve)\n self.assertTrue(plot.handles['glyph_renderer'].visible)\n\n def test_element_no_xaxis(self):\n curve = Curve(range(10)).opts(plot=dict(xaxis=None))\n plot = bokeh_renderer.get_plot(curve).state\n self.assertFalse(plot.xaxis[0].visible)\n\n def test_element_no_yaxis(self):\n curve = Curve(range(10)).opts(plot=dict(yaxis=None))\n plot = bokeh_renderer.get_plot(curve).state\n self.assertFalse(plot.yaxis[0].visible)\n\n def test_element_xrotation(self):\n curve = Curve(range(10)).opts(plot=dict(xrotation=90))\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].major_label_orientation, np.pi/2)\n\n def test_element_yrotation(self):\n curve = Curve(range(10)).opts(plot=dict(yrotation=90))\n plot = 
bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.yaxis[0].major_label_orientation, np.pi/2)\n\n def test_element_xlabel_override(self):\n curve = Curve(range(10)).options(xlabel='custom x-label')\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].axis_label, 'custom x-label')\n\n def test_element_ylabel_override(self):\n curve = Curve(range(10)).options(ylabel='custom y-label')\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.yaxis[0].axis_label, 'custom y-label')\n\n def test_element_labelled_x_disabled(self):\n curve = Curve(range(10)).options(labelled=['y'])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].axis_label, '')\n self.assertEqual(plot.yaxis[0].axis_label, 'y')\n\n def test_element_labelled_y_disabled(self):\n curve = Curve(range(10)).options(labelled=['x'])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].axis_label, 'x')\n self.assertEqual(plot.yaxis[0].axis_label, '')\n\n def test_element_labelled_both_disabled(self):\n curve = Curve(range(10)).options(labelled=[])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].axis_label, '')\n self.assertEqual(plot.yaxis[0].axis_label, '')\n\n def test_static_source_optimization(self):\n global data\n data = np.ones((5, 5))\n img = Image(data)\n def get_img(test):\n global data\n data *= test\n return img\n stream = Stream.define(str('Test'), test=1)()\n dmap = DynamicMap(get_img, streams=[stream])\n plot = bokeh_renderer.get_plot(dmap, doc=Document())\n source = plot.handles['source']\n self.assertEqual(source.data['image'][0].mean(), 1)\n stream.event(test=2)\n self.assertTrue(plot.static_source)\n self.assertEqual(source.data['image'][0].mean(), 2)\n self.assertNotIn(source, plot.current_handles)\n\n def test_stream_cleanup(self):\n stream = Stream.define(str('Test'), test=1)()\n dmap = DynamicMap(lambda test: Curve([]), streams=[stream])\n plot = bokeh_renderer.get_plot(dmap)\n self.assertTrue(bool(stream._subscribers))\n plot.cleanup()\n self.assertFalse(bool(stream._subscribers))\n\n def test_element_formatter_xaxis(self):\n try:\n import pscript # noqa\n except:\n raise SkipTest('Test requires pscript')\n def formatter(x):\n return '%s' % x\n curve = Curve(range(10), kdims=[Dimension('x', value_format=formatter)])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertIsInstance(plot.xaxis[0].formatter, FuncTickFormatter)\n\n def test_element_formatter_yaxis(self):\n try:\n import pscript # noqa\n except:\n raise SkipTest('Test requires pscript')\n def formatter(x):\n return '%s' % x\n curve = Curve(range(10), vdims=[Dimension('y', value_format=formatter)])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertIsInstance(plot.yaxis[0].formatter, FuncTickFormatter)\n\n def test_element_grid_custom_xticker(self):\n curve = Curve([1, 2, 3]).opts(xticks=[0.5, 1.5], show_grid=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertIs(plot.state.xgrid[0].ticker, plot.state.xaxis[0].ticker)\n\n def test_element_grid_custom_yticker(self):\n curve = Curve([1, 2, 3]).opts(yticks=[0.5, 2.5], show_grid=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertIs(plot.state.ygrid[0].ticker, plot.state.yaxis[0].ticker)\n\n def test_element_grid_options(self):\n grid_style = {'grid_line_color': 'blue', 'grid_line_width': 1.5, 'ygrid_bounds': (0.3, 0.7),\n 'minor_xgrid_line_color': 'lightgray', 'xgrid_line_dash': [4, 4]}\n curve = Curve(range(10)).options(show_grid=True, 
gridstyle=grid_style)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.xgrid[0].grid_line_color, 'blue')\n self.assertEqual(plot.state.xgrid[0].grid_line_width, 1.5)\n self.assertEqual(plot.state.xgrid[0].grid_line_dash, [4, 4])\n self.assertEqual(plot.state.xgrid[0].minor_grid_line_color, 'lightgray')\n self.assertEqual(plot.state.ygrid[0].grid_line_color, 'blue')\n self.assertEqual(plot.state.ygrid[0].grid_line_width, 1.5)\n self.assertEqual(plot.state.ygrid[0].bounds, (0.3, 0.7))\n\n def test_change_cds_columns(self):\n lengths = {'a': 1, 'b': 2, 'c': 3}\n curve = DynamicMap(lambda a: Curve(range(lengths[a]), a), kdims=['a']).redim.values(a=['a', 'b', 'c'])\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(sorted(plot.handles['source'].data.keys()), ['a', 'y'])\n self.assertEqual(plot.state.xaxis[0].axis_label, 'a')\n plot.update(('b',))\n self.assertEqual(sorted(plot.handles['source'].data.keys()), ['b', 'y'])\n self.assertEqual(plot.state.xaxis[0].axis_label, 'b')\n\n def test_update_cds_columns(self):\n curve = DynamicMap(lambda a: Curve(range(10), a), kdims=['a']).redim.values(a=['a', 'b', 'c'])\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(sorted(plot.handles['source'].data.keys()), ['a', 'y'])\n self.assertEqual(plot.state.xaxis[0].axis_label, 'a')\n plot.update(('b',))\n self.assertEqual(sorted(plot.handles['source'].data.keys()), ['a', 'b', 'y'])\n self.assertEqual(plot.state.xaxis[0].axis_label, 'b')\n\n def test_categorical_axis_fontsize(self):\n curve = Curve([('A', 1), ('B', 2)]).options(fontsize={'minor_xticks': '6pt', 'xticks': 18})\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.major_label_text_font_size, '6pt')\n self.assertEqual(xaxis.group_text_font_size, {'value': '18pt'})\n\n def test_categorical_axis_fontsize_both(self):\n curve = Curve([('A', 1), ('B', 2)]).options(fontsize={'xticks': 18})\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.major_label_text_font_size, {'value': '18pt'})\n self.assertEqual(xaxis.group_text_font_size, {'value': '18pt'})\n\n def test_cftime_transform_gregorian_no_warn(self):\n try:\n import cftime\n except:\n raise SkipTest('Test requires cftime library')\n gregorian_dates = [cftime.DatetimeGregorian(2000, 2, 28),\n cftime.DatetimeGregorian(2000, 3, 1),\n cftime.DatetimeGregorian(2000, 3, 2)]\n curve = Curve((gregorian_dates, [1, 2, 3]))\n plot = bokeh_renderer.get_plot(curve)\n xs = plot.handles['cds'].data['x']\n self.assertEqual(xs.astype('int64'),\n np.array([951696000000, 951868800000, 951955200000]))\n\n def test_cftime_transform_noleap_warn(self):\n try:\n import cftime\n except:\n raise SkipTest('Test requires cftime library')\n gregorian_dates = [cftime.DatetimeNoLeap(2000, 2, 28),\n cftime.DatetimeNoLeap(2000, 3, 1),\n cftime.DatetimeNoLeap(2000, 3, 2)]\n curve = Curve((gregorian_dates, [1, 2, 3]))\n plot = bokeh_renderer.get_plot(curve)\n xs = plot.handles['cds'].data['x']\n self.assertEqual(xs.astype('int64'),\n np.array([951696000000, 951868800000, 951955200000]))\n substr = (\n \"Converting cftime.datetime from a non-standard calendar \"\n \"(noleap) to a standard calendar for plotting. 
This may \"\n \"lead to subtle errors in formatting dates, for accurate \"\n \"tick formatting switch to the matplotlib backend.\")\n self.log_handler.assertEndsWith('WARNING', substr)\n\n def test_active_tools_drag(self):\n curve = Curve([1, 2, 3]).options(active_tools=['box_zoom'])\n plot = bokeh_renderer.get_plot(curve)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_drag, tools.BoxZoomTool)\n\n def test_active_tools_scroll(self):\n curve = Curve([1, 2, 3]).options(active_tools=['wheel_zoom'])\n plot = bokeh_renderer.get_plot(curve)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_scroll, tools.WheelZoomTool)\n\n def test_active_tools_tap(self):\n curve = Curve([1, 2, 3]).options(active_tools=['tap'], tools=['tap'])\n plot = bokeh_renderer.get_plot(curve)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_tap, tools.TapTool)\n\n def test_active_tools_draw_stream(self):\n scatter = Scatter([1, 2, 3]).options(active_tools=['point_draw'])\n PointDraw(source=scatter)\n plot = bokeh_renderer.get_plot(scatter)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_tap, tools.PointDrawTool)\n self.assertIsInstance(toolbar.active_drag, tools.PointDrawTool)\n\n def test_hover_tooltip_update(self):\n hmap = HoloMap({'a': Curve([1, 2, 3], vdims='a'), 'b': Curve([1, 2, 3], vdims='b')}).opts(\n tools=['hover'])\n plot = bokeh_renderer.get_plot(hmap)\n self.assertEqual(plot.handles['hover'].tooltips, [('x', '@{x}'), ('a', '@{a}')])\n plot.update(('b',))\n self.assertEqual(plot.handles['hover'].tooltips, [('x', '@{x}'), ('b', '@{b}')])\n\n def test_categorical_dimension_values(self):\n curve = Curve([('C', 1), ('B', 3)]).redim.values(x=['A', 'B', 'C'])\n plot = bokeh_renderer.get_plot(curve)\n x_range = plot.handles['x_range']\n self.assertEqual(x_range.factors, ['A', 'B', 'C'])\n\n def test_categorical_dimension_type(self):\n curve = Curve([]).redim.type(x=str)\n plot = bokeh_renderer.get_plot(curve)\n x_range = plot.handles['x_range']\n self.assertEqual(x_range.factors, [])\n\n #################################################################\n # Aspect tests\n #################################################################\n\n def test_element_aspect(self):\n curve = Curve([1, 2, 3]).opts(aspect=2)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 300)\n self.assertEqual(plot.state.frame_width, 600)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_width(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 200)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_height(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, height=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 800)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_width_height(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, height=400, width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, 400)\n 
self.assertEqual(plot.state.plot_width, 400)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.aspect_ratio, None)\n self.log_handler.assertContains('WARNING', \"aspect value was ignored\")\n\n def test_element_aspect_frame_width(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 200)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_frame_height(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_height=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 800)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_frame_width_frame_height(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_height=400, frame_width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_ratio, None)\n self.log_handler.assertContains('WARNING', \"aspect value was ignored\")\n\n def test_element_data_aspect(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=1.5)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 300)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 1.5)\n\n def test_element_data_aspect_width(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=2, width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n def test_element_data_aspect_height(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=2, height=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n def test_element_data_aspect_width_height(self):\n curve = Curve([0, 2, 3]).opts(data_aspect=2, height=400, width=400)\n plot = bokeh_renderer.get_plot(curve)\n x_range, y_range = plot.handles['x_range'], plot.handles['y_range']\n self.assertEqual(plot.state.plot_height, 400)\n self.assertEqual(plot.state.plot_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n self.assertEqual(x_range.start, -2)\n self.assertEqual(x_range.end, 4)\n self.assertEqual(y_range.start, 0)\n self.assertEqual(y_range.end, 3)\n\n def test_element_data_aspect_frame_width(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 800)\n 
self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n def test_element_data_aspect_frame_height(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_height=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 200)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n def test_element_data_aspect_frame_width_frame_height(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_height=400, frame_width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n #################################################################\n # Aspect tests\n #################################################################\n\n def test_element_responsive(self):\n curve = Curve([1, 2, 3]).opts(responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'stretch_both')\n\n def test_element_width_responsive(self):\n curve = Curve([1, 2, 3]).opts(width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, 400)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'stretch_height')\n\n def test_element_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, 400)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'stretch_width')\n\n def test_element_frame_width_responsive(self):\n curve = Curve([1, 2, 3]).opts(frame_width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'stretch_height')\n\n def test_element_frame_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(frame_height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'stretch_width')\n\n def test_element_aspect_responsive(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'scale_both')\n\n def test_element_aspect_width_responsive(self):\n curve = 
Curve([1, 2, 3]).opts(aspect=2, width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 200)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n\n def test_element_aspect_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 800)\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n\n def test_element_width_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(height=400, width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, 400)\n self.assertEqual(plot.state.plot_width, 400)\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n\n def test_element_aspect_frame_width_responsive(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 200)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n\n def test_element_aspect_frame_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 800)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_frame_width_frame_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(frame_height=400, frame_width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_data_aspect_responsive(self):\n curve = Curve([0, 2]).opts(data_aspect=1, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.aspect_ratio, 0.5)\n self.assertEqual(plot.state.aspect_scale, 1)\n self.assertEqual(plot.state.sizing_mode, 'scale_both')\n\n def test_element_data_aspect_and_aspect_responsive(self):\n curve = Curve([0, 2]).opts(data_aspect=1, aspect=2, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.aspect_ratio, 0.5)\n self.assertEqual(plot.state.aspect_scale, 1)\n self.assertEqual(plot.state.sizing_mode, 'scale_both')\n x_range = plot.handles['x_range']\n y_range = plot.handles['y_range']\n self.assertEqual(x_range.start, 0)\n self.assertEqual(x_range.end, 1)\n 
self.assertEqual(y_range.start, 0)\n self.assertEqual(y_range.end, 2)\n\n def test_element_data_aspect_width_responsive(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=2, width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_data_aspect_height_responsive(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=2, height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_data_aspect_frame_width_responsive(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 800)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_data_aspect_frame_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 200)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n\n\nclass TestColorbarPlot(TestBokehPlot):\n\n def test_colormapper_symmetric(self):\n img = Image(np.array([[0, 1], [2, 3]])).options(symmetric=True)\n plot = bokeh_renderer.get_plot(img)\n cmapper = plot.handles['color_mapper']\n self.assertEqual(cmapper.low, -3)\n self.assertEqual(cmapper.high, 3)\n\n def test_colormapper_color_levels(self):\n cmap = process_cmap('viridis', provider='bokeh')\n img = Image(np.array([[0, 1], [2, 3]])).options(color_levels=5, cmap=cmap)\n plot = bokeh_renderer.get_plot(img)\n cmapper = plot.handles['color_mapper']\n self.assertEqual(len(cmapper.palette), 5)\n self.assertEqual(cmapper.palette, ['#440154', '#440255', '#440357', '#450558', '#45065A'])\n\n def test_colormapper_transparent_nan(self):\n img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'NaN': 'transparent'})\n plot = bokeh_renderer.get_plot(img)\n cmapper = plot.handles['color_mapper']\n self.assertEqual(cmapper.nan_color, 'rgba(0, 0, 0, 0)')\n\n def test_colormapper_min_max_colors(self):\n img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'min': 'red', 'max': 'blue'})\n plot = bokeh_renderer.get_plot(img)\n cmapper = plot.handles['color_mapper']\n self.assertEqual(cmapper.low_color, 'red')\n self.assertEqual(cmapper.high_color, 'blue')\n\n def test_custom_colorbar_ticker(self):\n ticker = LogTicker()\n img = Image(np.array([[0, 1], [2, 3]])).options(colorbar=True, colorbar_opts=dict(ticker=ticker))\n plot = bokeh_renderer.get_plot(img)\n colorbar = plot.handles['colorbar']\n self.assertIs(colorbar.ticker, ticker)\n\n def test_explicit_categorical_cmap_on_integer_data(self):\n explicit_mapping = OrderedDict([(0, 'blue'), (1, 'red'), (2, 'green'), (3, 'purple')])\n points = Scatter(([0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]), vdims=['y', 
'Category']).options(\n color_index='Category', cmap=explicit_mapping\n )\n plot = bokeh_renderer.get_plot(points)\n cmapper = plot.handles['color_mapper']\n cds = plot.handles['cds']\n self.assertEqual(cds.data['Category_str__'], ['0', '1', '2', '3'])\n self.assertEqual(cmapper.factors, ['0', '1', '2', '3'])\n self.assertEqual(cmapper.palette, ['blue', 'red', 'green', 'purple'])\n\n\nclass TestOverlayPlot(TestBokehPlot):\n\n def test_overlay_projection_clashing(self):\n overlay = Curve([]).options(projection='polar') * Curve([]).options(projection='custom')\n with self.assertRaises(Exception):\n bokeh_renderer.get_plot(overlay)\n\n def test_overlay_projection_propagates(self):\n overlay = Curve([]) * Curve([]).options(projection='custom')\n plot = bokeh_renderer.get_plot(overlay)\n self.assertEqual([p.projection for p in plot.subplots.values()], ['custom', 'custom'])\n\n def test_overlay_gridstyle_applies(self):\n grid_style = {'grid_line_color': 'blue', 'grid_line_width': 2}\n overlay = (Scatter([(10,10)]).options(gridstyle=grid_style, show_grid=True, size=20)\n * Labels([(10, 10, 'A')]))\n plot = bokeh_renderer.get_plot(overlay)\n self.assertEqual(plot.state.xgrid[0].grid_line_color, 'blue')\n self.assertEqual(plot.state.xgrid[0].grid_line_width, 2)\n\n def test_ndoverlay_legend_muted(self):\n overlay = NdOverlay({i: Curve(np.random.randn(10).cumsum()) for i in range(5)}).options(legend_muted=True)\n plot = bokeh_renderer.get_plot(overlay)\n for sp in plot.subplots.values():\n self.assertTrue(sp.handles['glyph_renderer'].muted)\n\n def test_overlay_legend_muted(self):\n overlay = (Curve(np.random.randn(10).cumsum(), label='A') *\n Curve(np.random.randn(10).cumsum(), label='B')).options(legend_muted=True)\n plot = bokeh_renderer.get_plot(overlay)\n for sp in plot.subplots.values():\n self.assertTrue(sp.handles['glyph_renderer'].muted)\n\n def test_active_tools_drag(self):\n curve = Curve([1, 2, 3])\n scatter = Scatter([1, 2, 3])\n overlay = (scatter * curve).options(active_tools=['box_zoom'])\n plot = bokeh_renderer.get_plot(overlay)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_drag, tools.BoxZoomTool)\n\n def test_active_tools_scroll(self):\n curve = Curve([1, 2, 3])\n scatter = Scatter([1, 2, 3])\n overlay = (scatter * curve).options(active_tools=['wheel_zoom'])\n plot = bokeh_renderer.get_plot(overlay)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_scroll, tools.WheelZoomTool)\n\n def test_active_tools_tap(self):\n curve = Curve([1, 2, 3])\n scatter = Scatter([1, 2, 3]).options(tools=['tap'])\n overlay = (scatter * curve).options(active_tools=['tap'])\n plot = bokeh_renderer.get_plot(overlay)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_tap, tools.TapTool)\n\n def test_active_tools_draw_stream(self):\n curve = Curve([1, 2, 3])\n scatter = Scatter([1, 2, 3]).options(active_tools=['point_draw'])\n PointDraw(source=scatter)\n overlay = (scatter * curve)\n plot = bokeh_renderer.get_plot(overlay)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_tap, tools.PointDrawTool)\n self.assertIsInstance(toolbar.active_drag, tools.PointDrawTool)\n\n def test_categorical_overlay_dimension_values(self):\n curve = Curve([('C', 1), ('B', 3)]).redim.values(x=['A', 'B', 'C'])\n scatter = Scatter([('A', 2)])\n plot = bokeh_renderer.get_plot(curve*scatter)\n x_range = plot.handles['x_range']\n self.assertEqual(x_range.factors, ['A', 'B', 'C'])\n\n def test_categorical_overlay_dimension_values_skip_factor(self):\n 
curve = Curve([('C', 1), ('B', 3)])\n scatter = Scatter([('A', 2)])\n plot = bokeh_renderer.get_plot((curve*scatter).redim.values(x=['A', 'C']))\n x_range = plot.handles['x_range']\n self.assertEqual(x_range.factors, ['A', 'C'])\n",
"import numpy as np\n\nfrom holoviews.element import Raster, Image\n\nfrom .testplot import TestMPLPlot, mpl_renderer\n\ntry:\n from matplotlib.colors import ListedColormap\nexcept:\n pass\n\n\nclass TestRasterPlot(TestMPLPlot):\n\n def test_raster_invert_axes(self):\n arr = np.array([[0, 1, 2], [3, 4, 5]])\n raster = Raster(arr).opts(invert_axes=True)\n plot = mpl_renderer.get_plot(raster)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_array().data, arr.T[::-1])\n self.assertEqual(artist.get_extent(), [0, 2, 0, 3])\n\n def test_image_invert_axes(self):\n arr = np.array([[0, 1, 2], [3, 4, 5]])\n raster = Image(arr).opts(invert_axes=True)\n plot = mpl_renderer.get_plot(raster)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_array().data, arr.T[::-1, ::-1])\n self.assertEqual(artist.get_extent(), [-0.5, 0.5, -0.5, 0.5])\n\n def test_image_listed_cmap(self):\n colors = ['#ffffff','#000000']\n img = Image(np.array([[0, 1, 2], [3, 4, 5]])).opts(cmap=colors)\n plot = mpl_renderer.get_plot(img)\n artist = plot.handles['artist']\n cmap = artist.get_cmap()\n self.assertIsInstance(cmap, ListedColormap)\n self.assertEqual(cmap.colors, colors)\n\n def test_image_cbar_extend_both(self):\n img = Image(np.array([[0, 1], [2, 3]])).redim(z=dict(range=(1,2)))\n plot = mpl_renderer.get_plot(img.opts(colorbar=True))\n self.assertEqual(plot.handles['cbar'].extend, 'both')\n\n def test_image_cbar_extend_min(self):\n img = Image(np.array([[0, 1], [2, 3]])).redim(z=dict(range=(1, None)))\n plot = mpl_renderer.get_plot(img.opts(colorbar=True))\n self.assertEqual(plot.handles['cbar'].extend, 'min')\n\n def test_image_cbar_extend_max(self):\n img = Image(np.array([[0, 1], [2, 3]])).redim(z=dict(range=(None, 2)))\n plot = mpl_renderer.get_plot(img.opts(colorbar=True))\n self.assertEqual(plot.handles['cbar'].extend, 'max')\n\n def test_image_cbar_extend_clim(self):\n img = Image(np.array([[0, 1], [2, 3]])).opts(\n clim=(np.nan, np.nan), colorbar=True)\n plot = mpl_renderer.get_plot(img)\n self.assertEqual(plot.handles['cbar'].extend, 'neither')\n"
] | [
[
"numpy.array",
"numpy.random.randn",
"numpy.ones"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andreped/GSI-RADS | [
"3582ed84216266e05cb71f6223ce9814a6df203f"
] | [
"diagnosis/src/Utils/volume_utilities.py"
] | [
"import numpy as np\nfrom copy import deepcopy\nfrom skimage.transform import resize\nfrom scipy.ndimage import binary_fill_holes\nfrom skimage.measure import regionprops\nfrom diagnosis.src.Utils.configuration_parser import *\n\n\ndef crop_MR(volume, parameters):\n original_volume = np.copy(volume)\n volume[volume >= 0.2] = 1\n volume[volume < 0.2] = 0\n volume = volume.astype(np.uint8)\n volume = binary_fill_holes(volume).astype(np.uint8)\n regions = regionprops(volume)\n min_row, min_col, min_depth, max_row, max_col, max_depth = regions[0].bbox\n print('cropping params', min_row, min_col, min_depth, max_row, max_col, max_depth)\n\n cropped_volume = original_volume[min_row:max_row, min_col:max_col, min_depth:max_depth]\n bbox = [min_row, min_col, min_depth, max_row, max_col, max_depth]\n\n return cropped_volume, bbox\n\n\ndef resize_volume(volume, new_slice_size, slicing_plane, order=1):\n new_volume = None\n if len(new_slice_size) == 2:\n if slicing_plane == 'axial':\n new_val = int(volume.shape[2] * (new_slice_size[1] / volume.shape[1]))\n new_volume = resize(volume, (new_slice_size[0], new_slice_size[1], new_val), order=order)\n elif slicing_plane == 'sagittal':\n new_val = new_slice_size[0]\n new_volume = resize(volume, (new_val, new_slice_size[0], new_slice_size[1]), order=order)\n elif slicing_plane == 'coronal':\n new_val = new_slice_size[0]\n new_volume = resize(volume, (new_slice_size[0], new_val, new_slice_size[1]), order=order)\n elif len(new_slice_size) == 3:\n new_volume = resize(volume, new_slice_size, order=order)\n return new_volume\n\n\ndef __intensity_normalization_MRI(volume, parameters):\n result = deepcopy(volume).astype('float32')\n if parameters.intensity_clipping_range[1] - parameters.intensity_clipping_range[0] != 100:\n limits = np.percentile(volume, q=parameters.intensity_clipping_range)\n result[volume < limits[0]] = limits[0]\n result[volume > limits[1]] = limits[1]\n\n if parameters.normalization_method == 'zeromean':\n mean_val = np.mean(result)\n var_val = np.std(result)\n tmp = (result - mean_val) / var_val\n result = tmp\n else:\n min_val = np.min(result)\n max_val = np.max(result)\n if (max_val - min_val) != 0:\n tmp = (result - min_val) / (max_val - min_val)\n result = tmp\n # else:\n # result = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))\n\n return result\n\n\ndef intensity_normalization(volume, parameters):\n return __intensity_normalization_MRI(volume, parameters)\n\n\ndef padding_for_inference(data, slab_size, slicing_plane):\n new_data = data\n if slicing_plane == 'axial':\n missing_dimension = (slab_size - (data.shape[2] % slab_size)) % slab_size\n if missing_dimension != 0:\n new_data = np.pad(data, ((0, 0), (0, 0), (0, missing_dimension), (0, 0)), mode='edge')\n elif slicing_plane == 'sagittal':\n missing_dimension = (slab_size - (data.shape[0] % slab_size)) % slab_size\n if missing_dimension != 0:\n new_data = np.pad(data, ((0, missing_dimension), (0, 0), (0, 0), (0, 0)), mode='edge')\n elif slicing_plane == 'coronal':\n missing_dimension = (slab_size - (data.shape[1] % slab_size)) % slab_size\n if missing_dimension != 0:\n new_data = np.pad(data, ((0, 0), (0, missing_dimension), (0, 0), (0, 0)), mode='edge')\n\n return new_data, missing_dimension\n\n\ndef padding_for_inference_both_ends(data, slab_size, slicing_plane):\n new_data = data\n padding_val = int(slab_size / 2)\n if slicing_plane == 'axial':\n new_data = np.pad(data, ((0, 0), (0, 0), (padding_val, padding_val), (0, 0)), mode='edge')\n elif slicing_plane == 
'sagittal':\n new_data = np.pad(data, ((padding_val, padding_val), (0, 0), (0, 0), (0, 0)), mode='edge')\n elif slicing_plane == 'coronal':\n new_data = np.pad(data, ((0, 0), (padding_val, padding_val), (0, 0), (0, 0)), mode='edge')\n\n return new_data\n"
] | [
[
"numpy.pad",
"numpy.min",
"numpy.percentile",
"numpy.max",
"numpy.std",
"numpy.copy",
"numpy.mean",
"scipy.ndimage.binary_fill_holes"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
2742195759/Paddle | [
"ce034db1834af85539b22ab68492df9972ff3e69",
"ce034db1834af85539b22ab68492df9972ff3e69",
"ce034db1834af85539b22ab68492df9972ff3e69",
"311b3b44fc7d51d4d66d90ab8a3fc0d42231afda",
"ce034db1834af85539b22ab68492df9972ff3e69"
] | [
"python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py",
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py",
"python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py",
"python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py",
"python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py"
] | [
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom scipy.special import expit, erf\nimport paddle.fluid.core as core\nfrom paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16\nfrom paddle.fluid.tests.unittests.test_activation_op import TestActivation, TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu, TestSwish, TestHardSwish, TestRelu6, TestSigmoid\nfrom paddle.fluid.tests.unittests.test_gelu_op import gelu\nfrom mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd\n\n\nclass TestMKLDNNReluDim2(TestRelu):\n def setUp(self):\n super(TestMKLDNNReluDim2, self).setUp()\n\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNRelu6Dim2(TestRelu6):\n def setUp(self):\n super(TestMKLDNNRelu6Dim2, self).setUp()\n self.attrs.update({\"use_mkldnn\": True})\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNLeakyReluDim2(TestLeakyRelu):\n def setUp(self):\n super(TestMKLDNNLeakyReluDim2, self).setUp()\n\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNGeluDim2(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.float32\n\n x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)\n out = gelu(x, False)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNGeluDim2Approx(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.float32\n\n x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)\n out = gelu(x, True)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True, \"approximate\": True}\n\n\nclass TestMKLDNNTanhDim2(TestTanh):\n def setUp(self):\n super(TestMKLDNNTanhDim2, self).setUp()\n\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSqrtDim2(TestSqrt):\n def setUp(self):\n super(TestMKLDNNSqrtDim2, self).setUp()\n\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNAbsDim2(TestAbs):\n def setUp(self):\n super(TestMKLDNNAbsDim2, self).setUp()\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSwishDim2(TestSwish):\n def setUp(self):\n super(TestMKLDNNSwishDim2, self).setUp()\n\n self.attrs[\"use_mkldnn\"] = True\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNHardSwishDim2(TestHardSwish):\n def setUp(self):\n super(TestMKLDNNHardSwishDim2, self).setUp()\n\n self.attrs[\"use_mkldnn\"] = True\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSigmoidDim2(TestSigmoid):\n def setUp(self):\n super(TestMKLDNNSigmoidDim2, 
self).setUp()\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNReluDim4(TestRelu):\n def setUp(self):\n super(TestMKLDNNReluDim4, self).setUp()\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(\"float32\")\n # The same reason with TestAbs\n x[np.abs(x) < 0.005] = 0.02\n out = np.maximum(x, 0)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNLeakyReluDim4(TestLeakyRelu):\n def setUp(self):\n super(TestMKLDNNLeakyReluDim4, self).setUp()\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(\"float32\")\n # The same reason with TestAbs\n x[np.abs(x) < 0.005] = 0.02\n out = np.maximum(x, 0.02 * x)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNGeluDim4(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.float32\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)\n out = gelu(x, False)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNGeluDim4Approx(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.float32\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)\n out = gelu(x, True)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True, \"approximate\": True}\n\n\[email protected](not core.supports_bfloat16(),\n \"place does not support BF16 evaluation\")\nclass TestMKLDNNGeluBf16Dim4(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.uint16\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)\n out = convert_float_to_uint16(gelu(x, False))\n\n self.inputs = {'X': convert_float_to_uint16(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n def test_check_output(self):\n self.check_output_with_place(core.CPUPlace())\n\n def test_check_grad(self):\n pass\n\n\[email protected](not core.supports_bfloat16(),\n \"place does not support BF16 evaluation\")\nclass TestMKLDNNGeluBf16Dim4Approx(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.uint16\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)\n out = convert_float_to_uint16(gelu(x, True))\n\n self.inputs = {'X': convert_float_to_uint16(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True, \"approximate\": True}\n\n def test_check_output(self):\n self.check_output_with_place(core.CPUPlace())\n\n def test_check_grad(self):\n pass\n\n\nclass TestMKLDNNTanhDim4(TestTanh):\n def setUp(self):\n super(TestMKLDNNTanhDim4, self).setUp()\n\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(\"float32\")\n }\n self.outputs = {'Out': np.tanh(self.inputs['X'])}\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNSqrtDim4(TestSqrt):\n def setUp(self):\n super(TestMKLDNNSqrtDim4, self).setUp()\n\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(\"float32\")\n }\n self.outputs = {'Out': np.sqrt(self.inputs['X'])}\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNAbsDim4(TestAbs):\n def setUp(self):\n super(TestMKLDNNAbsDim4, self).setUp()\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(\"float32\")\n # The same 
reason with TestAbs\n x[np.abs(x) < 0.005] = 0.02\n self.inputs = {'X': x}\n self.outputs = {'Out': np.abs(self.inputs['X'])}\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSwishDim4(TestSwish):\n def setUp(self):\n super(TestMKLDNNSwishDim4, self).setUp()\n\n x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)\n beta = 2.3\n out = x * expit(beta * x)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True, \"beta\": beta}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\ndef ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):\n return (x * np.minimum(np.maximum(x + offset, 0.), threshold) /\n scale).astype(x.dtype)\n\n\nclass TestMKLDNNHardSwishDim4(TestHardSwish):\n def setUp(self):\n super(TestMKLDNNHardSwishDim4, self).setUp()\n\n x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)\n threshold = 6.0\n scale = 6.0\n offset = 3.0\n x[np.abs(x + offset) < 0.005] = 0.02\n x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02\n\n out = ref_hardswish(x, threshold, scale, offset)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSigmoidDim4(TestSigmoid):\n def setUp(self):\n super(TestMKLDNNSigmoidDim4, self).setUp()\n\n x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)\n out = 1 / (1 + np.exp(-x))\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n\n# Check if primitives already exist in backward\nclass TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):\n def setUp(self):\n super(TestMKLDNNAbsPrimitivesAlreadyExist, self).setUp()\n\n np.random.seed(123)\n self.op_type = 'abs'\n self.x = np.random.uniform(-1, 1, [2, 2]).astype(np.float32)\n self.out = np.abs(self.x)\n self.out_grad = np.random.random_sample(self.x.shape).astype(np.float32)\n self.x_grad = self.__abs_bwd(self.x, self.out_grad)\n\n # Abs grad calculation\n def __abs_bwd(self, x, out_grad):\n return out_grad * np.sign(x)\n\n def test_check(self):\n check_if_mkldnn_primitives_exist_in_bwd(\n self, self.op_type, self.x, self.out, self.out_grad, self.x_grad)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy\n\nimport unittest\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph.jit import declarative\n\n\ndef dyfunc_tensor_shape_1(x):\n x = fluid.dygraph.to_variable(x)\n res = fluid.layers.reshape(x, shape=x.shape)\n return res\n\n\ndef dyfunc_tensor_shape_2(x):\n x = paddle.to_tensor(x)\n shape = x.shape\n shape2 = shape\n res = paddle.reshape(x, shape2)\n return res\n\n\ndef dyfunc_tensor_shape_3(x):\n # Transform y.shape but run y.shape actually because y is not Tensor\n x = fluid.dygraph.to_variable(x)\n y = numpy.ones(5)\n res = fluid.layers.reshape(x, shape=y.shape)\n return res\n\n\ndef dyfunc_tensor_shape_4(x):\n x = fluid.dygraph.to_variable(x)\n res = fluid.layers.reshape(x, shape=(-1, x.shape[0], len(x.shape)))\n return res\n\n\ndef dyfunc_tensor_shape_5(x):\n # `res = fluid.layers.reshape(x, shape=(-1, s))` to\n # `res = fluid.layers.reshape(x, shape=(-1,\n # paddle.jit.dy2static.convert_var_shape(x)[0]))`\n x = fluid.dygraph.to_variable(x)\n s = x.shape[0]\n res = fluid.layers.reshape(x, shape=(-1, s))\n return res\n\n\ndef dyfunc_tensor_shape_6(x):\n # `res = fluid.layers.reshape(x, shape=(-1, s))` to\n # `res = fluid.layers.reshape(x, shape=(-1,\n # paddle.jit.dy2static.convert_var_shape(x)[0:]))`\n x = fluid.dygraph.to_variable(x)\n s = x.shape[0:]\n res = fluid.layers.reshape(x, shape=s)\n return res\n\n\ndef dyfunc_tuple_shape_1(x):\n x = paddle.to_tensor(x)\n a, b = x.shape\n res = paddle.reshape(x, shape=(b, a))\n return res\n\n\ndef dyfunc_tuple_shape_2(x):\n x = paddle.to_tensor(x)\n shape = x.shape\n a, b = shape\n res = paddle.reshape(x, shape=(b, a))\n return res\n\n\ndef dyfunc_tuple_shape_3(x):\n x = paddle.to_tensor(x)\n a, b = paddle.shape(x)\n res = paddle.reshape(x, shape=(b, a))\n return res\n\n\ndef dyfunc_paddle_shape_api(x):\n x = paddle.to_tensor(x)\n # paddle.shape will not be converted.\n a = paddle.shape(x)[0]\n # alias api will also not be converted.\n alias_old_api = paddle.fluid.layers\n b = alias_old_api.shape(x)[1]\n res = paddle.reshape(x, shape=(b, a))\n return res\n\n\ndef dyfunc_with_if_1(x):\n x = fluid.dygraph.to_variable(x)\n res = fluid.layers.reshape(x, [-1, 1])\n x_shape_0 = x.shape[0]\n if x_shape_0 < 1:\n # `res.shape[0]` is transformed into\n # `paddle.jit.dy2static.convert_var_shape(res)[0]`\n if res.shape[0] > 1:\n res = fluid.layers.fill_constant(\n value=2, shape=x.shape, dtype=\"int32\")\n else:\n res = fluid.layers.fill_constant(\n value=3, shape=x.shape, dtype=\"int32\")\n return res\n\n\ndef dyfunc_with_if_2(x):\n x = fluid.dygraph.to_variable(x)\n # `len(x.shape)` will not be transformed because x.shape is not used by Paddle api.\n if len(x.shape) < 1:\n res = x\n else:\n res = fluid.layers.fill_constant(value=8, shape=x.shape, dtype=\"int32\")\n\n return res\n\n\ndef dyfunc_with_for_1(x):\n x = fluid.dygraph.to_variable(x)\n res = 
fluid.layers.fill_constant(value=0, shape=[1], dtype=\"int32\")\n # `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`\n for i in range(x.shape[0]):\n res += 1\n return res\n\n\ndef dyfunc_with_for_2(x):\n x = fluid.dygraph.to_variable(x)\n x_shape_0 = x.shape[0]\n res = fluid.layers.fill_constant(value=0, shape=[1], dtype=\"int32\")\n\n # `x_shape_0` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`\n for i in range(x_shape_0):\n res += 1\n return res\n\n\ndef dyfunc_with_for_3(x):\n x = fluid.dygraph.to_variable(x)\n res = fluid.layers.fill_constant(value=0, shape=[1], dtype=\"int32\")\n # `len(x.shape)` is not transformed.\n for i in range(len(x.shape)):\n res += 1\n\n return res\n\n\ndef dyfunc_with_while_1(x):\n x = fluid.dygraph.to_variable(x)\n res = fluid.layers.fill_constant(value=0, shape=[1], dtype=\"int32\")\n # `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`\n i = 1\n while i < x.shape[0]:\n res += 1\n i = i + 2\n return res\n\n\ndef dyfunc_with_while_2(x):\n x = fluid.dygraph.to_variable(x)\n x_shape_0 = x.shape[0]\n res = fluid.layers.fill_constant(value=0, shape=[1], dtype=\"int32\")\n i = 1\n # `x_shape_0` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`\n while i < x_shape_0:\n res += 1\n i = i + 2\n return res\n\n\ndef dyfunc_with_while_3(x):\n x = fluid.dygraph.to_variable(x)\n x_shape = x.shape\n res = fluid.layers.fill_constant(value=0, shape=[1], dtype=\"int32\")\n i = 1\n\n # `len(x.shape)` is not transformed.\n while len(x_shape) > i:\n res += 1\n i += 1\n return res\n\n\ndef dyfunc_with_while_4(x):\n x = paddle.to_tensor(x)\n y = numpy.ones(5)\n y_shape_0 = y.shape[0]\n i = 1\n\n # Transform y_shape_0 but run y.shape[0] actually because y is not Tensor\n while y_shape_0 > i:\n x += 1\n i += 1\n return x\n\n\ndef dyfunc_change_shape_after_assign(x):\n x = paddle.to_tensor(x)\n a, b = x.shape\n x = paddle.reshape(x, shape=(-1, 1))\n res = paddle.reshape(x, shape=(b, a))\n return res\n\n\n# 1. 
Basic tests without control flow\nclass TestTensorShapeBasic(unittest.TestCase):\n def setUp(self):\n self.input = numpy.ones(5).astype(\"int32\")\n self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n self._set_input_spec()\n self._set_expected_op_num()\n self.init_test_func()\n\n def init_test_func(self):\n self.dygraph_func = dyfunc_tensor_shape_1\n\n def _set_input_spec(self):\n self.input_spec = [paddle.static.InputSpec(shape=[5], dtype=\"int32\")]\n\n def _run(self, to_static):\n with fluid.dygraph.guard():\n if to_static:\n res = declarative(self.dygraph_func)(self.input).numpy()\n else:\n res = self.dygraph_func(self.input).numpy()\n return res\n\n def get_dygraph_output(self):\n return self._run(to_static=False)\n\n def get_static_output(self):\n return self._run(to_static=True)\n\n def test_transformed_static_result(self):\n static_res = self.get_static_output()\n dygraph_res = self.get_dygraph_output()\n self.assertTrue(\n numpy.allclose(dygraph_res, static_res),\n msg='dygraph res is {}\\nstatic_res is {}'.format(dygraph_res,\n static_res))\n\n def _set_expected_op_num(self):\n self.expected_op_num = 2\n self.expected_shape_op_num = 0\n self.expected_slice_op_num = 0\n\n def _compute_op_num(self, program):\n self.op_num = sum([len(block.ops) for block in program.blocks])\n self.shape_op_num = 0\n self.slice_op_num = 0\n\n for block in program.blocks:\n self.shape_op_num += len(\n [op for op in block.ops if op.type == \"shape\"])\n self.slice_op_num += len(\n [op for op in block.ops if op.type == \"slice\"])\n\n def test_op_num(self):\n static_layer = paddle.jit.to_static(self.dygraph_func, self.input_spec)\n program = static_layer.main_program\n self._compute_op_num(program)\n self.assertEqual(self.op_num, self.expected_op_num)\n self.assertEqual(self.shape_op_num, self.expected_shape_op_num)\n self.assertEqual(self.slice_op_num, self.expected_slice_op_num)\n\n\nclass TestTensorShapeBasic2(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_tensor_shape_2\n\n def _set_expected_op_num(self):\n self.expected_op_num = 3\n self.expected_shape_op_num = 1\n self.expected_slice_op_num = 0\n\n\nclass TestTensorShapeBasic3(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_tensor_shape_3\n\n\nclass TestTensorShapeBasic4(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_tensor_shape_4\n\n\nclass TestTensorShapeBasic5(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_tensor_shape_5\n\n def _set_expected_op_num(self):\n self.expected_op_num = 4\n self.expected_shape_op_num = 1\n self.expected_slice_op_num = 1\n\n\nclass TestTensorShapeBasic6(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_tensor_shape_6\n\n def _set_expected_op_num(self):\n self.expected_op_num = 4\n self.expected_shape_op_num = 1\n self.expected_slice_op_num = 1\n\n\nclass TestTupleShape1(TestTensorShapeBasic):\n def init_test_func(self):\n self.input = numpy.ones((5, 7)).astype(\"int32\")\n self.input_spec = [paddle.static.InputSpec(shape=[5, 7], dtype=\"int32\")]\n self.dygraph_func = dyfunc_tuple_shape_1\n\n def _set_expected_op_num(self):\n self.expected_op_num = 6\n self.expected_shape_op_num = 2\n self.expected_slice_op_num = 2\n\n\nclass TestTupleShape2(TestTensorShapeBasic):\n def init_test_func(self):\n self.input = numpy.ones((5, 7)).astype(\"int32\")\n self.input_spec = [paddle.static.InputSpec(shape=[5, 7], 
dtype=\"int32\")]\n self.dygraph_func = dyfunc_tuple_shape_2\n\n def _set_expected_op_num(self):\n self.expected_op_num = 5\n self.expected_shape_op_num = 1\n self.expected_slice_op_num = 2\n\n\nclass TestTupleShape3(TestTensorShapeBasic):\n def init_test_func(self):\n self.input = numpy.ones((5, 7)).astype(\"int32\")\n self.input_spec = [paddle.static.InputSpec(shape=[5, 7], dtype=\"int32\")]\n self.dygraph_func = dyfunc_tuple_shape_3\n\n def _set_expected_op_num(self):\n self.expected_op_num = 5\n self.expected_shape_op_num = 1\n self.expected_slice_op_num = 2\n\n\nclass TestPaddleShapeApi(TestTensorShapeBasic):\n def init_test_func(self):\n self.input = numpy.ones((5, 7)).astype(\"int32\")\n self.input_spec = [paddle.static.InputSpec(shape=[5, 7], dtype=\"int32\")]\n self.dygraph_func = dyfunc_paddle_shape_api\n\n def _set_expected_op_num(self):\n self.expected_op_num = 6\n self.expected_shape_op_num = 2\n self.expected_slice_op_num = 2\n\n\n# 2. Tests with control flow if\nclass TestTensorShapeInIf1(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_with_if_1\n\n def _set_expected_op_num(self):\n self.expected_op_num = 4\n self.expected_shape_op_num = 1\n self.expected_slice_op_num = 1\n\n\nclass TestTensorShapeInIf2(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_with_if_2\n\n def _set_expected_op_num(self):\n self.expected_op_num = 14\n self.expected_shape_op_num = 2\n self.expected_slice_op_num = 1\n\n\n# 3. Tests with control flow for loop\nclass TestTensorShapeInFor1(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_with_for_1\n\n def _set_expected_op_num(self):\n self.expected_op_num = 22\n self.expected_shape_op_num = 3\n self.expected_slice_op_num = 3\n\n\nclass TestTensorShapeInFor2(TestTensorShapeInFor1):\n def init_test_func(self):\n self.dygraph_func = dyfunc_with_for_2\n\n def _set_expected_op_num(self):\n self.expected_op_num = 9\n self.expected_shape_op_num = 1\n self.expected_slice_op_num = 1\n\n\nclass TestTensorShapeInFor3(TestTensorShapeInFor1):\n def init_test_func(self):\n self.dygraph_func = dyfunc_with_for_3\n\n def _set_expected_op_num(self):\n self.expected_op_num = 25\n self.expected_shape_op_num = 6\n self.expected_slice_op_num = 3\n\n\n# 4. Tests with control flow while loop\nclass TestTensorShapeInWhile1(TestTensorShapeInFor1):\n def init_test_func(self):\n self.dygraph_func = dyfunc_with_while_1\n\n\nclass TestTensorShapeInWhile2(TestTensorShapeInFor1):\n def init_test_func(self):\n self.dygraph_func = dyfunc_with_while_2\n\n def _set_expected_op_num(self):\n self.expected_op_num = 6\n self.expected_shape_op_num = 1\n self.expected_slice_op_num = 1\n\n\nclass TestTensorShapeInWhile3(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_with_while_3\n\n def _set_expected_op_num(self):\n self.expected_op_num = 3\n self.expected_shape_op_num = 1\n self.expected_slice_op_num = 0\n\n\nclass TestTensorShapeInWhile4(TestTensorShapeBasic):\n def init_test_func(self):\n self.dygraph_func = dyfunc_with_while_4\n\n def _set_expected_op_num(self):\n self.expected_op_num = 5\n self.expected_shape_op_num = 0\n self.expected_slice_op_num = 0\n\n\n# 5. 
Test op num for negative dim\nclass TestOpNumBasicWithTensorShape(unittest.TestCase):\n    def setUp(self):\n        self._set_input_spec()\n        self._set_test_func()\n        self._set_expected_op_num()\n\n    def _set_input_spec(self):\n        self.input_spec = [\n            paddle.static.InputSpec(\n                shape=[-1, 5], dtype=\"int32\")\n        ]\n\n    def _set_test_func(self):\n        self.dygraph_func = dyfunc_tensor_shape_1\n\n    def _set_expected_op_num(self):\n        self.expected_op_num = 3\n        self.expected_shape_op_num = 1\n        self.expected_slice_op_num = 0\n\n    def _compute_op_num(self, program):\n        self.op_num = sum([len(block.ops) for block in program.blocks])\n        self.shape_op_num = 0\n        self.slice_op_num = 0\n\n        for block in program.blocks:\n            self.shape_op_num += len(\n                [op for op in block.ops if op.type == \"shape\"])\n            self.slice_op_num += len(\n                [op for op in block.ops if op.type == \"slice\"])\n\n    def test_op_num(self):\n        static_layer = paddle.jit.to_static(self.dygraph_func, self.input_spec)\n        program = static_layer.main_program\n\n        self._compute_op_num(program)\n        self.assertEqual(self.op_num, self.expected_op_num)\n        self.assertEqual(self.shape_op_num, self.expected_shape_op_num)\n        self.assertEqual(self.slice_op_num, self.expected_slice_op_num)\n\n\nclass TestOpNumBasicWithTensorShape4(TestOpNumBasicWithTensorShape):\n    def _set_test_func(self):\n        self.dygraph_func = dyfunc_tensor_shape_4\n\n    def _set_expected_op_num(self):\n        self.expected_op_num = 6\n        self.expected_shape_op_num = 1\n        self.expected_slice_op_num = 1\n\n\nclass TestOpNumWithTensorShapeTuple1(TestOpNumBasicWithTensorShape):\n    def _set_test_func(self):\n        self.dygraph_func = dyfunc_tuple_shape_1\n\n    def _set_expected_op_num(self):\n        self.expected_op_num = 7\n        self.expected_shape_op_num = 2\n        self.expected_slice_op_num = 2\n\n\nclass TestOpNumWithTensorShapeInIf1(TestOpNumBasicWithTensorShape):\n    def _set_test_func(self):\n        self.dygraph_func = dyfunc_with_if_1\n\n    def _set_expected_op_num(self):\n        self.expected_op_num = 28\n        self.expected_shape_op_num = 4\n        self.expected_slice_op_num = 2\n\n\nclass TestOpNumWithTensorShapeInFor1(TestOpNumBasicWithTensorShape):\n    def _set_test_func(self):\n        self.dygraph_func = dyfunc_with_for_1\n\n    def _set_expected_op_num(self):\n        self.expected_op_num = 22\n        self.expected_shape_op_num = 3\n        self.expected_slice_op_num = 3\n\n\nclass TestOpNumWithTensorShapeInWhile1(TestOpNumBasicWithTensorShape):\n    def _set_test_func(self):\n        self.dygraph_func = dyfunc_with_while_1\n\n    def _set_expected_op_num(self):\n        self.expected_op_num = 22\n        self.expected_shape_op_num = 3\n        self.expected_slice_op_num = 3\n\n\nclass TestChangeShapeAfterAssign(TestTensorShapeBasic):\n    def init_test_func(self):\n        self.input = numpy.ones((2, 3)).astype(\"int32\")\n        self.input_spec = [paddle.static.InputSpec(shape=[2, 3], dtype=\"int32\")]\n        self.dygraph_func = dyfunc_change_shape_after_assign\n\n    def _set_expected_op_num(self):\n        self.expected_op_num = 7\n        self.expected_shape_op_num = 2\n        self.expected_slice_op_num = 2\n\n\ndef dyfunc_with_static_convert_var_shape(x):\n    # Note: this will create `batch_size__static_convert_var_shape_suffix_0` firstly.\n    batch_size = x.shape[0]\n    if len(x.shape) < 1:\n        res = x\n    else:\n        # Test for correctly to find `batch_size__static_convert_var_shape_suffix_0` in\n        # deeply nested scope.\n        res = fluid.layers.fill_constant(\n            value=8, shape=[batch_size], dtype=\"int32\")\n\n    return res\n\n\nclass TestFindStatiConvertVarShapeSuffixVar(unittest.TestCase):\n    def test(self):\n        x_spec = paddle.static.InputSpec(shape=[None, 10])\n        func =
 paddle.jit.to_static(dyfunc_with_if_2, input_spec=[x_spec])\n        # Call this function to trigger program translation.\n        func.concrete_program\n\n\nif __name__ == '__main__':\n    unittest.main()\n",
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom .... import core\nfrom ....framework import IrGraph\nfrom ....framework import _get_paddle_place\n\n__all__ = ['Quant2Int8MkldnnPass']\n\nOpRole = core.op_proto_and_checker_maker.OpRole\n\n\nclass Quant2Int8MkldnnPass(object):\n \"\"\"\n Transform a quant model IrGraph into MKL-DNN supported INT8 IrGraph.\n The pass consists of the following transformations:\n 1. gather scale values from fake quantize/dequantize operators,\n 2. extract FP32 inference model graph from the quant graph, i.e.\n a. remove fake quantize/dequantize operators,\n b. dequantize conv2d and mul's weights,\n 3. optimize the FP32 graph using standard FP32 optimization fuses\n (e.g. `conv2d`+`bn` -> `conv2d`),\n 4. quantize the optimized FP32 graph using standard INT8v2 quantization\n passes (`cpu_quantize_pass`, `cpu_quantize_squash_pass`).\n \"\"\"\n\n def __init__(self,\n _ops_to_quantize,\n _op_ids_to_skip=None,\n _scope=None,\n _place=None,\n _core=None,\n _debug=False):\n self._scope = _scope\n self._place = _get_paddle_place(_place)\n self._core = _core\n self._debug = _debug\n self._fake_quantize_types = [\n 'fake_quantize_moving_average_abs_max',\n 'fake_quantize_range_abs_max',\n ]\n self._fake_dequantize_types = [\n 'fake_dequantize_max_abs', 'fake_channel_wise_dequantize_max_abs'\n ]\n self._fake_quantize_dequantize_types = [\n 'fake_quantize_dequantize_abs_max',\n 'fake_quantize_dequantize_moving_average_abs_max',\n 'fake_channel_wise_quantize_dequantize_abs_max'\n ]\n self._ops_to_quantize = _ops_to_quantize\n self._op_ids_to_skip = _op_ids_to_skip if _op_ids_to_skip is not None else set(\n [-1])\n self._scale_immutable_ops = ['transpose2', 'reshape2', 'pool2d']\n self._scale_ops = ['scale']\n self._conv_ops = ['conv2d', 'depthwise_conv2d']\n self._pool_ops = ['pool2d']\n self._mul_ops = ['mul']\n self._fc_ops = ['fc']\n self._relu_ops = ['relu', 'relu6']\n self._matmul_ops = ['matmul']\n self._gru_ops = ['fusion_gru', 'multi_gru']\n self._lstm_ops = ['fusion_lstm']\n self._weight_thresholds = {}\n # Collect the Input and Output sclaes from Fake quant models\n self._var_quant_scales = {}\n self._max_range = {}\n self._s8_max = 127\n self._pass_idx = 0\n self._pass_group = 'int8'\n\n def apply(self, graph):\n assert isinstance(graph,\n IrGraph), 'graph must be the instance of IrGraph.'\n\n self._reset_pass_idx_and_group('int8')\n graph = self._label_skip_quantized_op(graph)\n graph = self._gather_weight_thresholds_from_fake(graph)\n graph = self._gather_input_scales_from_fake(graph)\n graph = self._gather_output_scales_from_attr(graph)\n graph = self._remove_fake_ops(graph)\n graph = self._dequantize_weights(graph)\n graph = self._optimize_fp32_graph(graph)\n graph = self._compute_weight_scales(graph)\n # This function causes nondeterministic quantization behavior\n # graph = self._update_relu_output_scales(graph)\n graph = self._propagate_scales(graph)\n graph = 
self._quantize_fp32_graph(graph)\n graph = self._final_optimizations(graph)\n graph = self._cleanup(graph)\n return graph\n\n def prepare_and_optimize_fp32(self, graph):\n assert isinstance(graph,\n IrGraph), 'graph must be the instance of IrGraph.'\n\n self._reset_pass_idx_and_group('fp32')\n graph = self._optimize_fp32_graph(graph)\n graph = self._final_optimizations(graph)\n graph = self._cleanup(graph)\n return graph\n\n def _reset_pass_idx_and_group(self, group):\n self._pass_idx = 0\n self._pass_group = group\n\n def _convert_scale2tensor(self, scale):\n tensor = core.LoDTensor()\n tensor.set(scale, core.CPUPlace())\n return tensor\n\n def _is_quantizing_all_ops(self):\n return len(self._ops_to_quantize) == 0\n\n def _is_any_of_op_types_in_graph(self, op_types, graph):\n return any(op.name() in op_types for op in graph.all_op_nodes())\n\n def _is_any_of_op_types_quantized(self, op_types, graph):\n return self._is_any_of_op_types_in_graph(\n op_types, graph) and (self._is_quantizing_all_ops() or\n any(op_type in self._ops_to_quantize\n for op_type in op_types))\n\n def _is_conv_quantized(self, graph):\n return self._is_any_of_op_types_quantized(self._conv_ops, graph)\n\n def _is_fc_quantized(self, graph):\n return self._is_any_of_op_types_quantized(self._fc_ops, graph)\n\n def _label_skip_quantized_op(self, graph):\n \"\"\"\n For some ops(conv2d, depthwise_conv2d, mul, matml), find and label\n the skip quantized ops. cpu_quantize_placement_pass will use the\n label to identify it.\n For static models, the skip quantized ops have `skip_quant` attr.\n Therefore, it only needs to find and label the skip quantized ops for\n dygraph models, in which the quantized ops don't have `quantization_type`\n attr.\n \"\"\"\n target_ops = self._conv_ops + self._mul_ops + self._matmul_ops\n for op_node in graph.all_op_nodes():\n if op_node.name() in target_ops and \\\n not op_node.op().has_attr(\"quantization_type\"):\n is_quantized_op = True\n for var_node in op_node.inputs:\n for front_op_node in var_node.inputs:\n if \"quantize_dequantize\" not in front_op_node.name():\n is_quantized_op = False\n if not is_quantized_op:\n op_node.op()._set_attr(\"skip_quant\", True)\n return graph\n\n def _add_scale_for_vars(self, var_names, use_unsigned_int, lod_tensor):\n \"\"\"\n Save quantization scales for variables. Do not overwrite.\n \"\"\"\n scales = self._var_quant_scales\n for var_name in var_names:\n if var_name not in scales:\n scales[var_name] = (use_unsigned_int, lod_tensor)\n\n def _gather_input_scales_from_fake(self, graph):\n # fake_quantize_dequantize_abs_max doesn't have scale value\n fake_ops = ['fake_quantize_dequantize_moving_average_abs_max']\n fake_ops.extend(self._fake_quantize_types)\n\n for op in graph.all_op_nodes():\n if op.name() in fake_ops:\n bit_length = op.op().attr(\"bit_length\")\n assert bit_length == 8, 'Unsupported number quantization bits ({}). 
Only 8 is supported now.'.format(\n bit_length)\n\n input_name = op.input(\"X\")[0]\n scale_name = op.input(\"InScale\")[0]\n output_name = op.output(\"Out\")[0]\n # Gather new weight scales after folding batchnorm in convolution\n scale = np.array(1.0 / self._load_param(\n self._scope, scale_name)[0]).astype(np.float64)\n scale[scale == np.Inf] = 0.0\n lod_tensor = self._convert_scale2tensor(scale)\n use_unsigned_int = False\n self._add_scale_for_vars([input_name, output_name],\n use_unsigned_int, lod_tensor)\n\n return graph\n\n def _gather_weight_thresholds_from_fake(self, graph):\n for op in graph.all_op_nodes():\n if op.name() in self._fake_dequantize_types:\n input_name = op.input(\"X\")[0]\n if op.op().has_attr(\"max_range\"):\n _max_range = np.array(op.op().attr(\"max_range\")).astype(\n np.float64)\n self._weight_thresholds[input_name] = np.array(\n self._s8_max * self._s8_max /\n _max_range).astype(np.float64)\n else:\n scale_name = op.input(\"Scales\")[0]\n self._weight_thresholds[input_name] = np.array(\n self._load_param(self._scope, scale_name)).astype(\n np.float64)\n\n return graph\n\n def _gather_output_scales_from_attr(self, graph):\n for op in graph.all_op_nodes():\n if op.op().has_attr(\"out_threshold\"):\n attr_scale = op.op().attr(\"out_threshold\")\n if attr_scale == 0.0:\n continue\n scale = np.array(1.0 / attr_scale).astype(np.float64)\n scale[scale == np.Inf] = 0.0\n scale_lod_tensor = self._convert_scale2tensor(scale)\n use_unsigned_int = False\n for output_name in op.op().outputs():\n for out_var_name in op.op().output(output_name):\n self._add_scale_for_vars(\n [out_var_name], use_unsigned_int, scale_lod_tensor)\n\n return graph\n\n def _propagate_scales(self, graph):\n def _update_scale_op_in_scale(op, input, output):\n unsigned, tensor = self._var_quant_scales[output]\n scale = np.array(tensor) * op.op().attr(\"scale\")\n new_tensor = self._convert_scale2tensor(scale.astype(np.float64))\n self._var_quant_scales[input] = (unsigned, new_tensor)\n\n def _update_scales(graph):\n waiting_for_scale = set()\n for op in graph.all_op_nodes():\n if op.name() in self._scale_immutable_ops:\n input_name = op.input(\"X\")[0]\n output_name = op.output(\"Out\")[0]\n tensor_names = [input_name, output_name]\n\n if all(name not in self._var_quant_scales\n for name in tensor_names):\n waiting_for_scale.update(tensor_names)\n continue\n elif input_name in self._var_quant_scales:\n self._var_quant_scales[\n output_name] = self._var_quant_scales[input_name]\n elif output_name in self._var_quant_scales:\n self._var_quant_scales[\n input_name] = self._var_quant_scales[output_name]\n elif op.name() in self._scale_ops:\n input_name = op.input(\"X\")[0]\n output_name = op.output(\"Out\")[0]\n if output_name in self._var_quant_scales:\n _update_scale_op_in_scale(op, input_name, output_name)\n return waiting_for_scale\n\n waiting_for_scale = _update_scales(graph)\n waiting_for_scale_prev = set()\n\n while len(waiting_for_scale\n ) != 0 and waiting_for_scale != waiting_for_scale_prev:\n waiting_for_scale_prev = waiting_for_scale\n waiting_for_scale = _update_scales(graph)\n\n return graph\n\n def _load_param(self, scope, param_name):\n return np.array(scope.find_var(param_name).get_tensor())\n\n def _remove_fake_ops(self, graph):\n for op in graph.all_op_nodes():\n if op.name() in self._fake_quantize_types:\n self._remove_fake_quantize(graph, op)\n elif op.name() in self._fake_dequantize_types:\n self._remove_fake_dequantize(graph, op)\n elif op.name() in 
self._fake_quantize_dequantize_types:\n self._remove_fake_dequantize(graph, op)\n\n return graph\n\n def _remove_fake_quantize(self, graph, op):\n fake_quant_in = graph._find_node_by_name(op.inputs, op.input(\"X\")[0])\n fake_quant_in_scale = graph._find_node_by_name(op.inputs,\n op.input(\"InScale\")[0])\n fake_quant_out = graph._find_node_by_name(op.outputs,\n op.output(\"Out\")[0])\n fake_quant_out_scale = graph._find_node_by_name(\n op.outputs, op.output(\"OutScale\")[0])\n\n next_ops = fake_quant_out.outputs\n for next_op in next_ops:\n self._swap_inputs(next_op, fake_quant_out, fake_quant_in)\n graph.link_to(fake_quant_in, next_op)\n graph.safe_remove_nodes(\n {op, fake_quant_in_scale, fake_quant_out, fake_quant_out_scale})\n\n return graph\n\n def _remove_fake_dequantize(self, graph, op):\n fake_dequant_in = graph._find_node_by_name(op.inputs, op.input(\"X\")[0])\n fake_dequant_out = graph._find_node_by_name(op.outputs,\n op.output(\"Out\")[0])\n\n next_ops = fake_dequant_out.outputs\n for next_op in next_ops:\n self._swap_inputs(next_op, fake_dequant_out, fake_dequant_in)\n graph.link_to(fake_dequant_in, next_op)\n graph.safe_remove_nodes({op, fake_dequant_out})\n\n return graph\n\n def _swap_inputs(self, op, old_input, new_input):\n for input_name in op.op().input_names():\n if old_input.name() in op.input(input_name):\n op.op().set_input(input_name, [\n new_input.name() if x == old_input.name() else x\n for x in op.input(input_name)\n ])\n\n def _dequantize_weights(self, graph):\n def _is_int8_weights(op_node, weight_name):\n weight_var_name = op_node.input(weight_name)[0]\n weight = self._load_param(self._scope, weight_var_name)\n return np.all(np.mod(weight, 1) == 0)\n\n for op in graph.all_op_nodes():\n if op.name() in self._conv_ops and _is_int8_weights(op, \"Filter\"):\n self._dequantize_op_weights(graph, op, \"Filter\", \"Output\")\n elif op.name() in self._mul_ops and _is_int8_weights(op, \"Y\"):\n self._dequantize_op_weights(graph, op, \"Y\", \"Out\")\n return graph\n\n def _dequantize_op_weights(self, graph, op_node, weight_name, output_name):\n weight_var_name = op_node.input(weight_name)[0]\n output_var_name = op_node.output(output_name)[0]\n # Convert int8 range weights to fp32 range weights\n scales = self._weight_thresholds[output_var_name]\n weight = self._load_param(self._scope, weight_var_name)\n if scales.size == 1 or scales.size == weight.shape[0]:\n w_fp32 = np.multiply(np.divide(weight, self._s8_max).T, scales.T).T\n elif len(weight.shape) > 1 and scales.size == weight.shape[1]:\n w_fp32 = np.multiply(np.divide(weight, self._s8_max), scales)\n else:\n raise ValueError(\n \"The size of weight scales vector ({}) does not match the dimensions ({}) of the weights tensor {}.\"\n .format(scales.size, weight.shape, weight_var_name))\n w_fp32 = w_fp32.reshape(weight.shape).astype(np.float32)\n self._restore_var(weight_var_name, w_fp32)\n\n def _restore_var(self, name, array):\n tensor = self._scope.find_var(name).get_tensor()\n tensor.set(array, self._place)\n\n def _update_activations(self, graph):\n for op in graph.all_op_nodes():\n if op.name() in self._conv_ops and not op.op().has_attr(\n \"fuse_activation\"):\n activation = \"\"\n if op.op().has_attr(\"fuse_relu\") and op.op().attr(\"fuse_relu\"):\n activation = \"relu\"\n elif op.op().has_attr(\"fuse_brelu\") and op.op().attr(\n \"fuse_brelu\"):\n activation = \"relu6\"\n alpha = 6.0\n if op.op().has_attr(\"fuse_brelu_threshold\"):\n alpha = op.op().attr(\"fuse_brelu_threshold\")\n op.set_attr(\"fuse_alpha\", 
alpha)\n op.set_attr(\"fuse_activation\", activation)\n return graph\n\n def _remove_ctrl_vars(self, graph):\n remove_ctr_vars = set()\n for node in graph.all_var_nodes():\n if node.is_ctrl_var():\n remove_ctr_vars.add(node)\n graph.safe_remove_nodes(remove_ctr_vars)\n return graph\n\n def _optimize_fp32_graph(self, graph):\n graph = self._update_activations(graph)\n graph = self._remove_ctrl_vars(graph)\n graph = self._apply_pass(graph, 'attention_lstm_fuse_pass')\n graph = self._apply_pass(graph, 'seqconv_eltadd_relu_fuse_pass')\n # graph = self._apply_pass(graph, 'seqpool_concat_fuse_pass')\n graph = self._apply_pass(graph, 'seqpool_cvm_concat_fuse_pass')\n # graph = self._apply_pass(graph, 'embedding_fc_lstm_fuse_pass')\n graph = self._apply_pass(graph, 'fc_lstm_fuse_pass')\n graph = self._apply_pass(graph, 'mul_lstm_fuse_pass')\n graph = self._apply_pass(graph, 'fc_gru_fuse_pass')\n graph = self._apply_pass(graph, 'mul_gru_fuse_pass')\n graph = self._apply_pass(graph, 'multi_gru_fuse_pass')\n graph = self._apply_pass(graph, 'multi_gru_seq_fuse_pass')\n graph = self._apply_pass(graph, 'seq_concat_fc_fuse_pass')\n graph = self._apply_pass(graph, 'squared_mat_sub_fuse_pass')\n graph = self._apply_pass(graph, 'is_test_pass')\n graph = self._apply_pass(graph, 'mkldnn_placement_pass',\n ['mkldnn_enabled_op_types'], [set()])\n graph = self._apply_pass(graph, 'depthwise_conv_mkldnn_pass')\n graph = self._apply_pass(graph, 'conv_bn_fuse_pass')\n graph = self._apply_pass(graph, 'conv_eltwiseadd_bn_fuse_pass')\n graph = self._apply_pass(graph, 'conv_transpose_bn_fuse_pass')\n graph = self._apply_pass(graph,\n 'conv_transpose_eltwiseadd_bn_fuse_pass')\n graph = self._apply_pass(graph, 'conv_bias_mkldnn_fuse_pass')\n graph = self._apply_pass(graph, 'conv_elementwise_add_mkldnn_fuse_pass')\n graph = self._apply_pass(graph, 'conv_relu_mkldnn_fuse_pass')\n graph = self._apply_pass(graph, 'conv_relu6_mkldnn_fuse_pass')\n graph = self._apply_pass(graph, 'fc_fuse_pass',\n ['use_gpu', 'use_fc_padding'], [False, False])\n graph = self._apply_pass(graph, 'repeated_fc_relu_fuse_pass')\n if self._is_fc_quantized(graph):\n graph = self._apply_pass(graph, 'fc_mkldnn_pass')\n graph = self._apply_pass(graph, 'matmul_transpose_reshape_fuse_pass')\n # the following pass should be the last one since it will work on all fused ops.\n graph = self._apply_pass(graph, 'runtime_context_cache_pass')\n return graph\n\n def _apply_pass(self, graph, pass_name, attrs=None, attr_values=None):\n ir_pass = core.get_pass(pass_name)\n cpp_graph = graph.graph\n if not cpp_graph.has('__param_scope__'):\n cpp_graph.set_not_owned('__param_scope__', self._scope)\n if attrs:\n assert attr_values and len(attrs) == len(\n attr_values\n ), \"Different number of pass attributes and their values.\"\n for attr, value in zip(attrs, attr_values):\n ir_pass.set(attr, value)\n ir_pass.apply(cpp_graph)\n if self._debug:\n graph.draw('.', '{}_{}_{}'.format(self._pass_group, self._pass_idx,\n pass_name), graph.all_op_nodes())\n self._remove_unused_var_nodes(graph)\n self._pass_idx += 1\n return graph\n\n def _final_optimizations(self, graph):\n # remove dropout ops\n graph = self._apply_pass(graph, 'simplify_with_basic_ops_pass')\n # make some MKL-DNN ops working inplace\n graph = self._apply_pass(graph, 'mkldnn_inplace_pass')\n return graph\n\n def _cleanup(self, graph):\n graph = self._remove_unused_var_nodes(graph)\n graph = self._set_op_role_forward(graph)\n return graph\n\n def _remove_unused_var_nodes(self, graph):\n all_used_vars = set()\n 
ops = graph.all_op_nodes()\n for op_node in ops:\n for input_node in op_node.inputs:\n all_used_vars.add(input_node)\n for output_node in op_node.outputs:\n all_used_vars.add(output_node)\n\n all_used_vars = {n.node for n in all_used_vars}\n all_unused_vars = {\n n\n for n in filter(lambda node: node.node not in all_used_vars,\n graph.all_var_nodes())\n }\n graph.safe_remove_nodes(all_unused_vars)\n return graph\n\n def _set_op_role_forward(self, graph):\n ops = graph.all_op_nodes()\n for op in ops:\n op.set_attr(\"op_role\", OpRole.Forward)\n return graph\n\n def _compute_weight_scales(self, graph):\n def _compute_var_scales(ops, w_name, axis):\n for op in graph.all_op_nodes():\n if op.op().type() in ops:\n weight_var_name = op.input(w_name)[0]\n weights = np.array(\n self._load_param(self._scope, weight_var_name))\n scales = 1.0 / np.amax(\n np.abs(weights.reshape(weights.shape[0], -1)).astype(\n np.float64),\n axis=axis)\n scales[scales == np.Inf] = 0.0\n\n lod_tensor = self._convert_scale2tensor(scales)\n use_unsigned_int = False\n self._var_quant_scales[weight_var_name] = (use_unsigned_int,\n lod_tensor)\n\n def _compute_single_gru_weight_scales(wx_var_name, wh_var_name):\n wx = np.array(self._load_param(self._scope, wx_var_name))\n wh = np.array(self._load_param(self._scope, wh_var_name))\n OC = wh.shape[0]\n scale_ur = 1.0 / np.max(np.abs(\n np.concatenate(\n [\n wx[:, :2 * OC], wh.flatten()[:2 * OC * OC].reshape(OC, 2\n * OC)\n ],\n axis=0)),\n axis=0)\n scale_o = 1.0 / np.max(np.abs(\n np.concatenate(\n [\n wx[:, 2 * OC:], wh.flatten()[2 * OC * OC:].reshape(OC,\n OC)\n ],\n axis=0)),\n axis=0)\n\n gru_weights_scale = np.concatenate([scale_ur,\n scale_o]).astype('float')\n\n return self._convert_scale2tensor(gru_weights_scale)\n\n def _compute_gru_weight_scales(wx_name, wh_name):\n for op in graph.all_op_nodes():\n if op.op().type() in self._gru_ops:\n assert len(op.input(wx_name)) == len(\n op.input(wh_name)\n ), 'Mismatch in number of weights inputs ({} for WeightX vs. {} for WeightH).'.format(\n len(op.input(wx_name)), len(op.input(wh_name)))\n for i, wx_var_name in enumerate(op.input(wx_name)):\n wh_var_name = op.input(wh_name)[i]\n use_unsigned_int = False\n lod_tensor = _compute_single_gru_weight_scales(\n wx_var_name, wh_var_name)\n self._var_quant_scales[wx_var_name] = (use_unsigned_int,\n lod_tensor)\n\n def _compute_single_lstm_weight_scales(wx_var_name, wh_var_name):\n wx = np.array(self._load_param(self._scope, wx_var_name))\n wh = np.array(self._load_param(self._scope, wh_var_name))\n\n lstm_weights_scale = 1.0 / np.max(\n np.abs(np.concatenate(\n [wx[:, :], wh[:, :]], axis=0)), axis=0)\n lstm_weights_scale = lstm_weights_scale.astype('float')\n\n return self._convert_scale2tensor(lstm_weights_scale)\n\n def _compute_lstm_weight_scales(wx_name, wh_name):\n for op in graph.all_op_nodes():\n if op.op().type() in self._lstm_ops:\n assert len(op.input(wx_name)) == len(\n op.input(wh_name)\n ), 'Mismatch in number of weights inputs ({} for WeightX vs. 
{} for WeightH).'.format(\n len(op.input(wx_name)), len(op.input(wh_name)))\n for i, wx_var_name in enumerate(op.input(wx_name)):\n wh_var_name = op.input(wh_name)[i]\n use_unsigned_int = False\n lod_tensor = _compute_single_lstm_weight_scales(\n wx_var_name, wh_var_name)\n self._var_quant_scales[wx_var_name] = (use_unsigned_int,\n lod_tensor)\n\n _compute_var_scales(self._conv_ops, \"Filter\", axis=1)\n _compute_var_scales(self._fc_ops, \"W\", axis=0)\n _compute_var_scales(self._gru_ops, \"WeightH\", axis=0)\n _compute_var_scales(self._lstm_ops, \"WeightH\", axis=0)\n _compute_gru_weight_scales(\"WeightX\", \"WeightH\")\n _compute_lstm_weight_scales(\"WeightX\", \"WeightH\")\n return graph\n\n def _find_avg_pooling_ids(self, graph):\n for op in graph.all_op_nodes():\n if op.name() in self._pool_ops:\n if op.op().attr(\"pooling_type\") == \"avg\":\n self._op_ids_to_skip.add(op.id())\n return self._op_ids_to_skip\n\n def _update_relu_output_scales(self, graph):\n def _set_unsigned_scale(graph, ops, op_out_name, predicate):\n '''\n Sets the type of an output scale of a passed op type(s) to 'unsigned int8' if the\n predicate applied on op passes. Typically, the predicate checks if op's\n activation is set to relu.\n '''\n for op in graph.all_op_nodes():\n if op.name() in ops:\n out_name = op.output(op_out_name)[0]\n if out_name in self._var_quant_scales and predicate(op.op(\n )):\n is_unsigned, tensor = self._var_quant_scales[out_name]\n if is_unsigned is False:\n # If the variable is signed, it means that the scales for this var\n # were computed for signed data, so the scale must be multiplied by 2\n # to fill the entire range of uint8\n scale = np.array(tensor) * 2\n tensor = self._convert_scale2tensor(\n scale.astype(np.float64))\n self._var_quant_scales[out_name] = (True, tensor)\n return graph\n\n def conv_predicate(op):\n return op.attr(\"fuse_activation\") in self._relu_ops\n\n graph = _set_unsigned_scale(graph, self._conv_ops, \"Output\",\n conv_predicate)\n\n def fc_predicate(op):\n return op.attr(\"activation_type\") in self._relu_ops\n\n graph = _set_unsigned_scale(graph, self._fc_ops, \"Out\", fc_predicate)\n\n graph = _set_unsigned_scale(graph, self._relu_ops, 'Out',\n lambda op: True)\n\n return graph\n\n def _get_data_layout(self, graph):\n return 'NHWC' if self._is_conv_quantized(graph) else 'NCHW'\n\n def _quantize_fp32_graph(self, graph):\n graph = self._apply_pass(\n graph, 'cpu_quantize_placement_pass',\n ['quantize_enabled_op_types', 'quantize_excluded_op_ids'],\n [self._ops_to_quantize, self._find_avg_pooling_ids(graph)])\n graph = self._apply_pass(graph, 'scale_matmul_fuse_pass')\n graph = self._apply_pass(graph,\n 'reshape_transpose_matmul_mkldnn_fuse_pass')\n graph = self._apply_pass(\n graph, 'cpu_quantize_pass', ['quant_var_scales', 'data_layout'],\n [self._var_quant_scales, self._get_data_layout(graph)])\n graph = self._apply_pass(graph, 'cpu_quantize_squash_pass')\n return graph\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom inference_pass_test import InferencePassTest\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid.core import PassVersionChecker\n\n\nclass ConvBnFusePassExplicitPaddingTest(InferencePassTest):\n def setUp(self):\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 3, 64, 64], dtype=\"float32\")\n conv_out = fluid.layers.conv2d(\n input=data,\n num_filters=6,\n filter_size=6,\n groups=3,\n padding=[1, 1, 1, 1],\n bias_attr=False,\n act=None)\n bn_out = fluid.layers.batch_norm(conv_out, is_test=True)\n\n self.feeds = {\n \"data\": np.random.random([1, 3, 64, 64]).astype(\"float32\"),\n }\n self.fetch_list = [bn_out]\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(PassVersionChecker.IsCompatible('conv_bn_fuse_pass'))\n\n\nclass ConvBnFusePassValidPaddingTest(InferencePassTest):\n def setUp(self):\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 3, 64, 64], dtype=\"float32\")\n conv_out = fluid.layers.conv2d(\n input=data,\n num_filters=6,\n filter_size=6,\n groups=3,\n padding='VALID',\n bias_attr=False,\n act=None)\n bn_out = fluid.layers.batch_norm(conv_out, is_test=True)\n\n self.feeds = {\n \"data\": np.random.random([1, 3, 64, 64]).astype(\"float32\"),\n }\n self.fetch_list = [bn_out]\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(PassVersionChecker.IsCompatible('conv_bn_fuse_pass'))\n\n\nclass ConvBnFusePassSamePaddingTest(InferencePassTest):\n def setUp(self):\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 3, 64, 64], dtype=\"float32\")\n conv_out = fluid.layers.conv2d(\n input=data,\n num_filters=6,\n filter_size=6,\n groups=3,\n padding='SAME',\n bias_attr=False,\n act=None)\n bn_out = fluid.layers.batch_norm(conv_out, is_test=True)\n\n self.feeds = {\n \"data\": np.random.random([1, 3, 64, 64]).astype(\"float32\"),\n }\n self.fetch_list = [bn_out]\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(PassVersionChecker.IsCompatible('conv_bn_fuse_pass'))\n\n\nclass ConvEltwiseAddBnFuseExplicitPaddingPass(InferencePassTest):\n def setUp(self):\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 3, 64, 64], dtype=\"float32\")\n conv_out = fluid.layers.conv2d(\n input=data,\n num_filters=6,\n filter_size=6,\n groups=3,\n padding=[1, 1, 1, 1],\n bias_attr=None,\n act=None)\n bn_out = fluid.layers.batch_norm(conv_out, is_test=True)\n\n self.feeds = {\n \"data\": np.random.random([1, 3, 64, 64]).astype(\"float32\"),\n }\n self.fetch_list = [bn_out]\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(\n 
PassVersionChecker.IsCompatible('conv_eltwiseadd_bn_fuse_pass'))\n\n\nclass ConvEltwiseAddBnFuseValidPaddingPass(InferencePassTest):\n def setUp(self):\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 3, 64, 64], dtype=\"float32\")\n conv_out = fluid.layers.conv2d(\n input=data,\n num_filters=6,\n filter_size=6,\n groups=3,\n padding='VALID',\n bias_attr=None,\n act=None)\n bn_out = fluid.layers.batch_norm(conv_out, is_test=True)\n\n self.feeds = {\n \"data\": np.random.random([1, 3, 64, 64]).astype(\"float32\"),\n }\n self.fetch_list = [bn_out]\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(\n PassVersionChecker.IsCompatible('conv_eltwiseadd_bn_fuse_pass'))\n\n\nclass ConvEltwiseAddBnFuseSamePaddingPass(InferencePassTest):\n def setUp(self):\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 3, 64, 64], dtype=\"float32\")\n conv_out = fluid.layers.conv2d(\n input=data,\n num_filters=6,\n filter_size=6,\n groups=3,\n padding='SAME',\n bias_attr=None,\n act=None)\n bn_out = fluid.layers.batch_norm(conv_out, is_test=True)\n\n self.feeds = {\n \"data\": np.random.random([1, 3, 64, 64]).astype(\"float32\"),\n }\n self.fetch_list = [bn_out]\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(\n PassVersionChecker.IsCompatible('conv_eltwiseadd_bn_fuse_pass'))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons\nfrom program_config import TensorConfig, ProgramConfig\nimport numpy as np\nimport paddle.inference as paddle_infer\nfrom functools import partial\nfrom typing import Optional, List, Callable, Dict, Any, Set\nimport unittest\n\n\nclass TrtConvertClipTest(TrtLayerAutoScanTest):\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n return True\n\n def sample_program_configs(self):\n def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):\n if dims == 1:\n return np.ones([64]).astype(np.float32)\n elif dims == 2:\n return np.ones([3, 64]).astype(np.float32)\n elif dims == 3:\n return np.ones([3, 64, 64]).astype(np.float32)\n else:\n return np.ones([batch, 3, 64, 64]).astype(np.float32)\n\n def generate_weight1(attrs: List[Dict[str, Any]]):\n return np.array([np.random.uniform(1, 10)]).astype(\"float32\")\n\n def generate_weight2(attrs: List[Dict[str, Any]]):\n return np.array([np.random.uniform(10, 20)]).astype(\"float32\")\n\n for dims in [1, 2, 3, 4]:\n for batch in [1, 2, 4]:\n for op_inputs in [{\n \"X\": [\"input_data\"]\n }, {\n \"X\": [\"input_data\"],\n \"Min\": [\"Min_\"],\n \"Max\": [\"Max_\"]\n }]:\n self.input_num = len(op_inputs)\n self.dims = dims\n dics = [{\n \"min\": np.random.uniform(1, 10),\n \"max\": np.random.uniform(10, 20)\n }, {\n \"op_inputs\": op_inputs\n }]\n ops_config = [{\n \"op_type\": \"clip\",\n \"op_inputs\": op_inputs,\n \"op_outputs\": {\n \"Out\": [\"output_data\"]\n },\n \"op_attrs\": dics[0]\n }]\n ops = self.generate_op_config(ops_config)\n\n program_config = ProgramConfig(\n ops=ops,\n weights={\n \"Min_\": TensorConfig(data_gen=partial(\n generate_weight1, dics)),\n \"Max_\": TensorConfig(data_gen=partial(\n generate_weight2, dics))\n },\n inputs={\n \"input_data\": TensorConfig(data_gen=partial(\n generate_input1, dims, batch, dics))\n },\n outputs=[\"output_data\"])\n\n yield program_config\n\n def sample_predictor_configs(self, program_config):\n def generate_dynamic_shape(attrs):\n if self.dims == 1:\n self.dynamic_shape.min_input_shape = {\"input_data\": [1]}\n self.dynamic_shape.max_input_shape = {\"input_data\": [128]}\n self.dynamic_shape.opt_input_shape = {\"input_data\": [64]}\n elif self.dims == 2:\n self.dynamic_shape.min_input_shape = {\"input_data\": [1, 32]}\n self.dynamic_shape.max_input_shape = {\"input_data\": [4, 64]}\n self.dynamic_shape.opt_input_shape = {\"input_data\": [3, 64]}\n elif self.dims == 3:\n self.dynamic_shape.min_input_shape = {\"input_data\": [1, 32, 32]}\n self.dynamic_shape.max_input_shape = {\n \"input_data\": [10, 64, 64]\n }\n self.dynamic_shape.opt_input_shape = {\"input_data\": [3, 64, 64]}\n else:\n self.dynamic_shape.min_input_shape = {\n \"input_data\": [1, 3, 32, 32]\n }\n self.dynamic_shape.max_input_shape = {\n \"input_data\": [4, 3, 64, 64]\n }\n self.dynamic_shape.opt_input_shape = {\n 
\"input_data\": [1, 3, 64, 64]\n }\n\n def clear_dynamic_shape():\n self.dynamic_shape.min_input_shape = {}\n self.dynamic_shape.max_input_shape = {}\n self.dynamic_shape.opt_input_shape = {}\n\n def generate_trt_nodes_num(attrs, dynamic_shape):\n if self.input_num == 3 or self.dims == 1:\n return 0, 3\n else:\n return 1, 2\n\n attrs = [\n program_config.ops[i].attrs\n for i in range(len(program_config.ops))\n ]\n\n # for static_shape\n clear_dynamic_shape()\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), generate_trt_nodes_num(\n attrs, False), 1e-5\n self.trt_param.precision = paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), generate_trt_nodes_num(\n attrs, False), 1e-5\n\n # for dynamic_shape\n generate_dynamic_shape(attrs)\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), generate_trt_nodes_num(attrs,\n True), 1e-5\n self.trt_param.precision = paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), generate_trt_nodes_num(attrs,\n True), 1e-5\n\n def add_skip_trt_case(self):\n def teller1(program_config, predictor_config):\n if len(\n program_config.inputs['input_data'].shape\n ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled():\n return True\n return False\n\n self.add_skip_case(\n teller1, SkipReasons.TRT_NOT_IMPLEMENTED,\n \"The output shape has diff, but we can add shuffle layer to resolve it.\"\n )\n\n def test(self):\n self.add_skip_trt_case()\n self.run_test()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.maximum",
"numpy.sqrt",
"numpy.abs",
"numpy.random.seed",
"scipy.special.expit",
"numpy.random.random_sample",
"numpy.sign",
"numpy.exp",
"numpy.random.uniform",
"numpy.tanh"
],
[
"numpy.allclose",
"numpy.ones"
],
[
"numpy.mod",
"numpy.array",
"numpy.concatenate",
"numpy.divide"
],
[
"numpy.random.random"
],
[
"numpy.random.uniform",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Term-inator/Brain-Tumor-Detection | [
"b59715092cca7a17b589b5d906983eb42ee4ad87"
] | [
"run.py"
] | [
"# ====================================================\n# main\n# ====================================================\nimport os\nimport shutil\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import GroupKFold\n\nfrom utils import get_score, seed_torch\nfrom train import train_loop, set_params\nfrom logger import init_logger, close_logger, Logger\n\n\ntarget_cols_map = {\n 'tumor': ['label'],\n 'T1SS': ['label'],\n 'T2SS': ['label'],\n 'T1': ['T1'],\n '2label': ['label', 'T1'],\n 'randT1': ['label', 'T1'],\n 'randTumor': ['label', 'T1']\n}\n\ndata_path_map = {\n 'tumor': 'RealTrain/',\n 'T1SS': 'T1TrainSameSize/',\n 'T2SS': 'T2TrainSameSize/',\n 'T1': 'RealTrain/',\n '2label': 'RealTrain/',\n 'randT1': 'RealTrainRandomT1/',\n 'randTumor': 'RealTrainRandomTumor/'\n}\n\n\nclass Params:\n n_fold = 4\n trn_fold = [0, 1, 2]\n\n debug = False\n train = True\n\n type = None\n target_cols = None\n data_path = None\n output_dir = None\n seed = None\n\n def __init__(self, type, seed, epochs):\n Params.type = type\n output_base_path = '../output/'\n data_base_path = '../input/'\n\n Params.target_cols = target_cols_map[type]\n Params.data_path = data_base_path + data_path_map[type]\n\n Params.target_size = len(Params.target_cols)\n Params.seed = seed\n Params.epochs = epochs\n Params.output_dir = output_base_path + f'{type}_seed{seed}-ep{epochs}/'\n # ====================================================\n # Directory settings\n # ====================================================\n if os.path.exists(Params.output_dir):\n shutil.rmtree(Params.output_dir)\n os.makedirs(Params.output_dir)\n\n\nif Params.debug:\n Params.epochs = 1\n\n\ndef main():\n train = pd.read_csv(Params.data_path + 'data.csv')\n # print(train.head())\n init_logger(Params.output_dir + 'train.log')\n\n seed_torch(seed=Params.seed)\n\n # ====================================================\n # Split Train Test\n # ====================================================\n folds = train.copy()\n if Params.type != 'T1SS' and Params.type != 'T2SS':\n Fold = GroupKFold(n_splits=Params.n_fold)\n groups = folds['filename'].values\n for n, (train_index, val_index) in enumerate(Fold.split(folds, folds[Params.target_cols], groups)):\n folds.loc[val_index, 'fold'] = int(n)\n folds['fold'] = folds['fold'].astype(int)\n # print(folds.groupby('fold').size())\n # print(folds)\n\n tst_idx = folds[folds['fold'] == Params.n_fold - 1].index\n\n test_fold = folds.loc[tst_idx].reset_index(drop=True)\n _test_fold = test_fold.copy(deep=True)\n\n train_folds = folds[folds['fold'].isin([i for i in range(Params.n_fold - 1)])]\n\n # print(train_folds.groupby('fold').size())\n # print(train_folds)\n\n def get_test_result(test_scores):\n Logger().info(f'Scores: {np.round(np.mean(test_scores, axis=0), decimals=4)}')\n\n def get_result(result_df):\n preds = result_df[[f'pred_{c}' for c in Params.target_cols]].values\n labels = result_df[Params.target_cols].values\n score, scores = get_score(labels, preds)\n Logger().info(f'Score: {score:<.4f} Scores: {np.round(scores, decimals=4)}')\n\n set_params(Params)\n all_test_scores = []\n\n if Params.train:\n # train\n oof_df = pd.DataFrame()\n for fold in range(Params.n_fold - 1):\n if fold in Params.trn_fold:\n _oof_df, test_scores = train_loop(train_folds, fold, _test_fold)\n oof_df = pd.concat([oof_df, _oof_df])\n all_test_scores.append(test_scores)\n Logger().info(f\"========== fold: {fold} result ==========\")\n get_result(_oof_df)\n # test result\n Logger().info(f\"\\n========== TEST 
==========\")\n get_test_result(np.array(all_test_scores))\n # CV result\n Logger().info(f\"========== CV ==========\")\n get_result(oof_df)\n # save result\n oof_df.to_csv(Params.output_dir + 'result.csv', index=False)\n\n close_logger()\n\n\nseed_list = [31, 37, 41, 42, 43, 47, 53]\nseeds = [53]\ntype_list = ['tumor', 'T1SS', 'T2SS', 'T1', '2label', 'randT1', 'randTumor']\ntypes = ['randTumor']\n\nif __name__ == '__main__':\n for seed in seeds:\n for type in types:\n for epochs in range(10, 61, 10):\n Params(type, seed, epochs)\n print(f'target_cols: {Params.target_cols}')\n print(f'data_path: {Params.data_path}, output_dir: {Params.output_dir}')\n print(f'seed: {seed}, epochs: {epochs}')\n main()\n\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.round",
"numpy.mean",
"numpy.array",
"sklearn.model_selection.GroupKFold"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
AlexFaernon/refactoring | [
"5aafbbcadd92f3fe1fb3b6a6cfb2fd79d6bef9a4"
] | [
"filter.py"
] | [
"from PIL import Image\r\nimport numpy as np\r\n\r\ncorrect_path = False\r\nCELL_X_OFFSET = 10\r\nCELL_Y_OFFSET = 10\r\nGREY_GRADATION = 50\r\nwhile not correct_path:\r\n img_path = input(\"Enter path to image for filtering: \")\r\n try:\r\n img = Image.open(img_path)\r\n correct_path = True\r\n except:\r\n print(\"Incorrect path/file format, please try again\")\r\nimg_matrix = np.array(img)\r\nlen_x = len(img_matrix)\r\nlen_y = len(img_matrix[1])\r\ncell_x = 0\r\nwhile cell_x < len_x:\r\n cell_y = 0\r\n while cell_y < len_y:\r\n sum_RGB = 0\r\n sum_RGB += np.sum(img_matrix[cell_x:cell_x + CELL_X_OFFSET, cell_y:cell_y + CELL_Y_OFFSET]) // 3\r\n sum_RGB = int(sum_RGB // 100)\r\n grey_matrix = np.zeros((CELL_X_OFFSET, CELL_Y_OFFSET, 3))\r\n grey_matrix[:] = int(sum_RGB // GREY_GRADATION) * GREY_GRADATION\r\n img_matrix[cell_x:cell_x + CELL_X_OFFSET, cell_y:cell_y + CELL_Y_OFFSET] = grey_matrix\r\n cell_y = cell_y + CELL_Y_OFFSET\r\n cell_x = cell_x + CELL_X_OFFSET\r\nres = Image.fromarray(img_matrix)\r\ncorrect_path = False\r\nwhile not correct_path:\r\n res_path = input(\"Enter path to resulting image: \")\r\n try:\r\n res.save(res_path)\r\n correct_path = True\r\n except:\r\n print(\"Incorrect path/file format, please try again\")\r\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
swiftfish/flexitext | [
"9863650b662bef6dd63222f0b9bade2e72f4d762"
] | [
"flexitext/flexitext.py"
] | [
"import matplotlib.pyplot as plt\n\nfrom matplotlib.offsetbox import AnnotationBbox\n\nfrom flexitext.parser import make_texts\nfrom flexitext.textgrid import make_text_grid\n\n\nclass FlexiText:\n \"\"\"Handle storing and drawing of formatted text.\n\n Parameters\n ----------\n\n texts: tuple or list of flexitext.Text instances\n These objects represent the text together with their styles.\n \"\"\"\n\n HORIZONTAL_ALIGNMENT = {\"center\": 0.5, \"left\": 0, \"right\": 1}\n VERTICAL_ALIGNMENT = {\"center\": 0.5, \"top\": 1, \"bottom\": 0}\n\n def __init__(self, *texts):\n self.texts = texts\n\n def plot(\n self,\n x,\n y,\n ha=\"left\",\n va=\"center\",\n ma=\"left\",\n mva=\"baseline\",\n xycoords=\"axes fraction\",\n ax=None,\n ):\n \"\"\"Draw text with multiple formats.\n\n Parameters\n ----------\n x: float\n The horizontal position to place the text. By default, this is in axes fraction\n coordinates.\n y: float\n The vertical position to place the text. By default, this is in axes fraction\n coordinates.\n ha: str\n Horizontal alignment. Must be one of `'center'`, `'right'`, or `'left'`.\n va: str\n Horizontal alignment. Must be one of `'center'`, `'top'`, or `'bottom'`.\n ma: str\n Alignment for multiline texts. The layout of the bounding box of all the lines is\n determined by the `ha` and `va` properties. This property controls the alignment of the\n text lines within that box.\n mva: str\n Vertical alignment for text within multiline texts. Can be one of `\"top\"`, `\"bottom\"`,\n `\"left\"`, `\"right\"`, `\"center\"`, or `\"baseline\"`. Defaults to `\"baseline\"`.\n xycoords: str\n The coordinate system for `x` and `y`. Must be one of `'axes fraction'` or\n `'figure fraction'`.\n ax: matplotlib.axes.Axes\n Matplotlib `Axes`. The default value means the `Axes` is obtained with `plt.gca()`\n\n Returns\n -------\n annotation_box: matplotlib.offsetbox.AnnotationBbox\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n if xycoords == \"axes fraction\":\n parent = ax\n elif xycoords == \"figure fraction\":\n parent = ax.figure\n xycoords = ax.figure.transFigure\n else:\n raise ValueError(\n f\"'xycoords' must be one of 'axes fraction' or 'figure fraction', not {xycoords}\"\n )\n\n offsetbox = self._make_offset_box(ma, mva)\n box_alignment = self._make_box_alignment(ha, va)\n annotation_box = AnnotationBbox(\n offsetbox,\n (x, y),\n xycoords=xycoords,\n frameon=False,\n box_alignment=box_alignment,\n pad=0,\n )\n\n parent.add_artist(annotation_box)\n return annotation_box\n\n def _make_box_alignment(self, ha, va):\n \"\"\"Convert ha and va to a touple of two numbers\"\"\"\n ha = self.HORIZONTAL_ALIGNMENT[ha]\n va = self.VERTICAL_ALIGNMENT[va]\n return (ha, va)\n\n def _make_offset_box(self, mha, mva):\n \"\"\"Create grid with formatted text\"\"\"\n return make_text_grid(self.texts, mha, mva)\n\n\ndef flexitext(\n x,\n y,\n s,\n ha=\"left\",\n va=\"center\",\n ma=\"left\",\n mva=\"baseline\",\n xycoords=\"axes fraction\",\n ax=None,\n):\n \"\"\"Draw text with multiple formats.\n\n Parameters\n ----------\n x: float\n The horizontal position to place the text. By default, this is in axes fraction\n coordinates.\n y: float\n The vertical position to place the text. By default, this is in axes fraction\n coordinates.\n ha: str\n Horizontal alignment. Must be one of `'center'`, `'right'`, or `'left'`.\n va: str\n Horizontal alignment. Must be one of `'center'`, `'top'`, or `'bottom'`.\n ma: str\n Alignment for multiline texts. 
The layout of the bounding box of all the lines is\n determined by the `ha` and `va` properties. This property controls the alignment of the\n text lines within that box.\n mva: str\n Vertical alignment for text within multiline texts. Can be one of `\"top\"`, `\"bottom\"`,\n `\"left\"`, `\"right\"`, `\"center\"`, or `\"baseline\"`. Defaults to `\"baseline\"`.\n xycoords: str\n The coordinate system for `x` and `y`. Must be one of `'axes fraction'` or\n `'figure fraction'`.\n ax: matplotlib.axes.Axes\n Matplotlib `Axes`. The default value means the `Axes` is obtained with `plt.gca()`\n\n Returns\n -------\n annotation_box: matplotlib.offsetbox.AnnotationBbox\n \"\"\"\n return FlexiText(*make_texts(s)).plot(x, y, ha, va, ma, mva, xycoords, ax)\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.offsetbox.AnnotationBbox"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
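The flexitext.py entry above boils down to a single-call helper. A minimal usage sketch follows; it assumes (not confirmed by this entry) that the installed package re-exports `flexitext` at the top level and that `make_texts()` parses inline `<attr:value>...</>` style tags:

```python
# Hedged usage sketch for the flexitext() helper above.
# Assumptions: top-level `flexitext` export and <attr:value>...</> tag syntax.
import matplotlib.pyplot as plt
from flexitext import flexitext

fig, ax = plt.subplots()
flexitext(
    0.5, 0.9,
    "<size:20>A <color:red, weight:bold>styled</> title</>",
    ha="center", va="top", ax=ax,
)
plt.show()
```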
mwmoura/Concreto-armado | [
"487d9d513cfa4ba24c765fa8015bf5fc43cd60f1"
] | [
"Concreto/MomxCurv2.py"
] | [
"# Diagrama Momento x curvatura para seções transversais de concreto armado\n#\n# ENTRADA DE DADOS\n# Chamar biblioteca matemática\nimport numpy as np\n#\ndef tensao(esl):\n # Calcula a tensão no aço\n # es = módulo de elasticidade do aço em kN/cm2\n # esl = deformação de entrada\n # fyd = tensão de escoamento de cálculo em kN/cm2\n # tsl = tensão de saída em kN/cm2\n # \n # Trabalhando com deformação positiva\n ess = np.abs(esl)\n eyd = fyd / es\n if ess < eyd:\n tsl = es * ess\n else:\n tsl = fyd\n # Trocando o sinal se necessário\n if esl < 0:\n tsl = -tsl\n return tsl\n\ndef tensaoc (ecl):\n # Calcula a tensão no concreto\n # e0 = deformação do início do patamar de plastificação\n # ecl = deformação de entrada\n # tcd = resistência de cálculo em kN/cm2\n # tcl = tensão de saída em kN/cm2\n # \n ecs = np.abs(ecl)\n e0 = 2 / 1000\n eta = ecs / e0\n if ecs < e0:\n tcl = tcd * (2 * eta - eta ** 2)\n else:\n tcl = tcd\n return tcl\n#\ndef funcao(x):\n #\n # Calcula o valor da função f(x) dada na equaçãoo (6.3.10) do Volume 1 de Curso de Concreto Armado\n # O valor de saída é a variável f\n # \n # Constantes para o cálculo das deformações das camadas de armadura\n xl = eu * di[0] / (eu + 10)\n if x <= xl:\n # A linha neutra está no domínio 2 (C É A CURVATURA)\n c = 0.01 / (di[0] - x)\n else:\n # A linha neutra está nos domínios 3 ou 4\n c = eu / (1000 * x)\n # Resultante de compressão no concreto\n rc = alamb * b * x * tcd\n f = rc\n # Superpondo a contribuição das armaduras\n for i in range (0, n, 1):\n esi = c * (x - di[i])\n tsl = tensao(esi)\n tens[i] = tsl\n f = f + asi[i] * tsl\n # Transformando f em adimensional para testar a convergência\n f = f / (b * di[0] * tcd)\n return f\n#\n#fck=float(input('Resistência característica à compressão do concreto em MPa = '))\nfck = 20\n#fyk=float(input('Tensão de escoamento característica do aço em MPa = '))\nfyk = 500\n#es=float(input('Módulo de elasticidade do aço em GPa = '))\nes = 200\n#gamac=float(input('Coeficientes parciais de segurança para o concreto = '))\ngamac = 1.4\n#gamas=float(input('Coeficientes parciais de segurança para o aço = '))\ngamas = 1.15\n#b =float(input('Largura da seção transversal em cm = '))\nb = 15\n#n =int(input('Número de camadas de armadura = '))\nn = 1\nprint('Inserir dados referentes as camadas de armadura.')\nprint('As camadas são numeradas de baixo para cima e separadas por , .')\nasi = list(range(n))\ndi = list(range(n))\nprint('Dados das camadas de armadura.')\nprint('As camadas são inseridas de baixo para cima.')\nfor i in range (0, n, 1):\n print('Altura útil da camada',(i+1),'.')\n #di[i] = float(input('Valor: '))\n di[i] = 36\nfor i in range (0, n, 1):\n print('Área de aço da camada',(i+1),'.')\n #asi[i] = float(input('Valor: '))\n asi[i] = 2\ndi = np.asarray(di)\nasi = np.asarray(asi)\n#print (di[0])\n#print (asi[0])\n#\n# FIM DA ENTRADA DE DADOS\n#\n# INÍCIO DOS CÁLCULOS\n# \n# Parâmetros do diagrama retangular (PODE SAIR)\n'''if fck <= 50:\n alamb = 0.8\n alfac = 0.85\n eu = 3.5\nelse:\n alamb = 0.8 - (fck - 50) / 400\n alfac = 0.85 * (1 - (fck - 50) / 200)\n eu = 2.6 + 35 * ((90 - fck) / 100) ** 4'''\neu = 3.5\nalfac = 0.85\n#\n# Conversão de unidades: transformando para kN e cm\nfck = fck / 10\nfyk = fyk / 10\nes = 100 * es\n#\n# Resistências de cálculo\nfcd = fck / gamac\ntcd = alfac * fcd\nfyd = fyk / gamas\n#\n# Cálculo do momento de ruína através do processo iterativo da bissecante\n#\n# Valor inicial para a linha neutra\nxi = 0\ntens = list(range(n))\ntens = np.asarray(tens)\ntsl = 0.\nf = 
0.\n# Fixes: alamb was undefined at runtime (the block that defined it above is\n# commented out), and tens was built as an int array, which would truncate\n# the steel stresses assigned inside funcao().\nalamb = 0.8\ntens = tens.astype(float)\n# Chamar sub-rotina\nf = funcao(xi)\nfi = f\n# Valor final para a linha neutra\nxf = di[0]\n# Chamar sub-rotina\nf = funcao(xf)\nff = f\n# Processo iterativo da bissecante\nfk = 1\nwhile np.abs(fk) > 0.001:\n    xk = (xi * ff - xf * fi) / (ff - fi)\n    f = funcao(xk)\n    fk = f\n    prod = fk * fi\n    if prod > 0:\n        xi = xk\n        fi = fk\n    else:\n        xf = xk\n        ff = fk\n# Convergência alcançada\n# xk é a raiz da função f(x) dada na equação (6.3.10) do Volume 1 de Curso de Concreto Armado\n# Momento de ruina de cálculo\nx = xk\nrc = alamb * b * x * tcd\nzc = di[0] - 0.5 * alamb * x\namu = rc * zc\nfor i in range (0, n, 1):\n    amu = amu + asi[i] * tens[i] * (di[0] - di[i])\n# Passando o momento para kN.m\namu = amu / 100\n# Convertendo a saída para duas casas decimais\namu = round(amu, 3)\nprint('O momento resistente é de', amu, 'kN.m.')"
] | [
[
"numpy.asarray",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
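The neutral-axis solve in MomxCurv2.py above ("processo iterativo da bissecante") is a false-position (regula falsi) update that keeps a sign-change bracket. A self-contained sketch of that scheme, with the same tolerance as the script:

```python
# False-position root finder, mirroring the loop in MomxCurv2.py above.
def regula_falsi(f, xi, xf, tol=1e-3):
    fi, ff = f(xi), f(xf)   # assumes fi and ff bracket a root (opposite signs)
    fk = 1.0
    while abs(fk) > tol:
        xk = (xi * ff - xf * fi) / (ff - fi)  # secant step on the bracket
        fk = f(xk)
        if fk * fi > 0:      # root lies in [xk, xf]
            xi, fi = xk, fk
        else:                # root lies in [xi, xk]
            xf, ff = xk, fk
    return xk

# Example: root of x**2 - 2 on [0, 2]
print(regula_falsi(lambda x: x * x - 2, 0.0, 2.0))  # ~1.4142
```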
vhn0912/python-snippets | [
"80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038",
"80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038",
"80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038",
"80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038",
"80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038",
"80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038",
"80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038",
"80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038"
] | [
"notebook/numpy_swap_select.py",
"notebook/numpy_rot90_image.py",
"notebook/numpy_floor_trunc_ceil.py",
"notebook/pandas_sort_values_sort_index.py",
"notebook/numpy_astype.py",
"notebook/scipy_special_comb.py",
"notebook/numpy_logical_and_or_xor_not.py",
"notebook/pandas_shares_memory_ndarray.py"
] | [
"import numpy as np\n\na = np.arange(10, 35).reshape(5, 5)\nprint(a)\n# [[10 11 12 13 14]\n# [15 16 17 18 19]\n# [20 21 22 23 24]\n# [25 26 27 28 29]\n# [30 31 32 33 34]]\n\ncol_swap = a[:, [3, 2, 4, 0, 1]]\nprint(col_swap)\n# [[13 12 14 10 11]\n# [18 17 19 15 16]\n# [23 22 24 20 21]\n# [28 27 29 25 26]\n# [33 32 34 30 31]]\n\ncol_inverse = a[:, ::-1]\nprint(col_inverse)\n# [[14 13 12 11 10]\n# [19 18 17 16 15]\n# [24 23 22 21 20]\n# [29 28 27 26 25]\n# [34 33 32 31 30]]\n\ncol_select = a[:, [2, 4, 0]]\nprint(col_select)\n# [[12 14 10]\n# [17 19 15]\n# [22 24 20]\n# [27 29 25]\n# [32 34 30]]\n\ncol_select2 = a[:, [2, 2, 2]]\nprint(col_select2)\n# [[12 12 12]\n# [17 17 17]\n# [22 22 22]\n# [27 27 27]\n# [32 32 32]]\n\nrow_swap = a[[3, 2, 4, 0, 1], :]\nprint(row_swap)\n# [[25 26 27 28 29]\n# [20 21 22 23 24]\n# [30 31 32 33 34]\n# [10 11 12 13 14]\n# [15 16 17 18 19]]\n\nrow_swap = a[[3, 2, 4, 0, 1]]\nprint(row_swap)\n# [[25 26 27 28 29]\n# [20 21 22 23 24]\n# [30 31 32 33 34]\n# [10 11 12 13 14]\n# [15 16 17 18 19]]\n\nrow_inverse = a[::-1]\nprint(row_inverse)\n# [[30 31 32 33 34]\n# [25 26 27 28 29]\n# [20 21 22 23 24]\n# [15 16 17 18 19]\n# [10 11 12 13 14]]\n\nrow_select = a[[2, 4, 0]]\nprint(row_select)\n# [[20 21 22 23 24]\n# [30 31 32 33 34]\n# [10 11 12 13 14]]\n\nrow_select2 = a[[2, 2, 2]]\nprint(row_select2)\n# [[20 21 22 23 24]\n# [20 21 22 23 24]\n# [20 21 22 23 24]]\n",
"import numpy as np\nfrom PIL import Image\n\nimg = np.array(Image.open('data/src/lena.jpg'))\nprint(type(img))\n# <class 'numpy.ndarray'>\n\nprint(img.shape)\n# (225, 400, 3)\n\nImage.fromarray(np.rot90(img)).save('data/dst/lena_np_rot90.jpg')\n\nImage.fromarray(np.rot90(img, 2)).save('data/dst/lena_np_rot90_180.jpg')\n\nImage.fromarray(np.rot90(img, 3)).save('data/dst/lena_np_rot90_270.jpg')\n",
"import numpy as np\n\nprint(np.__version__)\n# 1.19.4\n\na = np.array([[10.0, 10.1, 10.9], [-10.0, -10.1, -10.9]])\nprint(a)\n# [[ 10. 10.1 10.9]\n# [-10. -10.1 -10.9]]\n\nprint(np.floor(a))\n# [[ 10. 10. 10.]\n# [-10. -11. -11.]]\n\nprint(np.floor(a).dtype)\n# float64\n\nprint(np.floor(a).astype(int))\n# [[ 10 10 10]\n# [-10 -11 -11]]\n\nprint(np.floor(10.1))\n# 10.0\n\nprint(np.trunc(a))\n# [[ 10. 10. 10.]\n# [-10. -10. -10.]]\n\nprint(np.fix(a))\n# [[ 10. 10. 10.]\n# [-10. -10. -10.]]\n\nprint(a.astype(int))\n# [[ 10 10 10]\n# [-10 -10 -10]]\n\nprint(np.ceil(a))\n# [[ 10. 11. 11.]\n# [-10. -10. -10.]]\n\nprint(np.copysign(np.ceil(np.abs(a)), a))\n# [[ 10. 11. 11.]\n# [-10. -11. -11.]]\n",
"import pandas as pd\n\ndf = pd.read_csv('data/src/sample_pandas_normal.csv')\nprint(df)\n# name age state point\n# 0 Alice 24 NY 64\n# 1 Bob 42 CA 92\n# 2 Charlie 18 CA 70\n# 3 Dave 68 TX 70\n# 4 Ellen 24 CA 88\n# 5 Frank 30 NY 57\n\ndf_s = df.sort_values('state')\nprint(df_s)\n# name age state point\n# 1 Bob 42 CA 92\n# 2 Charlie 18 CA 70\n# 4 Ellen 24 CA 88\n# 0 Alice 24 NY 64\n# 5 Frank 30 NY 57\n# 3 Dave 68 TX 70\n\ndf_s = df.sort_values('state', ascending=False)\nprint(df_s)\n# name age state point\n# 3 Dave 68 TX 70\n# 0 Alice 24 NY 64\n# 5 Frank 30 NY 57\n# 1 Bob 42 CA 92\n# 2 Charlie 18 CA 70\n# 4 Ellen 24 CA 88\n\ndf_s = df.sort_values(['state', 'age'])\nprint(df_s)\n# name age state point\n# 2 Charlie 18 CA 70\n# 4 Ellen 24 CA 88\n# 1 Bob 42 CA 92\n# 0 Alice 24 NY 64\n# 5 Frank 30 NY 57\n# 3 Dave 68 TX 70\n\ndf_s = df.sort_values(['age', 'state'])\nprint(df_s)\n# name age state point\n# 2 Charlie 18 CA 70\n# 4 Ellen 24 CA 88\n# 0 Alice 24 NY 64\n# 5 Frank 30 NY 57\n# 1 Bob 42 CA 92\n# 3 Dave 68 TX 70\n\ndf_s = df.sort_values(['age', 'state'], ascending=[True, False])\nprint(df_s)\n# name age state point\n# 2 Charlie 18 CA 70\n# 0 Alice 24 NY 64\n# 4 Ellen 24 CA 88\n# 5 Frank 30 NY 57\n# 1 Bob 42 CA 92\n# 3 Dave 68 TX 70\n\ndf_nan = df.copy()\ndf_nan.iloc[:2, 1] = pd.np.nan\nprint(df_nan)\n# name age state point\n# 0 Alice NaN NY 64\n# 1 Bob NaN CA 92\n# 2 Charlie 18.0 CA 70\n# 3 Dave 68.0 TX 70\n# 4 Ellen 24.0 CA 88\n# 5 Frank 30.0 NY 57\n\ndf_nan_s = df_nan.sort_values('age')\nprint(df_nan_s)\n# name age state point\n# 2 Charlie 18.0 CA 70\n# 4 Ellen 24.0 CA 88\n# 5 Frank 30.0 NY 57\n# 3 Dave 68.0 TX 70\n# 0 Alice NaN NY 64\n# 1 Bob NaN CA 92\n\ndf_nan_s = df_nan.sort_values('age', na_position='first')\nprint(df_nan_s)\n# name age state point\n# 0 Alice NaN NY 64\n# 1 Bob NaN CA 92\n# 2 Charlie 18.0 CA 70\n# 4 Ellen 24.0 CA 88\n# 5 Frank 30.0 NY 57\n# 3 Dave 68.0 TX 70\n\ndf.sort_values('state', inplace=True)\nprint(df)\n# name age state point\n# 1 Bob 42 CA 92\n# 2 Charlie 18 CA 70\n# 4 Ellen 24 CA 88\n# 0 Alice 24 NY 64\n# 5 Frank 30 NY 57\n# 3 Dave 68 TX 70\n\ndf_d = df.drop(['name', 'state'], axis=1)\nprint(df_d)\n# age point\n# 1 42 92\n# 2 18 70\n# 4 24 88\n# 0 24 64\n# 5 30 57\n# 3 68 70\n\ndf_d .sort_values(by=1, axis=1, ascending=False, inplace=True)\nprint(df_d)\n# point age\n# 1 92 42\n# 2 70 18\n# 4 88 24\n# 0 64 24\n# 5 57 30\n# 3 70 68\n\nprint(df)\n# name age state point\n# 1 Bob 42 CA 92\n# 2 Charlie 18 CA 70\n# 4 Ellen 24 CA 88\n# 0 Alice 24 NY 64\n# 5 Frank 30 NY 57\n# 3 Dave 68 TX 70\n\ndf_s = df.sort_index()\nprint(df_s)\n# name age state point\n# 0 Alice 24 NY 64\n# 1 Bob 42 CA 92\n# 2 Charlie 18 CA 70\n# 3 Dave 68 TX 70\n# 4 Ellen 24 CA 88\n# 5 Frank 30 NY 57\n\ndf_s = df.sort_index(ascending=False)\nprint(df_s)\n# name age state point\n# 5 Frank 30 NY 57\n# 4 Ellen 24 CA 88\n# 3 Dave 68 TX 70\n# 2 Charlie 18 CA 70\n# 1 Bob 42 CA 92\n# 0 Alice 24 NY 64\n\ndf.sort_index(inplace=True)\nprint(df)\n# name age state point\n# 0 Alice 24 NY 64\n# 1 Bob 42 CA 92\n# 2 Charlie 18 CA 70\n# 3 Dave 68 TX 70\n# 4 Ellen 24 CA 88\n# 5 Frank 30 NY 57\n\ndf_s = df.sort_index(axis=1)\nprint(df_s)\n# age name point state\n# 0 24 Alice 64 NY\n# 1 42 Bob 92 CA\n# 2 18 Charlie 70 CA\n# 3 68 Dave 70 TX\n# 4 24 Ellen 88 CA\n# 5 30 Frank 57 NY\n\ndf.sort_index(axis=1, ascending=False, inplace=True)\nprint(df)\n# state point name age\n# 0 NY 64 Alice 24\n# 1 CA 92 Bob 42\n# 2 CA 70 Charlie 18\n# 3 TX 70 Dave 68\n# 4 CA 88 Ellen 24\n# 5 NY 57 Frank 30\n",
"import numpy as np\n\na = np.array([1, 2, 3])\nprint(a)\nprint(a.dtype)\n# [1 2 3]\n# int64\n\na_float = a.astype(np.float32)\nprint(a_float)\nprint(a_float.dtype)\n# [1. 2. 3.]\n# float32\n\nprint(a)\nprint(a.dtype)\n# [1 2 3]\n# int64\n\na_float = a.astype(float)\nprint(a_float)\nprint(a_float.dtype)\n# [1. 2. 3.]\n# float64\n\na_str = a.astype('str')\nprint(a_str)\nprint(a_str.dtype)\n# ['1' '2' '3']\n# <U21\n\na_int = a.astype('int32')\nprint(a_int)\nprint(a_int.dtype)\n# [1 2 3]\n# int32\n\na = np.arange(50).reshape((5, 10)) / 10 - 2\nprint(a)\nprint(a.dtype)\n# [[-2. -1.9 -1.8 -1.7 -1.6 -1.5 -1.4 -1.3 -1.2 -1.1]\n# [-1. -0.9 -0.8 -0.7 -0.6 -0.5 -0.4 -0.3 -0.2 -0.1]\n# [ 0. 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9]\n# [ 1. 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9]\n# [ 2. 2.1 2.2 2.3 2.4 2.5 2.6 2.7 2.8 2.9]]\n# float64\n\na_int = a.astype('int64')\nprint(a_int)\nprint(a_int.dtype)\n# [[-2 -1 -1 -1 -1 -1 -1 -1 -1 -1]\n# [-1 0 0 0 0 0 0 0 0 0]\n# [ 0 0 0 0 0 0 0 0 0 0]\n# [ 1 1 1 1 1 1 1 1 1 1]\n# [ 2 2 2 2 2 2 2 2 2 2]]\n# int64\n\nprint(np.round(a).astype(int))\n# [[-2 -2 -2 -2 -2 -2 -1 -1 -1 -1]\n# [-1 -1 -1 -1 -1 0 0 0 0 0]\n# [ 0 0 0 0 0 0 1 1 1 1]\n# [ 1 1 1 1 1 2 2 2 2 2]\n# [ 2 2 2 2 2 2 3 3 3 3]]\n\nmy_round_int = lambda x: np.round((x * 2 + 1) // 2)\n\nprint(my_round_int(a).astype(int))\n# [[-2 -2 -2 -2 -2 -1 -1 -1 -1 -1]\n# [-1 -1 -1 -1 -1 0 0 0 0 0]\n# [ 0 0 0 0 0 1 1 1 1 1]\n# [ 1 1 1 1 1 2 2 2 2 2]\n# [ 2 2 2 2 2 3 3 3 3 3]]\n\ndef my_round(x, digit=0):\n p = 10 ** digit\n s = np.copysign(1, x)\n return (s * x * p * 2 + 1) // 2 / p * s\n\nprint(my_round(a).astype(int))\n# [[-2 -2 -2 -2 -2 -2 -1 -1 -1 -1]\n# [-1 -1 -1 -1 -1 -1 0 0 0 0]\n# [ 0 0 0 0 0 1 1 1 1 1]\n# [ 1 1 1 1 1 2 2 2 2 2]\n# [ 2 2 2 2 2 3 3 3 3 3]]\n",
"from scipy.special import comb\n\nprint(comb(4, 2))\n# 6.0\n\nprint(comb(4, 2, exact=True))\n# 6\n\nprint(comb(4, 0, exact=True))\n# 1\n\nprint(comb(4, 2, exact=True, repetition=True))\n# 10\n",
"import numpy as np\n\nprint(np.__version__)\n# 1.17.3\n\na_bool = np.array([True, True, False, False])\nb_bool = np.array([True, False, True, False])\n\nprint(a_bool.dtype)\n# bool\n\nprint(b_bool.dtype)\n# bool\n\nprint(a_bool & b_bool)\n# [ True False False False]\n\nprint(a_bool | b_bool)\n# [ True True True False]\n\nprint(a_bool ^ b_bool)\n# [False True True False]\n\nprint(~a_bool)\n# [False False True True]\n\nprint(type(a_bool & b_bool))\n# <class 'numpy.ndarray'>\n\nprint((a_bool & b_bool).dtype)\n# bool\n\n# print(a_bool and b_bool)\n# ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()\n\nprint(np.logical_and(a_bool, b_bool))\n# [ True False False False]\n\nprint(np.logical_or(a_bool, b_bool))\n# [ True True True False]\n\nprint(np.logical_xor(a_bool, b_bool))\n# [False True True False]\n\nprint(np.logical_not(a_bool))\n# [False False True True]\n\nc_int = np.arange(4)\nprint(c_int)\n# [0 1 2 3]\n\nprint(np.logical_not(c_int))\n# [ True False False False]\n\nd_int = c_int + 4\nprint(d_int)\n# [4 5 6 7]\n\nprint(np.logical_not(d_int))\n# [False False False False]\n\nprint(np.logical_and(c_int, d_int))\n# [False True True True]\n\nprint(c_int & d_int)\n# [0 1 2 3]\n\na_bool_2d = np.array([[True, True, False, False], [False, False, True, True]])\nprint(a_bool_2d)\n# [[ True True False False]\n# [False False True True]]\n\nprint(a_bool_2d & b_bool)\n# [[ True False False False]\n# [False False True False]]\n\nprint(np.logical_and(a_bool_2d, a_bool))\n# [[ True True False False]\n# [False False False False]]\n\nprint(a_bool & True)\n# [ True True False False]\n\nprint(np.logical_and(a_bool, True))\n# [ True True False False]\n\nprint(c_int)\n# [0 1 2 3]\n\nprint(c_int < 2)\n# [ True True False False]\n\nprint(c_int % 2 == 0)\n# [ True False True False]\n\nprint((c_int < 2) & (c_int % 2 == 0))\n# [ True False False False]\n\n# print(c_int < 2 & c_int % 2 == 0)\n# ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()\n\n# print(c_int < (2 & (c_int % 2)) == 0)\n# ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()\n\nprint(np.logical_and(c_int < 2, c_int % 2 == 0))\n# [ True False False False]\n",
"import pandas as pd\nimport numpy as np\n\na = np.array([[0, 1], [2, 3], [4, 5]])\nprint(a)\n# [[0 1]\n# [2 3]\n# [4 5]]\n\ndf = pd.DataFrame(a)\nprint(df)\n# 0 1\n# 0 0 1\n# 1 2 3\n# 2 4 5\n\nprint(np.shares_memory(a, df))\n# True\n\nprint(df._is_view)\n# True\n\na[0, 0] = 100\nprint(a)\n# [[100 1]\n# [ 2 3]\n# [ 4 5]]\n\nprint(df)\n# 0 1\n# 0 100 1\n# 1 2 3\n# 2 4 5\n\na_str = np.array([['a', 'x'], ['b', 'y'], ['c', 'z']])\nprint(a_str)\n# [['a' 'x']\n# ['b' 'y']\n# ['c' 'z']]\n\ndf_str = pd.DataFrame(a_str)\nprint(df_str)\n# 0 1\n# 0 a x\n# 1 b y\n# 2 c z\n\nprint(np.shares_memory(a_str, df_str))\n# False\n\nprint(df_str._is_view)\n# False\n\na_str[0, 0] = 'n'\nprint(a_str)\n# [['n' 'x']\n# ['b' 'y']\n# ['c' 'z']]\n\nprint(df_str)\n# 0 1\n# 0 a x\n# 1 b y\n# 2 c z\n\ndf_homo = pd.DataFrame({'a': [0, 1, 2], 'b': [3, 4, 5]})\nprint(df_homo)\n# a b\n# 0 0 3\n# 1 1 4\n# 2 2 5\n\nprint(df_homo.dtypes)\n# a int64\n# b int64\n# dtype: object\n\na_homo = df_homo.values\nprint(a_homo)\n# [[0 3]\n# [1 4]\n# [2 5]]\n\nprint(np.shares_memory(a_homo, df_homo))\n# True\n\ndf_homo.iat[0, 0] = 100\nprint(df_homo)\n# a b\n# 0 100 3\n# 1 1 4\n# 2 2 5\n\nprint(a_homo)\n# [[100 3]\n# [ 1 4]\n# [ 2 5]]\n\ndf_hetero = pd.DataFrame({'a': [0, 1, 2], 'b': ['x', 'y', 'z']})\nprint(df_hetero)\n# a b\n# 0 0 x\n# 1 1 y\n# 2 2 z\n\nprint(df_hetero.dtypes)\n# a int64\n# b object\n# dtype: object\n\na_hetero = df_hetero.values\nprint(a_hetero)\n# [[0 'x']\n# [1 'y']\n# [2 'z']]\n\nprint(np.shares_memory(a_hetero, df_hetero))\n# False\n\ndf_hetero.iat[0, 0] = 100\nprint(df_hetero)\n# a b\n# 0 100 x\n# 1 1 y\n# 2 2 z\n\nprint(a_hetero)\n# [[0 'x']\n# [1 'y']\n# [2 'z']]\n"
] | [
[
"numpy.arange"
],
[
"numpy.rot90"
],
[
"numpy.trunc",
"numpy.abs",
"numpy.ceil",
"numpy.floor",
"numpy.fix",
"numpy.array"
],
[
"pandas.read_csv"
],
[
"numpy.round",
"numpy.arange",
"numpy.array",
"numpy.copysign"
],
[
"scipy.special.comb"
],
[
"numpy.logical_not",
"numpy.logical_xor",
"numpy.logical_and",
"numpy.arange",
"numpy.logical_or",
"numpy.array"
],
[
"numpy.shares_memory",
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
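Several of the python-snippets files above pin behaviour that shifted across pandas releases, which is what the listed pandas versions reflect (`pd.np` in pandas_sort_values_sort_index.py was deprecated in pandas 1.0 and later removed; use `numpy` directly). A small self-contained check of the view-vs-copy behaviour from pandas_shares_memory_ndarray.py; note the mutation semantics differ once copy-on-write is enabled in newer pandas:

```python
# View-vs-copy check, as in pandas_shares_memory_ndarray.py above.
# Caveat: with pandas copy-on-write enabled, results can differ.
import numpy as np
import pandas as pd

a = np.array([[0, 1], [2, 3], [4, 5]])
df = pd.DataFrame(a)            # homogeneous numeric dtype -> may be a view
print(np.shares_memory(a, df))  # True on classic (non-CoW) pandas

a[0, 0] = 100
print(df.iat[0, 0])             # 100 when df is a view of a
```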
BaconBoi95/tensorflow | [
"484e8acedceebec8d7ea5fb008d4c367041c9cff"
] | [
"tensorflow/python/debug/lib/check_numerics_callback_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.debug.lib import check_numerics_callback\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras import optimizer_v2\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import gradient_checker_v2\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\n\n\nclass LimitStringLengthTest(test_util.TensorFlowTestCase):\n\n def testLimitStringLengthWithExplicitLimit(self):\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"\", max_len=2), \"\")\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"e\", max_len=2), \"e\")\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"de\", max_len=2), \"de\")\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"abcde\", max_len=2),\n \"...de\")\n\n def testLimitStringLengthWithNoLimit(self):\n self.assertEqual(check_numerics_callback.limit_string_length(\n \"A\" * 100 + \"B\", max_len=None), \"A\" * 100 + \"B\")\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"\", max_len=None), \"\")\n\n def testLimitStringLengthWithDefaultLimit(self):\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"A\" * 50 + \"B\"),\n \"...\" + \"A\" * 49 + \"B\")\n\n\nclass CheckNumericsCallbackTest(test_util.TensorFlowTestCase):\n\n def _assertRaisesInvalidArgumentErrorAndGetMessage(self, func):\n caught = None\n try:\n func()\n except errors.InvalidArgumentError as error:\n caught = error\n self.assertTrue(caught, \"Failed to catch expected InvalidArgumentError\")\n return caught.message\n\n def testCatchEagerOpFloat32Inf(self):\n \"\"\"Test catching Infinity in eager op execution: float32.\"\"\"\n with check_numerics_callback.check_numerics():\n x = constant_op.constant([2.0, 3.0])\n y = constant_op.constant([1.0, 0.0])\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: x / y)\n # Check the content of the error message.\n self.assertTrue(re.search(r\"eagerly-executing op.*\\\"RealDiv\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertIn(\"shape: (2,)\\n\", message)\n self.assertIn(\"# of +Inf elements: 1\\n\", message)\n 
self.assertIn(\"0: %s\" % x, message)\n self.assertIn(\"1: %s\" % y, message)\n\n def testCatchEagerOpFloat16NaN(self):\n \"\"\"Test catching Infinity in eager op execution: float16.\"\"\"\n with check_numerics_callback.check_numerics():\n def log1p(x):\n y = 1.0 + x\n return math_ops.log(y)\n x = constant_op.constant([[-1.0]], dtype=dtypes.float16)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: log1p(x))\n # Check the content of the error message.\n self.assertTrue(re.search(r\"eagerly-executing op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float16\", message))\n self.assertIn(\"shape: (1, 1)\\n\", message)\n self.assertIn(\"# of -Inf elements: 1\\n\", message)\n self.assertTrue(re.search(r\"Input tensor.*0\\.\", message))\n\n def testNoCatchEagerOpExecution(self):\n \"\"\"Test running multiple steps of eager execution without Inf/NaN.\"\"\"\n with check_numerics_callback.check_numerics():\n x = constant_op.constant([2.0, 3.0])\n y = constant_op.constant([1.0, 0.0])\n self.assertAllClose((x + y) * (x - y), [3.0, 9.0])\n\n def testCatchFunctionOpInfFloat64(self):\n \"\"\"Test catching infinites generated in a FuncGraph.\"\"\"\n with check_numerics_callback.check_numerics():\n @def_function.function\n def divide_sum_with_diff(x, y):\n w1 = x + y\n w2 = x - y\n u = w1 / w2\n return u * 2.0\n x = constant_op.constant(2.0, dtype=dtypes.float64)\n y = constant_op.constant(2.0, dtype=dtypes.float64)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: divide_sum_with_diff(x, y))\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"RealDiv\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float64\", message))\n self.assertIn(\"shape: ()\\n\", message)\n self.assertIn(\"Input tensors (2):\", message)\n # Check that the correct input ops are printed.\n self.assertTrue(re.search(r\"0:.*Tensor.*add:0\", message))\n self.assertTrue(re.search(r\"1:.*Tensor.*sub:0\", message))\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"u = w1 / w2\", message)\n\n def testControlFlowGraphWithNaNBFloat16(self):\n \"\"\"Test catching bfloat16 NaNs in a control-flow-v2 FuncGraph.\"\"\"\n @def_function.function\n def my_conditional(x):\n with check_numerics_callback.check_numerics():\n if math_ops.less(math_ops.reduce_sum(x), 0.0):\n return math_ops.log(x)\n else:\n return math_ops.log(-x)\n x = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.bfloat16)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: my_conditional(x))\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*bfloat16\", message))\n self.assertIn(\"shape: (3,)\\n\", message)\n # Check that the correct input op is printed.\n self.assertTrue(re.search(r\"Input tensor.*Tensor.*Neg\", message))\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"return math_ops.log(-x)\", message)\n self.assertTrue(message.endswith(\"\\n\"))\n\n def testOverflowInTfFunction(self):\n \"\"\"Test catching Infinity caused by overflow in a tf.function with while.\"\"\"\n with check_numerics_callback.check_numerics():\n\n @def_function.function\n def accumulation_function(counter, lim, accum):\n while math_ops.less(counter, lim):\n accum.assign(accum * 
2.0)\n counter.assign_add(1)\n\n counter = variables.Variable(0, dtype=dtypes.int32)\n # Repeated `* 2.0` overflows a float32 tensor in 128 steps. So the\n # 1000-step limit is sufficient.\n lim = constant_op.constant(1000, dtype=dtypes.int32)\n accum = variables.Variable(1.0)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: accumulation_function(counter, lim, accum))\n\n self.assertAllClose(counter.numpy(), 128)\n # Check the content of the error message.\n # The overflow to +Infinity happens during the `* 2.0` operation.\n self.assertTrue(re.search(r\"graph op.*\\\"Mul\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertIn(\"shape: ()\\n\", message)\n # Check that the correct input op is printed.\n self.assertIn(\"Input tensors (2):\", message)\n # Check that the correct input ops are printed.\n self.assertTrue(re.search(r\"0:.*Tensor.*ReadVariableOp:0\", message))\n self.assertTrue(re.search(r\"1:.*Tensor.*mul/y:0\", message))\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"accum.assign(accum * 2.0)\", message)\n\n def testKerasModelHealthyPredictAndFitCalls(self):\n \"\"\"Test a simple healthy keras model runs fine under the callback.\"\"\"\n with check_numerics_callback.check_numerics():\n model = models.Sequential()\n model.add(layers.Dense(\n units=100,\n input_shape=(5,),\n activation=\"relu\",\n kernel_initializer=\"ones\"))\n model.add(layers.BatchNormalization())\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(\n units=1,\n activation=\"linear\",\n kernel_initializer=\"ones\"))\n\n model.compile(\n loss=\"mse\", optimizer=optimizer_v2.gradient_descent.SGD(1e-3))\n\n batch_size = 16\n xs = array_ops.zeros([batch_size, 5])\n ys = array_ops.ones([batch_size, 1])\n\n outputs = model.predict(xs)\n self.assertEqual(outputs.shape, (batch_size, 1))\n\n epochs = 100\n history = model.fit(xs, ys, epochs=epochs, verbose=0)\n self.assertEqual(len(history.history[\"loss\"]), epochs)\n\n def testKerasModelUnhealthyPredictAndFitCallsWithLargeLearningRate(self):\n \"\"\"Test keras model training crashes with Infinity is caught by callback.\"\"\"\n with check_numerics_callback.check_numerics():\n model = models.Sequential()\n # Use weight initializers for deterministic behavior during test.\n model.add(layers.Dense(\n units=100,\n input_shape=(5,),\n activation=\"relu\",\n kernel_initializer=\"ones\"))\n model.add(layers.Dense(\n units=1,\n activation=\"linear\",\n kernel_initializer=\"ones\"))\n\n lr = 1e3 # Intentionally huge learning rate.\n model.compile(loss=\"mse\", optimizer=optimizer_v2.gradient_descent.SGD(lr))\n\n batch_size = 16\n xs = array_ops.zeros([batch_size, 5])\n ys = array_ops.ones([batch_size, 1])\n\n outputs = model.predict(xs)\n self.assertEqual(outputs.shape, (batch_size, 1))\n\n epochs = 100\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: model.fit(xs, ys, epochs=epochs, verbose=0))\n\n # Check the content of the error message.\n # Let's not hardcode the op name for future-proof.\n self.assertTrue(re.search(r\"graph op.*\\\".*\\\"\", message))\n self.assertTrue(re.search(r\"dtype:.*float32\", message))\n self.assertTrue(re.search(r\"shape:.*\\(.*\\)\", message))\n # Check that the correct input op is printed.\n self.assertTrue(re.search(r\"Input tensor.*\", message))\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of 
op's creation\", message))\n self.assertIn(\"lambda: model.fit(xs, ys,\", message)\n\n def testInfInCustomKerasLayerWithTfFunctionPredictCall(self):\n \"\"\"Test catching Infinity in a custom layer, w/ tf.function.\"\"\"\n\n with check_numerics_callback.check_numerics():\n class DivByXLayer(layers.Layer):\n\n @def_function.function\n def call(self, x):\n \"\"\"The computation performed by the for-test custom layer.\n\n Generates Infinity by intention.\n\n Args:\n x: Input tensor of scalar shape.\n\n Returns:\n A scalar tensor.\n \"\"\"\n one_over_x = 1.0 / x\n return one_over_x\n\n model = models.Sequential()\n model.add(DivByXLayer(input_shape=[5]))\n\n # TODO(b/140245224): Currently the model must be compiled prior to\n # predict() being called(). Or keras will fall back to V1 behavior.\n # Remove this after the bug is fixed.\n model.compile(loss=\"mse\", optimizer=\"sgd\")\n\n xs = array_ops.ones([1, 5])\n # Calling the model with non-zero inputs should be fine.\n self.assertAllClose(model.predict(xs), [[1.0, 1.0, 1.0, 1.0, 1.0]])\n\n xs = array_ops.zeros([1, 5])\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: model.predict(xs))\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"RealDiv\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertTrue(re.search(r\"shape: \\(.*, 5\\)\", message))\n # # Check that the correct input op is printed.\n self.assertIn(\"Input tensors (2):\", message)\n # # # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"one_over_x = 1.0 / x\", message)\n\n def testInfInCustomKerasLayerWithoutTfFuntionPredictCall(self):\n \"\"\"Test catching Infinity in a custom layer, w/o tf.function.\"\"\"\n\n with check_numerics_callback.check_numerics():\n class DivByXLayer(layers.Layer):\n\n # Not using the tf.function decorator here.\n def call(self, x):\n \"\"\"The computation performed by the for-test custom layer.\n\n Generates Infinity by intention.\n\n Args:\n x: Input tensor of scalar shape.\n\n Returns:\n A scalar tensor.\n \"\"\"\n one_over_x = 1.0 / x\n return one_over_x\n\n model = models.Sequential()\n model.add(DivByXLayer(input_shape=[5]))\n\n # TODO(b/140245224): Currently the model must be compiled prior to\n # predict() being called(). 
Or keras will fall back to V1 behavior.\n # Remove this after the bug is fixed.\n model.compile(loss=\"mse\", optimizer=\"sgd\")\n\n xs = array_ops.ones([1, 5])\n # Calling the model with non-zero inputs should be fine.\n self.assertAllClose(model.predict(xs), [[1.0, 1.0, 1.0, 1.0, 1.0]])\n\n xs = array_ops.zeros([1, 5])\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: model.predict(xs))\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"RealDiv\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertTrue(re.search(r\"shape: \\(.*, 5\\)\", message))\n # Check that the correct input op is printed.\n self.assertIn(\"Input tensors (2):\", message)\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"one_over_x = 1.0 / x\", message)\n\n def testCatchInfinityInDatasetMapFunction(self):\n \"\"\"Test that callback catches NaN in a tf.dataset map function.\"\"\"\n with check_numerics_callback.check_numerics():\n\n def generate_nan(x):\n \"\"\"Intetionally generates NaNs by taking log of negative number.\"\"\"\n casted_x = math_ops.cast(x, dtypes.float32)\n return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x\n\n dataset = dataset_ops.Dataset.range(10).map(generate_nan)\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n iterator.next)\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertIn(\"shape: (2, 2)\\n\", message)\n self.assertTrue(re.search(r\"Input tensor.*Tensor.*Log/x:0\", message))\n self.assertIn(\n \"-> | return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x\",\n message)\n\n def testCustomGradietWithNaNWithTfFunction(self):\n \"\"\"Test that callback catches NaN in a gradient function during backprop.\"\"\"\n with check_numerics_callback.check_numerics():\n @custom_gradient.custom_gradient\n def func_with_bad_grad(x):\n output = math_ops.sin(x)\n @def_function.function\n def grad(dy):\n # `dy` will come in as 1.0. Taking log of -1.0 leads to NaN.\n return math_ops.log(-dy)\n return output, grad\n\n x = constant_op.constant(-2.0, dtype=dtypes.float16)\n def f(x):\n return func_with_bad_grad(x)\n\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: gradient_checker_v2.compute_gradient(f, [x]))\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float16\", message))\n self.assertIn(\"shape: ()\\n\", message)\n self.assertTrue(re.search(r\"Input tensor.*Tensor.*Neg:0\", message))\n self.assertIn(\"-> | return math_ops.log(-dy)\", message)\n\n # TODO(cais): Tests for Infs and NaNs during distributed execution.\n # TODO(cais): Benchmark the slowdown due to callbacks and inserted nodes.\n\n\nif __name__ == \"__main__\":\n ops.enable_eager_execution()\n googletest.main()\n"
] | [
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.math_ops.sin",
"tensorflow.python.debug.lib.check_numerics_callback.limit_string_length",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.framework.ops.enable_eager_execution",
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.gradient_checker_v2.compute_gradient",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.keras.optimizer_v2.gradient_descent.SGD",
"tensorflow.python.keras.models.Sequential",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.python.debug.lib.check_numerics_callback.check_numerics",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
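The test above drives the check-numerics callback through private `tensorflow.python` modules. The same guard is reachable from the public API; a sketch assuming TF 2.x, where it is exposed as `tf.debugging.enable_check_numerics()`:

```python
# Public-API sketch of the Inf/NaN guard exercised by the tests above.
# Assumption: TF 2.x with tf.debugging.enable_check_numerics available.
import tensorflow as tf

tf.debugging.enable_check_numerics()

x = tf.constant([2.0, 3.0])
y = tf.constant([1.0, 0.0])
try:
    print(x / y)  # 3.0 / 0.0 -> +Inf, which the callback turns into an error
except tf.errors.InvalidArgumentError as err:
    print(type(err).__name__)  # InvalidArgumentError, with an op trace message
```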
AdityaSidharta/kaggle_humpback_new_whale | [
"779d60746f8eba99d0336836200150fa7a08388e"
] | [
"model/dataset.py"
] | [
"import os\n\nimport torch\nfrom skimage import io\nfrom skimage.color import gray2rgb\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import Compose, Resize, RandomHorizontalFlip, \\\n RandomVerticalFlip, RandomAffine, Normalize, ToTensor, ToPILImage, Grayscale\n\ntrain_transform = Compose([\n ToPILImage(),\n Resize((224, 224)),\n Grayscale(3),\n RandomHorizontalFlip(),\n RandomVerticalFlip(),\n RandomAffine(degrees=30),\n ToTensor(),\n Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n])\n\ntest_transform = Compose([\n ToPILImage(),\n Resize((224, 224)),\n Grayscale(3),\n ToTensor(),\n Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n])\n\n\nclass TrainDataset(Dataset):\n def __init__(self, image_label, ohe_label, train_path, train_tsfm, device):\n self.image_label = image_label\n self.ohe_label = ohe_label\n self.train_path = train_path\n self.train_tsfm = train_tsfm\n self.device = device\n\n def __len__(self):\n return len(self.image_label)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.train_path, self.image_label[idx])\n img_array = io.imread(img_path)\n if len(img_array.shape) == 2:\n img_array = gray2rgb(img_array)\n assert img_array.shape[2] == 3\n img_tensor = self.train_tsfm(img_array)\n img_tensor = img_tensor.type(torch.float).to(self.device)\n label = self.ohe_label[idx, :]\n label_tensor = torch.Tensor(label)\n label_tensor = label_tensor.type(torch.float).to(self.device)\n return img_tensor, label_tensor\n\n\nclass TestDataset(Dataset):\n def __init__(self, image_label, test_path, test_tsfm, device):\n self.image_label = image_label\n self.test_path = test_path\n self.test_tsfm = test_tsfm\n self.device = device\n\n def __len__(self):\n return len(self.image_label)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.test_path, self.image_label[idx])\n img_array = io.imread(img_path)\n if len(img_array.shape) == 2:\n img_array = gray2rgb(img_array)\n assert img_array.shape[2] == 3\n img_tensor = self.test_tsfm(img_array)\n img_tensor = img_tensor.type(torch.float).to(self.device)\n return img_tensor\n"
] | [
[
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
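A self-contained smoke test for the transform pipeline in model/dataset.py above, feeding a synthetic grayscale image through the same `gray2rgb` guard the Dataset classes use (the `model.dataset` import path is inferred from the file path listed above):

```python
# Smoke test: fake grayscale image through the gray2rgb guard + test_transform.
import numpy as np
from skimage.color import gray2rgb
from model.dataset import test_transform  # path per the dataset entry above

img_array = (np.random.rand(100, 120) * 255).astype(np.uint8)  # fake H x W image
if len(img_array.shape) == 2:
    img_array = gray2rgb(img_array)       # same guard as TrainDataset/TestDataset
assert img_array.shape[2] == 3
img_tensor = test_transform(img_array)
print(img_tensor.shape)                   # torch.Size([3, 224, 224])
```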
NVlabs/UMR | [
"d858c4ddd56bdac6e3342609f9c02618c279b990"
] | [
"utils/geometry.py"
] | [
"# -----------------------------------------------------------\n# Code adapted from: https://github.com/akanazawa/cmr/blob/master/utils/geometry.py\n# \n# MIT License\n# \n# Copyright (c) 2018 akanazawa\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# -----------------------------------------------------------\n\n# Geometry stuff.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\ndef triangle_direction_intersection(tri, trg):\n '''\n Finds where an origin-centered ray going in direction trg intersects a triangle.\n Args:\n tri: 3 X 3 vertex locations. tri[0, :] is 0th vertex.\n Returns:\n alpha, beta, gamma\n '''\n p0 = np.copy(tri[0, :])\n # Don't normalize\n d1 = np.copy(tri[1, :]) - p0;\n d2 = np.copy(tri[2, :]) - p0;\n d = trg / np.linalg.norm(trg)\n\n mat = np.stack([d1, d2, d], axis=1)\n\n try:\n inv_mat = np.linalg.inv(mat)\n except np.linalg.LinAlgError:\n return False, 0\n\n a_b_mg = -1*np.matmul(inv_mat, p0)\n is_valid = (a_b_mg[0] >= 0) and (a_b_mg[1] >= 0) and ((a_b_mg[0] + a_b_mg[1]) <= 1) and (a_b_mg[2] < 0)\n if is_valid:\n return True, -a_b_mg[2]*d\n else:\n return False, 0\n\n\ndef project_verts_on_mesh(verts, mesh_verts, mesh_faces):\n verts_out = np.copy(verts)\n for nv in range(verts.shape[0]):\n max_norm = 0\n vert = np.copy(verts_out[nv, :])\n for f in range(mesh_faces.shape[0]):\n face = mesh_faces[f]\n tri = mesh_verts[face, :]\n # is_v=True if it does intersect and returns the point\n is_v, pt = triangle_direction_intersection(tri, vert)\n # Take the furthest away intersection point\n if is_v and np.linalg.norm(pt) > max_norm:\n max_norm = np.linalg.norm(pt)\n verts_out[nv, :] = pt\n\n return verts_out\n"
] | [
[
"numpy.linalg.inv",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.stack",
"numpy.copy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
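A worked example for `triangle_direction_intersection` above: an origin ray along +z against a triangle lying in the z = 2 plane. Solving the 3x3 system by hand gives barycentric weights (1/3, 1/3) and ray parameter 2, so the hit point is (0, 0, 2):

```python
# Worked example: ray from the origin along +z, triangle in the z = 2 plane.
import numpy as np
from utils.geometry import triangle_direction_intersection  # path as listed above

tri = np.array([[ 1.0,  0.0, 2.0],
                [ 0.0,  1.0, 2.0],
                [-1.0, -1.0, 2.0]])
hit, point = triangle_direction_intersection(tri, np.array([0.0, 0.0, 1.0]))
print(hit, point)  # True [0. 0. 2.]
```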
r-vu/barc | [
"7f90d4f8689df7410f0bf4be8843cfe216da1c9f"
] | [
"workspace/src/labs/src/lab2/plot.py"
] | [
"import rosbag\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport matplotlib.patches as patches\n\nbag = rosbag.Bag(os.path.expanduser(\"/d/Documents/classes/me131/_2020-02-10-20-44-36.bag\"))\n\n\ntopics = bag.get_type_and_topic_info()[1].keys()\ntypes = []\nfor i in range(0,len(bag.get_type_and_topic_info()[1].values())):\n types.append(bag.get_type_and_topic_info()[1].values()[i][0])\n if bag.get_type_and_topic_info()[1].values()[i][0] == 'barc/ECU':\n dimEcu = bag.get_type_and_topic_info()[1].values()[i][1]\n if bag.get_type_and_topic_info()[1].values()[i][0] == 'labs/Z_DynBkMdl':\n dimxy = bag.get_type_and_topic_info()[1].values()[i][1]\n\n\nx_raw = np.zeros((dimxy, 1))\nv_raw = np.zeros((dimxy, 1))\nv_des = 8*np.ones((dimxy,1))\n\ncounter = 0\nfor counter, (topic, msg, t) in enumerate( bag.read_messages(topics=['/z_vhcl']) ) : \n x_raw[counter] = msg.x\n v_raw[counter] = msg.v_x\n\nplt.figure(1)\nplt.plot(x_raw, v_raw, label = 'Actual Velocity')\nplt.plot(x_raw, v_des, label = 'Desired Velocity')\nplt.ylabel('Velocity [m/s]')\nplt.ylim((0,12))\nplt.xlabel('Longitudinal position [m]')\nplt.title('Longitudinal Velocity Tracking')\nplt.legend()\nplt.show()\n\nbag.close()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"numpy.ones",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
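The plot.py entry above needs a specific .bag recording to run. A self-contained version of the same velocity-tracking figure, with synthetic data standing in for the /z_vhcl messages:

```python
# Standalone rendition of the velocity-tracking plot above (fake data).
import numpy as np
import matplotlib.pyplot as plt

x_raw = np.linspace(0, 50, 200)          # longitudinal position [m]
v_raw = 8 * (1 - np.exp(-x_raw / 10))    # fake measured velocity ramp-up
v_des = 8 * np.ones_like(x_raw)          # desired velocity

plt.figure(1)
plt.plot(x_raw, v_raw, label='Actual Velocity')
plt.plot(x_raw, v_des, label='Desired Velocity')
plt.ylabel('Velocity [m/s]')
plt.ylim(0, 12)
plt.xlabel('Longitudinal position [m]')
plt.title('Longitudinal Velocity Tracking')
plt.legend()
plt.show()
```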
jwbrun/ASGDSimulator | [
"ecab2cc83986f08b21bc85151cece85a08fcce82"
] | [
"convNet/trainer.py"
] | [
"import time\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nfrom torch.nn.utils import clip_grad_norm_\nfrom utils.meters import AverageMeter, accuracy\n\n\ndef _flatten_duplicates(inputs, target, batch_first=True):\n if batch_first:\n target = target.view(-1, 1).expand(-1, inputs.size(1))\n else:\n target = target.view(1, -1).expand(inputs.size(0), -1)\n inputs = inputs.flatten(0, 1)\n target = target.flatten(0, 1)\n return inputs, target\n\n\nclass Trainer(object):\n\n def __init__(self, model, criterion, optimizer=None,\n device_ids=[0], device=torch.cuda, dtype=torch.float,\n distributed=False, local_rank=-1, adapt_grad_norm=None,\n grad_clip=-1, print_freq=100):\n self._model = model\n self.criterion = criterion\n self.epoch = 0\n self.training_steps = 0\n self.optimizer = optimizer\n self.device = device\n self.dtype = dtype\n self.local_rank = local_rank\n self.print_freq = print_freq\n self.grad_clip = grad_clip\n self.grad_scale = None\n self.adapt_grad_norm = adapt_grad_norm\n\n if distributed:\n self.model = nn.parallel.DistributedDataParallel(model,\n device_ids=device_ids,\n output_device=device_ids[0])\n elif device_ids and len(device_ids) > 1:\n self.model = nn.DataParallel(model, device_ids)\n else:\n self.model = model\n\n def _grad_norm(self, inputs_batch, target_batch, chunk_batch=1):\n self.model.zero_grad()\n for inputs, target in zip(inputs_batch.chunk(chunk_batch, dim=0),\n target_batch.chunk(chunk_batch, dim=0)):\n target = target.to(self.device)\n inputs = inputs.to(self.device, dtype=self.dtype)\n\n # compute output\n output = self.model(inputs)\n loss = self.criterion(output, target)\n\n if chunk_batch > 1:\n loss = loss / chunk_batch\n\n loss.backward() # accumulate gradient\n grad = clip_grad_norm_(self.model.parameters(), float('inf'))\n return grad\n\n def _step(self, inputs_batch, target_batch, training=False, chunk_batch=1):\n outputs = []\n total_loss = 0\n\n if training:\n self.optimizer.zero_grad()\n self.optimizer.update(self.epoch, self.training_steps)\n\n for inputs, target in zip(inputs_batch.chunk(chunk_batch, dim=0),\n target_batch.chunk(chunk_batch, dim=0)):\n target = target.to(self.device)\n inputs = inputs.to(self.device, dtype=self.dtype)\n if training:\n self.optimizer.pre_forward()\n\n # compute output\n output = self.model(inputs)\n loss = self.criterion(output, target)\n grad = None\n\n if chunk_batch > 1:\n loss = loss / chunk_batch\n\n if isinstance(output, list) or isinstance(output, tuple):\n output = output[0]\n\n outputs.append(output.detach())\n\n if training:\n self.optimizer.pre_backward()\n if self.grad_scale is not None:\n loss = loss * self.grad_scale\n loss.backward() # accumulate gradient\n\n total_loss += float(loss)\n\n if training: # post gradient accumulation\n if self.grad_clip > 0:\n grad = clip_grad_norm_(self.model.parameters(), self.grad_clip)\n self.optimizer.step() # SGD step\n self.training_steps += 1\n\n outputs = torch.cat(outputs, dim=0)\n return outputs, total_loss, grad\n\n def forward(self, data_loader, num_steps=None, training=False, duplicates=1, chunk_batch=1):\n meters = {name: AverageMeter()\n for name in ['step', 'data', 'loss', 'prec1', 'prec5']}\n if training and self.grad_clip > 0:\n meters['grad'] = AverageMeter()\n\n def meter_results(meters):\n results = {name: meter.avg for name, meter in meters.items()}\n results['error1'] = 100. - results['prec1']\n results['error5'] = 100. 
- results['prec5']\n return results\n\n end = time.time()\n\n for i, (inputs, target) in enumerate(data_loader):\n if training and duplicates > 1 and self.adapt_grad_norm is not None \\\n and i % self.adapt_grad_norm == 0:\n grad_mean = 0\n num = inputs.size(1)\n for j in range(num):\n grad_mean += float(self._grad_norm(inputs.select(1, j), target))\n grad_mean /= num\n grad_all = float(self._grad_norm(\n *_flatten_duplicates(inputs, target)))\n self.grad_scale = grad_mean / grad_all\n logging.info('New loss scale: %s', self.grad_scale)\n\n # measure data loading time\n meters['data'].update(time.time() - end)\n if duplicates > 1: # multiple versions for each sample (dim 1)\n inputs, target = _flatten_duplicates(inputs, target)\n\n output, loss, grad = self._step(inputs, target,\n training=training,\n chunk_batch=chunk_batch)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output, target, topk=(1, 5))\n meters['loss'].update(float(loss), inputs.size(0))\n meters['prec1'].update(float(prec1), inputs.size(0))\n meters['prec5'].update(float(prec5), inputs.size(0))\n if grad is not None:\n meters['grad'].update(float(grad), inputs.size(0))\n\n # measure elapsed time\n meters['step'].update(time.time() - end)\n end = time.time()\n\n if i % self.print_freq == 0:\n report = str('{phase} - Epoch: [{0}][{1}/{2}]\\t'\n 'Time {meters[step].val:.3f} ({meters[step].avg:.3f})\\t'\n 'Data {meters[data].val:.3f} ({meters[data].avg:.3f})\\t'\n 'Loss {meters[loss].val:.4f} ({meters[loss].avg:.4f})\\t'\n 'Prec@1 {meters[prec1].val:.3f} ({meters[prec1].avg:.3f})\\t'\n 'Prec@5 {meters[prec5].val:.3f} ({meters[prec5].avg:.3f})\\t'\n .format(\n self.epoch, i, len(data_loader),\n phase='TRAINING' if training else 'EVALUATING',\n meters=meters))\n if 'grad' in meters.keys():\n report += 'Grad {meters[grad].val:.3f} ({meters[grad].avg:.3f})'\\\n .format(meters=meters)\n logging.info(report)\n\n if num_steps is not None and i >= num_steps:\n break\n\n return meter_results(meters)\n\n def train(self, data_loader, duplicates=1, chunk_batch=1):\n # switch to train mode\n self.model.train()\n\n return self.forward(data_loader, duplicates=duplicates, training=True, chunk_batch=chunk_batch)\n\n def validate(self, data_loader, duplicates=1):\n # switch to evaluate mode\n self.model.eval()\n with torch.no_grad():\n return self.forward(data_loader, duplicates=duplicates, training=False)\n"
] | [
[
"torch.nn.DataParallel",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
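A quick check of `_flatten_duplicates` in trainer.py above, showing how per-sample duplicates on dim 1 are folded into the batch dimension and the targets expanded to match (import path taken from the listed file path):

```python
# Behaviour check for _flatten_duplicates (batch_first=True).
import torch
from convNet.trainer import _flatten_duplicates  # path as listed above

inputs = torch.randn(4, 3, 1, 8, 8)   # batch=4, 3 duplicates per sample
target = torch.arange(4)
flat_in, flat_tg = _flatten_duplicates(inputs, target)
print(flat_in.shape)  # torch.Size([12, 1, 8, 8])
print(flat_tg)        # tensor([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])
```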
ryu577/survival | [
"0c6bf3dfcbb570ffd9e053bee56c86541acc01f0"
] | [
"survival/misc/misc.py"
] | [
"import numpy as np\n\n\ndef solve_hazard_eqn(fn, val, minval=10.0, maxval=900.0, interval=1.0):\n\t'''\n\tFinds the approximate point where a function crosses a value from below.\n\t'''\n\tprev_val = fn(minval)\n\n\tfor i in np.arange(minval+interval, maxval, interval):\n\t\tnext_val = fn(i)\n\t\tif next_val < val and val < prev_val:\n\t\t\treturn i-interval/2\n\t\tprev_val = next_val\n\tif next_val > val:\n\t\treturn maxval\n\telse:\n\t\treturn minval\n\n\ndef get_opt_tau(fn, pc_cost, q=1.0):\n\tpc_haz = q/pc_cost\n\tprev_haz = fn(9.5)\n\tmax_haz = 0\n\tans = 0.0\n\tfor t in np.arange(10,900,0.5):\n\t\tcurr_haz = fn(t)\n\t\tif curr_haz < pc_haz and prev_haz > pc_haz:\n\t\t\treturn t-0.5/2\n\t\tprev_haz = curr_haz\n\t\tif curr_haz > max_haz:\n\t\t\tmax_haz = curr_haz\n\tif max_haz < pc_haz:\n\t\treturn 10\n\telse:\n\t\treturn t\n\n\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
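A worked example for `solve_hazard_eqn` above on a decreasing exponential curve; the grid scan returns the midpoint of the step that brackets the downward crossing:

```python
# exp(-t/100) drops through 0.5 near t = 100*ln(2) ~= 69.3.
import numpy as np
from survival.misc.misc import solve_hazard_eqn  # path as listed above

fn = lambda t: np.exp(-t / 100.0)
print(solve_hazard_eqn(fn, 0.5))  # 69.5 (midpoint of the bracketing step)
```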
LavinaVRovine/hazard | [
"e0408374dc0b76f8b9a0107f5f12cca2d4c033ef"
] | [
"predictions/lol_predictor.py"
] | [
"import pandas as pd\nimport mlflow.sklearn\nfrom sqlalchemy import create_engine\nfrom config import DATABASE_URI\n\nfrom predictions.common_predictor import CommonPredictor\nfrom config import ROOT_DIR\n\npd.set_option(\"display.width\", 1000)\npd.set_option(\"display.max_columns\", 50)\n\n\nclass LoLPredictor(CommonPredictor):\n def __init__(self, debug: bool = False):\n super().__init__(debug=debug)\n self.training_columns = [\n \"avg\",\n \"gold_per_minute\",\n \"gold_differential_per_minute\",\n \"gold_differential_at_15\",\n \"cs_per_minute\",\n \"cs_differential_at_15\",\n \"tower_differential_at_15\",\n \"tower_ratio\",\n \"first_tower\",\n \"damage_per_minute\",\n \"kills_per_game\",\n \"deaths_per_game\",\n \"kda\",\n \"dragon_game\",\n \"dragons_15\",\n \"nashors_game\",\n \"wards_per_minute\",\n \"c_avg\",\n \"c_gold_per_minute\",\n \"c_gold_differential_per_minute\",\n \"c_gold_differential_at_15\",\n \"c_cs_differential_at_15\",\n \"c_tower_differential_at_15\",\n \"c_tower_ratio\",\n \"c_first_tower\",\n \"c_damage_per_minute\",\n \"c_kills_per_game\",\n \"c_deaths_per_game\",\n \"c_kda\",\n \"c_dragon_game\",\n \"c_dragons_15\",\n \"c_nashors_game\",\n ]\n self.y_col_name = \"main_team_won\"\n\n\nif __name__ == \"__main__\":\n\n print()\n\n mlflow.set_tracking_uri(f\"file:///{ROOT_DIR}/mlruns\")\n mlflow.set_experiment(\"hazard_lol\")\n lol = LoLPredictor()\n con = create_engine(DATABASE_URI + \"lol\", echo=False)\n df_all = pd.read_sql(\"SELECT * FROM averaged_predictions\", con=con)\n lol.main_train(df_all, run_name=\"save run\", n_runs=30)\n print()\n # todo musi byt v current run\n # mlflow.sklearn.save_model(lol.model, path=f\"{ROOT_DIR}/models/ttt\", conda_env=f\"{ROOT_DIR}/environment.yaml\")\n # mlflow.sklearn.log_model(lol.model, artifact_path=f\"{ROOT_DIR}/ttt\", conda_env=f\"{ROOT_DIR}/environment.yaml\")\n"
] | [
[
"pandas.set_option",
"pandas.read_sql"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
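The training flow for LoLPredictor above lives in its `__main__` block; the same steps can be driven programmatically. A hedged sketch: `DATABASE_URI`, the `lol` schema, and the `averaged_predictions` table all come from the repo's own config and are assumed to be present:

```python
# Hedged driver sketch for LoLPredictor; DB config and table are assumed
# to exist exactly as in the __main__ block above.
import pandas as pd
from sqlalchemy import create_engine

from config import DATABASE_URI
from predictions.lol_predictor import LoLPredictor

con = create_engine(DATABASE_URI + "lol", echo=False)
df_all = pd.read_sql("SELECT * FROM averaged_predictions", con=con)
LoLPredictor(debug=True).main_train(df_all, run_name="manual run", n_runs=5)
```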
saiabinesh/EdgeNets | [
"2b232d3f7fb60658755dad1ebca0ffc895cc795e"
] | [
"nn_layers/efficient_dwise_conv.py"
] | [
"#============================================\n__author__ = \"Sachin Mehta\"\n__maintainer__ = \"Sachin Mehta\"\n#============================================\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom nn_layers.cnn_utils import activation_fn, CBR, Shuffle, BR\nimport math\n\n\nclass EffDWSepConv(nn.Module):\n '''\n This class implements the volume-wise seperable convolutions\n '''\n def __init__(self, channel_in, channel_out, kernel_size=3):\n super().__init__()\n self.conv_channel = CBR(channel_in, channel_in, kSize=kernel_size, stride=1, groups=channel_in)\n\n # project from channel_in to Channel_out\n groups_proj = math.gcd(channel_in, channel_out)\n self.proj_layer = CBR(channel_in, channel_out, kSize=3, stride=1, groups=groups_proj)\n\n self.linear_comb_layer = nn.Sequential(\n nn.AdaptiveAvgPool2d(output_size=1),\n nn.Conv2d(channel_in, channel_out, kernel_size=1, bias=False),\n nn.Sigmoid()\n )\n\n self.channel_in = channel_in\n self.channel_out = channel_out\n self.ksize=kernel_size\n\n def forward(self, x):\n '''\n :param x: input of dimension C x H x W\n :return: output of dimension C1 x H x W\n '''\n bsz, channels, height, width = x.size()\n x = self.conv_channel(x)\n proj_out =self.proj_layer(x)\n linear_comb_out = self.linear_comb_layer(x)\n return proj_out * linear_comb_out\n\n def __repr__(self):\n s = '{name}(in_channels={channel_in}, out_channels={channel_out}, kernel_size={ksize})'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n\n\nclass StridedEffDWise(nn.Module):\n '''\n This class implements the strided volume-wise seperable convolutions\n '''\n def __init__(self, channel_in, kernel_size=3):\n '''\n :param channel_in: # of input channels\n :param channel_out: # of output channels\n :param height: Height of the input volume\n :param width: Width of the input volume\n :param kernel_size: Kernel size. We use the same kernel size of 3 for each dimension. Larger kernel size would increase the FLOPs and Parameters\n :param dilation: It's a list with 3 elements, each element corresponding to a dilation rate for each dimension.\n :param shuffle: Shuffle the feature maps in the volume-wise separable convolutions\n :param weight_avg: Waighted average for fusing the feature maps in volume-wise separable convolutions\n :param res_conn: Residual connection in the volume-wise separabel convolutions\n :param proj: Want to project the feature maps from channel_in to channel_out or not\n '''\n super().__init__()\n\n self.pool_layer = CBR(channel_in, channel_in, 3, stride=2, groups=channel_in)\n self.dw_layer = EffDWSepConv(channel_in, channel_in, kernel_size=kernel_size)\n self.channel_in = channel_in\n self.channel_out = 2*channel_in\n self.ksize = kernel_size\n\n def forward(self, x):\n x = self.pool_layer(x)\n return torch.cat([x, self.dw_layer(x)], 1)\n\n def __repr__(self):\n s = '{name}(in_channels={channel_in}, out_channels={channel_out}, kernel_size={ksize}, ' \\\n 'width={width}, height={height})'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n\nif __name__ == '__main__':\n import numpy as np\n channel_in = 3\n channel_out = 30\n width = 112\n height = 112\n bsz = 2\n input = torch.Tensor(bsz, channel_in, height, width)._fill_(1)\n model = EffDWSepConv(channel_in, channel_out)\n model.eval()\n\n input = torch.Tensor(bsz, channel_in, 56, 56)._fill_(1)\n out = model(input)\n\n n_params = sum([np.prod(p.size()) for p in model.parameters()])\n print('Params: {}'.format(n_params))\n"
] | [
[
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.Tensor",
"torch.nn.Sigmoid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
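Note on the record above: EffDWSepConv pairs a depth-wise convolution with a squeeze-and-excitation style gate (global average pool, 1x1 convolution, sigmoid). The snippet below is a minimal self-contained sketch of that gating pattern using stock torch.nn modules only, since the repo's CBR helper from nn_layers.cnn_utils is not included in this record; the class name GatedDWSepConv and all hyperparameters are illustrative, not part of the original code.

import torch
from torch import nn

class GatedDWSepConv(nn.Module):
    # Hypothetical stand-in for EffDWSepConv: depth-wise conv, 3x3 projection,
    # and an SE-style sigmoid gate over the channel dimension.
    def __init__(self, c_in, c_out, k=3):
        super().__init__()
        self.dw = nn.Conv2d(c_in, c_in, k, padding=k // 2, groups=c_in)
        self.proj = nn.Conv2d(c_in, c_out, 3, padding=1)
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),               # B x C_in x 1 x 1 global context
            nn.Conv2d(c_in, c_out, 1, bias=False),
            nn.Sigmoid(),                          # per-channel weights in [0, 1]
        )

    def forward(self, x):
        x = self.dw(x)
        return self.proj(x) * self.gate(x)         # gate broadcasts over H x W

x = torch.ones(2, 3, 56, 56)
print(GatedDWSepConv(3, 30)(x).shape)              # torch.Size([2, 30, 56, 56])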
selim-karaduman/pytorch-drl-algs | [
"c354399893eae41fe820134e5e57d0d152d9fe5a",
"c354399893eae41fe820134e5e57d0d152d9fe5a"
] | [
"pytorch_drl/utils/exploration.py",
"pytorch_drl/utils/memory/buffer.py"
] | [
"import numpy as np\nfrom pytorch_drl.utils.schedule import LinearSchedule\n\nclass OrnsteinUhlenbeck:\n\n def __init__(self, x_size, mu=0, \n sigma_init=0.2, sigma_final=0.2, \n sigma_horizon=1, theta=0.2, dt=1e-2):\n self.mu = mu\n self.x_size = x_size\n self.dt = dt\n self.theta = theta\n self.x = np.zeros(x_size) + mu\n self.sigma = LinearSchedule(sigma_init, sigma_final, sigma_horizon)\n\n def set(self, x):\n self.x = x\n\n def step(self):\n dw = np.random.randn(*self.x_size) * np.sqrt(self.dt)\n dx = self.theta * (self.mu - self.x) * self.dt + self.sigma.value * dw\n self.x = self.x + dx\n self.sigma.step()\n return self.x\n\n def reset(self):\n self.x = self.x*0 + self.mu\n\nclass Gaussian:\n\n def __init__(self, x_size, mu=0,\n sigma_init=0.2, sigma_final=0.2, sigma_horizon=1):\n self.mu = mu\n self.x_size = x_size\n self.sigma = LinearSchedule(sigma_init, sigma_final, sigma_horizon)\n\n def step(self):\n x = np.random.randn(*self.x_size) * self.sigma.value + self.mu\n self.sigma.step()\n return x\n",
"import math\nimport torch\nimport numpy as np\nimport random\nimport time\nfrom collections import deque, namedtuple\nfrom pytorch_drl.utils.memory.segment_tree import SumTree, MinTree\n\nclass UniformBuffer:\n def __init__(self, size, batch_size, \n seed, device, action_type=torch.long):\n self.size = size\n self.batch_size = batch_size\n self.seed = random.seed(seed)\n self.device = device\n self.Experience = namedtuple('Experience', \n ['state',\n 'action',\n 'reward',\n 'next_state',\n 'done'])\n self.buffer = deque(maxlen=size)\n self.action_type = action_type\n \n def add(self, state, action, reward, next_state, done):\n experience = self.Experience(state, action, reward, next_state, done)\n self.buffer.append(experience)\n\n def sample(self):\n device = self.device\n idx = np.random.choice(len(self.buffer), \n self.batch_size, replace=False)\n states = []\n actions = []\n rewards = []\n next_states = []\n dones = []\n\n for i in idx:\n exp = self.buffer[i]\n states.append(exp.state)\n actions.append(exp.action)\n rewards.append(exp.reward)\n next_states.append(exp.next_state)\n dones.append(exp.done)\n\n states = torch.from_numpy(np.vstack(states)).float().to(device)\n actions = torch.from_numpy(np.vstack(actions))\\\n .type(self.action_type).to(device)\n rewards = torch.from_numpy(np.vstack(rewards)).float().to(device)\n next_states = torch.from_numpy(np.vstack(next_states))\\\n .float().to(device)\n dones = torch.from_numpy(np.vstack(dones).astype(np.uint8))\\\n .float().to(device)\n\n return states, actions, rewards, next_states, dones\n \n def __len__(self):\n return len(self.buffer)\n \n#-----------------------------------------------------------------------\n\nclass PriorityBuffer(UniformBuffer):\n\n def __init__(self, size, batch_size, seed, device, \n action_type=torch.long, alpha=0.6, eps=1e-6):\n super().__init__(size, batch_size, seed, device, action_type)\n self.alpha = alpha\n self.eps = eps\n segment_tree_size = int(np.power(2, np.ceil(np.log2(size))))\n self.sum_tree = SumTree(segment_tree_size)\n self.min_tree = MinTree(segment_tree_size)\n self.max_priority = 1\n self.tree_index = 0\n\n def add(self, state, action, reward, next_state, done):\n super().add(state, action, reward, next_state, done)\n priority = self.max_priority ** self.alpha\n self.sum_tree[self.tree_index] = priority\n self.min_tree[self.tree_index] = priority\n self.tree_index = (self.tree_index + 1) % self.size\n \n def _update(self, ind, priority):\n priority_ = priority ** self.alpha + self.eps\n assert(priority_ > 0)\n self.sum_tree[ind] = priority_\n self.min_tree[ind] = priority_\n self.max_priority = max(self.max_priority, priority)\n \n\n def update_indices(self, inds, priorities):\n for i in range(len(inds)):\n self._update(inds[i].item(), priorities[i].item())\n \n def sample(self, beta=0.6):\n device = self.device\n indices = self.sum_tree.sample_batch_idx(self.batch_size)\n weights = self.sum_tree[indices]\n\n states = []\n actions = []\n rewards = []\n next_states = []\n dones = []\n\n for i in indices:\n exp = self.buffer[i]\n states.append(exp.state)\n actions.append(exp.action)\n rewards.append(exp.reward)\n next_states.append(exp.next_state)\n dones.append(exp.done)\n \n states = torch.from_numpy(np.vstack(states)).float().to(device)\n actions = torch.from_numpy(np.vstack(actions))\\\n .type(self.action_type).to(device)\n rewards = torch.from_numpy(np.vstack(rewards)).float().to(device)\n next_states = torch.from_numpy(np.vstack(next_states))\\\n .float().to(device)\n dones = 
torch.from_numpy(np.vstack(dones).astype(np.uint8))\\\n .float().to(device)\n indices = torch.from_numpy(np.array(indices)).to(device)\n weights = torch.tensor(weights).float()\n \n p_total = self.sum_tree.get_sum()\n p_min = self.min_tree.get_min() / p_total\n max_weight = (p_min * len(self)) ** (-beta)\n \n weights = weights / p_total\n weights = (weights * len(self)) ** (-beta)\n weights = weights / max_weight\n weights = weights.unsqueeze(1).to(device)\n return (states, actions, rewards, \n next_states, dones, indices, weights)\n\n\n#-----------------------------------------------------------------------\n\n\nclass EpisodicBuffer:\n\n def __init__(self, size, seed, device, \n batch_size, action_type=torch.long):\n self.size = size\n self.seed = seed\n self.device = device\n self.batch_size = batch_size\n self.action_type = action_type\n self.buffer = deque(maxlen=size)\n \n\n def add(self, trajectory):\n \"\"\"\n trajectory: [[s1,s2,..sn], [a..], [r..], [p..], [d...]]\n s: np.array\n a: int\n r: float\n p: np.array\n d: bool\n \"\"\"\n self.buffer.append(trajectory)\n # Return this same trajectory in the same format as self.sample()\n return self.sample(batch=[trajectory])\n\n def sample(self, batch=None):\n device = self.device\n if batch is None:\n indices = np.random.choice(len(self.buffer), \n self.batch_size, replace=False)\n batch = [self.buffer[ind] for ind in indices]\n \n batch = [map(np.vstack, tr) for tr in batch]\n states, actions, rewards, policies, dones = (\n map(lambda x: x.swapaxes(0,1), \n map(np.stack, \n zip(*batch)))\n )\n \"\"\"\n states: (n_steps+1) x batch_size x [*state_size]\n actions: n_steps x batch_size x [*action_size]\n \"\"\"\n \n states = torch.from_numpy(states).float().to(device)\n actions = torch.from_numpy(actions).type(self.action_type).to(device)\n rewards = torch.from_numpy(rewards).float().to(device)\n policies = torch.from_numpy(policies).float().to(device)\n dones = torch.from_numpy(dones.astype(np.uint8)).float().to(device)\n return states, actions, rewards, policies, dones\n\n\n def __len__(self):\n return len(self.buffer)\n\n#-----------------------------------------------------------------------\n\nclass MABuffer(UniformBuffer):\n def __init__(self, size, batch_size, \n seed, device, action_type=torch.float):\n super().__init__(size, batch_size, seed, device, action_type)\n \n def sample(self):\n device = self.device\n idx = np.random.choice(len(self.buffer), \n self.batch_size, replace=False)\n states = []\n actions = []\n rewards = []\n next_states = []\n dones = []\n\n for i in idx:\n exp = self.buffer[i]\n states.append(exp.state)\n actions.append(exp.action)\n rewards.append(exp.reward)\n next_states.append(exp.next_state)\n dones.append(exp.done)\n\n states = [torch.from_numpy(np.vstack(ag)).float().to(device)\n for ag in list(zip(*states))]\n actions = [torch.from_numpy(np.vstack(ag))\\\n .type(self.action_type).to(device)\n for ag in list(zip(*actions))]\n rewards = [torch.from_numpy(np.vstack(ag)).float().to(device)\n for ag in list(zip(*rewards))]\n next_states = [torch.from_numpy(np.vstack(ag)).float().to(device)\n for ag in list(zip(*next_states))]\n dones = [torch.from_numpy(np.vstack(ag).astype(np.uint8)).to(device)\n for ag in list(zip(*dones))]\n\n return states, actions, rewards, next_states, dones\n"
] | [
[
"numpy.random.randn",
"numpy.zeros",
"numpy.sqrt"
],
[
"numpy.log2",
"torch.from_numpy",
"torch.tensor",
"numpy.array",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
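Note on the record above: PriorityBuffer applies the standard prioritized-experience-replay correction (Schaul et al., 2015), weighting each sampled transition by (N * P(i))^(-beta) and normalizing so no weight exceeds 1. Below is a standalone numpy sketch of that weight math with illustrative names; the buffer above normalizes by a global maximum weight derived from its min tree, whereas this sketch normalizes by the batch maximum for brevity.

import numpy as np

def is_weights(priorities, n, beta=0.6):
    p = priorities / priorities.sum()   # sampling probabilities P(i)
    w = (n * p) ** (-beta)              # corrects bias from non-uniform sampling
    return w / w.max()                  # normalize so weights <= 1

prios = np.array([1.0, 0.5, 0.1, 2.0])
print(is_weights(prios, n=4))           # rarely sampled (low-priority) items get larger weights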
phoenx34/wav.snipe | [
"d6d0b26440e5913dfbd5ea33b53ff226d405339c"
] | [
"development/librosa/core/constantq.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''Constant-Q transforms'''\nfrom __future__ import division\n\nimport numpy as np\nimport scipy.fftpack as fft\n\nfrom . import audio\nfrom .time_frequency import cqt_frequencies, note_to_hz\nfrom .spectrum import stft\nfrom .pitch import estimate_tuning\nfrom .. import cache\nfrom .. import filters\nfrom .. import util\nfrom ..util.exceptions import ParameterError\n\n__all__ = ['cqt', 'hybrid_cqt', 'pseudo_cqt']\n\n\n@cache(level=20)\ndef cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,\n bins_per_octave=12, tuning=0.0, filter_scale=1,\n norm=1, sparsity=0.01, window='hann',\n scale=True,\n real=util.Deprecated()):\n '''Compute the constant-Q transform of an audio signal.\n\n This implementation is based on the recursive sub-sampling method\n described by [1]_.\n\n .. [1] Schoerkhuber, Christian, and Anssi Klapuri.\n \"Constant-Q transform toolbox for music processing.\"\n 7th Sound and Music Computing Conference, Barcelona, Spain. 2010.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n hop_length : int > 0 [scalar]\n number of samples between successive CQT columns.\n\n fmin : float > 0 [scalar]\n Minimum frequency. Defaults to C1 ~= 32.70 Hz\n\n n_bins : int > 0 [scalar]\n Number of frequency bins, starting at `fmin`\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave\n\n tuning : None or float in `[-0.5, 0.5)`\n Tuning offset in fractions of a bin (cents).\n\n If `None`, tuning will be automatically estimated from the signal.\n\n filter_scale : float > 0\n Filter scale factor. Small values (<1) use shorter windows\n for improved time resolution.\n\n norm : {inf, -inf, 0, float > 0}\n Type of norm to use for basis function normalization.\n See `librosa.util.normalize`.\n\n sparsity : float in [0, 1)\n Sparsify the CQT basis by discarding up to `sparsity`\n fraction of the energy in each basis.\n\n Set `sparsity=0` to disable sparsification.\n\n window : str, tuple, number, or function\n Window specification for the basis filters.\n See `filters.get_window` for details.\n\n scale : bool\n If `True`, scale the CQT response by square-root the length of\n each channel's filter. This is analogous to `norm='ortho'` in FFT.\n\n If `False`, do not scale the CQT. This is analogous to\n `norm=None` in FFT.\n\n real : [DEPRECATED]\n .. warning:: This parameter name deprecated in librosa 0.5.0\n It will be removed in librosa 0.6.0.\n\n\n Returns\n -------\n CQT : np.ndarray [shape=(n_bins, t), dtype=np.complex or np.float]\n Constant-Q value each frequency at each time.\n\n Raises\n ------\n ParameterError\n If `hop_length` is not an integer multiple of\n `2**(n_bins / bins_per_octave)`\n\n Or if `y` is too short to support the frequency range of the CQT.\n\n See Also\n --------\n librosa.core.resample\n librosa.util.normalize\n\n Notes\n -----\n This function caches at level 20.\n\n Examples\n --------\n Generate and plot a constant-Q power spectrum\n\n >>> import matplotlib.pyplot as plt\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> C = librosa.cqt(y, sr=sr)\n >>> librosa.display.specshow(librosa.logamplitude(C**2, ref_power=np.max),\n ... sr=sr, x_axis='time', y_axis='cqt_note')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Constant-Q power spectrum')\n >>> plt.tight_layout()\n\n\n Limit the frequency range\n\n >>> C = librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),\n ... 
n_bins=60)\n >>> C\n array([[ 8.827e-04, 9.293e-04, ..., 3.133e-07, 2.942e-07],\n [ 1.076e-03, 1.068e-03, ..., 1.153e-06, 1.148e-06],\n ...,\n [ 1.042e-07, 4.087e-07, ..., 1.612e-07, 1.928e-07],\n [ 2.363e-07, 5.329e-07, ..., 1.294e-07, 1.611e-07]])\n\n\n Using a higher frequency resolution\n\n >>> C = librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),\n ... n_bins=60 * 2, bins_per_octave=12 * 2)\n >>> C\n array([[ 1.536e-05, 5.848e-05, ..., 3.241e-07, 2.453e-07],\n [ 1.856e-03, 1.854e-03, ..., 2.397e-08, 3.549e-08],\n ...,\n [ 2.034e-07, 4.245e-07, ..., 6.213e-08, 1.463e-07],\n [ 4.896e-08, 5.407e-07, ..., 9.176e-08, 1.051e-07]])\n '''\n\n # How many octaves are we dealing with?\n n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))\n n_filters = min(bins_per_octave, n_bins)\n\n len_orig = len(y)\n\n if fmin is None:\n # C1 by default\n fmin = note_to_hz('C1')\n\n if tuning is None:\n tuning = estimate_tuning(y=y, sr=sr)\n\n # First thing, get the freqs of the top octave\n freqs = cqt_frequencies(n_bins, fmin,\n bins_per_octave=bins_per_octave)[-bins_per_octave:]\n\n fmin_t = np.min(freqs)\n fmax_t = np.max(freqs)\n\n # Determine required resampling quality\n Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)\n filter_cutoff = fmax_t * (1 + 0.5 * filters.window_bandwidth(window) / Q)\n nyquist = sr / 2.0\n if filter_cutoff < audio.BW_FASTEST * nyquist:\n res_type = 'kaiser_fast'\n else:\n res_type = 'kaiser_best'\n\n y, sr, hop_length = __early_downsample(y, sr, hop_length,\n res_type,\n n_octaves,\n nyquist, filter_cutoff, scale)\n\n cqt_resp = []\n\n if res_type != 'kaiser_fast':\n\n # Do the top octave before resampling to allow for fast resampling\n fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin_t,\n n_filters,\n bins_per_octave,\n tuning,\n filter_scale,\n norm,\n sparsity,\n window=window)\n\n # Compute the CQT filter response and append it to the stack\n cqt_resp.append(__cqt_response(y, n_fft, hop_length, fft_basis))\n\n fmin_t /= 2\n fmax_t /= 2\n n_octaves -= 1\n\n filter_cutoff = fmax_t * (1 + 0.5 * filters.window_bandwidth(window) / Q)\n\n res_type = 'kaiser_fast'\n\n # Make sure our hop is long enough to support the bottom octave\n num_twos = __num_two_factors(hop_length)\n if num_twos < n_octaves - 1:\n raise ParameterError('hop_length must be a positive integer '\n 'multiple of 2^{0:d} for {1:d}-octave CQT'\n .format(n_octaves - 1, n_octaves))\n\n # Now do the recursive bit\n fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin_t,\n n_filters,\n bins_per_octave,\n tuning,\n filter_scale,\n norm,\n sparsity,\n window=window)\n\n my_y, my_sr, my_hop = y, sr, hop_length\n\n # Iterate down the octaves\n for i in range(n_octaves):\n\n # Resample (except first time)\n if i > 0:\n if len(my_y) < 2:\n raise ParameterError('Input signal length={} is too short for '\n '{:d}-octave CQT'.format(len_orig,\n n_octaves))\n\n # The additional scaling of sqrt(2) here is to implicitly rescale\n # the filters\n my_y = np.sqrt(2) * audio.resample(my_y, my_sr, my_sr/2.0,\n res_type=res_type,\n scale=True)\n my_sr /= 2.0\n my_hop //= 2\n\n # Compute the cqt filter response and append to the stack\n cqt_resp.append(__cqt_response(my_y, n_fft, my_hop, fft_basis))\n\n C = __trim_stack(cqt_resp, n_bins)\n\n if scale:\n lengths = filters.constant_q_lengths(sr, fmin,\n n_bins=n_bins,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n window=window,\n filter_scale=filter_scale)\n C /= np.sqrt(lengths[:, np.newaxis])\n\n return C\n\n\n@cache(level=20)\ndef hybrid_cqt(y, sr=22050, 
hop_length=512, fmin=None, n_bins=84,\n bins_per_octave=12, tuning=0.0, filter_scale=1,\n norm=1, sparsity=0.01, window='hann', scale=True):\n '''Compute the hybrid constant-Q transform of an audio signal.\n\n Here, the hybrid CQT uses the pseudo CQT for higher frequencies where\n the hop_length is longer than half the filter length and the full CQT\n for lower frequencies.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n hop_length : int > 0 [scalar]\n number of samples between successive CQT columns.\n\n fmin : float > 0 [scalar]\n Minimum frequency. Defaults to C1 ~= 32.70 Hz\n\n n_bins : int > 0 [scalar]\n Number of frequency bins, starting at `fmin`\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave\n\n tuning : None or float in `[-0.5, 0.5)`\n Tuning offset in fractions of a bin (cents).\n\n If `None`, tuning will be automatically estimated from the signal.\n\n filter_scale : float > 0\n Filter filter_scale factor. Larger values use longer windows.\n\n sparsity : float in [0, 1)\n Sparsify the CQT basis by discarding up to `sparsity`\n fraction of the energy in each basis.\n\n Set `sparsity=0` to disable sparsification.\n\n window : str, tuple, number, or function\n Window specification for the basis filters.\n See `filters.get_window` for details.\n\n\n Returns\n -------\n CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]\n Constant-Q energy for each frequency at each time.\n\n Raises\n ------\n ParameterError\n If `hop_length` is not an integer multiple of\n `2**(n_bins / bins_per_octave)`\n\n Or if `y` is too short to support the frequency range of the CQT.\n\n See Also\n --------\n cqt\n pseudo_cqt\n\n Notes\n -----\n This function caches at level 20.\n\n '''\n\n if fmin is None:\n # C1 by default\n fmin = note_to_hz('C1')\n\n if tuning is None:\n tuning = estimate_tuning(y=y, sr=sr)\n\n # Get all CQT frequencies\n freqs = cqt_frequencies(n_bins, fmin,\n bins_per_octave=bins_per_octave,\n tuning=tuning)\n\n # Compute the length of each constant-Q basis function\n lengths = filters.constant_q_lengths(sr, fmin,\n n_bins=n_bins,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n filter_scale=filter_scale,\n window=window)\n\n # Determine which filters to use with Pseudo CQT\n # These are the ones that fit within 2 hop lengths after padding\n pseudo_filters = 2.0**np.ceil(np.log2(lengths)) < 2 * hop_length\n\n n_bins_pseudo = int(np.sum(pseudo_filters))\n\n n_bins_full = n_bins - n_bins_pseudo\n cqt_resp = []\n\n if n_bins_pseudo > 0:\n fmin_pseudo = np.min(freqs[pseudo_filters])\n\n cqt_resp.append(pseudo_cqt(y, sr,\n hop_length=hop_length,\n fmin=fmin_pseudo,\n n_bins=n_bins_pseudo,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n filter_scale=filter_scale,\n norm=norm,\n sparsity=sparsity,\n window=window,\n scale=scale))\n\n if n_bins_full > 0:\n cqt_resp.append(np.abs(cqt(y, sr,\n hop_length=hop_length,\n fmin=fmin,\n n_bins=n_bins_full,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n filter_scale=filter_scale,\n norm=norm,\n sparsity=sparsity,\n window=window,\n scale=scale)))\n\n return __trim_stack(cqt_resp, n_bins)\n\n\n@cache(level=20)\ndef pseudo_cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,\n bins_per_octave=12, tuning=0.0, filter_scale=1,\n norm=1, sparsity=0.01, window='hann', scale=True):\n '''Compute the pseudo constant-Q transform of an audio signal.\n\n This uses a single fft size that is the smallest power of 2 that is greater\n than or 
equal to the max of:\n\n 1. The longest CQT filter\n 2. 2x the hop_length\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n hop_length : int > 0 [scalar]\n number of samples between successive CQT columns.\n\n fmin : float > 0 [scalar]\n Minimum frequency. Defaults to C1 ~= 32.70 Hz\n\n n_bins : int > 0 [scalar]\n Number of frequency bins, starting at `fmin`\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave\n\n tuning : None or float in `[-0.5, 0.5)`\n Tuning offset in fractions of a bin (cents).\n\n If `None`, tuning will be automatically estimated from the signal.\n\n filter_scale : float > 0\n Filter filter_scale factor. Larger values use longer windows.\n\n sparsity : float in [0, 1)\n Sparsify the CQT basis by discarding up to `sparsity`\n fraction of the energy in each basis.\n\n Set `sparsity=0` to disable sparsification.\n\n window : str, tuple, number, or function\n Window specification for the basis filters.\n See `filters.get_window` for details.\n\n\n Returns\n -------\n CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]\n Pseudo Constant-Q energy for each frequency at each time.\n\n Raises\n ------\n ParameterError\n If `hop_length` is not an integer multiple of\n `2**(n_bins / bins_per_octave)`\n\n Or if `y` is too short to support the frequency range of the CQT.\n\n Notes\n -----\n This function caches at level 20.\n\n '''\n\n if fmin is None:\n # C1 by default\n fmin = note_to_hz('C1')\n\n if tuning is None:\n tuning = estimate_tuning(y=y, sr=sr)\n\n fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin, n_bins,\n bins_per_octave,\n tuning, filter_scale,\n norm, sparsity,\n hop_length=hop_length,\n window=window)\n\n fft_basis = np.abs(fft_basis)\n\n # Compute the magnitude STFT with Hann window\n D = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length))\n\n # Project onto the pseudo-cqt basis\n C = fft_basis.dot(D)\n\n if scale:\n C /= np.sqrt(n_fft)\n else:\n lengths = filters.constant_q_lengths(sr, fmin,\n n_bins=n_bins,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n window=window,\n filter_scale=filter_scale)\n\n C *= np.sqrt(lengths[:, np.newaxis] / n_fft)\n\n return C\n\n\n@cache(level=10)\ndef __cqt_filter_fft(sr, fmin, n_bins, bins_per_octave, tuning,\n filter_scale, norm, sparsity, hop_length=None,\n window='hann'):\n '''Generate the frequency domain constant-Q filter basis.'''\n\n basis, lengths = filters.constant_q(sr,\n fmin=fmin,\n n_bins=n_bins,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n filter_scale=filter_scale,\n norm=norm,\n pad_fft=True,\n window=window)\n\n # Filters are padded up to the nearest integral power of 2\n n_fft = basis.shape[1]\n\n if (hop_length is not None and\n n_fft < 2.0**(1 + np.ceil(np.log2(hop_length)))):\n\n n_fft = int(2.0 ** (1 + np.ceil(np.log2(hop_length))))\n\n # re-normalize bases with respect to the FFT window length\n basis *= lengths[:, np.newaxis] / float(n_fft)\n\n # FFT and retain only the non-negative frequencies\n fft_basis = fft.fft(basis, n=n_fft, axis=1)[:, :(n_fft // 2)+1]\n\n # sparsify the basis\n fft_basis = util.sparsify_rows(fft_basis, quantile=sparsity)\n\n return fft_basis, n_fft, lengths\n\n\ndef __trim_stack(cqt_resp, n_bins):\n '''Helper function to trim and stack a collection of CQT responses'''\n\n # cleanup any framing errors at the boundaries\n max_col = min([x.shape[1] for x in cqt_resp])\n\n cqt_resp = np.vstack([x[:, :max_col] for x in cqt_resp][::-1])\n\n # Finally, clip out any bottom 
frequencies that we don't really want\n # Transpose magic here to ensure column-contiguity\n return np.ascontiguousarray(cqt_resp[-n_bins:].T).T\n\n\ndef __cqt_response(y, n_fft, hop_length, fft_basis):\n '''Compute the filter response with a target STFT hop.'''\n\n # Compute the STFT matrix\n D = stft(y, n_fft=n_fft, hop_length=hop_length, window=np.ones)\n\n # And filter response energy\n return fft_basis.dot(D)\n\n\ndef __early_downsample_count(nyquist, filter_cutoff, hop_length, n_octaves):\n '''Compute the number of early downsampling operations'''\n\n downsample_count1 = max(0, int(np.ceil(np.log2(audio.BW_FASTEST * nyquist /\n filter_cutoff)) - 1) - 1)\n\n num_twos = __num_two_factors(hop_length)\n downsample_count2 = max(0, num_twos - n_octaves + 1)\n\n return min(downsample_count1, downsample_count2)\n\n\ndef __early_downsample(y, sr, hop_length, res_type, n_octaves,\n nyquist, filter_cutoff, scale):\n '''Perform early downsampling on an audio signal, if it applies.'''\n\n downsample_count = __early_downsample_count(nyquist, filter_cutoff,\n hop_length, n_octaves)\n\n if downsample_count > 0 and res_type == 'kaiser_fast':\n downsample_factor = 2**(downsample_count)\n\n hop_length //= downsample_factor\n\n if len(y) < downsample_factor:\n raise ParameterError('Input signal length={:d} is too short for '\n '{:d}-octave CQT'.format(len(y), n_octaves))\n\n new_sr = sr / float(downsample_factor)\n y = audio.resample(y, sr, new_sr,\n res_type=res_type,\n scale=True)\n\n # If we're not going to length-scale after CQT, we\n # need to compensate for the downsampling factor here\n if not scale:\n y *= np.sqrt(downsample_factor)\n\n sr = new_sr\n\n return y, sr, hop_length\n\n\ndef __num_two_factors(x):\n \"\"\"Return how many times integer x can be evenly divided by 2.\n\n Returns 0 for non-positive integers.\n \"\"\"\n if x <= 0:\n return 0\n num_twos = 0\n while x % 2 == 0:\n num_twos += 1\n x //= 2\n\n return num_twos\n"
] | [
[
"numpy.log2",
"numpy.sqrt",
"numpy.abs",
"numpy.min",
"numpy.ascontiguousarray",
"scipy.fftpack.fft",
"numpy.max",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
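Note on the record above: cqt() works one octave at a time; the top octave is filtered at the current sampling rate, then the signal is resampled by a factor of two and the hop length halved before the next octave, which is why hop_length must be divisible by 2^(n_octaves - 1). A small sketch of that bookkeeping, using the same constants as the source (the loop itself is illustrative):

bins_per_octave, filter_scale = 12, 1
# quality factor tying filter length to center frequency, as in cqt()
Q = filter_scale / (2.0 ** (1.0 / bins_per_octave) - 1)
print(f"Q ~= {Q:.1f}")                 # ~16.8 for 12 bins per octave

sr, hop = 22050.0, 512
for octave in range(3):                # iterate down the octaves
    print(f"octave {octave}: sr={sr:.0f} Hz, hop={hop} samples")
    sr /= 2.0                          # resample the signal by 2
    hop //= 2                          # keep frames aligned across octaves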
liudengfeng/zipline | [
"01fdd51d83efeb3453e92b7d02c255a06eba49ac",
"01fdd51d83efeb3453e92b7d02c255a06eba49ac",
"01fdd51d83efeb3453e92b7d02c255a06eba49ac",
"01fdd51d83efeb3453e92b7d02c255a06eba49ac",
"01fdd51d83efeb3453e92b7d02c255a06eba49ac",
"01fdd51d83efeb3453e92b7d02c255a06eba49ac",
"01fdd51d83efeb3453e92b7d02c255a06eba49ac",
"01fdd51d83efeb3453e92b7d02c255a06eba49ac"
] | [
"zipline/pipeline/fundamentals/localdata_wy.py",
"zipline/finance/slippage.py",
"zipline/data/bundles/wy_minute_data.py",
"zipline/optimize/factors/sector/base.py",
"zipline/pipeline/fundamentals/writer_cninfo.py",
"zipline/pipeline/fundamentals/yahoo.py",
"tests/pipeline/test_engine.py",
"tests/pipeline/test_multidimensional_dataset.py"
] | [
"\"\"\"\n\n查询本地数据\n\n尽管`bcolz`最终会丢失时区信息,但写入时依旧将时间列转换为UTC时区。\n除asof_date、timestamp列外,其余时间列无需转换\n\n\"\"\"\n\nimport re\nimport warnings\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom cnswd.cninfo.utils import _rename\nfrom cnswd.mongodb import get_db\nfrom cnswd.setting.constants import MAX_WORKER\nfrom cnswd.utils.tools import filter_a\n\nfrom ..common import AD_FIELD_NAME, TS_FIELD_NAME\nfrom .constants import SW_SECTOR_MAPS\n\n# from cnswd.store import (ClassifyTreeStore, DataBrowseStore, MarginStore,\n# TctGnStore, WyStockDailyStore)\nLOCAL_TZ = 'Asia/Shanghai'\nwarnings.filterwarnings('ignore')\n\nSTOCK_PAT = re.compile(r\"^\\d{6}$\")\nA_STOCK_PAT = re.compile(r\"^[036]\\d{5}$\")\n\nNUM_MAPS = {\n 1: '一级',\n 2: '二级',\n 3: '三级',\n 4: '四级',\n}\n\nTO_DORP_PAT_0 = re.compile(r'^[(]?[一二三四五六七八九][)]?([((]\\d[))])?[、]?')\nTO_DORP_PAT_1 = re.compile(r'^[1-9]、|[(()][1-9][))]')\nTO_DORP_PAT_2 = re.compile(r'[、:()-]|\\_|\\(|\\)')\n\nMATCH_ONLY_A = {\n '$match': {\n '$expr': {\n '$in': [\n {\n '$substrBytes': [\n '$股票代码', 0, 1\n ]\n }, [\n '0', '3', '6'\n ]\n ]\n }\n }\n}\n\n# region 辅助函数\n\n\ndef _to_timestamp(df):\n # 无需 tz 信息\n for col in [AD_FIELD_NAME, TS_FIELD_NAME]:\n if col in df.columns:\n # df[col] = df[col].map(lambda x: pd.Timestamp(\n # x, tz=LOCAL_TZ).tz_convert('UTC').to_pydatetime())\n df[col] = df[col].map(pd.Timestamp)\n return df\n\n\ndef _normalized_col_name(x):\n \"\"\"规范列财务报告项目在`pipeline`中的列名称\n\n 去除列名称中的前导数字,中间符号,保留文字及尾部数字\n \"\"\"\n # 去除前导序号\n x = re.sub(TO_DORP_PAT_0, '', x)\n x = re.sub(TO_DORP_PAT_1, '', x)\n x = re.sub(TO_DORP_PAT_2, '', x)\n return x\n\n\ndef _select_only_a(df, only_A, code_col='股票代码'):\n \"\"\"仅含A股数据\"\"\"\n if only_A:\n cond1 = df[code_col].str.startswith('2')\n cond2 = df[code_col].str.startswith('9')\n df = df.loc[~(cond1 | cond2), :]\n return df\n\n\n# endregion\n\n# region 静态数据\n\n\ndef get_stock_info(only_A=True):\n \"\"\"股票基础信息\"\"\"\n db = get_db('cninfo')\n collection = db['基本资料']\n projection = {\n '_id': 0,\n '股票代码': 1,\n '上市日期': 1,\n # 与行业分类重复\n # '申万行业一级名称': 1,\n # '申万行业二级名称': 1,\n # '申万行业三级名称': 1,\n # '证监会一级行业名称': 1,\n # '证监会二级行业名称': 1,\n '省份': 1,\n '城市': 1,\n '注册资本': 1,\n '上市状态': 1,\n '律师事务所': 1,\n '会计师事务所': 1,\n '上市地点': 1,\n }\n sort = {'股票代码': 1}\n pipeline = [\n {'$project': projection},\n {'$sort': sort}\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n df = pd.DataFrame.from_records(\n collection.aggregate(pipeline))\n df.drop_duplicates('股票代码', inplace=True)\n # 剔除未上市、未交易的无效股票\n cond1 = ~ df['上市日期'].isnull()\n cond2 = df['上市日期'] <= pd.Timestamp('today')\n df = df.loc[cond1 & cond2, :]\n df['timestamp'] = df['上市日期']\n df['asof_date'] = df['上市日期'] - pd.Timedelta(days=1)\n df = _to_timestamp(df)\n # 注册资本转换 -> 十分位数\n df['注册资本十分位数'] = pd.qcut(np.log(df['注册资本'].values), 10, labels=False)\n df.rename(columns={'股票代码': 'sid'}, inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n return df\n\n\ndef get_bom_maps(cate, pattern):\n r\"\"\"行业父类编码映射\n\n Example:\n\n >>> pattern = re.compile(r\"^Z\\d{2}$\")\n >>> get_bom_maps('国证行业分类', pattern)\n {'Z01': '能源',\n 'Z02': '原材料',\n 'Z03': '工业',\n 'Z04': '可选消费',\n 'Z05': '主要消费',\n 'Z06': '医药卫生',\n 'Z07': '金融',\n 'Z08': '信息技术',\n 'Z09': '电信业务',\n 'Z10': '公用事业',\n 'Z11': '房地产'}\n \"\"\"\n db = get_db()\n collection = db['分类BOM']\n pipeline = [\n {\n '$match': {\n '分类方式': cate,\n '分类编码': {\n '$regex': pattern\n }\n }\n },\n {\n '$project': {'_id': 0}\n }\n ]\n maps = {}\n for d in 
collection.aggregate(pipeline):\n maps[d['分类编码']] = d['分类名称']\n return maps\n\n\ndef get_industry_stock_list(cate, only_A):\n db = get_db()\n collection = db['股票分类']\n pipeline = [\n {\n '$match': {\n '分类方式': cate\n }\n }, {\n '$unwind': {\n 'path': '$股票列表'\n }\n }, {\n '$project': {\n '_id': 0,\n '分类编码': 1,\n '分类名称': 1,\n '股票代码': \"$股票列表\"\n }\n }\n ]\n if only_A:\n pipeline.append(MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n return df\n\n\ndef get_cn_industry(only_A=True):\n \"\"\"获取国证四级行业分类\"\"\"\n cate = '国证行业分类'\n col_names = {\n '分类编码': '国证四级行业编码',\n '分类名称': '国证四级行业',\n '股票代码': 'sid',\n }\n df = get_industry_stock_list('国证行业分类', only_A)\n if df.empty:\n msg = '在本地数据库中无法获取行业分类数据。\\n'\n msg += '这将导致股票分类数据缺失。\\n'\n msg += '运行`stock clsf`提取网络数据并存储在本地数据库。'\n warnings.warn(msg)\n return pd.DataFrame(columns=col_names.values())\n df.rename(columns=col_names, inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n for level in (1, 2, 3):\n pattern_str = r\"^Z\\d{\" + str(level*2) + \"}$\"\n pattern = re.compile(pattern_str)\n maps = get_bom_maps(cate, pattern)\n digit = level * 2 + 1\n u_num = NUM_MAPS[level]\n code_col = '国证{}行业编码'.format(u_num)\n name_col = '国证{}行业'.format(u_num)\n df[code_col] = df['国证四级行业编码'].map(lambda x: x[:digit])\n df[name_col] = df['国证四级行业编码'].map(lambda x: maps[x[:digit]])\n return df\n\n\ndef get_sw_industry(only_A=True):\n \"\"\"获取申万行业三级分类\"\"\"\n cate = \"申万行业分类\"\n col_names = {\n '分类编码': '申万三级行业编码',\n '分类名称': '申万三级行业',\n '股票代码': 'sid',\n }\n df = get_industry_stock_list(cate, only_A)\n if df.empty:\n msg = '在本地数据库中无法获取行业分类数据。\\n'\n msg += '这将导致股票分类数据缺失。\\n'\n msg += '运行`stock clsf`提取网络数据并存储在本地数据库。'\n warnings.warn(msg)\n return pd.DataFrame(columns=col_names.values())\n df.rename(columns=col_names, inplace=True)\n # S90 为无效数据\n cond = df['申万三级行业编码'] == 'S90'\n df = df[~cond]\n df['sid'] = df['sid'].map(lambda x: int(x))\n for level in (1, 2):\n pattern_str = r\"^S\\d{\" + str(level*2) + \"}$\"\n pattern = re.compile(pattern_str)\n maps = get_bom_maps(cate, pattern)\n digit = level * 2 + 1\n u_num = NUM_MAPS[level]\n code_col = '申万{}行业编码'.format(u_num)\n name_col = '申万{}行业'.format(u_num)\n df[code_col] = df['申万三级行业编码'].map(lambda x: x[:digit])\n df[name_col] = df['申万三级行业编码'].map(lambda x: maps.get(x[:digit], '综合'))\n sw_code_maps = {v: k for k, v in SW_SECTOR_MAPS.items()}\n df['sw_sector'] = df['申万一级行业编码'].map(\n lambda x: sw_code_maps[x]).astype('int64')\n return df\n\n\ndef get_zjh_industry(only_A=True):\n \"\"\"获取证监会行业二级分类\"\"\"\n cate = '证监会行业分类'\n col_names = {\n '分类编码': '证监会二级行业编码',\n '分类名称': '证监会二级行业',\n '股票代码': 'sid',\n }\n df = get_industry_stock_list(cate, only_A)\n if df.empty:\n msg = '在本地数据库中无法获取行业分类数据。\\n'\n msg += '这将导致股票分类数据缺失。\\n'\n msg += '运行`stock clsf`提取网络数据并存储在本地数据库。'\n warnings.warn(msg)\n return pd.DataFrame(columns=col_names.values())\n df.rename(columns=col_names, inplace=True)\n # 混杂了申万编码,剔除\n cond = df['证监会二级行业编码'].str.len() == 3\n df = df[cond]\n df['sid'] = df['sid'].map(lambda x: int(x))\n for level in (1, ):\n pattern_str = r\"^[A-R]$\"\n pattern = re.compile(pattern_str)\n maps = get_bom_maps(cate, pattern)\n digit = (level-1) * 2 + 1\n u_num = NUM_MAPS[level]\n code_col = '证监会{}行业编码'.format(u_num)\n name_col = '证监会{}行业'.format(u_num)\n df[code_col] = df['证监会二级行业编码'].map(lambda x: x[:digit])\n df[name_col] = df['证监会二级行业编码'].map(lambda x: maps.get(x[:digit], '综合'))\n return df\n\n\ndef concept_categories():\n \"\"\"概念类别映射{代码:名称}\"\"\"\n db = get_db()\n collection = db['同花顺概念']\n 
pipeline = [\n {\n '$project': {\n '_id': 0,\n '概念编码': 1,\n '概念名称': 1,\n }\n }\n ]\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n try:\n df.columns = ['code', 'name']\n except ValueError:\n raise NotImplementedError('本地数据库中\"股票概念数据\"为空,请运行`stock thsgn`')\n df.sort_values('code', inplace=True)\n return df.set_index('code').to_dict()['name']\n\n\ndef field_code_concept_maps():\n \"\"\"\n 概念映射二元组\n\n Returns\n -------\n res : 元组\n 第一项:原始概念编码 -> 数据集字段编码(新编码)\n 第二项:数据集字段编码 -> 概念名称\n\n Example\n -------\n 第一项:{'00010002': 'A001', '00010003': 'A002', '00010004': 'A003', ...\n 第二项:{'A001': '参股金融', 'A002': '可转债', 'A003': '上证红利'...\n\n \"\"\"\n vs = concept_categories()\n no, key = pd.factorize(list(vs.keys()), sort=True)\n id_maps = {v: 'A{}'.format(str(k + 1).zfill(3)) for k, v in zip(no, key)}\n name_maps = {v: vs[k] for (k, v) in id_maps.items()}\n return id_maps, name_maps\n\n\ndef get_concept_info(only_A=True):\n \"\"\"股票概念编码信息\n\n Keyword Arguments:\n only_A {bool} -- 只包含A股代码 (default: {True})\n\n Returns:\n pd.DataFrame -- 股票概念编码信息表\n\n Example:\n >>> get_concept_info().head(3)\n sid A001 A002 A003 A004 A005 ... A205\n 1 False False False False False ... False\n 2 False False False False False ... False\n 4 False False False True False ... False\n \"\"\"\n db = get_db()\n collection = db['同花顺概念']\n pipeline = [\n {\n '$unwind': {\n 'path': '$股票列表'\n }\n }, {\n '$project': {\n '_id': 0,\n '概念编码': 1,\n # '概念名称': 1,\n '股票列表': 1\n }\n }\n ]\n ds = collection.aggregate(pipeline)\n\n def func(x):\n if only_A:\n return A_STOCK_PAT.match(x['股票列表'])\n else:\n return STOCK_PAT.match(x['股票列表'])\n\n ds = filter(func, ds)\n df = pd.DataFrame.from_records(ds)\n df.rename(columns={'股票列表': 'sid'}, inplace=True)\n\n out = pd.pivot_table(df,\n values='概念编码',\n index='sid',\n columns='概念编码',\n aggfunc=np.count_nonzero,\n fill_value=0)\n\n id_maps, _ = field_code_concept_maps()\n out.rename(columns=id_maps, inplace=True)\n out = out.astype('bool').reset_index()\n out['sid'] = out['sid'].map(lambda x: int(x))\n return out\n\n\n# endregion\n\n# region 动态数据\n\ndef _change_hist(code, db=None):\n # 深发展A -> 深发展A\n if db is None:\n db = get_db('wy_stock_daily')\n collection = db[code]\n if collection.estimated_document_count() == 0:\n return pd.DataFrame()\n records = collection.find(\n projection={'_id': 0, '名称': 1, '日期': 1},\n sort=[('日期', 1), ('名称', 1,)])\n df = pd.DataFrame.from_records(records)\n df['名称'] = df['名称'].map(_rename)\n cond = df['名称'] != df['名称'].shift(1)\n df = df.loc[cond, :]\n df.rename(columns={'日期': 'asof_date', '名称': '股票简称'}, inplace=True)\n df['sid'] = int(code)\n return df\n\n\ndef get_short_name_changes(only_A=True):\n \"\"\"股票简称变动历史\"\"\"\n db = get_db('wy_stock_daily')\n codes = db.list_collection_names()\n if only_A:\n codes = filter_a(codes)\n func = partial(_change_hist, db=db)\n # 3878只股票 用时 48s\n with ThreadPoolExecutor(MAX_WORKER) as pool:\n r = pool.map(func, codes)\n df = pd.concat(r, ignore_index=True)\n return df\n\n\ndef get_margin_data(only_A=True):\n \"\"\"融资融券数据\"\"\"\n db = get_db('cninfo')\n collection = db['融资融券明细']\n projection = {\n '_id': 0,\n '股票简称': 0,\n }\n # sort = [('股票代码', 1), ('交易日期', 1)]\n df = pd.DataFrame.from_records(\n collection.find(projection=projection))\n df = _select_only_a(df, only_A, '股票代码')\n df.rename(columns={'交易日期': 'timestamp', '股票代码': 'sid'}, inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n # 设置晚8小时\n df['asof_date'] = df['timestamp'] - pd.Timedelta(hours=8)\n df.sort_values(['sid', 'timestamp'], 
inplace=True, ignore_index=True)\n return df\n\n\ndef get_dividend_data(only_A=True):\n \"\"\"现金股利\"\"\"\n db = get_db('cninfo')\n collection = db['分红指标']\n # 使用股权登记日作为 asof_date\n # 此指标仅用于计算年度股息之用,不涉及到所谓知晓日期\n pipeline = [\n {\n '$project': {\n '_id': 0,\n '股票代码': 1,\n '分红年度': 1,\n 'A股股权登记日': 1,\n '派息比例(人民币)': 1\n }\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n cols = {'股票代码': 'sid', 'A股股权登记日': 'asof_date', '派息比例(人民币)': '每股派息'}\n df.rename(columns=cols, inplace=True)\n # 首先将日期缺失值默认为分红年度后一个季度\n cond = df['asof_date'].isnull()\n df.loc[cond, 'asof_date'] = df.loc[cond, '分红年度'] + pd.Timedelta(days=45)\n # 重要:对未分派的记录,不得舍弃\n # 派息NaN -> 0.0 不影响实际意义,加快读写速度\n values = {'每股派息': 0.0}\n df.fillna(value=values, inplace=True)\n # 数值更改为每股派息\n df['每股派息'] = df['每股派息'] / 10.0\n df.sort_values(['sid', 'asof_date'], inplace=True, ignore_index=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n # datetime -> timestamp\n df = _to_timestamp(df)\n df[TS_FIELD_NAME] = df[AD_FIELD_NAME]\n return df\n\n\n# endregion\n\n# region 定期财务报告\n\n# 废弃\n# def _fix_sid_ad_ts(df, col='报告年度', ndays=45):\n# \"\"\"\n# 修复截止日期、公告日期。\n# 如果`asof_date`为空,则使用`col`的值\n# `timestamp`在`col`的值基础上加`ndays`天\"\"\"\n# df['sid'] = df['sid'].map(lambda x: int(x))\n# cond = df.asof_date.isna()\n# df.loc[cond, 'asof_date'] = df.loc[cond, col]\n# df.loc[cond, 'timestamp'] = df.loc[cond, col] + pd.Timedelta(days=ndays)\n# # 由于存在数据不完整的情形,当timestamp为空,在asof_date基础上加ndays\n# cond1 = df.timestamp.isna()\n# df.loc[cond1,\n# 'timestamp'] = df.loc[cond1, 'asof_date'] + pd.Timedelta(days=ndays)\n# # 1991-12-31 时段数据需要特别修正\n# cond2 = df.timestamp.map(lambda x: x.is_quarter_end)\n# cond3 = df.asof_date == df.timestamp\n# df.loc[cond2 & cond3,\n# 'timestamp'] = df.loc[cond2 & cond3,\n# 'asof_date'] + pd.Timedelta(days=ndays)\n\n\ndef _periodly_report(only_A, item_name):\n # 一般而言,定期财务报告截止日期与报告年度相同\n # 但不排除数据更正等情形下,报告年度与截止日期不一致\n to_drop = [\n '_id', '股票简称', '机构名称', '合并类型编码', '合并类型', '报表来源编码', '报表来源',\n '备注', '截止日期', '开始日期'\n ]\n db = get_db('cninfo')\n collection = db[item_name]\n pipeline = [\n {\n '$project': {k: 0 for k in to_drop}\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n # 规范列名称\n df.columns = df.columns.map(_normalized_col_name)\n df.rename(columns={\n \"股票代码\": \"sid\",\n \"报告年度\": \"asof_date\",\n \"公告日期\": \"timestamp\"\n },\n inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n df.sort_values(['sid', 'asof_date'], inplace=True)\n return df\n\n\ndef get_p_balance_data(only_A=True):\n \"\"\"报告期资产负债表\"\"\"\n item_name = '个股报告期资产负债表'\n df = _periodly_report(only_A, item_name)\n return df\n\n\ndef get_p_income_data(only_A=True):\n \"\"\"报告期利润表\"\"\"\n item_name = '个股报告期利润表'\n df = _periodly_report(only_A, item_name)\n return df\n\n\ndef get_p_cash_flow_data(only_A=True):\n \"\"\"报告期现金流量表\"\"\"\n item_name = '个股报告期现金表'\n df = _periodly_report(only_A, item_name)\n return df\n\n\ndef _financial_report_announcement_date(only_A):\n \"\"\"\n 获取财报公告日期,供其他计算类型的表使用\n\n 注:\n 季度报告、财务指标根据定期报告计算得来,数据中不含公告日期。\n 使用定期报告的公告日期作为`timestamp`\n \"\"\"\n db = get_db('cninfo')\n collection = db['个股报告期资产负债表']\n pipeline = [\n {\n '$project': {\n '_id': 0,\n '股票代码': 1,\n '公告日期': 1,\n '报告年度': 1,\n }\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n df.sort_values(['股票代码', '报告年度'], inplace=True, ignore_index=True)\n 
return df\n\n\ndef _get_report(only_A, item_name, to_drop, col='报告年度', keys=['股票代码', '报告年度']):\n \"\"\"\n 获取财务报告数据\n\n 使用报告期资产负债表的公告日期\n \"\"\"\n if '_id' not in to_drop:\n to_drop.append('_id')\n\n db = get_db('cninfo')\n collection = db[item_name]\n pipeline = [\n {\n '$project': {k: 0 for k in to_drop}\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n dates = _financial_report_announcement_date(only_A)\n if col != '报告年度':\n # 处理行业排名\n df['报告年度'] = df.pop(col)\n # 合并使用 公告日期\n df = df.join(dates.set_index(keys), on=keys)\n # 规范列名称\n df.columns = df.columns.map(_normalized_col_name)\n\n df.rename(columns={\n \"股票代码\": \"sid\",\n \"报告年度\": \"asof_date\",\n \"公告日期\": \"timestamp\"\n },\n inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n df.sort_values(['sid', 'asof_date'], inplace=True)\n return df\n\n\n# endregion\n\n# region 单季度财务报告\n\n\ndef get_q_income_data(only_A=True):\n \"\"\"个股单季财务利润表\"\"\"\n item_name = '个股单季财务利润表'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\ndef get_q_cash_flow_data(only_A=True):\n \"\"\"个股单季现金流量表\"\"\"\n item_name = '个股单季现金流量表'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\n# endregion\n\n# region TTM\n\n\ndef get_ttm_income_data(only_A=True):\n \"\"\"个股TTM财务利润表\"\"\"\n item_name = '个股TTM财务利润表'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\ndef get_ttm_cash_flow_data(only_A=True):\n \"\"\"个股TTM现金流量表\"\"\"\n item_name = '个股TTM现金流量表'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\n# endregion\n\n# region 财务指标\n\n\ndef get_periodly_financial_indicator_data(only_A=True):\n \"\"\"个股报告期指标表\"\"\"\n item_name = '个股报告期指标表'\n to_drop = [\n '股票简称', '机构名称', '开始日期', '数据来源编码', '数据来源', 'last_refresh_time', '备注'\n ]\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\ndef get_financial_indicator_ranking_data(only_A=True):\n \"\"\"\n 财务指标行业排名\n\n 级别说明:申银万国二级行业\n \"\"\"\n item_name = '财务指标行业排名'\n to_drop = ['股票简称', '行业ID', '行业级别', '级别说明', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\ndef get_quarterly_financial_indicator_data(only_A=True):\n \"\"\"个股单季财务指标\"\"\"\n item_name = '个股单季财务指标'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\n# endregion\n\n# region 业绩预告\n\n\ndef get_performance_forecaste_data(only_A=True):\n \"\"\"上市公司业绩预告\"\"\"\n item_name = '上市公司业绩预告'\n # 简化写入量,保留`业绩类型`\n to_drop = ['_id', '股票简称', '业绩类型编码', '业绩变化原因', '报告期最新记录标识', '备注']\n db = get_db('cninfo')\n collection = db[item_name]\n pipeline = [\n {\n '$project': {k: 0 for k in to_drop}\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n\n # 业绩预告反映未来事件\n\n cond = df['公告日期'].isnull()\n df.loc[cond, '公告日期'] = df.loc[cond, '报告年度'] - pd.Timedelta(days=45)\n # 保留`报告年度`列\n df.rename(columns={\n \"股票代码\": \"sid\",\n # \"报告年度\": \"asof_date\",\n \"公告日期\": \"timestamp\",\n }, inplace=True)\n # 将 asof_date 定义为前一小时\n df['asof_date'] = df['timestamp'] - pd.Timedelta(hours=1)\n df['sid'] = df['sid'].map(lambda x: int(x))\n # 深证信原始数据中 股票代码 \"002746\"\n # 公告日期 2013-10-13 报告年度 2016-09-30\n # 即做出提前三年的业绩预告,有违常理,需删除\n # 一般而言,业绩预告不会领先报告年度一个季度发布\n cond = df['timestamp'] - 
df['asof_date'] < pd.Timedelta(days=90)\n df = df.loc[cond, :]\n return df\n\n\n# endregion\n\n# region 股东股本\n\n\ndef get_shareholding_concentration_data(only_A=True):\n \"\"\"持股集中度\"\"\"\n item_name = '持股集中度'\n df = _get_report(only_A, item_name, [], col='截止日期')\n df.rename(columns={\n \"A股户数\": \"A股户数\",\n \"B股户数\": \"B股户数\",\n \"H股户数\": \"H股户数\",\n },\n inplace=True)\n # 更改为逻辑类型\n df['前十大股东'] = df['前十大股东'] == '前十大股东'\n df.sort_values(['sid', 'asof_date'], inplace=True)\n return df\n\n\n# endregion\n\n# region 投资评级\n\n\ndef get_investment_rating_data(only_A=True):\n \"\"\"投资评级\"\"\"\n item_name = '投资评级'\n to_drop = ['_id', '前一次投资评级', '股票简称', '投资评级',\n '评级变化', '是否首次评级', \"目标价格(下限)\", \"目标价格(上限)\"]\n db = get_db('cninfo')\n collection = db[item_name]\n pipeline = [\n {\n '$project': {k: 0 for k in to_drop}\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n\n df.rename(columns={\n \"股票代码\": \"sid\",\n \"发布日期\": \"asof_date\",\n \"投资评级(经调整)\": \"投资评级\",\n },\n inplace=True)\n df.dropna(subset=['投资评级'], inplace=True)\n df['timestamp'] = df['asof_date']\n # 至少相差一小时\n df['asof_date'] -= pd.Timedelta(hours=1)\n df['sid'] = df['sid'].map(lambda x: int(x))\n return df\n\n\n# endregion\n",
"#\n# Copyright 2017 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import division\n\nfrom abc import abstractmethod\nimport math\n\nimport numpy as np\nfrom pandas import isnull\nfrom six import with_metaclass\nfrom toolz import merge\n\nfrom zipline.assets import Equity, Future\nfrom zipline.errors import HistoryWindowStartsBeforeData\nfrom zipline.finance.constants import ROOT_SYMBOL_TO_ETA\nfrom zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta\nfrom zipline.finance.transaction import create_transaction\nfrom zipline.utils.cache import ExpiringCache\nfrom zipline.utils.dummy import DummyMapping\nfrom zipline.utils.input_validation import (expect_bounded,\n expect_strictly_bounded)\n\nSELL = 1 << 0\nBUY = 1 << 1\nSTOP = 1 << 2\nLIMIT = 1 << 3\n\nSQRT_252 = math.sqrt(252)\n\nDEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT = 0.025\nDEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT = 0.05\n\n\nclass LiquidityExceeded(Exception):\n pass\n\n\ndef fill_price_worse_than_limit_price(fill_price, order):\n \"\"\"\n Checks whether the fill price is worse than the order's limit price.\n\n Parameters\n ----------\n fill_price: float\n The price to check.\n\n order: zipline.finance.order.Order\n The order whose limit price to check.\n\n Returns\n -------\n bool: Whether the fill price is above the limit price (for a buy) or below\n the limit price (for a sell).\n \"\"\"\n if order.limit:\n # this is tricky! if an order with a limit price has reached\n # the limit price, we will try to fill the order. do not fill\n # these shares if the impacted price is worse than the limit\n # price. return early to avoid creating the transaction.\n\n # buy order is worse if the impacted price is greater than\n # the limit price. sell order is worse if the impacted price\n # is less than the limit price\n if (order.direction > 0 and fill_price > order.limit) or \\\n (order.direction < 0 and fill_price < order.limit):\n return True\n\n return False\n\n\nclass SlippageModel(with_metaclass(FinancialModelMeta)):\n \"\"\"\n Abstract base class for slippage models.\n\n Slippage models are responsible for the rates and prices at which orders\n fill during a simulation.\n\n To implement a new slippage model, create a subclass of\n :class:`~zipline.finance.slippage.SlippageModel` and implement\n :meth:`process_order`.\n\n Methods\n -------\n process_order(data, order)\n\n Attributes\n ----------\n volume_for_bar : int\n Number of shares that have already been filled for the\n currently-filling asset in the current minute. This attribute is\n maintained automatically by the base class. 
It can be used by\n subclasses to keep track of the total amount filled if there are\n multiple open orders for a single asset.\n\n Notes\n -----\n Subclasses that define their own constructors should call\n ``super(<subclass name>, self).__init__()`` before performing other\n initialization.\n \"\"\"\n\n # Asset types that are compatible with the given model.\n allowed_asset_types = (Equity, Future)\n\n def __init__(self):\n self._volume_for_bar = 0\n\n @property\n def volume_for_bar(self):\n return self._volume_for_bar\n\n @abstractmethod\n def process_order(self, data, order):\n \"\"\"\n Compute the number of shares and price to fill for ``order`` in the\n current minute.\n\n Parameters\n ----------\n data : zipline.protocol.BarData\n The data for the given bar.\n order : zipline.finance.order.Order\n The order to simulate.\n\n Returns\n -------\n execution_price : float\n The price of the fill.\n execution_volume : int\n The number of shares that should be filled. Must be between ``0``\n and ``order.amount - order.filled``. If the amount filled is less\n than the amount remaining, ``order`` will remain open and will be\n passed again to this method in the next minute.\n\n Raises\n ------\n zipline.finance.slippage.LiquidityExceeded\n May be raised if no more orders should be processed for the current\n asset during the current bar.\n\n Notes\n -----\n Before this method is called, :attr:`volume_for_bar` will be set to the\n number of shares that have already been filled for ``order.asset`` in\n the current minute.\n\n :meth:`process_order` is not called by the base class on bars for which\n there was no historical volume.\n \"\"\"\n raise NotImplementedError('process_order')\n\n def simulate(self, data, asset, orders_for_asset):\n self._volume_for_bar = 0\n volume = data.current(asset, \"volume\")\n\n if volume == 0:\n return\n\n # can use the close price, since we verified there's volume in this\n # bar.\n price = data.current(asset, \"close\")\n\n # BEGIN\n #\n # Remove this block after fixing data to ensure volume always has\n # corresponding price.\n if isnull(price):\n return\n # END\n dt = data.current_dt\n\n for order in orders_for_asset:\n if order.open_amount == 0:\n continue\n\n order.check_triggers(price, dt)\n if not order.triggered:\n continue\n\n txn = None\n\n try:\n execution_price, execution_volume = \\\n self.process_order(data, order)\n\n if execution_price is not None:\n txn = create_transaction(\n order,\n data.current_dt,\n execution_price,\n execution_volume\n )\n\n except LiquidityExceeded:\n break\n\n if txn:\n self._volume_for_bar += abs(txn.amount)\n yield order, txn\n\n def asdict(self):\n return self.__dict__\n\n\nclass NoSlippage(SlippageModel):\n \"\"\"A slippage model where all orders fill immediately and completely at the\n current close price.\n\n Notes\n -----\n This is primarily used for testing.\n \"\"\"\n @staticmethod\n def process_order(data, order):\n return (\n data.current(order.asset, 'close'),\n order.amount,\n )\n\n\nclass EquitySlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)):\n \"\"\"\n Base class for slippage models which only support equities.\n \"\"\"\n allowed_asset_types = (Equity,)\n\n\nclass FutureSlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)):\n \"\"\"\n Base class for slippage models which only support futures.\n \"\"\"\n allowed_asset_types = (Future,)\n\n\nclass VolumeShareSlippage(SlippageModel):\n \"\"\"\n Model slippage as a quadratic function of percentage of historical volume.\n\n Orders 
to buy will be filled at::\n\n price * (1 + price_impact * (volume_share ** 2))\n\n Orders to sell will be filled at::\n\n price * (1 - price_impact * (volume_share ** 2))\n\n where ``price`` is the close price for the bar, and ``volume_share`` is the\n percentage of minutely volume filled, up to a max of ``volume_limit``.\n\n Parameters\n ----------\n volume_limit : float, optional\n Maximum percent of historical volume that can fill in each bar. 0.5\n means 50% of historical volume. 1.0 means 100%. Default is 0.025 (i.e.,\n 2.5%).\n price_impact : float, optional\n Scaling coefficient for price impact. Larger values will result in more\n simulated price impact. Smaller values will result in less simulated\n price impact. Default is 0.1.\n \"\"\"\n def __init__(self,\n volume_limit=DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT,\n price_impact=0.1):\n\n super(VolumeShareSlippage, self).__init__()\n\n self.volume_limit = volume_limit\n self.price_impact = price_impact\n\n def __repr__(self):\n return \"\"\"\n{class_name}(\n volume_limit={volume_limit},\n price_impact={price_impact})\n\"\"\".strip().format(class_name=self.__class__.__name__,\n volume_limit=self.volume_limit,\n price_impact=self.price_impact)\n\n def process_order(self, data, order):\n volume = data.current(order.asset, \"volume\")\n\n max_volume = self.volume_limit * volume\n\n # price impact accounts for the total volume of transactions\n # created against the current minute bar\n remaining_volume = max_volume - self.volume_for_bar\n if remaining_volume < 1:\n # we can't fill any more transactions\n raise LiquidityExceeded()\n\n # the current order amount will be the min of the\n # volume available in the bar or the open amount.\n cur_volume = int(min(remaining_volume, abs(order.open_amount)))\n\n if cur_volume < 1:\n return None, None\n\n # tally the current amount into our total amount ordered.\n # total amount will be used to calculate price impact\n total_volume = self.volume_for_bar + cur_volume\n\n volume_share = min(total_volume / volume,\n self.volume_limit)\n\n price = data.current(order.asset, \"close\")\n\n # BEGIN\n #\n # Remove this block after fixing data to ensure volume always has\n # corresponding price.\n if isnull(price):\n return\n # END\n\n simulated_impact = volume_share ** 2 \\\n * math.copysign(self.price_impact, order.direction) \\\n * price\n impacted_price = price + simulated_impact\n\n if fill_price_worse_than_limit_price(impacted_price, order):\n return None, None\n\n return (\n impacted_price,\n math.copysign(cur_volume, order.direction)\n )\n\n\nclass FixedSlippage(SlippageModel):\n \"\"\"\n Simple model assuming a fixed-size spread for all assets.\n\n Parameters\n ----------\n spread : float, optional\n Size of the assumed spread for all assets.\n Orders to buy will be filled at ``close + (spread / 2)``.\n Orders to sell will be filled at ``close - (spread / 2)``.\n\n Notes\n -----\n This model does not impose limits on the size of fills. 
An order for an\n asset will always be filled as soon as any trading activity occurs in the\n order's asset, even if the size of the order is greater than the historical\n volume.\n \"\"\"\n def __init__(self, spread=0.0):\n super(FixedSlippage, self).__init__()\n self.spread = spread\n\n def __repr__(self):\n return '{class_name}(spread={spread})'.format(\n class_name=self.__class__.__name__, spread=self.spread,\n )\n\n def process_order(self, data, order):\n price = data.current(order.asset, \"close\")\n\n return (\n price + (self.spread / 2.0 * order.direction),\n order.amount\n )\n\n\nclass MarketImpactBase(SlippageModel):\n \"\"\"\n Base class for slippage models which compute a simulated price impact\n according to a history lookback.\n \"\"\"\n\n NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 10.0 / 10000\n\n def __init__(self):\n super(MarketImpactBase, self).__init__()\n self._window_data_cache = ExpiringCache()\n\n @abstractmethod\n def get_txn_volume(self, data, order):\n \"\"\"\n Return the number of shares we would like to order in this minute.\n\n Parameters\n ----------\n data : BarData\n order : Order\n\n Return\n ------\n int : the number of shares\n \"\"\"\n raise NotImplementedError('get_txn_volume')\n\n @abstractmethod\n def get_simulated_impact(self,\n order,\n current_price,\n current_volume,\n txn_volume,\n mean_volume,\n volatility):\n \"\"\"\n Calculate simulated price impact.\n\n Parameters\n ----------\n order : The order being processed.\n current_price : Current price of the asset being ordered.\n current_volume : Volume of the asset being ordered for the current bar.\n txn_volume : Number of shares/contracts being ordered.\n mean_volume : Trailing ADV of the asset.\n volatility : Annualized daily volatility of returns.\n\n Return\n ------\n int : impact on the current price.\n \"\"\"\n raise NotImplementedError('get_simulated_impact')\n\n def process_order(self, data, order):\n if order.open_amount == 0:\n return None, None\n\n minute_data = data.current(order.asset, ['volume', 'high', 'low'])\n mean_volume, volatility = self._get_window_data(data, order.asset, 20)\n\n # Price to use is the average of the minute bar's open and close.\n price = np.mean([minute_data['high'], minute_data['low']])\n\n volume = minute_data['volume']\n if not volume:\n return None, None\n\n txn_volume = int(\n min(self.get_txn_volume(data, order), abs(order.open_amount))\n )\n\n # If the computed transaction volume is zero or a decimal value, 'int'\n # will round it down to zero. 
In that case just bail.\n if txn_volume == 0:\n return None, None\n\n if mean_volume == 0 or np.isnan(volatility):\n # If this is the first day the contract exists or there is no\n # volume history, default to a conservative estimate of impact.\n simulated_impact = price * self.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT\n else:\n simulated_impact = self.get_simulated_impact(\n order=order,\n current_price=price,\n current_volume=volume,\n txn_volume=txn_volume,\n mean_volume=mean_volume,\n volatility=volatility,\n )\n\n impacted_price = \\\n price + math.copysign(simulated_impact, order.direction)\n\n if fill_price_worse_than_limit_price(impacted_price, order):\n return None, None\n\n return impacted_price, math.copysign(txn_volume, order.direction)\n\n def _get_window_data(self, data, asset, window_length):\n \"\"\"\n Internal utility method to return the trailing mean volume over the\n past 'window_length' days, and volatility of close prices for a\n specific asset.\n\n Parameters\n ----------\n data : The BarData from which to fetch the daily windows.\n asset : The Asset whose data we are fetching.\n window_length : Number of days of history used to calculate the mean\n volume and close price volatility.\n\n Returns\n -------\n (mean volume, volatility)\n \"\"\"\n try:\n values = self._window_data_cache.get(asset, data.current_session)\n except KeyError:\n try:\n # Add a day because we want 'window_length' complete days,\n # excluding the current day.\n volume_history = data.history(\n asset, 'volume', window_length + 1, '1d',\n )\n close_history = data.history(\n asset, 'close', window_length + 1, '1d',\n )\n except HistoryWindowStartsBeforeData:\n # If there is not enough data to do a full history call, return\n # values as if there was no data.\n return 0, np.NaN\n\n # Exclude the first value of the percent change array because it is\n # always just NaN.\n close_volatility = close_history[:-1].pct_change()[1:].std(\n skipna=False,\n )\n values = {\n 'volume': volume_history[:-1].mean(),\n 'close': close_volatility * SQRT_252,\n }\n self._window_data_cache.set(asset, values, data.current_session)\n\n return values['volume'], values['close']\n\n\nclass VolatilityVolumeShare(MarketImpactBase):\n \"\"\"\n Model slippage for futures contracts according to the following formula:\n\n new_price = price + (price * MI / 10000),\n\n where 'MI' is market impact, which is defined as:\n\n MI = eta * sigma * sqrt(psi)\n\n - ``eta`` is a constant which varies by root symbol.\n - ``sigma`` is 20-day annualized volatility.\n - ``psi`` is the volume traded in the given bar divided by 20-day ADV.\n\n Parameters\n ----------\n volume_limit : float\n Maximum percentage (as a decimal) of a bar's total volume that can be\n traded.\n eta : float or dict\n Constant used in the market impact formula. If given a float, the eta\n for all futures contracts is the same. 
If given a dictionary, it must\n map root symbols to the eta for contracts of that symbol.\n \"\"\"\n\n NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 7.5 / 10000\n allowed_asset_types = (Future,)\n\n def __init__(self, volume_limit, eta=ROOT_SYMBOL_TO_ETA):\n super(VolatilityVolumeShare, self).__init__()\n self.volume_limit = volume_limit\n\n # If 'eta' is a constant, use a dummy mapping to treat it as a\n # dictionary that always returns the same value.\n # NOTE: This dictionary does not handle unknown root symbols, so it may\n # be worth revisiting this behavior.\n if isinstance(eta, (int, float)):\n self._eta = DummyMapping(float(eta))\n else:\n # Eta is a dictionary. If the user's dictionary does not provide a\n # value for a certain contract, fall back on the pre-defined eta\n # values per root symbol.\n self._eta = merge(ROOT_SYMBOL_TO_ETA, eta)\n\n def __repr__(self):\n if isinstance(self._eta, DummyMapping):\n # Eta is a constant, so extract it.\n eta = self._eta['dummy key']\n else:\n eta = '<varies>'\n return '{class_name}(volume_limit={volume_limit}, eta={eta})'.format(\n class_name=self.__class__.__name__,\n volume_limit=self.volume_limit,\n eta=eta,\n )\n\n def get_simulated_impact(self,\n order,\n current_price,\n current_volume,\n txn_volume,\n mean_volume,\n volatility):\n eta = self._eta[order.asset.root_symbol]\n psi = txn_volume / mean_volume\n\n market_impact = eta * volatility * math.sqrt(psi)\n\n # We divide by 10,000 because this model computes to basis points.\n # To convert from bps to % we need to divide by 100, then again to\n # convert from % to fraction.\n return (current_price * market_impact) / 10000\n\n def get_txn_volume(self, data, order):\n volume = data.current(order.asset, 'volume')\n return volume * self.volume_limit\n\n\nclass FixedBasisPointsSlippage(SlippageModel):\n \"\"\"\n Model slippage as a fixed percentage difference from historical minutely\n close price, limiting the size of fills to a fixed percentage of historical\n minutely volume.\n\n Orders to buy are filled at::\n\n historical_price * (1 + (basis_points * 0.0001))\n\n Orders to sell are filled at::\n\n historical_price * (1 - (basis_points * 0.0001))\n\n Fill sizes are capped at::\n\n historical_volume * volume_limit\n\n Parameters\n ----------\n basis_points : float, optional\n Number of basis points of slippage to apply for each fill. Default\n is 5 basis points.\n volume_limit : float, optional\n Fraction of trading volume that can be filled each minute. 
Default is\n 10% of trading volume.\n\n Notes\n -----\n - A basis point is one one-hundredth of a percent.\n - This class, default-constructed, is zipline's default slippage model for\n equities.\n \"\"\"\n @expect_bounded(\n basis_points=(0, None),\n __funcname='FixedBasisPointsSlippage',\n )\n @expect_strictly_bounded(\n volume_limit=(0, None),\n __funcname='FixedBasisPointsSlippage',\n )\n def __init__(self, basis_points=5.0, volume_limit=0.1):\n super(FixedBasisPointsSlippage, self).__init__()\n self.basis_points = basis_points\n self.percentage = self.basis_points / 10000.0\n self.volume_limit = volume_limit\n\n def __repr__(self):\n return \"\"\"\n{class_name}(\n basis_points={basis_points},\n volume_limit={volume_limit},\n)\n\"\"\".strip().format(\n class_name=self.__class__.__name__,\n basis_points=self.basis_points,\n volume_limit=self.volume_limit,\n )\n\n def process_order(self, data, order):\n\n volume = data.current(order.asset, \"volume\")\n max_volume = int(self.volume_limit * volume)\n\n price = data.current(order.asset, \"close\")\n shares_to_fill = min(abs(order.open_amount),\n max_volume - self.volume_for_bar)\n\n if shares_to_fill == 0:\n raise LiquidityExceeded()\n\n return (\n price + price * (self.percentage * order.direction),\n shares_to_fill * order.direction\n )",
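The fill-price arithmetic of FixedBasisPointsSlippage can be checked by hand. A minimal standalone sketch with toy numbers (no zipline objects; the close price, bar volume, and order amount below are made up for illustration):

import math

basis_points = 5.0
volume_limit = 0.1
percentage = basis_points / 10000.0   # 5 bps -> 0.0005

close_price = 100.0
bar_volume = 10000
open_amount = 2500                    # a buy order, so direction = +1
direction = 1 if open_amount > 0 else -1

max_volume = int(volume_limit * bar_volume)         # at most 1000 shares this bar
shares_to_fill = min(abs(open_amount), max_volume)  # capped at 1000

# buys fill 5 bps *above* the close, sells 5 bps below
fill_price = close_price + close_price * (percentage * direction)
assert math.isclose(fill_price, 100.05)
print(shares_to_fill * direction, fill_price)       # 1000 100.05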
"\"\"\"\n网易分钟级别数据\n\n后台任务提取股票、指数实时报价数据特点\n\n+ 每分钟采集一次数据\n+ 定时采集已经排除午休时段\n+ 指数数据 2020-09-28 开始\n\"\"\"\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom functools import lru_cache, partial\n\nimport pandas as pd\nfrom cnswd.mongodb import get_db\nfrom cnswd.setting.constants import MAX_WORKER\nfrom trading_calendars import get_calendar\n\nfrom .wy_data import _fetch_single_index\n\n\nINDEX_QUOTE_START = pd.Timestamp('2020-11-02')\n\n\ndef encode_index_code(x, offset=1000000):\n i = int(x) + offset\n return str(i).zfill(7)\n\n\ndef decode_index_code(x, offset=1000000):\n i = int(x) - offset\n return str(i).zfill(6)\n\n\n@lru_cache(None)\ndef tminutes(start, end):\n calendar = get_calendar('XSHG')\n fmt = r\"%Y-%m-%d\"\n sessions = calendar.sessions_in_range(\n start.strftime(fmt), end.strftime(fmt))\n return calendar.minutes_for_sessions_in_range(\n sessions[0], sessions[-1]\n ).tz_convert(calendar.tz).tz_localize(None)\n\n\ndef _single_minutely_equity(one_day, code, db=None, is_index=False):\n if db is None:\n db = get_db('wy_index_quotes') if is_index else get_db('wy_quotes')\n name = one_day.strftime(r\"%Y-%m-%d\")\n if name not in db.list_collection_names():\n return pd.DataFrame()\n collection = db[name]\n # 存在延时\n start = one_day.replace(hour=9, minute=30)\n end = one_day.replace(hour=15, minute=1)\n predicate = {\n 'code': code,\n 'time': {'$gte': start, '$lte': end},\n }\n projection = {\n 'datetime': '$time',\n 'close': '$price',\n 'open': 1,\n 'high': 1,\n 'low': 1,\n 'volume': 1,\n '_id': 0\n }\n sort = [('datetime', 1)]\n cursor = collection.find(predicate, projection=projection, sort=sort)\n df = pd.DataFrame.from_records(cursor)\n if df.empty:\n return df\n df['datetime'] = df['datetime'].dt.floor('T')\n df.drop_duplicates(['datetime'], keep='last', inplace=True)\n df.set_index(['datetime'], inplace=True)\n return df\n\n\ndef _quote_to_ohlcv(df, one_day):\n m_index = tminutes(one_day, one_day)\n df = df.copy()\n df = df.reindex(m_index, method='bfill')\n\n resampled = df.resample('1T', label='right')\n ohlc = resampled['close'].ohlc()\n\n ohlc = ohlc.reindex(m_index, method='ffill')\n # 反应在实时报价的成交量为累积值\n v = df['volume'].diff()\n ohlcv = pd.concat([ohlc, v], axis=1)\n\n first_loc = df.index.indexer_at_time('09:31')\n if len(first_loc):\n ohlcv.iloc[0, :] = df.iloc[first_loc, :][ohlcv.columns].values[0]\n return ohlcv.sort_index()\n\n\ndef _fetch_single_minutely_equity(one_day, stock_code, db=None, is_index=False):\n \"\"\"\n Notes:\n ------\n 每天交易数据长度应为240\n index.tz is None 本地时区时间\n Examples\n --------\n >>> stock_code = '000333'\n >>> one_day = pd.Timestamp('2020-07-31 00:00:00', freq='B')\n >>> df = _fetch_single_minutely_equity(one_day, stock_code)\n >>> df\n open\thigh\tlow\tclose\tvolume\n 2020-09-24 09:31:00\t15.59\t15.61\t15.51\t15.55\t1601609.0\n 2020-09-24 09:32:00\t15.55\t15.55\t15.55\t15.55\t491256.0\n 2020-09-24 09:33:00\t15.55\t15.55\t15.55\t15.55\t279342.0\n 2020-09-24 09:34:00\t15.54\t15.54\t15.54\t15.54\t308431.0\n 2020-09-24 09:35:00\t15.51\t15.51\t15.51\t15.51\t376372.0\n ...\t...\t...\t...\t...\t...\n 2020-09-24 14:56:00\t15.14\t15.14\t15.14\t15.14\t458404.0\n 2020-09-24 14:57:00\t15.13\t15.13\t15.13\t15.13\t350426.0\n 2020-09-24 14:58:00\t15.14\t15.14\t15.14\t15.14\t0.0\n 2020-09-24 14:59:00\t15.14\t15.14\t15.14\t15.14\t0.0\n 2020-09-24 15:00:00\t15.14\t15.14\t15.14\t15.14\t1547479.0\n 240 rows × 5 columns\n \"\"\"\n df = _single_minutely_equity(one_day, stock_code, db, is_index)\n cols = ['open', 'high', 'low', 'close', 'volume']\n index = 
tminutes(one_day, one_day)\n default = pd.DataFrame(0.0, columns=cols, index=index)\n if df.empty:\n return default\n try:\n return _quote_to_ohlcv(df, one_day)\n except ValueError:\n return default\n\n\ndef _index_daily_to_minute(code, one_day):\n \"\"\"将指数日线数据转换为分钟级别数据\"\"\"\n cols = ['date', 'open', 'high', 'low', 'close', 'volume']\n index = tminutes(one_day, one_day)\n default = pd.DataFrame(\n 0.0, columns=['open', 'high', 'low', 'close', 'volume'], index=index)\n try:\n df = _fetch_single_index(code, one_day, one_day)\n except KeyError:\n return default\n if df.empty:\n return default\n df = df[cols]\n df['date'] = df['date'].map(lambda x: x.replace(hour=9, minute=31))\n df.set_index('date', inplace=True)\n df = df.reindex(index, method='ffill')\n return df\n\n\ndef _index_minute_data(code, dates):\n # 日线 -> 分钟\n d_dates = [d for d in dates if d < INDEX_QUOTE_START]\n # 直接使用分钟数据\n m_dates = [d for d in dates if d >= INDEX_QUOTE_START]\n\n d_dfs = [_index_daily_to_minute(code, d) for d in d_dates]\n\n db = get_db('wy_index_quotes')\n code = decode_index_code(code)\n func = partial(_fetch_single_minutely_equity,\n stock_code=code, db=db, is_index=True)\n with ThreadPoolExecutor(MAX_WORKER) as executor:\n m_dfs = executor.map(func, m_dates)\n\n dfs = d_dfs + [df for df in m_dfs if df is not None]\n return pd.concat(dfs).sort_index()\n\n\ndef fetch_single_minutely_equity(code, start, end):\n \"\"\"\n 从本地数据库读取单个股票期间分钟级别交易明细数据\n\n **注意** \n 交易日历分钟自9:31~11:30 13:01~15:00\n 在数据库中,分钟级别成交数据分日期存储\n\n Parameters\n ----------\n code : str\n 要获取数据的股票代码\n start_date : datetime-like\n 自开始日期(包含该日)\n end_date : datetime-like\n 至结束日期\n\n return\n ----------\n DataFrame: OHLCV列的DataFrame对象。\n\n Examples\n --------\n >>> stock_code = '000333'\n >>> start = '2020-06-29'\n >>> end = pd.Timestamp('2020-06-30')\n >>> df = fetch_single_minutely_equity(stock_code, start, end)\n >>> df.tail()\n close high low open volume\n 2018-04-19 14:56:00 51.55 51.56 51.50 51.55 376400\n 2018-04-19 14:57:00 51.55 51.55 51.55 51.55 20000\n 2018-04-19 14:58:00 51.55 51.55 51.55 51.55 0\n 2018-04-19 14:59:00 51.55 51.55 51.55 51.55 0\n 2018-04-19 15:00:00 51.57 51.57 51.57 51.57 353900\n \"\"\"\n calendar = get_calendar('XSHG')\n fmt = r\"%Y-%m-%d\"\n dates = calendar.sessions_in_range(\n start.strftime(fmt), end.strftime(fmt)).tz_localize(None)\n cols = ['open', 'high', 'low', 'close', 'volume']\n\n # 指数分钟级别数据\n if len(code) == 7:\n return _index_minute_data(code, dates)\n\n db = get_db('wy_quotes')\n func = partial(_fetch_single_minutely_equity,\n stock_code=code, db=db, is_index=False)\n with ThreadPoolExecutor(MAX_WORKER) as executor:\n dfs = executor.map(func, dates)\n return pd.concat(dfs).sort_index()\n",
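The trickiest part of _quote_to_ohlcv is that the feed reports *cumulative* volume, so per-minute volume is the first difference of the series. A pandas-only sketch with made-up quote snapshots (the numbers mirror the docstring example above):

import pandas as pd

# Toy per-minute quote snapshots: 'close' is the last price, 'volume' is
# the cumulative volume reported by the real-time feed.
idx = pd.date_range('2020-09-24 09:31', periods=4, freq='T')
quotes = pd.DataFrame(
    {'close': [15.55, 15.55, 15.54, 15.51],
     'volume': [1601609, 2092865, 2372207, 2680638]},
    index=idx,
)

# Per-minute traded volume is the diff of the cumulative series; the first
# bar keeps the raw cumulative value, as _quote_to_ohlcv does for 09:31.
per_minute = quotes['volume'].diff()
per_minute.iloc[0] = quotes['volume'].iloc[0]
print(per_minute)   # 1601609, 491256, 279342, 308431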
"import numpy as np\n\nfrom zipline.pipeline.data import CNEquityPricing\nfrom zipline.pipeline.factors import CustomFactor\nfrom zipline.pipeline.factors.statistical import vectorized_beta\nfrom zipline.pipeline.fundamentals import Fundamentals\nfrom zipline.utils.math_utils import nanmean\n\n\nPPY = 244 # 每年交易天数\n\n\nclass BaseExposure(CustomFactor):\n\n window_length = PPY * 2 # 2年\n sector_code = None\n\n def compute(self, today, assets, out, closes, sectors):\n res = np.zeros(closes.shape[1])\n change_ratio = np.diff(closes, axis=0) / closes[:-1]\n latest_sectors = sectors[-1]\n\n stock_in_sector = latest_sectors == self.sector_code\n change_ratio_in_sector = change_ratio[:, stock_in_sector]\n\n # epsilon = 0.000001\n # nan_locs = np.where(np.isnan(change_ratio_in_sector))[1] # 列\n # print(assets[np.unique(nan_locs)])\n\n # change_ratio_in_sector = np.where(np.isnan(change_ratio_in_sector), epsilon, change_ratio_in_sector)\n # 行业收益率\n sector_returns = nanmean(change_ratio_in_sector, axis=1).reshape(-1, 1)\n\n allowed_missing = int(self.window_length * 0.25)\n # 行业内各股票收益率基于行业平均收益率回归得到各股票的β值,即敞口\n beta = vectorized_beta(\n dependents=change_ratio_in_sector,\n independent=sector_returns,\n allowed_missing=allowed_missing,\n )\n # 更新β值,其余部分为0\n res[stock_in_sector] = beta\n out[:] = res\n\n\nclass CNSectorExposure(BaseExposure):\n # 使用复权价确保正确计算收益率\n # 暂时缺失 b_close\n inputs = (CNEquityPricing.close,\n Fundamentals.info.sector_code)\n\n\nclass SWSectorExposure(BaseExposure):\n # 使用复权价确保正确计算收益率\n # 暂时缺失 b_close\n inputs = (CNEquityPricing.close,\n Fundamentals.info.sw_sector)\n",
"\"\"\"\n如表行数超大,bcolz写入str数据会异常缓慢,此时应尽量避免写入字符串类型数据,而是转换为类型进行处理。\n\n使用默认输入缺失值\n bool_value False\n dt_value NaT\n float_value NaN\n int_value 0\n str_value None\n\n替代方案:\n 以附加属性写入信息\n 或者更改为类别\n\n性能:\n 写入1千万行3列长度为6的随机数DataFrame,耗时不到1秒\n\"\"\"\nimport os\nimport sys\nimport time\nimport warnings\nfrom shutil import rmtree\n\nimport bcolz\nimport logbook\nimport pandas as pd\n\nfrom cnswd.utils import make_logger\n\nfrom ..common import AD_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME\nfrom .base import bcolz_table_path\nfrom .localdata import (get_dividend_data,\n get_financial_indicator_ranking_data, get_margin_data,\n get_p_balance_data, get_p_cash_flow_data,\n get_p_income_data, get_performance_forecaste_data,\n get_periodly_financial_indicator_data,\n get_q_cash_flow_data, get_q_income_data,\n get_quarterly_financial_indicator_data,\n get_ttm_cash_flow_data, get_ttm_income_data)\nfrom .preprocess import (get_investment_rating,\n get_short_name_history, get_static_info_table)\n\n# from .yahoo import YAHOO_ITEMS, read_item_data\n\n# 设置显示日志\nlogbook.set_datetime_format('local')\nlogbook.StreamHandler(sys.stdout).push_application()\nlogger = make_logger('深证信数据包')\n\nTAB_MAPS = {\n # 定期财务报告\n 'periodly_balance_sheets': get_p_balance_data,\n 'periodly_income_statements': get_p_income_data,\n 'periodly_cash_flow_statements': get_p_cash_flow_data,\n # TTM财务报告\n 'ttm_income_statements': get_ttm_income_data,\n 'ttm_cash_flow_statements': get_ttm_cash_flow_data,\n # 报告期财务指标\n 'periodly_financial_indicators': get_periodly_financial_indicator_data,\n # 季度财务指标\n 'quarterly_financial_indicators': get_quarterly_financial_indicator_data,\n # 财务指标行业排名\n 'financial_indicator_rankings': get_financial_indicator_ranking_data,\n # 上市公司业绩预告\n 'performance_forecastes': get_performance_forecaste_data,\n # 季度利润表\n 'quarterly_income_statements': get_q_income_data,\n # 季度现金流量表\n 'quarterly_cash_flow_statements': get_q_cash_flow_data,\n}\n\n\ndef _fix_mixed_type(df):\n # 1. 修复str类型中存在混合类型的列 如 ldf np.NaN lyy\n # 2. 
bool 类型含空值\n for col in df.columns:\n if pd.api.types.is_string_dtype(df[col]):\n # 注意,必须输入字符None,否则会出错\n df.fillna(value={col: 'None'}, inplace=True)\n if pd.api.types.is_bool_dtype(df[col]):\n df.fillna(value={col: False}, inplace=True)\n if pd.api.types.is_integer_dtype(df[col]):\n df.fillna(value={col: -1}, inplace=True)\n if pd.api.types.is_datetime64tz_dtype(df[col]):\n raise ValueError('时间列不得带时区信息')\n\n\ndef write_dataframe(df, table_name, attr_dict=None):\n \"\"\"以bcolz格式写入数据框\"\"\"\n # 转换为bcolz格式并存储\n rootdir = bcolz_table_path(table_name)\n if os.path.exists(rootdir):\n rmtree(rootdir)\n for c in (AD_FIELD_NAME, TS_FIELD_NAME, SID_FIELD_NAME):\n if c in df.columns and df[c].hasnans:\n warnings.warn(f'{c}列含有空值,已移除')\n df = df.loc[~df[c].isnull(), :]\n # 修复`asof_date newer than timestamp`\n # 至少相差一小时\n if AD_FIELD_NAME in df.columns and TS_FIELD_NAME in df.columns:\n cond = df[AD_FIELD_NAME] == df[TS_FIELD_NAME]\n df.loc[cond, AD_FIELD_NAME] = df.loc[cond,\n TS_FIELD_NAME] - pd.Timedelta(hours=1)\n # 修复混合类型,填充默认值,否则bcolz.ctable.fromdataframe会出错\n _fix_mixed_type(df)\n # 丢失tz信息\n ct = bcolz.ctable.fromdataframe(df, rootdir=rootdir)\n if attr_dict:\n # 设置属性\n for k, v in attr_dict.items():\n ct.attrs[k] = v\n ct.flush()\n logger.info(f'{len(df)} 行 写入:{rootdir}')\n\n\ndef write_static_info_to_bcolz():\n \"\"\"写入股票分类等静态数据\"\"\"\n logger.info('读取股票分类数据')\n table_name = 'infoes'\n df, attr_dict = get_static_info_table()\n write_dataframe(df, table_name, attr_dict)\n\n\ndef write_dynamic_data_to_bcolz():\n \"\"\"\n 将每日变动数据以bcolz格式存储,提高数据集加载速度\n\n 项目:\n 1. 交易数据(含融资融券)\n 2. 现金股利\n 3. 股票简称变动历史\n 4. 投资评级\n \"\"\"\n logger.info('读取融资融券')\n df_m = get_margin_data()\n write_dataframe(df_m, 'margin')\n logger.info('读取现金股利')\n df_dd = get_dividend_data()\n write_dataframe(df_dd, 'dividend')\n logger.info('读取股票简称变动历史')\n df_sn = get_short_name_history()\n write_dataframe(df_sn, 'shortname', {})\n logger.info('读取股票投资评级')\n df_ir, attr_dic = get_investment_rating()\n write_dataframe(df_ir, 'investment_rating', attr_dic)\n\n\ndef write_financial_data_to_bcolz():\n \"\"\"写入财务报告数据\n\n 项目:\n 1. 定期资产负债表\n 2. 定期利润表\n 3. 定期现金流量表\n 4. TTM利润表\n 5. TTM现金流量表\n 6. 报告期财务指标\n 7. 季度财务指标\n 8. 财务指标行业排名\n 9. 上市公司业绩预告\n 10. 季度利润表\n 11. 季度现金流量表\n \"\"\"\n for table, func in TAB_MAPS.items():\n logger.info(f'读取{table}')\n write_dataframe(func(), table)\n\n\n# def write_yahoo():\n# for item in YAHOO_ITEMS:\n# df = read_item_data(item)\n# write_dataframe(df, item)\n\n\ndef write_data_to_bcolz():\n \"\"\"写入Fundamentals数据\"\"\"\n print('准备写入Fundamentals数据......')\n s = time.time()\n write_static_info_to_bcolz()\n write_dynamic_data_to_bcolz()\n write_financial_data_to_bcolz()\n # write_yahoo()\n print(f\"用时{time.time() - s:.2f}秒\")\n",
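A minimal round-trip of write_dataframe's storage pattern, for reference. A sketch assuming bcolz is installed; the path and attribute names here are illustrative, not part of the module:

import os
from shutil import rmtree

import bcolz
import pandas as pd

rootdir = 'example_table'           # illustrative path
if os.path.exists(rootdir):
    rmtree(rootdir)                 # same overwrite pattern as write_dataframe

df = pd.DataFrame({'sid': [1, 2], 'value': [1.5, 2.5]})
ct = bcolz.ctable.fromdataframe(df, rootdir=rootdir)
ct.attrs['note'] = 'demo'           # attrs persist on disk alongside the table
ct.flush()

# read it back to verify the round-trip
print(bcolz.open(rootdir).todataframe())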
"\"\"\"\n\n雅虎财经\n\nTODO:待完成\n\"\"\"\nfrom cnswd.mongodb import get_db\nimport pandas as pd\n\n\ndef get_ttm_valuation_measures():\n \"\"\"TTM估值指标\"\"\"\n db = get_db('yahoo')\n collection = db['valuation_measures']\n pipeline = [\n {\n '$match': {\n '期间类型': 'TTM',\n }\n },\n {\n '$project': {'_id': 0, '期间类型': 0}\n }\n ]\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n # df.drop(['期间类型'], axis=1, inplace=True)\n df.rename(columns={'符号': 'sid', '截至日期': 'asof_date'}, inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n return df\n",
"\"\"\"\nTests for SimplePipelineEngine\n完成测试 ✔\n\"\"\"\nfrom __future__ import division\nfrom collections import OrderedDict\nfrom itertools import product\nfrom operator import add, sub\nfrom unittest import skipIf\nimport pytest\nfrom parameterized import parameterized\nimport numpy as np\nfrom numpy import (\n arange,\n array,\n concatenate,\n float32,\n float64,\n full,\n full_like,\n log,\n nan,\n tile,\n where,\n zeros,\n)\nfrom numpy.testing import assert_almost_equal\nfrom pandas import (\n Categorical,\n DataFrame,\n date_range,\n Int64Index,\n MultiIndex,\n Series,\n Timestamp,\n)\nfrom pandas.compat.chainmap import ChainMap\nfrom pandas.testing import assert_frame_equal\nfrom six import iteritems, itervalues\nfrom toolz import merge\n\nfrom zipline.assets.synthetic import make_rotating_equity_info\nfrom zipline.errors import NoFurtherDataError\nfrom zipline.lib.adjustment import MULTIPLY\nfrom zipline.lib.labelarray import LabelArray\nfrom zipline.pipeline import CustomFactor, Pipeline\nfrom zipline.pipeline.data import (\n Column, DataSet, EquityPricing, USEquityPricing, CNEquityPricing,\n)\nfrom zipline.pipeline.data.testing import TestingDataSet\nfrom zipline.pipeline.domain import (\n EquitySessionDomain,\n GENERIC,\n JP_EQUITIES,\n CN_EQUITIES,\n US_EQUITIES,\n)\nfrom zipline.pipeline.engine import SimplePipelineEngine\nfrom zipline.pipeline.factors import (\n AverageDollarVolume,\n EWMA,\n EWMSTD,\n ExponentialWeightedMovingAverage,\n ExponentialWeightedMovingStdDev,\n MaxDrawdown,\n SimpleMovingAverage,\n)\nfrom zipline.pipeline.filters import CustomFilter\nfrom zipline.pipeline.loaders.equity_pricing_loader import (\n EquityPricingLoader,\n)\nfrom zipline.pipeline.loaders.frame import DataFrameLoader\nfrom zipline.pipeline.loaders.synthetic import (\n PrecomputedLoader,\n make_bar_data,\n expected_bar_values_2d,\n)\nfrom zipline.pipeline.sentinels import NotSpecified\nfrom zipline.pipeline.term import InputDates\nfrom zipline.testing import (\n AssetID,\n AssetIDPlusDay,\n check_arrays,\n make_alternating_boolean_array,\n make_cascading_boolean_array,\n OpenPrice,\n parameter_space,\n product_upper_triangle,\n)\nimport zipline.testing.fixtures as zf\nfrom zipline.utils.exploding_object import NamedExplodingObject\nfrom zipline.testing.core import create_simple_domain\nfrom zipline.testing.predicates import assert_equal\nfrom zipline.utils.memoize import lazyval\nfrom zipline.utils.numpy_utils import bool_dtype, datetime64ns_dtype\nfrom zipline.utils.pandas_utils import new_pandas, skip_pipeline_new_pandas\nfrom trading_calendars import get_calendar\n\n\ndef trading_sessions(start, end):\n \"\"\"期间交易日\"\"\"\n calendar = get_calendar('XSHG')\n sessions = calendar.sessions_in_range(start, end)\n if len(sessions) < 1:\n raise ValueError(f\"期间{start} ~ {end} 无交易日\")\n return sessions\n\n\nclass RollingSumDifference(CustomFactor):\n window_length = 3\n inputs = [EquityPricing.open, EquityPricing.close]\n\n def compute(self, today, assets, out, open, close):\n out[:] = (open - close).sum(axis=0)\n\n\nclass MultipleOutputs(CustomFactor):\n window_length = 1\n inputs = [EquityPricing.open, EquityPricing.close]\n outputs = ['open', 'close']\n\n def compute(self, today, assets, out, open, close):\n out.open[:] = open\n out.close[:] = close\n\n\nclass OpenCloseSumAndDiff(CustomFactor):\n \"\"\"\n Used for testing a CustomFactor with multiple outputs operating over a non-\n trivial window length.\n \"\"\"\n inputs = [EquityPricing.open, EquityPricing.close]\n\n def compute(self, 
today, assets, out, open, close):\n out.sum_[:] = open.sum(axis=0) + close.sum(axis=0)\n out.diff[:] = open.sum(axis=0) - close.sum(axis=0)\n\n\ndef assert_multi_index_is_product(testcase, index, *levels):\n \"\"\"Assert that a MultiIndex contains the product of `*levels`.\"\"\"\n testcase.assertIsInstance(\n index, MultiIndex, \"%s is not a MultiIndex\" % index\n )\n testcase.assertEqual(set(index), set(product(*levels)))\n\n\nclass ColumnArgs(tuple):\n \"\"\"A tuple of Columns that defines equivalence based on the order of the\n columns' DataSets, instead of the columns themselves. This is used when\n comparing the columns passed to a loader's load_adjusted_array method,\n since we want to assert that they are ordered by DataSet.\n \"\"\"\n def __new__(cls, *cols):\n return super(ColumnArgs, cls).__new__(cls, cols)\n\n @classmethod\n def sorted_by_ds(cls, *cols):\n return cls(*sorted(cols, key=lambda col: col.dataset))\n\n def by_ds(self):\n return tuple(col.dataset for col in self)\n\n def __eq__(self, other):\n return set(self) == set(other) and self.by_ds() == other.by_ds()\n\n def __hash__(self):\n return hash(frozenset(self))\n\n\nclass RecordingPrecomputedLoader(PrecomputedLoader):\n def __init__(self, *args, **kwargs):\n super(RecordingPrecomputedLoader, self).__init__(*args, **kwargs)\n\n self.load_calls = []\n\n def load_adjusted_array(self, domain, columns, dates, sids, mask):\n self.load_calls.append(ColumnArgs(*columns))\n\n return super(RecordingPrecomputedLoader, self).load_adjusted_array(\n domain, columns, dates, sids, mask,\n )\n\n\nclass RollingSumSum(CustomFactor):\n def compute(self, today, assets, out, *inputs):\n assert len(self.inputs) == len(inputs)\n out[:] = sum(inputs).sum(axis=0)\n\n\nclass WithConstantInputs(zf.WithAssetFinder):\n asset_ids = ASSET_FINDER_EQUITY_SIDS = 1, 2, 3, 4\n START_DATE = Timestamp('2014-01-02', tz='utc')\n END_DATE = Timestamp('2014-03-01', tz='utc')\n ASSET_FINDER_COUNTRY_CODE = 'CN'\n\n @classmethod\n def init_class_fixtures(cls):\n super(WithConstantInputs, cls).init_class_fixtures()\n cls.domain = create_simple_domain(\n start=cls.START_DATE,\n end=cls.END_DATE,\n country_code=cls.ASSET_FINDER_COUNTRY_CODE,\n )\n cls.constants = {\n # Every day, assume every stock starts at 2, goes down to 1,\n # goes up to 4, and finishes at 3.\n EquityPricing.low: 1,\n EquityPricing.open: 2,\n EquityPricing.close: 3,\n EquityPricing.high: 4,\n }\n\n cls.dates = date_range(\n cls.START_DATE,\n cls.END_DATE,\n freq='D',\n tz='UTC',\n )\n cls.loader = PrecomputedLoader(\n constants=cls.constants,\n dates=cls.dates,\n sids=cls.asset_ids,\n )\n cls.assets = cls.asset_finder.retrieve_all(cls.asset_ids)\n cls.engine = SimplePipelineEngine(\n lambda c: cls.loader,\n cls.asset_finder,\n default_domain=cls.domain\n )\n\n\nclass ConstantInputTestCase(WithConstantInputs,\n zf.WithAssetFinder,\n zf.WithTradingCalendars,\n zf.ZiplineTestCase):\n\n def test_bad_dates(self):\n p = Pipeline()\n\n msg = \"start_date must be before or equal to end_date .*\"\n with self.assertRaisesRegex(ValueError, msg):\n self.engine.run_pipeline(p, self.dates[2], self.dates[1])\n\n def test_fail_usefully_on_insufficient_data(self):\n class SomeFactor(CustomFactor):\n inputs = [EquityPricing.close]\n window_length = 10\n\n def compute(self, today, assets, out, closes):\n pass\n\n p = Pipeline(columns={'t': SomeFactor()})\n\n # self.dates[9] is the earliest date we should be able to compute.\n # 2014-01-11 周末为非交易日\n # 调整为 2014-01-13 即dates[10]\n # 违背测试本意\n 
self.engine.run_pipeline(p, self.dates[10], self.dates[10])\n\n # We shouldn't be able to compute dates[8], since we only know about 8\n # prior dates, and we need a window length of 10.\n with self.assertRaises(NoFurtherDataError):\n self.engine.run_pipeline(p, self.dates[8], self.dates[8])\n\n def test_input_dates_provided_by_default(self):\n\n class TestFactor(CustomFactor):\n inputs = [InputDates(), EquityPricing.close]\n window_length = 10\n dtype = datetime64ns_dtype\n\n def compute(self, today, assets, out, dates, closes):\n first, last = dates[[0, -1], 0]\n assert last == today.asm8\n assert len(dates) == len(closes) == self.window_length\n out[:] = first\n\n p = Pipeline(columns={'t': TestFactor()})\n results = self.engine.run_pipeline(p, self.dates[9], self.dates[10])\n\n # All results are the same, so just grab one column.\n column = results.unstack().iloc[:, 0].values\n check_arrays(column, self.dates[:2].values)\n\n def test_same_day_pipeline(self):\n factor = AssetID()\n asset = self.asset_ids[0]\n p = Pipeline(columns={'f': factor}, screen=factor <= asset)\n\n # The crux of this is that when we run the pipeline for a single day\n # (i.e. start and end dates are the same) we should accurately get\n # data for the day prior.\n result = self.engine.run_pipeline(p, self.dates[1], self.dates[1])\n self.assertEqual(result['f'][0], 1.0)\n\n def test_screen(self):\n asset_ids = array(self.asset_ids)\n num_dates = 5\n dates = self.dates[10:10 + num_dates]\n\n factor = AssetID()\n for asset_id in asset_ids:\n p = Pipeline(columns={'f': factor}, screen=factor <= asset_id)\n result = self.engine.run_pipeline(p, dates[0], dates[-1])\n\n expected_sids = asset_ids[asset_ids <= asset_id]\n expected_assets = self.asset_finder.retrieve_all(expected_sids)\n expected_result = DataFrame(\n index=MultiIndex.from_product([dates, expected_assets]),\n data=tile(expected_sids.astype(float), [len(dates)]),\n columns=['f'],\n )\n expected_result.index.set_names(\n ['datetime', 'asset'], inplace=True)\n\n assert_frame_equal(result, expected_result)\n\n def test_single_factor(self):\n assets = self.assets\n result_shape = (num_dates, num_assets) = (5, len(assets))\n dates = self.dates[10:10 + num_dates]\n\n factor = RollingSumDifference()\n expected_result = -factor.window_length\n\n # Since every asset will pass the screen, these should be equivalent.\n pipelines = [\n Pipeline(columns={'f': factor}),\n Pipeline(\n columns={'f': factor},\n screen=factor.eq(expected_result),\n ),\n ]\n\n for p in pipelines:\n result = self.engine.run_pipeline(p, dates[0], dates[-1])\n self.assertEqual(set(result.columns), {'f'})\n assert_multi_index_is_product(\n self, result.index, dates, assets\n )\n\n check_arrays(\n result['f'].unstack().values,\n full(result_shape, expected_result, dtype=float),\n )\n\n def test_multiple_rolling_factors(self):\n assets = self.assets\n\n shape = num_dates, num_assets = (5, len(assets))\n dates = self.dates[10:10 + num_dates]\n\n short_factor = RollingSumDifference(window_length=3)\n long_factor = RollingSumDifference(window_length=5)\n high_factor = RollingSumDifference(\n window_length=3,\n inputs=[EquityPricing.open, EquityPricing.high],\n )\n\n pipeline = Pipeline(\n columns={\n 'short': short_factor,\n 'long': long_factor,\n 'high': high_factor,\n }\n )\n results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])\n\n self.assertEqual(set(results.columns), {'short', 'high', 'long'})\n assert_multi_index_is_product(\n self, results.index, dates, assets\n )\n\n # row-wise sum 
over an array whose values are all (1 - 2)\n check_arrays(\n results['short'].unstack().values,\n full(shape, -short_factor.window_length, dtype=float),\n )\n check_arrays(\n results['long'].unstack().values,\n full(shape, -long_factor.window_length, dtype=float),\n )\n # row-wise sum over an array whose values are all (1 - 3)\n check_arrays(\n results['high'].unstack().values,\n full(shape, -2 * high_factor.window_length, dtype=float),\n )\n\n def test_numeric_factor(self):\n constants = self.constants\n num_dates = 5\n dates = self.dates[10:10 + num_dates]\n high, low = EquityPricing.high, EquityPricing.low\n open, close = EquityPricing.open, EquityPricing.close\n\n high_minus_low = RollingSumDifference(inputs=[high, low])\n open_minus_close = RollingSumDifference(inputs=[open, close])\n avg = (high_minus_low + open_minus_close) / 2\n\n results = self.engine.run_pipeline(\n Pipeline(\n columns={\n 'high_low': high_minus_low,\n 'open_close': open_minus_close,\n 'avg': avg,\n },\n ),\n dates[0],\n dates[-1],\n )\n\n high_low_result = results['high_low'].unstack()\n high_low_result.columns.name = None\n expected_high_low = 3.0 * (constants[high] - constants[low])\n expected_high_low_df = DataFrame(\n expected_high_low, index=dates, columns=self.assets)\n expected_high_low_df.index.set_names(['datetime'], inplace=True)\n assert_frame_equal(\n high_low_result,\n expected_high_low_df,\n )\n\n open_close_result = results['open_close'].unstack()\n open_close_result.columns.name = None\n expected_open_close = 3.0 * (constants[open] - constants[close])\n expected_open_close_df = DataFrame(\n expected_open_close, index=dates, columns=self.assets)\n expected_open_close_df.index.set_names(['datetime'], inplace=True)\n assert_frame_equal(\n open_close_result,\n expected_open_close_df,\n )\n\n avg_result = results['avg'].unstack()\n avg_result.columns.name = None\n expected_avg = (expected_high_low + expected_open_close) / 2.0\n expected_avg_df = DataFrame(\n expected_avg, index=dates, columns=self.assets)\n expected_avg_df.index.set_names(['datetime'], inplace=True)\n assert_frame_equal(\n avg_result,\n expected_avg_df,\n )\n\n def test_masked_factor(self):\n \"\"\"\n Test that a Custom Factor computes the correct values when passed a\n mask. The mask/filter should be applied prior to computing any values,\n as opposed to computing the factor across the entire universe of\n assets. 
Any assets that are filtered out should be filled with missing\n values.\n \"\"\"\n dates = self.dates[5:8]\n assets = self.assets\n asset_ids = self.asset_ids\n constants = self.constants\n num_dates = len(dates)\n num_assets = len(assets)\n open = EquityPricing.open\n close = EquityPricing.close\n\n factor1_value = constants[open]\n factor2_value = 3.0 * (constants[open] - constants[close])\n\n def create_expected_results(expected_value, mask):\n expected_values = where(mask, expected_value, nan)\n df = DataFrame(expected_values, index=dates, columns=assets)\n df.index.set_names(['datetime'], inplace=True)\n return df\n\n cascading_mask = AssetIDPlusDay() < (asset_ids[-1] + dates[0].day)\n expected_cascading_mask_result = make_cascading_boolean_array(\n shape=(num_dates, num_assets),\n )\n # 🆗 更改为1\n alternating_mask = (AssetIDPlusDay() % 2).eq(1)\n expected_alternating_mask_result = make_alternating_boolean_array(\n shape=(num_dates, num_assets), first_value=False,\n )\n\n masks = cascading_mask, alternating_mask\n expected_mask_results = (\n expected_cascading_mask_result,\n expected_alternating_mask_result,\n )\n for mask, expected_mask in zip(masks, expected_mask_results): \n # Test running a pipeline with a single masked factor.\n columns = {'factor1': OpenPrice(mask=mask), 'mask': mask}\n pipeline = Pipeline(columns=columns)\n results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])\n\n mask_results = results['mask'].unstack()\n check_arrays(mask_results.values, expected_mask)\n\n factor1_results = results['factor1'].unstack()\n factor1_results.columns.name = None\n factor1_expected = create_expected_results(factor1_value,\n mask_results)\n assert_frame_equal(factor1_results, factor1_expected)\n\n # Test running a pipeline with a second factor. 
This ensures that\n # adding another factor to the pipeline with a different window\n # length does not cause any unexpected behavior, especially when\n # both factors share the same mask.\n columns['factor2'] = RollingSumDifference(mask=mask)\n pipeline = Pipeline(columns=columns)\n results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])\n\n mask_results = results['mask'].unstack()\n check_arrays(mask_results.values, expected_mask)\n\n factor1_results = results['factor1'].unstack()\n factor2_results = results['factor2'].unstack()\n # 🆗 设置列对象名称\n factor1_results.columns.name = None\n factor2_results.columns.name = None\n factor1_expected = create_expected_results(factor1_value,\n mask_results)\n factor2_expected = create_expected_results(factor2_value,\n mask_results)\n assert_frame_equal(factor1_results, factor1_expected)\n assert_frame_equal(factor2_results, factor2_expected)\n\n def test_rolling_and_nonrolling(self):\n open_ = EquityPricing.open\n close = EquityPricing.close\n volume = EquityPricing.volume\n\n # Test for thirty days up to the last day that we think all\n # the assets existed.\n dates_to_test = self.dates[-30:]\n\n constants = {\n open_: 1,\n close: 2,\n volume: 3,\n }\n loader = PrecomputedLoader(\n constants=constants,\n dates=self.dates,\n sids=self.asset_ids,\n )\n engine = SimplePipelineEngine(lambda column: loader, self.asset_finder)\n\n sumdiff = RollingSumDifference()\n\n result = engine.run_pipeline(\n Pipeline(\n columns={\n 'sumdiff': sumdiff,\n 'open': open_.latest,\n 'close': close.latest,\n 'volume': volume.latest,\n },\n domain=self.domain,\n ),\n dates_to_test[0],\n dates_to_test[-1]\n )\n self.assertIsNotNone(result)\n self.assertEqual(\n {'sumdiff', 'open', 'close', 'volume'},\n set(result.columns)\n )\n\n result_index = self.asset_ids * len(dates_to_test)\n result_shape = (len(result_index),)\n check_arrays(\n result['sumdiff'],\n Series(\n index=result_index,\n data=full(result_shape, -3, dtype=float),\n ),\n )\n\n for name, const in [('open', 1), ('close', 2), ('volume', 3)]:\n check_arrays(\n result[name],\n Series(\n index=result_index,\n data=full(result_shape, const, dtype=float),\n ),\n )\n\n def test_factor_with_single_output(self):\n \"\"\"\n Test passing an `outputs` parameter of length 1 to a CustomFactor.\n \"\"\"\n # 🆗 包含周末\n dates = self.dates[5:12]\n assets = self.assets\n num_dates = len(dates)\n open = EquityPricing.open\n open_values = [self.constants[open]] * num_dates\n open_values_as_tuple = [(self.constants[open],)] * num_dates\n\n single_output = OpenPrice(outputs=['open'])\n pipeline = Pipeline(\n columns={\n 'open_instance': single_output,\n 'open_attribute': single_output.open,\n },\n )\n results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])\n\n # The instance `single_output` itself will compute a numpy.recarray\n # when added as a column to our pipeline, so we expect its output\n # values to be 1-tuples.\n open_instance_expected = {\n asset: open_values_as_tuple for asset in assets\n }\n open_attribute_expected = {asset: open_values for asset in assets}\n\n for colname, expected_values in (\n ('open_instance', open_instance_expected),\n ('open_attribute', open_attribute_expected)):\n print(colname)\n column_results = results[colname].unstack()\n expected_results = DataFrame(\n expected_values, index=dates, columns=assets, dtype=float64,\n )\n expected_results.index.set_names(['datetime'], inplace=True)\n column_results.columns.name = None\n assert_frame_equal(column_results, expected_results)\n\n def 
test_factor_with_multiple_outputs(self):\n dates = self.dates[5:12]\n assets = self.assets\n asset_ids = self.asset_ids\n constants = self.constants\n num_dates = len(dates)\n num_assets = len(assets)\n open = EquityPricing.open\n close = EquityPricing.close\n\n def create_expected_results(expected_value, mask):\n expected_values = where(mask, expected_value, nan)\n df = DataFrame(expected_values, index=dates, columns=assets)\n df.index.set_names(['datetime'], inplace=True)\n return df\n\n cascading_mask = AssetIDPlusDay() < (asset_ids[-1] + dates[0].day)\n expected_cascading_mask_result = make_cascading_boolean_array(\n shape=(num_dates, num_assets),\n )\n\n alternating_mask = (AssetIDPlusDay() % 2).eq(1)\n expected_alternating_mask_result = make_alternating_boolean_array(\n shape=(num_dates, num_assets), first_value=False,\n )\n\n expected_no_mask_result = full(\n shape=(num_dates, num_assets), fill_value=True, dtype=bool_dtype,\n )\n\n masks = cascading_mask, alternating_mask, NotSpecified\n expected_mask_results = (\n expected_cascading_mask_result,\n expected_alternating_mask_result,\n expected_no_mask_result,\n )\n for mask, expected_mask in zip(masks, expected_mask_results):\n open_price, close_price = MultipleOutputs(mask=mask)\n pipeline = Pipeline(\n columns={'open_price': open_price, 'close_price': close_price},\n )\n if mask is not NotSpecified:\n pipeline.add(mask, 'mask')\n\n results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])\n for colname, case_column in (('open_price', open),\n ('close_price', close)):\n if mask is not NotSpecified:\n mask_results = results['mask'].unstack()\n check_arrays(mask_results.values, expected_mask)\n output_results = results[colname].unstack()\n output_expected = create_expected_results(\n constants[case_column], expected_mask,\n )\n output_results.columns.name = None\n assert_frame_equal(output_results, output_expected)\n\n def test_instance_of_factor_with_multiple_outputs(self):\n \"\"\"\n Test adding a CustomFactor instance, which has multiple outputs, as a\n pipeline column directly. 
Its computed values should be tuples\n containing the computed values of each of its outputs.\n \"\"\"\n # 🆗 包含周末\n dates = self.dates[5:12]\n assets = self.assets\n num_dates = len(dates)\n num_assets = len(assets)\n constants = self.constants\n\n open_values = [constants[EquityPricing.open]] * num_assets\n close_values = [constants[EquityPricing.close]] * num_assets\n expected_values = [list(zip(open_values, close_values))] * num_dates\n expected_results = DataFrame(\n expected_values, index=dates, columns=assets, dtype=float64,\n )\n expected_results.index.set_names(['datetime'], inplace=True)\n\n multiple_outputs = MultipleOutputs()\n pipeline = Pipeline(columns={'instance': multiple_outputs})\n results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])\n instance_results = results['instance'].unstack()\n instance_results.columns.name = None\n assert_frame_equal(instance_results, expected_results)\n\n def test_custom_factor_outputs_parameter(self):\n # 🆗 中间包含周末\n dates = self.dates[5:12]\n assets = self.assets\n num_dates = len(dates)\n num_assets = len(assets)\n constants = self.constants\n\n def create_expected_results(expected_value):\n expected_values = full(\n (num_dates, num_assets), expected_value, float64,\n )\n df = DataFrame(expected_values, index=dates, columns=assets)\n df.index.set_names(['datetime'], inplace=True)\n return df\n\n for window_length in range(1, 3):\n sum_, diff = OpenCloseSumAndDiff(\n outputs=['sum_', 'diff'], window_length=window_length,\n )\n pipeline = Pipeline(columns={'sum_': sum_, 'diff': diff})\n results = self.engine.run_pipeline(\n pipeline, dates[0], dates[-1])\n for colname, op in ('sum_', add), ('diff', sub):\n output_results = results[colname].unstack()\n output_results.columns.name = None\n output_expected = create_expected_results(\n op(\n constants[EquityPricing.open] * window_length,\n constants[EquityPricing.close] * window_length,\n )\n )\n assert_frame_equal(output_results, output_expected)\n\n def test_loader_given_multiple_columns(self):\n\n class Loader1DataSet1(DataSet):\n col1 = Column(float)\n col2 = Column(float32)\n domain = self.domain\n\n class Loader1DataSet2(DataSet):\n col1 = Column(float32)\n col2 = Column(float32)\n domain = self.domain\n\n class Loader2DataSet(DataSet):\n col1 = Column(float32)\n col2 = Column(float32)\n domain = self.domain\n\n constants1 = {Loader1DataSet1.col1: 1,\n Loader1DataSet1.col2: 2,\n Loader1DataSet2.col1: 3,\n Loader1DataSet2.col2: 4}\n\n loader1 = RecordingPrecomputedLoader(constants=constants1,\n dates=self.dates,\n sids=self.assets)\n constants2 = {Loader2DataSet.col1: 5,\n Loader2DataSet.col2: 6}\n loader2 = RecordingPrecomputedLoader(constants=constants2,\n dates=self.dates,\n sids=self.assets)\n\n engine = SimplePipelineEngine(\n lambda column:\n loader2 if column.dataset == Loader2DataSet else loader1,\n self.asset_finder,\n )\n\n pipe_col1 = RollingSumSum(inputs=[Loader1DataSet1.col1,\n Loader1DataSet2.col1,\n Loader2DataSet.col1],\n window_length=2)\n\n pipe_col2 = RollingSumSum(inputs=[Loader1DataSet1.col2,\n Loader1DataSet2.col2,\n Loader2DataSet.col2],\n window_length=3)\n\n pipe_col3 = RollingSumSum(inputs=[Loader2DataSet.col1],\n window_length=3)\n\n columns = OrderedDict([\n ('pipe_col1', pipe_col1),\n ('pipe_col2', pipe_col2),\n ('pipe_col3', pipe_col3),\n ])\n result = engine.run_pipeline(\n Pipeline(columns=columns, domain=self.domain),\n self.dates[2], # index is >= the largest window length - 1\n self.dates[-1]\n )\n min_window = min(pip_col.window_length\n for 
pip_col in itervalues(columns))\n col_to_val = ChainMap(constants1, constants2)\n vals = {name: (sum(col_to_val[col] for col in pipe_col.inputs)\n * pipe_col.window_length)\n for name, pipe_col in iteritems(columns)}\n\n index = MultiIndex.from_product([self.dates[2:], self.assets])\n\n def expected_for_col(col):\n val = vals[col]\n offset = columns[col].window_length - min_window\n return concatenate(\n [\n full(offset * index.levshape[1], nan),\n full(\n (index.levshape[0] - offset) * index.levshape[1],\n val,\n float,\n )\n ],\n )\n\n expected = DataFrame(\n data={col: expected_for_col(col) for col in vals},\n index=index,\n columns=columns,\n )\n expected.index.set_names(['datetime', 'asset'], inplace=True)\n\n assert_frame_equal(result, expected)\n\n self.assertEqual(set(loader1.load_calls),\n {ColumnArgs.sorted_by_ds(Loader1DataSet1.col1,\n Loader1DataSet2.col1),\n ColumnArgs.sorted_by_ds(Loader1DataSet1.col2,\n Loader1DataSet2.col2)})\n self.assertEqual(set(loader2.load_calls),\n {ColumnArgs.sorted_by_ds(Loader2DataSet.col1,\n Loader2DataSet.col2)})\n\n\n# Use very large sids that don't fit in an int32 as a regression test against\n# bugs with 32 bit integer overflow in the adjustment reader.\nHUGE_SID = np.iinfo('int32').max + 1\n\n\nclass FrameInputTestCase(zf.WithAssetFinder,\n zf.WithTradingCalendars,\n zf.ZiplineTestCase):\n asset_ids = ASSET_FINDER_EQUITY_SIDS = range(HUGE_SID, HUGE_SID + 3)\n start = START_DATE = Timestamp('2015-01-01', tz='utc')\n end = END_DATE = Timestamp('2015-01-31', tz='utc')\n ASSET_FINDER_COUNTRY_CODE = 'CN'\n\n @classmethod\n def init_class_fixtures(cls):\n super(FrameInputTestCase, cls).init_class_fixtures()\n cls.dates = date_range(\n cls.start,\n cls.end,\n freq=cls.trading_calendar.day,\n tz='UTC',\n )\n cls.assets = cls.asset_finder.retrieve_all(cls.asset_ids)\n cls.domain = CN_EQUITIES\n\n @lazyval\n def base_mask(self):\n return self.make_frame(True)\n\n def make_frame(self, data):\n return DataFrame(data, columns=self.assets, index=self.dates)\n\n def test_compute_with_adjustments(self):\n dates, asset_ids = self.dates, self.asset_ids\n low, high = CNEquityPricing.low, CNEquityPricing.high\n apply_idxs = [3, 10, 16]\n\n def apply_date(idx, offset=0):\n return dates[apply_idxs[idx] + offset]\n\n adjustments = DataFrame.from_records(\n [\n dict(\n kind=MULTIPLY,\n sid=asset_ids[1],\n value=2.0,\n start_date=None,\n end_date=apply_date(0, offset=-1),\n apply_date=apply_date(0),\n ),\n dict(\n kind=MULTIPLY,\n sid=asset_ids[1],\n value=3.0,\n start_date=None,\n end_date=apply_date(1, offset=-1),\n apply_date=apply_date(1),\n ),\n dict(\n kind=MULTIPLY,\n sid=asset_ids[1],\n value=5.0,\n start_date=None,\n end_date=apply_date(2, offset=-1),\n apply_date=apply_date(2),\n ),\n ]\n )\n low_base = DataFrame(self.make_frame(30.0))\n low_base.index.set_names(['datetime'], inplace=True)\n low_loader = DataFrameLoader(low, low_base.copy(), adjustments=None)\n\n # Pre-apply inverse of adjustments to the baseline.\n high_base = DataFrame(self.make_frame(30.0))\n high_base.iloc[:apply_idxs[0], 1] /= 2.0\n high_base.iloc[:apply_idxs[1], 1] /= 3.0\n high_base.iloc[:apply_idxs[2], 1] /= 5.0\n\n high_loader = DataFrameLoader(high, high_base, adjustments)\n\n # Dispatch uses the concrete specializations, not generic columns.\n get_loader = {\n CNEquityPricing.low: low_loader,\n CNEquityPricing.high: high_loader\n }.__getitem__\n\n engine = SimplePipelineEngine(get_loader, self.asset_finder)\n\n for window_length in range(1, 4):\n low_mavg = SimpleMovingAverage(\n inputs=[CNEquityPricing.low],\n window_length=window_length,\n )\n high_mavg = SimpleMovingAverage(\n inputs=[CNEquityPricing.high],\n window_length=window_length,\n )\n bounds = product_upper_triangle(range(window_length, len(dates)))\n for start, stop in bounds:\n results = engine.run_pipeline(\n Pipeline(\n columns={'low': low_mavg, 'high': high_mavg},\n domain=self.domain,\n ),\n dates[start],\n dates[stop],\n )\n self.assertEqual(set(results.columns), {'low', 'high'})\n iloc_bounds = slice(start, stop + 1) # +1 to include end date\n\n low_results = results.unstack()['low']\n low_results.columns.name = None\n assert_frame_equal(low_results, low_base.iloc[iloc_bounds])\n\n high_results = results.unstack()['high']\n high_results.columns.name = None\n assert_frame_equal(high_results, high_base.iloc[iloc_bounds])\n\n\nclass SyntheticBcolzTestCase(zf.WithAdjustmentReader,\n zf.WithAssetFinder,\n zf.ZiplineTestCase):\n first_asset_start = Timestamp('2015-04-01', tz='UTC')\n START_DATE = Timestamp('2015-01-01', tz='utc')\n END_DATE = Timestamp('2015-08-01', tz='utc')\n\n @classmethod\n def make_equity_info(cls):\n cls.equity_info = ret = make_rotating_equity_info(\n num_assets=6,\n first_start=cls.first_asset_start,\n frequency=cls.trading_calendar.day,\n periods_between_starts=4,\n asset_lifetime=8,\n exchange='XSHG',\n )\n return ret\n\n @classmethod\n def make_exchanges_info(cls, *args, **kwargs):\n return DataFrame({'exchange': ['XSHG'], 'country_code': ['CN']})\n\n @classmethod\n def make_equity_daily_bar_data(cls, country_code, sids):\n return make_bar_data(\n cls.equity_info,\n cls.equity_daily_bar_days,\n )\n\n @classmethod\n def init_class_fixtures(cls):\n super(SyntheticBcolzTestCase, cls).init_class_fixtures()\n cls.all_asset_ids = cls.asset_finder.sids\n cls.last_asset_end = cls.equity_info['end_date'].max()\n cls.pipeline_loader = EquityPricingLoader.without_fx(\n cls.bcolz_equity_daily_bar_reader,\n cls.adjustment_reader,\n )\n cls.engine = SimplePipelineEngine(\n lambda c: cls.pipeline_loader,\n cls.asset_finder,\n default_domain=CN_EQUITIES,\n )\n\n def write_nans(self, df):\n \"\"\"\n Write nans to the locations in data corresponding to the (date, asset)\n pairs for which we wouldn't have data for `asset` on `date` in a\n backtest.\n\n Parameters\n ----------\n df : pd.DataFrame\n A DataFrame with a DatetimeIndex as index and an object index of\n Assets as columns.\n\n This means that we write nans for dates after an asset's end_date and\n **on or before** an asset's start_date. The asymmetry here is because
The assymetry here is because\n of the fact that, on the morning of an asset's first date, we haven't\n yet seen any trades for that asset, so we wouldn't be able to show any\n useful data to the user.\n \"\"\"\n # Mask out with nans all the dates on which each asset didn't exist\n index = df.index\n min_, max_ = index[[0, -1]]\n for asset in df.columns:\n if asset.start_date >= min_:\n start = index.get_loc(asset.start_date, method='bfill')\n df.loc[:start + 1, asset] = nan # +1 to overwrite start_date\n if asset.end_date <= max_:\n end = index.get_loc(asset.end_date)\n df.loc[end + 1:, asset] = nan # +1 to *not* overwrite end_date\n\n def test_SMA(self):\n window_length = 5\n asset_ids = self.all_asset_ids\n dates = date_range(\n self.first_asset_start + self.trading_calendar.day,\n self.last_asset_end,\n freq=self.trading_calendar.day,\n )\n dates_to_test = dates[window_length:]\n\n SMA = SimpleMovingAverage(\n inputs=(EquityPricing.close,),\n window_length=window_length,\n )\n\n results = self.engine.run_pipeline(\n Pipeline(columns={'sma': SMA}),\n dates_to_test[0],\n dates_to_test[-1],\n )\n\n # Shift back the raw inputs by a trading day because we expect our\n # computed results to be computed using values anchored on the\n # **previous** day's data.\n expected_raw = DataFrame(\n expected_bar_values_2d(\n dates - self.trading_calendar.day,\n asset_ids,\n self.equity_info,\n 'close',\n ),\n ).rolling(window_length, min_periods=1).mean().values\n\n expected = DataFrame(\n # Truncate off the extra rows needed to compute the SMAs.\n expected_raw[window_length:],\n index=dates_to_test, # dates_to_test is dates[window_length:]\n columns=self.asset_finder.retrieve_all(asset_ids),\n )\n self.write_nans(expected)\n result = results['sma'].unstack()\n expected.index.set_names(['datetime'], inplace=True)\n result.columns.name = None\n assert_frame_equal(result, expected)\n\n def test_drawdown(self):\n # The monotonically-increasing data produced by SyntheticDailyBarWriter\n # exercises two pathological cases for MaxDrawdown. 
The actual\n # computed results are pretty much useless (everything is either NaN\n # or zero), but verifying we correctly handle those corner cases is\n # valuable.\n window_length = 5\n asset_ids = self.all_asset_ids\n dates = date_range(\n self.first_asset_start + self.trading_calendar.day,\n self.last_asset_end,\n freq=self.trading_calendar.day,\n )\n dates_to_test = dates[window_length:]\n\n drawdown = MaxDrawdown(\n inputs=(EquityPricing.close,),\n window_length=window_length,\n )\n\n results = self.engine.run_pipeline(\n Pipeline(columns={'drawdown': drawdown}),\n dates_to_test[0],\n dates_to_test[-1],\n )\n\n # We expect NaNs when the asset was undefined, otherwise 0 everywhere,\n # since the input is always increasing.\n expected = DataFrame(\n data=zeros((len(dates_to_test), len(asset_ids)), dtype=float),\n index=dates_to_test,\n columns=self.asset_finder.retrieve_all(asset_ids),\n )\n self.write_nans(expected)\n result = results['drawdown'].unstack()\n expected.index.set_names(['datetime'], inplace=True)\n result.columns.name = None\n assert_frame_equal(expected, result)\n\n\nclass ParameterizedFactorTestCase(zf.WithAssetFinder,\n zf.WithTradingCalendars,\n zf.ZiplineTestCase):\n sids = ASSET_FINDER_EQUITY_SIDS = Int64Index([1, 2, 3])\n START_DATE = Timestamp('2015-01-31', tz='UTC')\n END_DATE = Timestamp('2015-03-01', tz='UTC')\n ASSET_FINDER_COUNTRY_CODE = 'CN'\n\n @classmethod\n def init_class_fixtures(cls):\n super(ParameterizedFactorTestCase, cls).init_class_fixtures()\n day = cls.trading_calendar.day\n\n cls.dates = dates = date_range(\n '2015-02-01',\n '2015-02-28',\n freq=day,\n tz='UTC',\n )\n sids = cls.sids\n\n cls.raw_data = DataFrame(\n data=arange(len(dates) * len(sids), dtype=float).reshape(\n len(dates), len(sids),\n ),\n index=dates,\n columns=cls.asset_finder.retrieve_all(sids),\n )\n cls.raw_data_with_nans = cls.raw_data.where((cls.raw_data % 2) != 0)\n\n open_loader = DataFrameLoader(\n EquityPricing.open,\n cls.raw_data_with_nans,\n )\n close_loader = DataFrameLoader(EquityPricing.close, cls.raw_data)\n volume_loader = DataFrameLoader(\n EquityPricing.volume,\n cls.raw_data * 2,\n )\n\n loader_map = {\n EquityPricing.open: open_loader,\n EquityPricing.close: close_loader,\n EquityPricing.volume: volume_loader,\n }\n\n def get_loader(c):\n return loader_map[c.unspecialize()]\n\n cls.engine = SimplePipelineEngine(\n get_loader,\n cls.asset_finder,\n default_domain=EquitySessionDomain(cls.dates, 'CN'),\n )\n\n def expected_ewma(self, window_length, decay_rate):\n alpha = 1 - decay_rate\n span = (2 / alpha) - 1\n\n # XXX: This is a comically inefficient way to compute a windowed EWMA.\n # Don't use it outside of testing. We're using rolling-apply of an\n # ewma (which is itself a rolling-window function) because we only want\n # to look at ``window_length`` rows at a time.\n return self.raw_data.rolling(window_length).apply(\n lambda subarray: (DataFrame(subarray)\n .ewm(span=span)\n .mean()\n .values[-1])\n )[window_length:]\n\n def expected_ewmstd(self, window_length, decay_rate):\n alpha = 1 - decay_rate\n span = (2 / alpha) - 1\n\n # XXX: This is a comically inefficient way to compute a windowed\n # EWMSTD. Don't use it outside of testing.
We're using rolling-apply\n # of an ewma (which is itself a rolling-window function) because we\n # only want to look at ``window_length`` rows at a time.\n return self.raw_data.rolling(window_length).apply(\n lambda subarray: (DataFrame(subarray)\n .ewm(span=span)\n .std()\n .values[-1])\n )[window_length:]\n\n @parameterized.expand([\n (3,),\n (5,),\n ])\n def test_ewm_stats(self, window_length):\n\n def ewma_name(decay_rate):\n return 'ewma_%s' % decay_rate\n\n def ewmstd_name(decay_rate):\n return 'ewmstd_%s' % decay_rate\n\n decay_rates = [0.25, 0.5, 0.75]\n ewmas = {\n ewma_name(decay_rate): EWMA(\n inputs=(EquityPricing.close,),\n window_length=window_length,\n decay_rate=decay_rate,\n )\n for decay_rate in decay_rates\n }\n\n ewmstds = {\n ewmstd_name(decay_rate): EWMSTD(\n inputs=(EquityPricing.close,),\n window_length=window_length,\n decay_rate=decay_rate,\n )\n for decay_rate in decay_rates\n }\n\n all_results = self.engine.run_pipeline(\n Pipeline(columns=merge(ewmas, ewmstds)),\n self.dates[window_length],\n self.dates[-1],\n )\n\n for decay_rate in decay_rates:\n ewma_result = all_results[ewma_name(decay_rate)].unstack()\n ewma_expected = self.expected_ewma(window_length, decay_rate)\n ewma_expected.index.set_names(['datetime'], inplace=True)\n ewma_result.columns.name = None\n assert_frame_equal(ewma_result, ewma_expected)\n\n ewmstd_result = all_results[ewmstd_name(decay_rate)].unstack()\n ewmstd_expected = self.expected_ewmstd(window_length, decay_rate)\n ewmstd_expected.index.set_names(['datetime'], inplace=True)\n ewmstd_result.columns.name = None\n assert_frame_equal(ewmstd_result, ewmstd_expected)\n\n @staticmethod\n def decay_rate_to_span(decay_rate):\n alpha = 1 - decay_rate\n return (2 / alpha) - 1\n\n @staticmethod\n def decay_rate_to_com(decay_rate):\n alpha = 1 - decay_rate\n return (1 / alpha) - 1\n\n @staticmethod\n def decay_rate_to_halflife(decay_rate):\n return log(.5) / log(decay_rate)\n\n def ewm_cases():\n return product([EWMSTD, EWMA], [3, 5, 10])\n\n @parameterized.expand(ewm_cases())\n def test_from_span(self, type_, span):\n from_span = type_.from_span(\n inputs=[EquityPricing.close],\n window_length=20,\n span=span,\n )\n implied_span = self.decay_rate_to_span(from_span.params['decay_rate'])\n assert_almost_equal(span, implied_span)\n\n @parameterized.expand(ewm_cases())\n def test_from_halflife(self, type_, halflife):\n from_hl = EWMA.from_halflife(\n inputs=[EquityPricing.close],\n window_length=20,\n halflife=halflife,\n )\n implied_hl = self.decay_rate_to_halflife(from_hl.params['decay_rate'])\n assert_almost_equal(halflife, implied_hl)\n\n @parameterized.expand(ewm_cases())\n def test_from_com(self, type_, com):\n from_com = EWMA.from_center_of_mass(\n inputs=[EquityPricing.close],\n window_length=20,\n center_of_mass=com,\n )\n implied_com = self.decay_rate_to_com(from_com.params['decay_rate'])\n assert_almost_equal(com, implied_com)\n\n del ewm_cases\n\n def test_ewm_aliasing(self):\n self.assertIs(ExponentialWeightedMovingAverage, EWMA)\n self.assertIs(ExponentialWeightedMovingStdDev, EWMSTD)\n\n def test_dollar_volume(self):\n results = self.engine.run_pipeline(\n Pipeline(\n columns={\n 'dv1': AverageDollarVolume(window_length=1),\n 'dv5': AverageDollarVolume(window_length=5),\n 'dv1_nan': AverageDollarVolume(\n window_length=1,\n inputs=[EquityPricing.open, EquityPricing.volume],\n ),\n 'dv5_nan': AverageDollarVolume(\n window_length=5,\n inputs=[EquityPricing.open, EquityPricing.volume],\n ),\n }\n ),\n self.dates[5],\n self.dates[-1],\n 
)\n results.index.set_names([None, None], inplace=True)\n expected_1 = (self.raw_data[5:] ** 2) * 2\n actual = results['dv1'].unstack()\n assert_frame_equal(actual, expected_1)\n\n expected_5 = ((self.raw_data ** 2) * 2).rolling(5).mean()[5:]\n actual = results['dv5'].unstack()\n assert_frame_equal(actual, expected_5)\n\n # The following two use EquityPricing.open and .volume as inputs.\n # The former uses self.raw_data_with_nans, and the latter uses\n # .raw_data * 2. Thus we multiply instead of squaring as above.\n expected_1_nan = (self.raw_data_with_nans[5:]\n * self.raw_data[5:] * 2).fillna(0)\n actual = results['dv1_nan'].unstack()\n assert_frame_equal(actual, expected_1_nan)\n\n expected_5_nan = ((self.raw_data_with_nans * self.raw_data * 2)\n .fillna(0)\n .rolling(5).mean()\n [5:])\n actual = results['dv5_nan'].unstack()\n assert_frame_equal(actual, expected_5_nan)\n\n\nclass StringColumnTestCase(zf.WithSeededRandomPipelineEngine,\n zf.ZiplineTestCase):\n ASSET_FINDER_COUNTRY_CODE = 'CN'\n SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN = CN_EQUITIES\n\n @skipIf(new_pandas, skip_pipeline_new_pandas)\n def test_string_classifiers_produce_categoricals(self):\n \"\"\"\n Test that string-based classifiers produce pandas categoricals as their\n outputs.\n \"\"\"\n col = TestingDataSet.categorical_col\n pipe = Pipeline(columns={'c': col.latest})\n\n run_dates = self.trading_days[-10:]\n start_date, end_date = run_dates[[0, -1]]\n\n result = self.run_pipeline(pipe, start_date, end_date)\n assert isinstance(result.c.values, Categorical)\n\n expected_raw_data = self.raw_expected_values(\n col,\n start_date,\n end_date,\n )\n expected_labels = LabelArray(expected_raw_data, col.missing_value)\n expected_final_result = expected_labels.as_categorical_frame(\n index=run_dates,\n columns=self.asset_finder.retrieve_all(self.asset_finder.sids),\n )\n assert_frame_equal(result.c.unstack(), expected_final_result)\n\n\nclass WindowSafetyPropagationTestCase(zf.WithSeededRandomPipelineEngine,\n zf.ZiplineTestCase):\n ASSET_FINDER_COUNTRY_CODE = 'CN'\n SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN = CN_EQUITIES\n SEEDED_RANDOM_PIPELINE_SEED = 5\n\n def test_window_safety_propagation(self):\n dates = self.trading_days[-30:]\n start_date, end_date = dates[[-10, -1]]\n\n col = TestingDataSet.float_col\n pipe = Pipeline(\n columns={\n 'average_of_rank_plus_one': SimpleMovingAverage(\n inputs=[col.latest.rank() + 1],\n window_length=10,\n ),\n 'average_of_aliased_rank_plus_one': SimpleMovingAverage(\n inputs=[col.latest.rank().alias('some_alias') + 1],\n window_length=10,\n ),\n 'average_of_rank_plus_one_aliased': SimpleMovingAverage(\n inputs=[(col.latest.rank() + 1).alias('some_alias')],\n window_length=10,\n ),\n }\n )\n results = self.run_pipeline(pipe, start_date, end_date).unstack()\n\n expected_ranks = DataFrame(\n self.raw_expected_values(\n col,\n dates[-19],\n dates[-1],\n ),\n index=dates[-19:],\n columns=self.asset_finder.retrieve_all(\n self.ASSET_FINDER_EQUITY_SIDS,\n )\n ).rank(axis='columns')\n\n # All three expressions should be equivalent and evaluate to this.\n expected_result = (\n (expected_ranks + 1)\n .rolling(10)\n .mean()\n .dropna(how='any')\n )\n expected_result.index.set_names(['datetime'], inplace=True)\n\n for colname in results.columns.levels[0]:\n actual = results[colname]\n actual.columns.name = None\n assert_equal(expected_result, actual)\n\n\nclass PopulateInitialWorkspaceTestCase(WithConstantInputs,\n zf.WithAssetFinder,\n zf.WithTradingCalendars,\n zf.ZiplineTestCase):\n\n 
@parameter_space(window_length=[3, 5], pipeline_length=[5, 10])\n def test_populate_initial_workspace(self, window_length, pipeline_length):\n column = EquityPricing.low\n base_term = column.latest\n\n # Take a Z-Score here so that the precomputed term is window-safe. The\n # z-score will never actually get computed because we swap it out.\n precomputed_term = (base_term.zscore()).alias('precomputed_term')\n\n # A term that has `precomputed_term` as an input.\n depends_on_precomputed_term = precomputed_term + 1\n # A term that requires a window of `precomputed_term`.\n depends_on_window_of_precomputed_term = SimpleMovingAverage(\n inputs=[precomputed_term],\n window_length=window_length,\n )\n\n precomputed_term_with_window = SimpleMovingAverage(\n inputs=(column,),\n window_length=window_length,\n ).alias('precomputed_term_with_window')\n depends_on_precomputed_term_with_window = (\n precomputed_term_with_window + 1\n )\n\n column_value = self.constants[column]\n precomputed_term_value = -column_value\n precomputed_term_with_window_value = -(column_value + 1)\n\n def populate_initial_workspace(initial_workspace,\n root_mask_term,\n execution_plan,\n dates,\n assets):\n def shape_for_term(term):\n ndates = len(execution_plan.mask_and_dates_for_term(\n term,\n root_mask_term,\n initial_workspace,\n dates,\n )[1])\n nassets = len(assets)\n return (ndates, nassets)\n\n ws = initial_workspace.copy()\n ws[precomputed_term] = full(\n shape_for_term(precomputed_term),\n precomputed_term_value,\n dtype=float64,\n )\n ws[precomputed_term_with_window] = full(\n shape_for_term(precomputed_term_with_window),\n precomputed_term_with_window_value,\n dtype=float64,\n )\n return ws\n\n def dispatcher(c):\n self.assertIsNot(\n c, column, \"Shouldn't need to dispatch precomputed term input!\"\n )\n return self.loader\n\n engine = SimplePipelineEngine(\n dispatcher,\n self.asset_finder,\n populate_initial_workspace=populate_initial_workspace,\n )\n\n results = engine.run_pipeline(\n Pipeline({\n 'precomputed_term': precomputed_term,\n 'precomputed_term_with_window': precomputed_term_with_window,\n 'depends_on_precomputed_term': depends_on_precomputed_term,\n 'depends_on_precomputed_term_with_window':\n depends_on_precomputed_term_with_window,\n 'depends_on_window_of_precomputed_term':\n depends_on_window_of_precomputed_term,\n }, domain=self.domain),\n self.dates[-pipeline_length],\n self.dates[-1],\n )\n\n assert_equal(\n results['precomputed_term'].values,\n full_like(\n results['precomputed_term'],\n precomputed_term_value,\n ),\n ),\n assert_equal(\n results['precomputed_term_with_window'].values,\n full_like(\n results['precomputed_term_with_window'],\n precomputed_term_with_window_value,\n ),\n ),\n assert_equal(\n results['depends_on_precomputed_term'].values,\n full_like(\n results['depends_on_precomputed_term'],\n precomputed_term_value + 1,\n ),\n )\n assert_equal(\n results['depends_on_precomputed_term_with_window'].values,\n full_like(\n results['depends_on_precomputed_term_with_window'],\n precomputed_term_with_window_value + 1,\n ),\n )\n assert_equal(\n results['depends_on_window_of_precomputed_term'].values,\n full_like(\n results['depends_on_window_of_precomputed_term'],\n precomputed_term_value,\n ),\n )\n\n\nclass ChunkedPipelineTestCase(zf.WithSeededRandomPipelineEngine,\n zf.ZiplineTestCase):\n\n PIPELINE_START_DATE = Timestamp('2006-01-05', tz='UTC')\n END_DATE = Timestamp('2006-12-29', tz='UTC')\n ASSET_FINDER_COUNTRY_CODE = 'CN'\n\n def test_run_chunked_pipeline(self):\n \"\"\"\n Test 
that running a pipeline in chunks produces the same result as if\n it were run all at once\n \"\"\"\n\n pipe = Pipeline(\n columns={\n 'float': TestingDataSet.float_col.latest,\n 'custom_factor': SimpleMovingAverage(\n inputs=[TestingDataSet.float_col],\n window_length=10,\n ),\n },\n domain=CN_EQUITIES,\n )\n\n if not new_pandas:\n # Categoricals only work on old pandas.\n pipe.add(TestingDataSet.categorical_col.latest, 'categorical')\n\n pipeline_result = self.run_pipeline(\n pipe,\n start_date=self.PIPELINE_START_DATE,\n end_date=self.END_DATE,\n )\n chunked_result = self.run_chunked_pipeline(\n pipeline=pipe,\n start_date=self.PIPELINE_START_DATE,\n end_date=self.END_DATE,\n chunksize=22\n )\n self.assertTrue(chunked_result.equals(pipeline_result))\n\n def test_concatenate_empty_chunks(self):\n # Test that we correctly handle concatenating chunked pipelines when\n # some of the chunks are empty. This is slightly tricky b/c pandas\n # DataFrames lose dtype information when they're empty.\n\n class FalseOnOddMonths(CustomFilter):\n \"\"\"Filter that returns False for all assets during odd months.\n \"\"\"\n inputs = ()\n window_length = 1\n\n def compute(self, today, assets, out):\n out[:] = (today.month % 2 == 0)\n\n pipe = Pipeline(\n columns={\n 'float': TestingDataSet.float_col.latest,\n 'bool': TestingDataSet.bool_col.latest,\n },\n # Define a screen that's False for all assets a significant portion\n # of the time.\n screen=FalseOnOddMonths(),\n domain=CN_EQUITIES,\n )\n\n if not new_pandas:\n # Categoricals only work on old pandas.\n pipe.add(TestingDataSet.categorical_col.latest, 'categorical')\n\n self.run_chunked_pipeline(\n pipeline=pipe,\n start_date=self.PIPELINE_START_DATE,\n end_date=self.END_DATE,\n # Make chunksize small enough that some chunks are guaranteed to\n # have no assets pass the screen.\n chunksize=5,\n )\n\n\nclass MaximumRegressionTest(zf.WithSeededRandomPipelineEngine,\n zf.ZiplineTestCase):\n ASSET_FINDER_EQUITY_SIDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n\n def test_no_groupby_maximum(self):\n # This is a regression test for a bug where factor.top(1) would fail\n # when not passed a groupby parameter.\n\n factor = TestingDataSet.float_col.latest\n maximum = factor.top(1)\n pipe = Pipeline(\n {'factor': factor, 'maximum': maximum},\n domain=EquitySessionDomain(\n self.trading_days,\n self.ASSET_FINDER_COUNTRY_CODE,\n ),\n )\n result = self.run_pipeline(\n pipe, self.trading_days[-5], self.trading_days[-1]\n )\n\n # We should have one maximum every day.\n maxes_per_day = result.groupby(level=0)['maximum'].sum()\n self.assertTrue((maxes_per_day == 1).all())\n\n # The maximum computed by pipeline should match the maximum computed by\n # doing a groupby in pandas.\n groupby_max = result.groupby(level=0).factor.max()\n pipeline_max = (result.factor[result.maximum]\n .reset_index(level=1, drop=True))\n # TODO: at present the pipeline result's datetime index has freq == None\n # it should match the trading calendar, i.e. freq == C\n pipeline_max.index.freq = None\n assert_equal(groupby_max, pipeline_max)\n\n\nclass ResolveDomainTestCase(zf.ZiplineTestCase):\n\n def test_resolve_domain(self):\n # the domain is determined mainly by the registered columns\n # we need to pass a get_loader and an asset_finder to construct\n # SimplePipelineEngine, but do not expect to use them\n get_loader = NamedExplodingObject(\n 'self._get_loader',\n 'SimplePipelineEngine does not currently depend on get_loader '\n 'at construction time. 
Update this test if it now does.'\n )\n asset_finder = NamedExplodingObject(\n 'self._finder',\n 'SimplePipelineEngine does not currently depend on asset_finder '\n 'at construction time. Update this test if it now does.'\n )\n\n engine_generic = SimplePipelineEngine(\n get_loader, asset_finder, default_domain=GENERIC\n )\n engine_jp = SimplePipelineEngine(\n get_loader, asset_finder, default_domain=JP_EQUITIES\n )\n\n pipe_generic = Pipeline()\n pipe_us = Pipeline(domain=US_EQUITIES)\n\n # the engine should resolve a pipeline that already has a domain\n # to that domain\n self.assertIs(\n engine_jp.resolve_domain(pipe_us),\n US_EQUITIES\n )\n\n # the engine should resolve a pipeline without a domain to the engine's\n # default\n self.assertIs(\n engine_jp.resolve_domain(pipe_generic),\n JP_EQUITIES\n )\n\n # a generic engine should resolve to the pipeline's domain\n # if it has one\n self.assertIs(\n engine_generic.resolve_domain(pipe_us),\n US_EQUITIES\n )\n\n # an engine with a default of GENERIC should raise a ValueError when\n # trying to infer a pipeline whose domain is also GENERIC\n with self.assertRaises(ValueError):\n engine_generic.resolve_domain(pipe_generic)\n\n # infer domain from the column if the pipeline and engine have\n # a GENERIC domain\n pipe = Pipeline({'close': USEquityPricing.close.latest})\n self.assertIs(\n engine_generic.resolve_domain(pipe),\n US_EQUITIES,\n )\n\n # infer domain from the column if the pipeline and engine have\n # a GENERIC domain\n pipe = Pipeline({'close': CNEquityPricing.close.latest})\n self.assertIs(\n engine_generic.resolve_domain(pipe),\n CN_EQUITIES,\n )\n",
"# 完成测试 ✔\nfrom collections import OrderedDict\nimport itertools\nfrom textwrap import dedent\n\nfrom parameterized import parameterized\nimport numpy as np\n\nfrom zipline.pipeline.data import (\n Column,\n DataSetFamily,\n DataSetFamilySlice,\n)\nfrom zipline.testing import ZiplineTestCase\nfrom zipline.testing.predicates import (\n assert_equal,\n assert_is,\n assert_is_not,\n assert_is_subclass,\n assert_raises_str,\n)\n\n\nclass TestDataSetFamily(ZiplineTestCase):\n def test_repr(self):\n class MD1(DataSetFamily):\n extra_dims = [('dim_0', [])]\n\n expected_repr = (\n \"<DataSetFamily: 'MD1', extra_dims=['dim_0']>\"\n )\n assert_equal(repr(MD1), expected_repr)\n\n class MD2(DataSetFamily):\n extra_dims = [('dim_0', []), ('dim_1', [])]\n\n expected_repr = (\n \"<DataSetFamily: 'MD2', extra_dims=['dim_0', 'dim_1']>\"\n )\n assert_equal(repr(MD2), expected_repr)\n\n class MD3(DataSetFamily):\n extra_dims = [('dim_1', []), ('dim_0', [])]\n\n expected_repr = (\n \"<DataSetFamily: 'MD3', extra_dims=['dim_1', 'dim_0']>\"\n )\n assert_equal(repr(MD3), expected_repr)\n\n def test_cache(self):\n class MD1(DataSetFamily):\n extra_dims = [('dim_0', ['a', 'b', 'c'])]\n\n class MD2(DataSetFamily):\n extra_dims = [('dim_0', ['a', 'b', 'c'])]\n\n MD1Slice = MD1.slice(dim_0='a')\n MD2Slice = MD2.slice(dim_0='a')\n\n assert_equal(MD1Slice.extra_coords, MD2Slice.extra_coords)\n assert_is_not(MD1Slice, MD2Slice)\n\n def test_empty_extra_dims(self):\n msg = (\n \"DataSetFamily must be defined with non-empty extra_dims,\"\n \" or with `_abstract = True`\"\n )\n with assert_raises_str(ValueError, msg):\n class NoExtraDims(DataSetFamily):\n pass\n\n with assert_raises_str(ValueError, msg):\n class EmptyExtraDims(DataSetFamily):\n extra_dims = []\n\n class AbstractParent(DataSetFamily):\n _abstract = True\n\n with assert_raises_str(ValueError, msg):\n class NoExtraDimsChild(AbstractParent):\n pass\n\n with assert_raises_str(ValueError, msg):\n class EmptyExtraDimsChild(AbstractParent):\n extra_dims = []\n\n class AbstractChild(AbstractParent):\n _abstract = True\n\n class Child(AbstractParent):\n extra_dims = [\n ('dim_0', {'a', 'b', 'c'}),\n ('dim_1', {'d', 'e', 'f'}),\n ]\n\n def spec(*cs):\n return (cs,)\n\n @parameterized.expand([\n spec(\n ('dim_0', range(10))\n ),\n spec(\n ('dim_0', range(10)),\n ('dim_1', range(10, 15)),\n ),\n spec(\n ('dim_0', range(10)),\n ('dim_1', range(10, 15)),\n ('dim_2', range(5, 15)),\n ),\n spec(\n ('dim_0', range(6)),\n ('dim_1', {'a', 'b', 'c'}),\n ('dim_2', range(5, 15)),\n ('dim_3', {'b', 'c', 'e'}),\n ),\n ])\n def test_valid_slice(self, dims_spec):\n class MD(DataSetFamily):\n extra_dims = dims_spec\n\n f8 = Column('f8')\n i8 = Column('i8', missing_value=0)\n ob = Column('O')\n M8 = Column('M8[ns]')\n boolean = Column('?')\n\n expected_dims = OrderedDict([(k, frozenset(v)) for k, v in dims_spec])\n assert_equal(MD.extra_dims, expected_dims)\n\n for valid_combination in itertools.product(*expected_dims.values()):\n Slice = MD.slice(*valid_combination)\n alternate_constructions = [\n # all positional\n MD.slice(*valid_combination),\n # all keyword\n MD.slice(**dict(zip(expected_dims.keys(), valid_combination))),\n # mix keyword/positional\n MD.slice(\n *valid_combination[:len(valid_combination) // 2],\n **dict(\n list(zip(expected_dims.keys(), valid_combination))[\n len(valid_combination) // 2:\n ],\n )\n ),\n ]\n for alt in alternate_constructions:\n assert_is(Slice, alt, msg='Slices are not properly memoized')\n\n expected_coords = OrderedDict(\n zip(expected_dims, 
valid_combination),\n )\n assert_equal(Slice.extra_coords, expected_coords)\n\n assert_is(Slice.dataset_family, MD)\n\n assert_is_subclass(Slice, DataSetFamilySlice)\n\n expected_columns = {\n ('f8', np.dtype('f8'), Slice),\n ('i8', np.dtype('i8'), Slice),\n ('ob', np.dtype('O'), Slice),\n ('M8', np.dtype('M8[ns]'), Slice),\n ('boolean', np.dtype('?'), Slice),\n }\n actual_columns = {\n (c.name, c.dtype, c.dataset) for c in Slice.columns\n }\n assert_equal(actual_columns, expected_columns)\n\n del spec\n\n def test_slice_unknown_dims(self):\n class MD(DataSetFamily):\n extra_dims = [\n ('dim_0', {'a', 'b', 'c'}),\n ('dim_1', {'c', 'd', 'e'}),\n ]\n\n def expect_slice_fails(*args, **kwargs):\n expected_msg = kwargs.pop('expected_msg')\n\n with assert_raises_str(TypeError, expected_msg):\n MD.slice(*args, **kwargs)\n\n # insufficient positional\n expect_slice_fails(\n expected_msg=(\n 'no coordinate provided to MD for the following dimensions:'\n ' dim_0, dim_1'\n ),\n )\n expect_slice_fails(\n 'a',\n expected_msg=(\n 'no coordinate provided to MD for the following dimension:'\n ' dim_1'\n ),\n )\n\n # too many positional\n expect_slice_fails(\n 'a', 'b', 'c',\n expected_msg='MD has 2 extra dimensions but 3 were given',\n )\n\n # mismatched keys\n expect_slice_fails(\n dim_2='??',\n expected_msg=(\n 'MD does not have the following dimension: dim_2\\n'\n 'Valid dimensions are: dim_0, dim_1'\n ),\n )\n expect_slice_fails(\n dim_1='??', dim_2='??',\n expected_msg=(\n 'MD does not have the following dimension: dim_2\\n'\n 'Valid dimensions are: dim_0, dim_1'\n ),\n )\n expect_slice_fails(\n dim_0='??', dim_1='??', dim_2='??',\n expected_msg=(\n 'MD does not have the following dimension: dim_2\\n'\n 'Valid dimensions are: dim_0, dim_1'\n ),\n )\n\n # the extra keyword dims should be sorted\n expect_slice_fails(\n dim_3='??', dim_2='??',\n expected_msg=(\n 'MD does not have the following dimensions: dim_2, dim_3\\n'\n 'Valid dimensions are: dim_0, dim_1'\n ),\n )\n\n def test_slice_unknown_dim_label(self):\n class MD(DataSetFamily):\n extra_dims = [\n ('dim_0', {'a', 'b', 'c'}),\n ('dim_1', {'c', 'd', 'e'}),\n ]\n\n def expect_slice_fails(*args, **kwargs):\n expected_msg = kwargs.pop('expected_msg')\n\n with assert_raises_str(ValueError, expected_msg):\n MD.slice(*args, **kwargs)\n\n expect_slice_fails(\n 'not-in-0', 'c',\n expected_msg=(\n \"'not-in-0' is not a value along the dim_0 dimension of MD\"\n ),\n )\n expect_slice_fails(\n dim_0='not-in-0', dim_1='c',\n expected_msg=(\n \"'not-in-0' is not a value along the dim_0 dimension of MD\"\n ),\n )\n\n expect_slice_fails(\n 'a', 'not-in-1',\n expected_msg=(\n \"'not-in-1' is not a value along the dim_1 dimension of MD\"\n ),\n )\n expect_slice_fails(\n dim_0='a', dim_1='not-in-1',\n expected_msg=(\n \"'not-in-1' is not a value along the dim_1 dimension of MD\"\n ),\n )\n\n def test_inheritance(self):\n class Parent(DataSetFamily):\n extra_dims = [\n ('dim_0', {'a', 'b', 'c'}),\n ('dim_1', {'d', 'e', 'f'}),\n ]\n\n column_0 = Column('f8')\n column_1 = Column('?')\n\n class Child(Parent):\n column_2 = Column('O')\n column_3 = Column('i8', -1)\n\n assert_is_subclass(Child, Parent)\n assert_equal(Child.extra_dims, Parent.extra_dims)\n\n ChildSlice = Child.slice(dim_0='a', dim_1='d')\n\n expected_child_slice_columns = frozenset({\n ChildSlice.column_0,\n ChildSlice.column_1,\n ChildSlice.column_2,\n ChildSlice.column_3,\n })\n assert_equal(ChildSlice.columns, expected_child_slice_columns)\n\n def test_column_access_without_slice(self):\n class 
Parent(DataSetFamily):\n extra_dims = [\n ('dim_0', {'a', 'b', 'c'}),\n ('dim_1', {'d', 'e', 'f'}),\n ]\n\n column_0 = Column('f8')\n column_1 = Column('?')\n\n class Child(Parent):\n column_2 = Column('O')\n column_3 = Column('i8', -1)\n\n def make_expected_msg(ds, attr):\n return dedent(\n \"\"\"\\\n Attempted to access column {c} from DataSetFamily {d}:\n\n To work with dataset families, you must first select a\n slice using the ``slice`` method:\n\n {d}.slice(...).{c}\n \"\"\"\n .format(c=attr, d=ds), # noqa\n )\n\n expected_msg = make_expected_msg('Parent', 'column_0')\n with assert_raises_str(AttributeError, expected_msg):\n Parent.column_0\n\n expected_msg = make_expected_msg('Parent', 'column_1')\n with assert_raises_str(AttributeError, expected_msg):\n Parent.column_1\n\n expected_msg = make_expected_msg('Child', 'column_0')\n with assert_raises_str(AttributeError, expected_msg):\n Child.column_0\n\n expected_msg = make_expected_msg('Child', 'column_1')\n with assert_raises_str(AttributeError, expected_msg):\n Child.column_1\n\n expected_msg = make_expected_msg('Child', 'column_2')\n with assert_raises_str(AttributeError, expected_msg):\n Child.column_2\n\n expected_msg = make_expected_msg('Child', 'column_3')\n with assert_raises_str(AttributeError, expected_msg):\n Child.column_3\n"
] | [
[
"pandas.concat",
"numpy.log",
"pandas.Timedelta",
"pandas.DataFrame",
"pandas.DataFrame.from_records",
"pandas.Timestamp",
"pandas.pivot_table"
],
[
"numpy.isnan",
"numpy.mean",
"pandas.isnull"
],
[
"pandas.DataFrame.from_records",
"pandas.concat",
"pandas.Timestamp",
"pandas.DataFrame"
],
[
"numpy.zeros",
"numpy.diff"
],
[
"pandas.Timedelta",
"pandas.api.types.is_datetime64tz_dtype",
"pandas.api.types.is_integer_dtype",
"pandas.api.types.is_string_dtype",
"pandas.api.types.is_bool_dtype"
],
[
"pandas.DataFrame.from_records"
],
[
"numpy.log",
"pandas.compat.chainmap.ChainMap",
"pandas.Timestamp",
"pandas.DataFrame",
"numpy.full",
"numpy.full_like",
"numpy.testing.assert_almost_equal",
"pandas.testing.assert_frame_equal",
"pandas.Int64Index",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"numpy.iinfo",
"numpy.array",
"numpy.where"
],
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.3",
"1.1",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
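
The EWMA helpers in the tests above encode the three standard pandas parameterizations of the decay rate: span = 2/(1 - decay_rate) - 1, center of mass = 1/(1 - decay_rate) - 1, and halflife = log(0.5)/log(decay_rate). A minimal, zipline-free sketch that inverts and round-trips these identities (function names are illustrative, not from the source):

from math import log

def span_to_decay_rate(span):
    # Invert span = 2 / (1 - decay_rate) - 1.
    return 1 - 2 / (span + 1)

def com_to_decay_rate(com):
    # Invert center_of_mass = 1 / (1 - decay_rate) - 1.
    return 1 - 1 / (com + 1)

def halflife_to_decay_rate(halflife):
    # Invert halflife = log(0.5) / log(decay_rate).
    return 0.5 ** (1 / halflife)

for x in (3, 5, 10):
    assert abs(2 / (1 - span_to_decay_rate(x)) - 1 - x) < 1e-12
    assert abs(1 / (1 - com_to_decay_rate(x)) - 1 - x) < 1e-12
    assert abs(log(0.5) / log(halflife_to_decay_rate(x)) - x) < 1e-12

These are the same identities exercised by the from_span, from_center_of_mass, and from_halflife constructors in the test case.
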
DuckerMan/ru-gpts | [
"cbe8b1e4ae3b88642292590f51c5eacdea7b6846"
] | [
"utils.py"
] | [
"# coding=utf-8\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for logging and serialization\"\"\"\n\nimport os\nimport random\nimport time\n\nimport numpy as np\nimport torch\nfrom torch.nn.parallel.distributed import DistributedDataParallel as torchDDP\n\nimport mpu\nfrom fp16 import FP16_Optimizer\n\n\ndef print_rank_0(message):\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(message, flush=True)\n else:\n print(message, flush=True)\n\n\ndef print_args(args):\n \"\"\"Print arguments.\"\"\"\n\n print('arguments:', flush=True)\n for arg in vars(args):\n dots = '.' * (29 - len(arg))\n print(' {} {} {}'.format(arg, dots, getattr(args, arg)), flush=True)\n\n\ndef print_params_min_max_norm(optimizer, iteration):\n \"\"\"Print min, max, and norm of all parameters.\"\"\"\n index = 0\n rank = torch.distributed.get_rank()\n string = 'iteration, rank, index, model-parallel,min, max, norm\\n'\n optimizer_ = optimizer\n if isinstance(optimizer, FP16_Optimizer):\n optimizer_ = optimizer.optimizer\n for param_group in optimizer_.param_groups:\n for param in param_group['params']:\n index += 1\n min_ = param.data.min()\n max_ = param.data.max()\n norm = param.data.norm()\n string += '{:7d}, {:4d}, {:4d}, {:2d}, '.format(\n iteration, rank, index, int(param.model_parallel))\n string += '{:.6E}, {:.6E}, {:.6E}\\n'.format(min_, max_, norm)\n print(string, flush=True)\n\n\nclass Timers:\n \"\"\"Group of timers.\"\"\"\n\n class Timer:\n \"\"\"Timer.\"\"\"\n\n def __init__(self, name):\n self.name_ = name\n self.elapsed_ = 0.0\n self.started_ = False\n self.start_time = time.time()\n\n def start(self):\n \"\"\"Start the timer.\"\"\"\n assert not self.started_, 'timer has already been started'\n torch.cuda.synchronize()\n self.start_time = time.time()\n self.started_ = True\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n assert self.started_, 'timer is not started'\n torch.cuda.synchronize()\n self.elapsed_ += (time.time() - self.start_time)\n self.started_ = False\n\n def reset(self):\n \"\"\"Reset timer.\"\"\"\n self.elapsed_ = 0.0\n self.started_ = False\n\n def elapsed(self, reset=True):\n \"\"\"Calculate the elapsed time.\"\"\"\n started_ = self.started_\n # If the timing in progress, end it first.\n if self.started_:\n self.stop()\n # Get the elapsed time.\n elapsed_ = self.elapsed_\n # Reset the elapsed time\n if reset:\n self.reset()\n # If timing was in progress, set it back.\n if started_:\n self.start()\n return elapsed_\n\n def __init__(self):\n self.timers = {}\n\n def __call__(self, name):\n if name not in self.timers:\n self.timers[name] = self.Timer(name)\n return self.timers[name]\n\n def log(self, names, normalizer=1.0, reset=True):\n \"\"\"Log a group of timers.\"\"\"\n assert normalizer > 0.0\n string = 'time (ms)'\n for name in names:\n elapsed_time = self.timers[name].elapsed(\n reset=reset) * 1000.0 / normalizer\n string += ' | {}: {:.2f}'.format(name, 
elapsed_time)\n print_rank_0(string)\n\n\ndef report_memory(name):\n \"\"\"Simple GPU memory report.\"\"\"\n\n mega_bytes = 1024.0 * 1024.0\n string = name + ' memory (MB)'\n string += ' | allocated: {}'.format(\n torch.cuda.memory_allocated() / mega_bytes)\n string += ' | max allocated: {}'.format(\n torch.cuda.max_memory_allocated() / mega_bytes)\n string += ' | cached: {}'.format(torch.cuda.memory_cached() / mega_bytes)\n string += ' | max cached: {}'.format(\n torch.cuda.max_memory_cached() / mega_bytes)\n print_rank_0(string)\n\n\ndef get_checkpoint_name(checkpoints_path, iteration, release=False, zero=False):\n if release:\n d = 'release'\n else:\n d = 'iter_{:07d}'.format(iteration)\n if zero:\n dp_rank = mpu.get_data_parallel_rank()\n d += '_zero_dp_rank_{}'.format(dp_rank)\n return os.path.join(checkpoints_path, d,\n 'mp_rank_{:02d}'.format(mpu.get_model_parallel_rank()),\n 'model_optim_rng.pt')\n\n\ndef ensure_directory_exists(filename):\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n\ndef get_checkpoint_tracker_filename(checkpoints_path):\n return os.path.join(checkpoints_path, 'latest_checkpointed_iteration.txt')\n\n\ndef save_zero_checkpoint(args, iteration, optimizer):\n zero_sd = {'iteration': iteration,\n 'optimizer_state_dict': optimizer.state_dict()}\n zero_checkpoint_name = get_checkpoint_name(args.save, iteration, zero=True)\n ensure_directory_exists(zero_checkpoint_name)\n torch.save(zero_sd, zero_checkpoint_name)\n print(' successfully saved {}'.format(zero_checkpoint_name))\n\n\ndef save_checkpoint(iteration, model, optimizer,\n lr_scheduler, args):\n \"\"\"Save a model checkpoint.\"\"\"\n if args.deepspeed:\n save_ds_checkpoint(iteration, model, args)\n else:\n # Only rank zero of the data parallel group writes to the disk.\n if isinstance(model, torchDDP):\n model = model.module\n\n if mpu.get_data_parallel_rank() == 0:\n checkpoint_name = get_checkpoint_name(args.save, iteration)\n print('global rank {} is saving checkpoint at iteration {:7d} to {}'.\n format(torch.distributed.get_rank(), iteration, checkpoint_name))\n\n sd = {}\n sd['iteration'] = iteration\n sd['model'] = model.state_dict()\n\n # Optimizer stuff.\n if not args.no_save_optim:\n if optimizer is not None:\n sd['optimizer'] = optimizer.state_dict()\n if lr_scheduler is not None:\n sd['lr_scheduler'] = lr_scheduler.state_dict()\n\n # rng states.\n if not args.no_save_rng:\n sd['random_rng_state'] = random.getstate()\n sd['np_rng_state'] = np.random.get_state()\n sd['torch_rng_state'] = torch.get_rng_state()\n sd['cuda_rng_state'] = torch.cuda.get_rng_state()\n sd['rng_tracker_states'] = mpu.get_cuda_rng_tracker().get_states()\n\n ensure_directory_exists(checkpoint_name)\n torch.save(sd, checkpoint_name)\n print(' successfully saved {}'.format(checkpoint_name))\n\n # Wait so everyone is done (necessary)\n torch.distributed.barrier()\n # And update the latest iteration\n if torch.distributed.get_rank() == 0:\n tracker_filename = get_checkpoint_tracker_filename(args.save)\n with open(tracker_filename, 'w') as f:\n f.write(str(iteration))\n # Wait so everyone is done (not necessary)\n torch.distributed.barrier()\n\n\ndef save_ds_checkpoint(iteration, model, args):\n \"\"\"Save a model checkpoint.\"\"\"\n\n sd = {}\n sd['iteration'] = iteration\n # rng states.\n if not args.no_save_rng:\n sd['random_rng_state'] = random.getstate()\n sd['np_rng_state'] = np.random.get_state()\n sd['torch_rng_state'] = torch.get_rng_state()\n sd['cuda_rng_state'] = 
torch.cuda.get_rng_state()\n sd['rng_tracker_states'] = mpu.get_cuda_rng_tracker().get_states()\n\n model.save_checkpoint(args.save, iteration, client_state=sd)\n\n\ndef get_checkpoint_iteration(args):\n # Read the tracker file and set the iteration.\n tracker_filename = get_checkpoint_tracker_filename(args.load)\n if not os.path.isfile(tracker_filename):\n print_rank_0('WARNING: could not find the metadata file {} '.format(\n tracker_filename))\n print_rank_0(' will not load any checkpoints and will start from '\n 'random')\n return 0, False, False\n iteration = 0\n release = False\n with open(tracker_filename, 'r') as f:\n metastring = f.read().strip()\n try:\n iteration = int(metastring)\n except ValueError:\n release = metastring == 'release'\n if not release:\n print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(\n tracker_filename))\n exit()\n\n assert iteration > 0 or release, 'error parsing metadata file {}'.format(\n tracker_filename)\n\n return iteration, release, True\n\n\ndef load_checkpoint(model, optimizer, lr_scheduler, args):\n \"\"\"Load a model checkpoint.\"\"\"\n\n iteration, release, success = get_checkpoint_iteration(args)\n\n if not success:\n return 0\n\n if args.deepspeed:\n raise NotImplementedError(\"DeepSpeed is not installed\")\n\n else:\n\n # Checkpoint.\n checkpoint_name = get_checkpoint_name(args.load, iteration, release)\n\n if mpu.get_data_parallel_rank() == 0:\n print('global rank {} is loading checkpoint {}'.format(\n torch.distributed.get_rank(), checkpoint_name))\n\n if args.load_openai:\n from utils import move_weights\n from model import DistributedDataParallel as DDP\n from fp16 import FP16_Module\n model_path = args.load\n from transformers import GPT2LMHeadModel\n print('global rank {} is loading openai weights {}'.format(\n torch.distributed.get_rank(), model_path))\n model.cpu()\n gpt2model = GPT2LMHeadModel.from_pretrained(model_path, cache_dir='gpt2_weights')\n model2fill = model\n while isinstance(model2fill, (DDP, FP16_Module)):\n model2fill = model2fill.module\n move_weights(model2fill, gpt2model)\n model.cuda(torch.cuda.current_device())\n sd = {}\n else:\n sd = torch.load(checkpoint_name, map_location='cpu')\n\n if isinstance(model, torchDDP):\n model = model.module\n\n # Model.\n try:\n model.load_state_dict(sd['model'])\n except KeyError:\n print_rank_0('A metadata file exists but unable to load model '\n 'from checkpoint {}, exiting'.format(checkpoint_name))\n exit()\n\n # Optimizer.\n if not release and not args.finetune and not args.no_load_optim:\n try:\n if optimizer is not None:\n optimizer.load_state_dict(sd['optimizer'])\n if lr_scheduler is not None:\n lr_scheduler.load_state_dict(sd['lr_scheduler'])\n except KeyError:\n print_rank_0('Unable to load optimizer from checkpoint {}, exiting. 
'\n 'Specify --no-load-optim or --finetune to prevent '\n 'attempting to load the optimizer '\n 'state.'.format(checkpoint_name))\n exit()\n\n # Iterations.\n if args.finetune or release:\n iteration = 0\n else:\n try:\n iteration = sd['iteration']\n except KeyError:\n try: # Backward compatible with older checkpoints\n iteration = sd['total_iters']\n except KeyError:\n print_rank_0('A metadata file exists but unable to load iteration '\n ' from checkpoint {}, exiting'.format(checkpoint_name))\n exit()\n\n # rng states.\n if not release and not args.finetune and not args.no_load_rng:\n try:\n random.setstate(sd['random_rng_state'])\n np.random.set_state(sd['np_rng_state'])\n torch.set_rng_state(sd['torch_rng_state'])\n torch.cuda.set_rng_state(sd['cuda_rng_state'])\n mpu.get_cuda_rng_tracker().set_states(sd['rng_tracker_states'])\n except KeyError:\n print_rank_0('Unable to load rng states from checkpoint {}, exiting. '\n 'Specify --no-load-rng or --finetune to prevent '\n 'attempting to load the rng '\n 'states.'.format(checkpoint_name))\n exit()\n\n torch.distributed.barrier()\n if mpu.get_data_parallel_rank() == 0:\n print(' successfully loaded {}'.format(checkpoint_name))\n\n return iteration\n\n\ndef load_weights(src, dst, dst2src=False):\n \"\"\"\n Loads weights from src to dst via in place copy.\n src is a huggingface gpt2model, while dst is one of our models.\n dst2src=True loads parameters from our models into huggingface's.\n ^dst2src is still untested\n \"\"\"\n conv_layer = 'Conv1D' in str(type(src))\n for n, p in src.named_parameters():\n if dst2src:\n data = dst._parameters[n].data\n load = p.data\n else:\n data = p.data\n load = dst._parameters[n].data\n if conv_layer and 'weight' in n:\n data = data.t().contiguous()\n load.copy_(data)\n\n\n# dst._parameters[n].data.copy_(data)\n\ndef load_mlp(our, oai, dst2src=False):\n load_weights(oai.c_fc, our.dense_h_to_4h, dst2src)\n load_weights(oai.c_proj, our.dense_4h_to_h, dst2src)\n\n\ndef load_attention(our, oai, dst2src=False):\n load_weights(oai.c_attn, our.query_key_value, dst2src)\n load_weights(oai.c_proj, our.dense, dst2src)\n\n\ndef load_transformer_layer(our, oai, dst2src=False):\n load_weights(oai.ln_1, our.input_layernorm, dst2src)\n load_weights(oai.ln_2, our.post_attention_layernorm, dst2src)\n load_mlp(our.mlp, oai.mlp, dst2src)\n load_attention(our.attention, oai.attn, dst2src)\n\n\ndef move_weights(our, oai, dst2src=False):\n \"\"\"\n Loads weights from `oai` to `our` via in place copy.\n `oai` is a huggingface gpt2model, while `our` is one of our models.\n dst2src=True loads parameters from our models into huggingface's.\n ^dst2src=True is still untested\n \"\"\"\n # while isinstance(our, (torchDDP, model.distributed.DistributedDataParallel, FP16_Module)):\n # our=our.module\n transformer_model = oai.transformer\n load_weights(transformer_model.ln_f, our.transformer.final_layernorm, dst2src)\n load_weights(transformer_model.wte, our.word_embeddings, dst2src)\n load_weights(transformer_model.wpe, our.position_embeddings, dst2src)\n\n for our_layer, oai_layer in zip(our.transformer.layers, oai.transformer.h):\n load_transformer_layer(our_layer, oai_layer, dst2src)\n"
] | [
[
"torch.set_rng_state",
"numpy.random.get_state",
"torch.cuda.synchronize",
"torch.load",
"torch.cuda.current_device",
"torch.cuda.get_rng_state",
"torch.distributed.is_initialized",
"torch.distributed.barrier",
"torch.get_rng_state",
"torch.cuda.max_memory_allocated",
"torch.cuda.memory_cached",
"torch.cuda.max_memory_cached",
"numpy.random.set_state",
"torch.distributed.get_rank",
"torch.cuda.set_rng_state",
"torch.cuda.memory_allocated",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
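
For reference, get_checkpoint_name in the utils.py above lays checkpoints out as <save>/iter_<7-digit iteration>/mp_rank_<2-digit rank>/model_optim_rng.pt, with an optional '_zero_dp_rank_<n>' suffix for ZeRO shards. A dependency-free sketch of the same scheme; taking the model-parallel rank as an explicit argument instead of querying mpu is an assumption made for illustration:

import os

def checkpoint_name(checkpoints_path, iteration, mp_rank=0, release=False):
    # Mirrors the directory layout of get_checkpoint_name above,
    # with mp_rank supplied by the caller rather than read from mpu.
    d = 'release' if release else 'iter_{:07d}'.format(iteration)
    return os.path.join(checkpoints_path, d,
                        'mp_rank_{:02d}'.format(mp_rank),
                        'model_optim_rng.pt')

# e.g. 'ckpts/iter_0001234/mp_rank_00/model_optim_rng.pt' on POSIX systems
print(checkpoint_name('ckpts', 1234))
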
jesbu1/h-baselines | [
"f6f775bb18de22527f2d01d73bd733ed2e435ba3",
"f745d7db323b82050360618110f907c3e43638d2"
] | [
"SocialRobotCustom/python/social_bot/gazebo_agent.py",
"hbaselines/envs/mujoco/humanoid_env.py"
] | [
"# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport os\nimport time\nimport random\nimport json\nimport gin\nimport numpy as np\nimport PIL.Image\nfrom collections import OrderedDict\nimport gym\nfrom absl import logging\nimport social_bot\nimport social_bot.pygazebo as gazebo\n\n\[email protected]\nclass GazeboAgent():\n \"\"\" Class for the agent of gazebo-based SocialRobot enviroments\n \"\"\"\n\n def __init__(self,\n world,\n agent_type,\n name=None,\n config=None,\n use_image_observation=True,\n resized_image_size=None,\n image_with_internal_states=False,\n with_language=False,\n with_agent_language=False,\n vocab_sequence_length=20,\n action_wrapper=None):\n \"\"\"\n Args:\n world (pygazebo.World): the world\n agent_type (str): the agent_type, supporting pr2_noplugin,\n pioneer2dx_noplugin, turtlebot, youbot_noplugin and icub_with_hands for now\n note that 'agent_type' should be exactly the same string as the model's\n name at the beginning of model's sdf file\n name (str): the name of the agent in world\n if None it will be set the same as agent_type\n config (dict): the configuarations for the agent\n see `agent_cfg.jason` for details\n use_image_observation (bool): Use image or not\n resized_image_size (None|tuple): If None, use the original image size\n from the camera. Otherwise, the original image will be resized\n to (width, height)\n image_with_internal_states (bool): If true, the agent's self internal states\n i.e., joint position and velocities would be available together with image.\n Only affect if use_image_observation is true\n with_language (bool): The observation will be a dict with an extra sentence\n with_agent_language (bool): Include language in agent's action space\n vocab_sequence_length (int): the length of encoded sequence if with_language\n action_wrapper (None|class): Some times primitive joints is not wanted, e.g., has\n redundant dimensions or offset. If not None, this is used to transform the agent\n actions. 
See ActionWrapper of gazebo_agent.py for example.\n \"\"\"\n self._world = world\n self.type = agent_type\n self._use_image_observation = use_image_observation\n self._resized_image_size = resized_image_size\n self._image_with_internal_states = image_with_internal_states\n self._with_language = with_language\n self._with_agent_language = with_agent_language\n self._vocab_sequence_length = vocab_sequence_length\n self._sentence_space = None\n\n if config == None:\n # Load agent configurations\n with open(\n os.path.join(social_bot.get_model_dir(), \"agent_cfg.json\"),\n 'r') as cfg_file:\n agent_cfgs = json.load(cfg_file)\n config = agent_cfgs[agent_type]\n self.config = config\n joints = config['control_joints']\n if action_wrapper is not None:\n self._action_wrapper = action_wrapper()\n self._action_dim = self._action_wrapper.get_actions_dim()\n else:\n self._action_wrapper = None\n self._action_dim = len(joints)\n\n if name:\n # the agent is wrapped by a new name in world\n self.name = name\n self.joints = []\n for joint in joints:\n self.joints.append(name + '::' + joint)\n else:\n self.name = agent_type\n self.joints = joints\n self._agent = self._world.get_agent(self.name)\n\n # Set the funtions from pygazebo.agent to Agent\n self.get_pose = self._agent.get_pose\n self.set_pose = self._agent.set_pose\n self.get_link_pose = self._agent.get_link_pose\n self.set_link_pose = self._agent.set_link_pose\n self.get_joint_state = self._agent.get_joint_state\n self.set_joint_state = self._agent.set_joint_state\n self.set_pid_controller = self._agent.set_pid_controller\n self.get_collisions = self._agent.get_collisions\n self.get_velocities = self._agent.get_velocities\n\n # Setup joints and sensors\n self._camera = config['camera_sensor']\n self.action_range = self.setup_joints(self._agent, self.joints, config)\n logging.debug(\"joints to control: %s\" % self.joints)\n\n def reset(self):\n \"\"\" Reset the agent. \"\"\"\n self._agent.reset()\n\n def take_action(self, action):\n \"\"\" Take actions.\n \n Args:\n the actions to be taken.\n \"\"\"\n if self._action_wrapper is not None:\n action = self._action_wrapper.wrap_actions(action)\n controls = np.clip(action, -1.0, 1.0) * self.action_range\n controls_dict = dict(zip(self.joints, controls))\n self._agent.take_action(controls_dict)\n\n def get_observation(self, teacher, sentence_raw=\"hello\"):\n \"\"\" Get the observation of agent.\n\n Args:\n teacher (social_bot.Teacher): the teacher, used to get the task specific\n observations from teacher's taskgroups.\n sentence_raw (string): the sentence intened to sent to the Agent. This can\n be ignored if with_language is False.\n Returns:\n obs (dict |numpy.array): the return depends on the configurations: with\n language or not, use image or not, and image_with_internal_states or not.\n Possible situations:\n low-dimensional full states\n low-dimensional full states with language sentence\n image from the camera of agent\n image with internal states\n image with language sentence\n image with both internal states and language sentence\n Note that low-dimensional full states is defined in\n \"Task.task_specific_observation()\", which has all the infomation need\n for the task. While the internal states that used as a supplementary\n to image is form \"Agent.get_internal_states()\", which only contains\n self joint positions and velocities. 
Joint positions are wrapped with\n sin() and cos() to avoid the discontinuous point at 0 to 2*pi.\n \"\"\"\n if self._image_with_internal_states or self._with_language:\n # observation is an OrderedDict\n obs = self._create_observation_dict(teacher, sentence_raw)\n elif self._use_image_observation: # observation is pure image\n obs = self.get_camera_observation()\n else: # observation is pure low-dimentional states\n obs = teacher.get_task_specific_observation(self)\n return obs\n\n def get_camera_observation(self):\n \"\"\" Get the camera image.\n\n Returns:\n a numpy.array of the image.\n \"\"\"\n image = np.array(\n self._agent.get_camera_observation(self._camera), copy=False)\n if self._resized_image_size:\n image = PIL.Image.fromarray(image).resize(self._resized_image_size,\n PIL.Image.ANTIALIAS)\n image = np.array(image, copy=False)\n return image\n\n def get_internal_states(self):\n \"\"\" Get the internal joint states of the agent.\n\n Returns:\n a numpy.array including joint positions and velocities\n \"\"\"\n joint_pos = []\n joint_vel = []\n for joint_id in range(len(self.joints)):\n joint_name = self.joints[joint_id]\n joint_state = self._agent.get_joint_state(joint_name)\n joint_pos.append(joint_state.get_positions())\n joint_vel.append(joint_state.get_velocities())\n joint_pos = np.array(joint_pos).flatten()\n joint_vel = np.array(joint_vel).flatten()\n # pos of continous joint could be huge, wrap the range with sin and cos.\n joint_pos_sin = np.sin(joint_pos)\n joint_pos_cos = np.cos(joint_pos)\n internal_states = np.concatenate(\n (joint_pos_sin, joint_pos_cos, joint_vel), axis=0)\n return internal_states\n\n def get_control_space(self):\n \"\"\" Get the pure controlling space without language. \"\"\"\n control_space = gym.spaces.Box(\n low=-1.0, high=1.0, shape=[self._action_dim], dtype=np.float32)\n return control_space\n\n def get_action_space(self):\n \"\"\" Get the action space with optional language. 
\"\"\"\n control_space = self.get_control_space()\n if self._with_agent_language and self._with_language:\n action_space = gym.spaces.Dict(\n control=control_space, sentence=self._sentence_space)\n else:\n action_space = control_space\n return action_space\n\n def get_observation_space(self, teacher):\n \"\"\"\n Get the observation space with optional language.\n\n Args:\n teacher (social_bot.Teacher): the teacher, used to get the task specific\n observations from teacher's taskgroups as a sample.\n \"\"\"\n obs_sample = self.get_observation(teacher)\n if self._with_language or self._image_with_internal_states:\n # observation is a dictionary\n observation_space = self._construct_dict_space(obs_sample)\n elif self._use_image_observation:\n # observation is image\n observation_space = gym.spaces.Box(\n low=0, high=255, shape=obs_sample.shape, dtype=np.uint8)\n else:\n # observation is spare states\n observation_space = gym.spaces.Box(\n low=-np.inf,\n high=np.inf,\n shape=obs_sample.shape,\n dtype=np.float32)\n return observation_space\n\n def set_sentence_space(self, sentence_space):\n \"\"\" Set the sentence if with_languange is enabled.\n\n Args:\n sentence_space (gym.spaces): the space for sentence sequence\n \"\"\"\n self._sentence_space = sentence_space\n\n def _create_observation_dict(self, teacher, sentence_raw):\n obs = OrderedDict()\n if self._use_image_observation:\n obs['image'] = self.get_camera_observation()\n if self._image_with_internal_states:\n obs['states'] = self.get_internal_states()\n else:\n obs['states'] = teacher.get_task_specific_observation(self)\n if self._with_language:\n obs['sentence'] = teacher.sentence_to_sequence(\n sentence_raw, self._vocab_sequence_length)\n return obs\n\n def _construct_dict_space(self, obs_sample):\n \"\"\" A helper function when gym.spaces.Dict is used as observation.\n\n Args:\n obs_sample (numpy.array|dict) : a sample observation\n Returns:\n Return a gym.spaces.Dict with keys 'image', 'states', 'sentence'\n Possible situation:\n image with internal states\n image with language sentence\n image with both internal states and language sentence\n pure low-dimensional states with language sentence\n \"\"\"\n ob_space_dict = dict()\n if 'image' in obs_sample.keys():\n ob_space_dict['image'] = gym.spaces.Box(\n low=0,\n high=255,\n shape=obs_sample['image'].shape,\n dtype=np.uint8)\n if 'states' in obs_sample.keys():\n ob_space_dict['states'] = gym.spaces.Box(\n low=-np.inf,\n high=np.inf,\n shape=obs_sample['states'].shape,\n dtype=np.float32)\n if 'sentence' in obs_sample.keys():\n ob_space_dict['sentence'] = self._sentence_space\n ob_space = gym.spaces.Dict(ob_space_dict)\n return ob_space\n\n def setup_joints(self, agent, joints, agent_cfg):\n \"\"\" Setup the joints acrroding to agent configuration.\n\n Args:\n agent (pygazebo.Agent): the agent\n joints (list of string): the name of joints\n agent_cfg (dict): the configuration\n \"\"\"\n joint_states = list(map(lambda s: agent.get_joint_state(s), joints))\n joints_limits = list(\n map(lambda s: s.get_effort_limits()[0], joint_states))\n print(\"JOINT LIMITS: %s\" % joints_limits)\n print(\"USE PID: %s\" % str(agent_cfg['use_pid']))\n if agent_cfg['use_pid']:\n for joint_index in range(len(joints)):\n agent.set_pid_controller(\n joint_name=joints[joint_index],\n pid_control_type=agent_cfg['pid_type'][joint_index],\n p=agent_cfg['pid'][joint_index][0],\n i=agent_cfg['pid'][joint_index][1],\n d=agent_cfg['pid'][joint_index][2],\n max_force=joints_limits[joint_index])\n control_range = 
agent_cfg['pid_control_limit']\n else:\n control_range = np.array(joints_limits)\n return control_range\n\n def get_egocentric_cord_2d(self, x, y, agent_yaw):\n \"\"\" Get the egocentric coordinate from a global 2D x-y plane coordinate.\n\n This is achieved by rotating the global coordinates x, y by -agent_yaw.\n\n Args:\n x (float): x of global x-y plane coordinate\n y (float): y of global x-y plane coordinate\n agent_yaw (float): agent yaw (rotation in z-axis), in radian\n Returns:\n tuple of float, the position in the transformed coordinate\n \"\"\"\n rotate = -agent_yaw\n rotated_x = x * np.cos(rotate) - y * np.sin(rotate)\n rotated_y = x * np.sin(rotate) + y * np.cos(rotate)\n return (rotated_x, rotated_y)\n\n def get_contacts(self, contacts_sensor, contact_collision):\n \"\"\" Get contacts to the link.\n\n Args:\n contacts_sensor(string): the name of contacts_sensor\n contact_collision(string): the collision to check contacts\n Returns:\n bool, there is contact or not\n \"\"\"\n contacts = self.get_collisions(contacts_sensor)\n for collision in contacts:\n if collision[0] == contact_collision or collision[\n 1] == contact_collision:\n return True\n return False\n\n\nclass ActionWrapper():\n \"\"\" The action wrapper transform a new actions to primitive actions.\n\n The primitive actions (like the force/velocity/position of joints) may have redundant\n dimensions or offsets. By the action wrapper, we can transform the action to more\n efficency one. The sub class should define the new action space in _NEW_ACTION_LIST.\n \"\"\"\n\n _NEW_ACTION_LIST = []\n\n def get_actions_dim(self):\n \"\"\" Get the dimension of the new action space\n \"\"\"\n return len(self._NEW_ACTION_LIST)\n\n def wrap_actions(self, action):\n \"\"\" Wrap transformed actions to primitive actions.\n\n Args:\n action (nparray): the new action from policy network\n Returns:\n np.array, the primitive actions send to simulator\n \"\"\"\n raise NotImplementedError(\"wrap_actions not implemented!\")\n\n\[email protected]\nclass YoubotActionWrapper(ActionWrapper):\n \"\"\" This action wrapper transform a new actions to primitive actions.\n\n The new action space is the same as keyboard demostration interface, defined in _NEW_ACTION_LIST\n The primitive actions (the joints) please refer to social_bot/models/agent_cfg.json.\n \"\"\"\n\n _NEW_ACTION_LIST = [\n 'arm_joint_yaw', 'arm_joint_pitch', 'arm_joint_pitch_2', 'palm_joint',\n 'gripper_finger_joint', 'wheel_speed', 'wheel_turning'\n ]\n\n def wrap_actions(self, action):\n \"\"\" Wrap transformed actions to primitive actions.\n\n Args:\n action (nparray): the new action from policy network\n Returns:\n np.array, the primitive actions send to simulator\n \"\"\"\n action = dict(zip(self._NEW_ACTION_LIST, action))\n primitive_actions = [\n # arm joints\n action['arm_joint_yaw'],\n 0.25 + action['arm_joint_pitch'] / 2, # add pi/4 offset\n 0.25 + action['arm_joint_pitch'] / 2,\n 0.25 + action['arm_joint_pitch_2'],\n action['palm_joint'],\n # gripper joints\n action['gripper_finger_joint'],\n action['gripper_finger_joint'],\n # wheel joints\n action['wheel_speed'] + action['wheel_turning'],\n action['wheel_speed'] - action['wheel_turning']\n ]\n return primitive_actions\n",
"\"\"\"Base humanoid environment.\"\"\"\nimport numpy as np\nimport os\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\n\n# Directory that contains mujoco xml files.\nSCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))\nMODEL_DIR = os.path.join(SCRIPT_PATH, 'assets')\n\n\ndef mass_center(model, sim):\n \"\"\"Compute the position of the agent's center of mass.\"\"\"\n mass = np.expand_dims(model.body_mass, 1)\n xpos = sim.data.xipos\n return (np.sum(mass * xpos, 0) / np.sum(mass))[0]\n\n\nclass HumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n \"\"\"Humanoid mujoco environment.\"\"\"\n\n FILE = 'humanoid.xml'\n\n def __init__(\n self,\n horizon=1000):\n\n self.horizon = horizon\n self.t = 0\n\n file_path = os.path.join(MODEL_DIR, HumanoidEnv.FILE)\n mujoco_env.MujocoEnv.__init__(self, file_path, 5)\n utils.EzPickle.__init__(self)\n\n def reset(self):\n \"\"\"Reset the environment.\"\"\"\n self.t = 0\n return mujoco_env.MujocoEnv.reset(self)\n\n def _get_obs(self):\n data = self.sim.data\n return np.concatenate([data.qpos.flat,\n data.qvel.flat,\n data.cinert.flat,\n data.cvel.flat,\n data.qfrc_actuator.flat,\n data.cfrc_ext.flat])\n\n def step(self, a):\n \"\"\"Advance the simulation by one step.\"\"\"\n pos_before = mass_center(self.model, self.sim)\n self.do_simulation(a, self.frame_skip)\n pos_after = mass_center(self.model, self.sim)\n alive_bonus = 5.0\n data = self.sim.data\n lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt\n quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()\n quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()\n quad_impact_cost = min(quad_impact_cost, 10)\n r = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus\n qpos = self.sim.data.qpos\n self.t += 1\n d = bool((qpos[2] < 1.0) or (qpos[2] > 2.0) or self.t > self.horizon)\n return self._get_obs(), np.nan_to_num(r), d, dict(\n reward_linvel=lin_vel_cost,\n reward_quadctrl=-quad_ctrl_cost,\n reward_alive=alive_bonus,\n reward_impact=-quad_impact_cost)\n\n def reset_model(self):\n \"\"\"Reset the position of the agent.\"\"\"\n c = 0.01\n rand_qpos = self.np_random.uniform(low=-c, high=c, size=self.model.nq)\n rand_qvel = self.np_random.uniform(low=-c, high=c, size=self.model.nv)\n qpos = self.init_qpos + rand_qpos\n qvel = self.init_qvel + rand_qvel\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n \"\"\"Create the viewer.\"\"\"\n self.viewer.cam.trackbodyid = 1\n self.viewer.cam.distance = self.model.stat.extent * 1.0\n self.viewer.cam.lookat[2] = 2.0\n self.viewer.cam.elevation = -20\n"
] | [
[
"numpy.clip",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.array"
],
[
"numpy.square",
"numpy.expand_dims",
"numpy.nan_to_num",
"numpy.concatenate",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
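
Two small pieces of math recur in the GazeboAgent code above: joint angles are exposed as (sin, cos) pairs so that 0 and 2*pi encode identically, and get_egocentric_cord_2d rotates a global (x, y) coordinate by -yaw. A short numpy sketch of both transforms, independent of the simulator:

import numpy as np

def wrap_joint_positions(joint_pos):
    # The (sin, cos) encoding avoids the discontinuity at the 0 / 2*pi seam.
    joint_pos = np.asarray(joint_pos, dtype=float)
    return np.concatenate((np.sin(joint_pos), np.cos(joint_pos)))

def to_egocentric(x, y, agent_yaw):
    # Rotate global coordinates by -agent_yaw, as in get_egocentric_cord_2d.
    rot = -agent_yaw
    return (x * np.cos(rot) - y * np.sin(rot),
            x * np.sin(rot) + y * np.cos(rot))

print(wrap_joint_positions([0.0, 2 * np.pi]))  # both angles encode the same
print(to_egocentric(1.0, 0.0, np.pi / 2))      # approximately (0.0, -1.0)
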
Tgordon523/ps5_analysis | [
"019b8f956ef552624dc823583e19bcf384dcfc08"
] | [
"ps5_shortage.py"
] | [
"### Script to pull and update data tracking\n### import packages\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport requests\nimport io\nimport pathlib\nfrom bs4 import BeautifulSoup\n\n\ndef dataset_load() -> pd.DataFrame():\n \"\"\"\n Function to load and save data regarding ps5 availability history\n \"\"\"\n ### get site\n url = \"https://www.nowinstock.net/videogaming/consoles/sonyps5/full_history.php\"\n\n if isinstance(url, str):\n try:\n res = requests.get(url)\n res.raise_for_status()\n except requests.exceptions.HTTPError as e:\n print(e.response.text)\n else:\n return None\n\n if res.status_code == 200:\n r = res.text\n soup = BeautifulSoup(r)\n ### get table and load to df\n table = soup.find_all(\"table\")\n df = pd.read_html(str(table))[0]\n\n return df\n\n\nif __name__ == \"__main__\":\n data_raw = dataset_load()\n save_dataset = (\n pathlib.Path(r\"C:\\Users\\tgord\\MyPyScripts\\PS5_EDA\")\n / \"ps5_analysis\"\n / \"data\"\n / \"dataset_raw.csv\"\n )\n print(save_dataset)\n data_raw.to_csv(\n save_dataset,\n index=False,\n )\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
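
The scraping script above hands the fetched markup to pandas.read_html, which returns one DataFrame per <table> it finds. A self-contained sketch against a toy table; the markup below is invented for illustration, and a parser backend such as lxml or html5lib must be installed for read_html to work:

import pandas as pd

html = """
<table>
  <tr><th>Date / Time</th><th>Status</th></tr>
  <tr><td>Jan 05 2021 - 9:01 AM EST</td><td>In Stock</td></tr>
  <tr><td>Jan 05 2021 - 9:14 AM EST</td><td>Out of Stock</td></tr>
</table>
"""

df = pd.read_html(html)[0]  # first (and only) table in the document
print(df)
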
jakartaresearch/receipt-ocr | [
"003e067eb7d80495226ad15235fa1d626a09103e",
"003e067eb7d80495226ad15235fa1d626a09103e"
] | [
"src/text_detector/load_model.py",
"src/text_detector/modules/refinenet.py"
] | [
"import torch\nimport torch.backends.cudnn as cudnn\n\nfrom collections import OrderedDict\nfrom .modules.utils import yaml_loader, create_model_for_provider\nfrom .modules.craft import CRAFT\n\n\ndef copy_state_dict(state_dict):\n if list(state_dict.keys())[0].startswith(\"module\"):\n start_idx = 1\n else:\n start_idx = 0\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = \".\".join(k.split(\".\")[start_idx:])\n new_state_dict[name] = v\n return new_state_dict\n\n\ndef load_craft(config_file, model_pth):\n cfg = yaml_loader(config_file)\n net = CRAFT()\n\n print(\"Loading weights from checkpoint (\" + model_pth + \")\")\n if cfg[\"cuda\"]:\n net.load_state_dict(copy_state_dict(torch.load(model_pth)))\n else:\n net.load_state_dict(copy_state_dict(torch.load(model_pth, map_location=\"cpu\")))\n\n if cfg[\"cuda\"]:\n net = net.cuda()\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = False\n\n net.eval()\n return cfg, net\n\n\ndef load_craft_onnx(config_file, model_pth):\n cfg = yaml_loader(config_file)\n device = \"CUDAExecutionProvider\" if torch.cuda.is_available() else \"CPUExecutionProvider\"\n print(\"Loading weights from checkpoint (\" + model_pth + \")\")\n net = create_model_for_provider(model_pth, device)\n return cfg, net\n",
"\"\"\"\nCopyright (c) 2019-present NAVER Corp.\nMIT License\n\"\"\"\n\n# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom ..basenet.vgg16_bn import init_weights\n\n\nclass RefineNet(nn.Module):\n def __init__(self):\n super(RefineNet, self).__init__()\n\n self.last_conv = nn.Sequential(\n nn.Conv2d(34, 64, kernel_size=3, padding=1), nn.BatchNorm2d(\n 64), nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(\n 64), nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(\n 64), nn.ReLU(inplace=True)\n )\n\n self.aspp1 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, dilation=6,\n padding=6), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(\n 128), nn.ReLU(inplace=True),\n nn.Conv2d(128, 1, kernel_size=1)\n )\n\n self.aspp2 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, dilation=12,\n padding=12), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(\n 128), nn.ReLU(inplace=True),\n nn.Conv2d(128, 1, kernel_size=1)\n )\n\n self.aspp3 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, dilation=18,\n padding=18), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(\n 128), nn.ReLU(inplace=True),\n nn.Conv2d(128, 1, kernel_size=1)\n )\n\n self.aspp4 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, dilation=24,\n padding=24), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(\n 128), nn.ReLU(inplace=True),\n nn.Conv2d(128, 1, kernel_size=1)\n )\n\n init_weights(self.last_conv.modules())\n init_weights(self.aspp1.modules())\n init_weights(self.aspp2.modules())\n init_weights(self.aspp3.modules())\n init_weights(self.aspp4.modules())\n\n def forward(self, y, upconv4):\n refine = torch.cat([y.permute(0, 3, 1, 2), upconv4], dim=1)\n refine = self.last_conv(refine)\n\n aspp1 = self.aspp1(refine)\n aspp2 = self.aspp2(refine)\n aspp3 = self.aspp3(refine)\n aspp4 = self.aspp4(refine)\n\n # out = torch.add([aspp1, aspp2, aspp3, aspp4], dim=1)\n out = aspp1 + aspp2 + aspp3 + aspp4\n return out.permute(0, 2, 3, 1) # , refine.permute(0,2,3,1)\n"
] | [
[
"torch.nn.DataParallel",
"torch.cuda.is_available",
"torch.load"
],
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MannyKayy/DeepSpeed | [
"67821f95e4ee04f65965eac4ecc1ffacab4302e6"
] | [
"deepspeed/pt/deepspeed_timer.py"
] | [
"'''\nCopyright 2019 The Microsoft DeepSpeed Team\n'''\n\nimport time\nimport psutil\nimport torch\n\nfrom deepspeed.pt.log_utils import logger\n\n\ndef print_rank_0(message):\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n logger.info(message)\n else:\n logger.info(message)\n\n\nclass SynchronizedWallClockTimer:\n \"\"\"Group of timers. Borrowed from Nvidia Megatron code\"\"\"\n class Timer:\n \"\"\"Timer.\"\"\"\n def __init__(self, name):\n self.name_ = name\n self.elapsed_ = 0.0\n self.started_ = False\n self.start_time = time.time()\n\n def start(self):\n \"\"\"Start the timer.\"\"\"\n assert not self.started_, 'timer has already been started'\n torch.cuda.synchronize()\n self.start_time = time.time()\n self.started_ = True\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n assert self.started_, 'timer is not started'\n torch.cuda.synchronize()\n self.elapsed_ += (time.time() - self.start_time)\n self.started_ = False\n\n def reset(self):\n \"\"\"Reset timer.\"\"\"\n self.elapsed_ = 0.0\n self.started_ = False\n\n def elapsed(self, reset=True):\n \"\"\"Calculate the elapsed time.\"\"\"\n started_ = self.started_\n # If the timing in progress, end it first.\n if self.started_:\n self.stop()\n # Get the elapsed time.\n elapsed_ = self.elapsed_\n # Reset the elapsed time\n if reset:\n self.reset()\n # If timing was in progress, set it back.\n if started_:\n self.start()\n return elapsed_\n\n def __init__(self):\n self.timers = {}\n\n def __call__(self, name):\n if name not in self.timers:\n self.timers[name] = self.Timer(name)\n return self.timers[name]\n\n @staticmethod\n def memory_usage():\n alloc = \"mem_allocated: {:.4f} GB\".format(torch.cuda.memory_allocated() /\n (1024 * 1024 * 1024))\n max_alloc = \"max_mem_allocated: {:.4f} GB\".format(\n torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024))\n cache = \"cache_allocated: {:.4f} GB\".format(torch.cuda.memory_cached() /\n (1024 * 1024 * 1024))\n max_cache = \"max_cache_allocated: {:.4f} GB\".format(\n torch.cuda.max_memory_cached() / (1024 * 1024 * 1024))\n return \" | {} | {} | {} | {}\".format(alloc, max_alloc, cache, max_cache)\n\n def log(self, names, normalizer=1.0, reset=True, memory_breakdown=False):\n \"\"\"Log a group of timers.\"\"\"\n assert normalizer > 0.0\n string = 'time (ms)'\n for name in names:\n elapsed_time = self.timers[name].elapsed(reset=reset) * 1000.0 / normalizer\n string += ' | {}: {:.2f}'.format(name, elapsed_time)\n if memory_breakdown:\n string += self.memory_usage()\n print_rank_0(string)\n\n\nclass ThroughputTimer():\n def __init__(self,\n batch_size,\n num_workers,\n start_step=2,\n steps_per_output=50,\n monitor_memory=True,\n logging_fn=None):\n self.start_time = 0\n self.end_time = 0\n self.started = False\n self.batch_size = batch_size\n if batch_size is None:\n self.batch_size = 1\n self.num_workers = num_workers\n self.start_step = start_step\n self.epoch_count = 0\n self.local_step_count = 0\n self.total_step_count = 0\n self.total_elapsed_time = 0\n self.steps_per_output = steps_per_output\n self.monitor_memory = monitor_memory\n self.logging = logging_fn\n if self.logging is None:\n self.logging = logger.info\n self.initialized = False\n\n def update_epoch_count(self):\n self.epoch_count += 1\n self.local_step_count = 0\n\n def _init_timer(self):\n self.initialized = True\n\n def start(self):\n self._init_timer()\n self.started = True\n if self.total_step_count >= self.start_step:\n torch.cuda.synchronize()\n self.start_time = time.time()\n\n def 
stop(self, report_speed=True):\n        if not self.started:\n            return\n        self.started = False\n        self.total_step_count += 1\n        self.local_step_count += 1\n        if self.total_step_count > self.start_step:\n            torch.cuda.synchronize()\n            self.end_time = time.time()\n            duration = self.end_time - self.start_time\n            self.total_elapsed_time += duration\n            if self.local_step_count % self.steps_per_output == 0:\n                if report_speed:\n                    self.logging(\"{}/{}, SamplesPerSec={}\".format(\n                        self.epoch_count,\n                        self.local_step_count,\n                        self.avg_samples_per_sec()))\n                if self.monitor_memory:\n                    virt_mem = psutil.virtual_memory()\n                    swap = psutil.swap_memory()\n                    self.logging(\"{}/{}, vm percent: {}, swap percent: {}\".format(\n                        self.epoch_count,\n                        self.local_step_count,\n                        virt_mem.percent,\n                        swap.percent))\n\n    def avg_samples_per_sec(self):\n        # only meaningful once timing has started, i.e. after start_step steps\n        if self.total_step_count > self.start_step:\n            samples_per_step = self.batch_size * self.num_workers\n            total_step_offset = self.total_step_count - self.start_step\n            avg_time_per_step = self.total_elapsed_time / total_step_offset\n            # training samples per second\n            return samples_per_step / avg_time_per_step\n        return float(\"-inf\")\n"
] | [
[
"torch.cuda.synchronize",
"torch.distributed.is_initialized",
"torch.cuda.max_memory_allocated",
"torch.cuda.max_memory_cached",
"torch.distributed.get_rank",
"torch.cuda.memory_allocated",
"torch.cuda.memory_cached"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lccatala/tfg_ros | [
"d8da2bc6b1e0036e34460d174e708764a3c6f4ca"
] | [
"tfg/src/pytorch_segmentation/utils/helpers.py"
] | [
"import os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport math\nimport PIL\nimport matplotlib.pyplot as plt\n\ndef show_images(images, in_row=True):\n '''\n Helper function to show 3 images\n '''\n total_images = len(images)\n\n rc_tuple = (1, total_images)\n if not in_row:\n rc_tuple = (total_images, 1)\n \n\t#figure = plt.figure(figsize=(20, 10))\n for ii in range(len(images)):\n plt.subplot(*rc_tuple, ii+1)\n plt.title(images[ii][0])\n plt.axis('off')\n plt.imshow(images[ii][1])\n plt.show()\n\ndef dir_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef initialize_weights(*models):\n for model in models:\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1.)\n m.bias.data.fill_(1e-4)\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0.0, 0.0001)\n m.bias.data.zero_()\n\ndef get_upsampling_weight(in_channels, out_channels, kernel_size):\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)\n weight[list(range(in_channels)), list(range(out_channels)), :, :] = filt\n return torch.from_numpy(weight).float()\n\ndef colorize_mask(mask, palette):\n zero_pad = 256 * 3 - len(palette)\n for i in range(zero_pad):\n palette.append(0)\n new_mask = PIL.Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n return new_mask\n\ndef set_trainable_attr(m,b):\n m.trainable = b\n for p in m.parameters(): p.requires_grad = b\n\ndef apply_leaf(m, f):\n c = m if isinstance(m, (list, tuple)) else list(m.children())\n if isinstance(m, nn.Module):\n f(m)\n if len(c)>0:\n for l in c:\n apply_leaf(l,f)\n\ndef set_trainable(l, b):\n apply_leaf(l, lambda m: set_trainable_attr(m,b))"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"torch.from_numpy",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"numpy.zeros",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ray-Young/Machine_Learning_Study | [
"f665d4b9fe7c1c1f722c76e91a5b3f99b2b8a19c"
] | [
"K-Nearest-Neighbor/bk/process.py"
] | [
"import numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom decimal import Decimal\nimport csv\n\ndef processFile(inFile):\n lst = []\n with open(inFile) as f:\n for line in f:\n tmp = line.strip().split(\",\")\n lst.append(tmp)\n # sums = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n # count = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n \n arr = []\n for j in range(len(lst[0])):\n tmp = []\n for i in range(len(lst)): #get list without \"?\"\n if lst[i][j]!='?':\n tmp.append(lst[i][j])\n arr.append(tmp)\n #print(arr)\n \n \n median = []\n for l in arr:\n l.sort() #find median, then assign to \"?\" value\n m = l[int(len(l)/2)]\n median.append(m)\n #print(median)\n \n newlst = []\n for i in range(len(lst)):\n tmp = []\n for j in range(len(lst[0])):\n if lst[i][j]!='?':\n tmp.append(lst[i][j])\n else:\n tmp.append(median[j])\n newlst.append(tmp)\n #print(newlst)\n \n #newlst2 = []\n #std = []\n #for j in range(len(lst[0])):\n # temp = []\n # for i in range(len(lst)):\n # temp.append(newlst[i][j])\n # newlst2.append(temp)\n #std.append(np.std(temp))\n #print(newlst2)\n #print(std)\n \n #for l in newlst2:\n # np.mean(l)\n \n #vectorizer = TfidfVectorizer(stop_words='english', min_df=10,max_df=0.8)\n #dtm = vectorizer.fit_transform(newlst) \n #print(dtm)\n cat = []\n for i in range(len(newlst[0])):\n tmp = []\n cat.append(tmp)\n #print(cat)\n notDigital = [0,3,4,5,6,8,9,11,12]\n for i in range(len(newlst)):\n for j in range(len(newlst[0])):\n x = newlst[i][j]\n if j in notDigital:\n if x not in cat[j]:\n cat[j].append(x)\n \n \n # newlst2 will make all attributes become digital numbers\n newlst2=[]\n for i in range(len(newlst)):\n tmp = []\n for j in range(len(newlst[0])):\n x = newlst[i][j]\n if j in notDigital:\n tmp.append(cat[j].index(x))\n else:\n tmp.append(x)\n newlst2.append(tmp)\n #print(newlst2)\n \n std = []\n average = []\n \n for j in range (len(newlst2[0])-1):\n tmp = []\n for i in range (len(newlst2)):\n tmp.append(float(newlst2[i][j]))\n std.append(np.std(tmp))\n average.append(np.average(tmp))\n #print(std)\n #print(average)\n \n normalize = []\n for i in range(len(newlst2)):\n tmp = []\n for j in range(len(newlst2[0])):\n if(j == len(newlst2[0])-1):\n if(newlst2[i][j] == '+'):\n tmp.append(1)\n else:\n tmp.append(2)\n else:\n x = float(newlst2[i][j])\n z = (x-average[j])/std[j]\n tmp.append(z)\n normalize.append(tmp)\n #print(normalize)\n \n # int_normalize = []\n # for i in range(len(normalize)):\n # tmp = []\n # for j in range(len(normalize[0])):\n # s = normalize[i][j]\n # x = int(s*100)\n # tmp.append(x)\n # int_normalize.append(tmp)\n\n\n\n if(inFile == 'crx.data.training'):\n with open(\"crx.training.processed\",'a') as f:\n datawriter = csv.writer(f, delimiter= ',')\n for line in normalize:\n datawriter.writerow(line)\n if(inFile == 'crx.data.testing'):\n with open(\"crx.testing.processed\",'a') as f:\n datawriter = csv.writer(f, delimiter= ',')\n for line in normalize:\n datawriter.writerow(line)\n\n\n # test = [0,1,2,3]\n # std = np.std(test)\n # average = np.average(test)\n # print((test[3]-average)/std)\n\ndef run(infile1, infile2):\n processFile(infile1)\n \n processFile(infile2)\n "
] | [
[
"numpy.std",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
franklili3/pyfolio | [
"a63245b768e9b90154f31ca1a7a1a2472caafbfd"
] | [
"pyfolio/interesting_periods.py"
] | [
"#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generates a list of historical event dates that may have had\nsignificant impact on markets. See extract_interesting_date_ranges.\"\"\"\n\nimport pandas as pd\n\nfrom collections import OrderedDict\n\nPERIODS = OrderedDict()\n# Dotcom bubble\nPERIODS['Dotcom'] = (pd.Timestamp('20000310'), pd.Timestamp('20000910'))\n\n# 9/11\nPERIODS['9/11'] = (pd.Timestamp('20010911'), pd.Timestamp('20011011'))\n\n# 01/08/03 US Housing Bubble 2003\nPERIODS['US Housing'] = (\n pd.Timestamp('20030108'), pd.Timestamp('20030208'))\n\n# Market regimes\nPERIODS['Low Volatility Bull Market'] = (pd.Timestamp('20050101'),\n pd.Timestamp('20070801'))\n\n# August 2007, March and September of 2008, Q1 & Q2 2009,\nPERIODS['Fall2007'] = (pd.Timestamp('20071001'), pd.Timestamp('20081031'))\nPERIODS['Mar2008'] = (pd.Timestamp('20080301'), pd.Timestamp('20080401'))\n\n# Lehmann Brothers\nPERIODS['June2008'] = (pd.Timestamp('20080601'), pd.Timestamp('20080630'))\n\nPERIODS['Fall2009'] = (pd.Timestamp('20090801'), pd.Timestamp('20090831'))\n\nPERIODS['Fall2010'] = (\n pd.Timestamp('20100401'), pd.Timestamp('20100630'))\n\nPERIODS['2011年下跌期'] = (pd.Timestamp('20110901'),\n pd.Timestamp('20111230'))\n\nPERIODS['2012年下跌期'] = (\n pd.Timestamp('20120601'), pd.Timestamp('20121130'))\n\n\n# Market down-turn in August/Sept 2015\nPERIODS['2015年下跌期'] = (pd.Timestamp('20150601'), pd.Timestamp('20150930'))\n\nPERIODS['2016年下跌期'] = (pd.Timestamp('20160101'), pd.Timestamp('20160129'))\n\nPERIODS['2018年下跌期'] = (pd.Timestamp('20180201'), pd.Timestamp('20181228'))"
] | [
[
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gqkc/CLOSURE | [
"a0204396822ae70d91e44ecb12ae05e2e02e69d7",
"a0204396822ae70d91e44ecb12ae05e2e02e69d7"
] | [
"scripts/preprocess_questions.py",
"vr/ns_vqa/clevr_executor.py"
] | [
"#!/usr/bin/env python3\n\n# This code is released under the MIT License in association with the following paper:\n#\n# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).\n#\n# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).\n\nimport sys\nimport os\nsys.path.insert(0, os.path.abspath('.'))\n\nimport argparse\n\nimport json\nimport os\n\nimport h5py\nimport numpy as np\n\nimport vr.programs\nfrom vr.preprocess import tokenize, encode, build_vocab\n\n\n\"\"\"\nPreprocessing script for CLEVR question files.\n\"\"\"\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mode', default='prefix',\n choices=['chain', 'prefix', 'postfix'])\nparser.add_argument('--input_questions_json', required=True, action='append')\nparser.add_argument('--q_family_shift', type=int, action='append')\nparser.add_argument('--input_vocab_json', default='')\nparser.add_argument('--expand_vocab', default=0, type=int)\nparser.add_argument('--unk_threshold', default=1, type=int)\nparser.add_argument('--encode_unk', default=0, type=int)\n\nparser.add_argument('--output_h5_file', required=True)\nparser.add_argument('--output_vocab_json', default='')\n\n\ndef program_to_str(program, mode):\n converter = vr.programs.ProgramConverter()\n if mode == 'chain':\n if not converter.is_chain(program):\n return None\n return vr.programs.list_to_str(program)\n elif mode == 'prefix':\n program_prefix = converter.list_to_prefix(program)\n return vr.programs.list_to_str(program_prefix)\n elif mode == 'postfix':\n program_postfix = converter.list_to_postfix(program)\n return vr.programs.list_to_str(program_postfix)\n return None\n\n\ndef main(args):\n if (args.input_vocab_json == '') and (args.output_vocab_json == ''):\n print('Must give one of --input_vocab_json or --output_vocab_json')\n return\n\n print('Loading data from', args.input_questions_json)\n if args.q_family_shift and len(args.q_family_shift):\n if len(args.q_family_shift) != len(args.input_questions_json):\n raise ValueError(\"shift must be provided for each question file\")\n q_family_shifts = args.q_family_shift\n else:\n q_family_shifts = [0] * len(args.input_questions_json)\n questions = []\n for q_file, shift in zip(args.input_questions_json, q_family_shifts):\n print(q_file)\n with open(q_file, 'r') as f:\n more_questions = json.load(f)['questions']\n for q in more_questions:\n q['question_family_index'] += shift\n questions.extend(more_questions)\n\n # Either create the vocab or load it from disk\n if args.input_vocab_json == '' or args.expand_vocab == 1:\n print('Building vocab')\n if 'answer' in questions[0]:\n answer_token_to_idx = build_vocab(\n (q['answer'] for q in questions)\n )\n question_token_to_idx = build_vocab(\n (q['question'] for q in questions),\n min_token_count=args.unk_threshold,\n punct_to_keep=[';', ','], punct_to_remove=['?', '.']\n )\n all_program_strs = []\n for q in questions:\n if 'program' not in q:\n continue\n program_str = program_to_str(q['program'], args.mode)\n if program_str is not None:\n all_program_strs.append(program_str)\n program_token_to_idx = build_vocab(all_program_strs)\n vocab = {\n 'question_token_to_idx': question_token_to_idx,\n 'program_token_to_idx': program_token_to_idx,\n 'answer_token_to_idx': answer_token_to_idx,\n }\n def arity(name):\n if name == 'scene':\n return 0\n if 'equal' in name or name in ['union', 'intersect', 'less_than', 'greater_than']:\n return 2\n return 
1\n vocab['program_token_arity'] = {name: arity(name) for name in program_token_to_idx}\n if args.input_vocab_json != '':\n print('Loading vocab')\n if args.expand_vocab == 1:\n new_vocab = vocab\n with open(args.input_vocab_json, 'r') as f:\n vocab = json.load(f)\n if args.expand_vocab == 1:\n num_new_words = 0\n for word in new_vocab['question_token_to_idx']:\n if word not in vocab['question_token_to_idx']:\n print('Found new word %s' % word)\n idx = len(vocab['question_token_to_idx'])\n vocab['question_token_to_idx'][word] = idx\n num_new_words += 1\n print('Found %d new words' % num_new_words)\n\n if args.output_vocab_json != '':\n with open(args.output_vocab_json, 'w') as f:\n json.dump(vocab, f)\n\n # Encode all questions and programs\n print('Encoding data')\n questions_encoded = []\n programs_encoded = []\n question_families = []\n orig_idxs = []\n image_idxs = []\n answers = []\n types = []\n for orig_idx, q in enumerate(questions):\n question = q['question']\n if 'program' in q:\n types += [q['program'][-1]['function']]\n\n orig_idxs.append(orig_idx)\n image_idxs.append(q['image_index'])\n if 'question_family_index' in q:\n question_families.append(q['question_family_index'])\n question_tokens = tokenize(question,\n punct_to_keep=[';', ','],\n punct_to_remove=['?', '.'])\n question_encoded = encode(question_tokens,\n vocab['question_token_to_idx'],\n allow_unk=args.encode_unk == 1)\n questions_encoded.append(question_encoded)\n\n if 'program' in q:\n program = q['program']\n program_str = program_to_str(program, args.mode)\n program_tokens = tokenize(program_str)\n program_encoded = encode(program_tokens, vocab['program_token_to_idx'])\n programs_encoded.append(program_encoded)\n\n if 'answer' in q:\n answers.append(vocab['answer_token_to_idx'][q['answer']])\n\n # Pad encoded questions and programs\n max_question_length = max(len(x) for x in questions_encoded)\n for qe in questions_encoded:\n while len(qe) < max_question_length:\n qe.append(vocab['question_token_to_idx']['<NULL>'])\n\n if len(programs_encoded) > 0:\n max_program_length = max(len(x) for x in programs_encoded)\n for pe in programs_encoded:\n while len(pe) < max_program_length:\n pe.append(vocab['program_token_to_idx']['<NULL>'])\n\n # Create h5 file\n print('Writing output')\n questions_encoded = np.asarray(questions_encoded, dtype=np.int32)\n programs_encoded = np.asarray(programs_encoded, dtype=np.int32)\n print(questions_encoded.shape)\n print(programs_encoded.shape)\n\n mapping = {}\n for i, t in enumerate(set(types)):\n mapping[t] = i\n\n print(mapping)\n\n types_coded = []\n for t in types:\n types_coded += [mapping[t]]\n\n with h5py.File(args.output_h5_file, 'w') as f:\n f.create_dataset('questions', data=questions_encoded)\n f.create_dataset('image_idxs', data=np.asarray(image_idxs))\n f.create_dataset('orig_idxs', data=np.asarray(orig_idxs))\n\n if len(programs_encoded) > 0:\n f.create_dataset('programs', data=programs_encoded)\n if len(question_families) > 0:\n f.create_dataset('question_families', data=np.asarray(question_families))\n if len(answers) > 0:\n f.create_dataset('answers', data=np.asarray(answers))\n if len(types) > 0:\n f.create_dataset('types', data=np.asarray(types_coded))\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n main(args)\n",
"import torch\nimport random\nimport json\n\n\nCLEVR_COLORS = ['blue', 'brown', 'cyan', 'gray', 'green', 'purple', 'red', 'yellow']\nCLEVR_MATERIALS = ['rubber', 'metal']\nCLEVR_SHAPES = ['cube', 'cylinder', 'sphere']\nCLEVR_SIZES = ['large', 'small']\n\n\nCLEVR_ANSWER_CANDIDATES = {\n 'count': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10'],\n 'equal_color': ['yes', 'no'],\n 'equal_integer': ['yes', 'no'],\n 'equal_material': ['yes', 'no'],\n 'equal_shape': ['yes', 'no'],\n 'equal_size': ['yes', 'no'],\n 'exist': ['yes', 'no'],\n 'greater_than': ['yes', 'no'],\n 'less_than': ['yes', 'no'],\n 'query_color': ['blue', 'brown', 'cyan', 'gray', 'green', 'purple', 'red', 'yellow'],\n 'query_material': ['metal', 'rubber'],\n 'query_size': ['small', 'large'],\n 'query_shape': ['cube', 'cylinder', 'sphere'],\n 'same_color': ['yes', 'no'],\n 'same_material': ['yes', 'no'],\n 'same_size': ['yes', 'no'],\n 'same_shape': ['yes', 'no']\n}\n\n\nclass ClevrExecutor:\n \"\"\"Symbolic program executor for CLEVR\"\"\"\n\n def __init__(self, vocab):\n self.vocab = vocab\n self.colors = CLEVR_COLORS\n self.materials = CLEVR_MATERIALS\n self.shapes = CLEVR_SHAPES\n self.sizes = CLEVR_SIZES\n self.answer_candidates = CLEVR_ANSWER_CANDIDATES\n\n self.modules = {}\n self._register_modules()\n\n def __call__(self, scenes, programs):\n preds = []\n for i in range(programs.shape[0]):\n pred = self.run(programs[i].cpu().numpy(), scenes[i])\n preds.append(self.vocab['answer_token_to_idx'].get(pred, -1))\n return torch.LongTensor(preds)\n\n def run(self, x, scene, guess=False, debug=False):\n assert self.modules, 'Must have scene annotations and define modules first'\n\n ans, temp = None, None\n\n # Find the length of the program sequence before the '<END>' token\n length = 0\n for k in range(len(x)):\n l = len(x) - k\n if self.vocab['program_idx_to_token'][x[l-1]] == '<END>':\n length = l\n if length == 0:\n return 'error'\n\n self.exe_trace = []\n for j in range(length):\n i = length - 1 - j\n token = self.vocab['program_idx_to_token'][x[i]]\n if token == 'scene':\n if temp is not None:\n ans = 'error'\n break\n temp = ans\n ans = list(scene)\n elif token in self.modules:\n module = self.modules[token]\n if token.startswith('same') or token.startswith('relate'):\n ans = module(ans, scene)\n else:\n ans = module(ans, temp)\n if ans == 'error':\n break\n self.exe_trace.append(ans)\n if debug:\n print(token)\n print('ans:')\n self._print_debug_message(ans)\n print('temp: ')\n self._print_debug_message(temp)\n print()\n ans = str(ans)\n\n if ans == 'error' and guess:\n final_module = self.vocab['program_idx_to_token'][x[0]]\n if final_module in self.answer_candidates:\n ans = random.choice(self.answer_candidates[final_module])\n return ans\n\n def _print_debug_message(self, x):\n if type(x) == list:\n for o in x:\n print(self._object_info(o))\n elif type(x) == dict:\n print(self._object_info(x))\n else:\n print(x)\n\n def _object_info(self, obj):\n return '%s %s %s %s at %s' % (obj['size'], obj['color'], obj['material'], obj['shape'], str(obj['position']))\n\n def _register_modules(self):\n self.modules['count'] = self.count\n self.modules['equal_color'] = self.equal_color\n self.modules['equal_integer'] = self.equal_integer\n self.modules['equal_material'] = self.equal_material\n self.modules['equal_shape'] = self.equal_shape\n self.modules['equal_size'] = self.equal_size\n self.modules['exist'] = self.exist\n self.modules['filter_color[blue]'] = self.filter_blue\n self.modules['filter_color[brown]'] = 
self.filter_brown\n self.modules['filter_color[cyan]'] = self.filter_cyan\n self.modules['filter_color[gray]'] = self.filter_gray\n self.modules['filter_color[green]'] = self.filter_green\n self.modules['filter_color[purple]'] = self.filter_purple\n self.modules['filter_color[red]'] = self.filter_red\n self.modules['filter_color[yellow]'] = self.filter_yellow\n self.modules['filter_material[rubber]'] = self.filter_rubber\n self.modules['filter_material[metal]'] = self.filter_metal\n self.modules['filter_shape[cube]'] = self.filter_cube\n self.modules['filter_shape[cylinder]'] = self.filter_cylinder\n self.modules['filter_shape[sphere]'] = self.filter_sphere\n self.modules['filter_size[large]'] = self.filter_large\n self.modules['filter_size[small]'] = self.filter_small\n self.modules['greater_than'] = self.greater_than\n self.modules['less_than'] = self.less_than\n self.modules['intersect'] = self.intersect\n self.modules['query_color'] = self.query_color\n self.modules['query_material'] = self.query_material\n self.modules['query_shape'] = self.query_shape\n self.modules['query_size'] = self.query_size\n self.modules['relate[behind]'] = self.relate_behind\n self.modules['relate[front]'] = self.relate_front\n self.modules['relate[left]'] = self.relate_left\n self.modules['relate[right]'] = self.relate_right\n self.modules['same_color'] = self.same_color\n self.modules['same_material'] = self.same_material\n self.modules['same_shape'] = self.same_shape\n self.modules['same_size'] = self.same_size\n self.modules['union'] = self.union\n self.modules['unique'] = self.unique\n\n def count(self, scene, _):\n if type(scene) == list:\n return len(scene)\n return 'error'\n\n def equal_color(self, color1, color2):\n if type(color1) == str and color1 in self.colors and type(color2) == str and color2 in self.colors:\n if color1 == color2:\n return 'yes'\n else:\n return 'no'\n return 'error'\n\n def equal_integer(self, integer1, integer2):\n if type(integer1) == int and type(integer2) == int:\n if integer1 == integer2:\n return 'yes'\n else:\n return 'no'\n return 'error'\n\n def equal_material(self, material1, material2):\n if type(material1) == str and material1 in self.materials and type(material2) == str and material2 in self.materials:\n if material1 == material2:\n return 'yes'\n else:\n return 'no'\n return 'error'\n\n def equal_shape(self, shape1, shape2):\n if type(shape1) == str and shape1 in self.shapes and type(shape2) == str and shape2 in self.shapes:\n if shape1 == shape2:\n return 'yes'\n else:\n return 'no'\n return 'error'\n\n def equal_size(self, size1, size2):\n if type(size1) == str and size1 in self.sizes and type(size2) == str and size2 in self.sizes:\n if size1 == size2:\n return 'yes'\n else:\n return 'no'\n return 'error'\n\n def exist(self, scene, _):\n if type(scene) == list:\n if len(scene) != 0:\n return 'yes'\n else:\n return 'no'\n return 'error'\n\n def filter_blue(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['color'] == 'blue':\n output.append(o)\n return output\n return 'error'\n\n def filter_brown(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['color'] == 'brown':\n output.append(o)\n return output\n return 'error'\n\n def filter_cyan(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['color'] == 'cyan':\n output.append(o)\n return output\n return 'error'\n\n def filter_gray(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['color'] 
== 'gray':\n output.append(o)\n return output\n return 'error'\n\n def filter_green(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['color'] == 'green':\n output.append(o)\n return output\n return 'error'\n\n def filter_purple(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['color'] == 'purple':\n output.append(o)\n return output\n return 'error'\n\n def filter_red(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['color'] == 'red':\n output.append(o)\n return output\n return 'error'\n\n def filter_yellow(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['color'] == 'yellow':\n output.append(o)\n return output\n return 'error'\n\n def filter_rubber(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['material'] == 'rubber':\n output.append(o)\n return output\n return 'error'\n\n def filter_metal(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['material'] == 'metal':\n output.append(o)\n return output\n return 'error'\n\n def filter_cube(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['shape'] == 'cube':\n output.append(o)\n return output\n return 'error'\n\n def filter_cylinder(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['shape'] == 'cylinder':\n output.append(o)\n return output\n return 'error'\n\n def filter_sphere(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['shape'] == 'sphere':\n output.append(o)\n return output\n return 'error'\n\n def filter_large(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['size'] == 'large':\n output.append(o)\n return output\n return 'error'\n\n def filter_small(self, scene, _):\n if type(scene) == list:\n output = []\n for o in scene:\n if o['size'] == 'small':\n output.append(o)\n return output\n return 'error'\n\n def greater_than(self, integer1, integer2):\n if type(integer1) == int and type(integer2) == int:\n if integer1 > integer2:\n return 'yes'\n else:\n return 'no'\n return 'error'\n\n def less_than(self, integer1, integer2):\n if type(integer1) == int and type(integer2) == int:\n if integer1 < integer2:\n return 'yes'\n else:\n return 'no'\n return 'error'\n\n def intersect(self, scene1, scene2):\n if type(scene1) == list and type(scene2) == list:\n output = []\n for o in scene1:\n if o in scene2:\n output.append(o)\n return output\n return 'error'\n\n def query_color(self, obj, _):\n if type(obj) == dict and 'color' in obj:\n return obj['color']\n return 'error'\n\n def query_material(self, obj, _):\n if type(obj) == dict and 'material' in obj:\n return obj['material']\n return 'error'\n\n def query_shape(self, obj, _):\n if type(obj) == dict and 'shape' in obj:\n return obj['shape']\n return 'error'\n\n def query_size(self, obj, _):\n if type(obj) == dict and 'size' in obj:\n return obj['size']\n return 'error'\n\n def relate_behind(self, obj, scene):\n if type(obj) == dict and 'position' in obj and type(scene) == list:\n output = []\n for o in scene:\n if o['position'][1] < obj['position'][1]:\n output.append(o)\n return output\n return 'error'\n\n def relate_front(self, obj, scene):\n if type(obj) == dict and 'position' in obj and type(scene) == list:\n output = []\n for o in scene:\n if o['position'][1] > obj['position'][1]:\n output.append(o)\n return output\n return 'error'\n\n def relate_left(self, obj, 
scene):\n if type(obj) == dict and 'position' in obj and type(scene) == list:\n output = []\n for o in scene:\n if o['position'][0] < obj['position'][0]:\n output.append(o)\n return output\n return 'error'\n\n def relate_right(self, obj, scene):\n if type(obj) == dict and 'position' in obj and type(scene) == list:\n output = []\n for o in scene:\n if o['position'][0] > obj['position'][0]:\n output.append(o)\n return output\n return 'error'\n\n def same_color(self, obj, scene):\n if type(obj) == dict and 'color' in obj and type(scene) == list:\n output = []\n for o in scene:\n if o['color'] == obj['color'] and o['id'] != obj['id']:\n output.append(o)\n return output\n return 'error'\n\n def same_material(self, obj, scene):\n if type(obj) == dict and 'material' in obj and type(scene) == list:\n output = []\n for o in scene:\n if o['material'] == obj['material'] and o['id'] != obj['id']:\n output.append(o)\n return output\n return 'error'\n\n def same_shape(self, obj, scene):\n if type(obj) == dict and 'shape' in obj and type(scene) == list:\n output = []\n for o in scene:\n if o['shape'] == obj['shape'] and o['id'] != obj['id']:\n output.append(o)\n return output\n return 'error'\n\n def same_size(self, obj, scene):\n if type(obj) == dict and 'size' in obj and type(scene) == list:\n output = []\n for o in scene:\n if o['size'] == obj['size'] and o['id'] != obj['id']:\n output.append(o)\n return output\n return 'error'\n\n def union(self, scene1, scene2):\n if type(scene1) == list and type(scene2) == list:\n output = list(scene2)\n for o in scene1:\n if o not in scene2:\n output.append(o)\n return output\n return 'error'\n\n def unique(self, scene, _):\n if type(scene) == list and len(scene) > 0:\n return scene[0]\n return 'error'\n"
] | [
[
"numpy.asarray"
],
[
"torch.LongTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LeDuySon/Vehicle-tracking-deepsort | [
"ab03375d11d83def0452260d7071e9c1cc7406c2",
"ab03375d11d83def0452260d7071e9c1cc7406c2"
] | [
"yolov3_deepsort.py",
"detector/YOLOv3/yolo_utils.py"
] | [
"import os\nimport cv2\nimport time\nimport argparse\nimport torch\nimport warnings\nimport numpy as np\n\nfrom detector import build_detector\nfrom deep_sort import build_tracker\nfrom utils.draw import draw_boxes\nfrom utils.parser import get_config\nfrom utils.log import get_logger\nfrom utils.io import write_results\n\n\nclass VideoTracker(object):\n def __init__(self, cfg, args, video_path):\n self.cfg = cfg\n self.args = args\n self.video_path = video_path\n self.logger = get_logger(\"root\")\n self.video_name = video_path.split(\"/\")[-1].split(\".\")[0]\n use_cuda = args.use_cuda and torch.cuda.is_available()\n if not use_cuda:\n warnings.warn(\"Running in cpu mode which maybe very slow!\", UserWarning)\n\n if args.display:\n cv2.namedWindow(\"test\", cv2.WINDOW_NORMAL)\n cv2.resizeWindow(\"test\", args.display_width, args.display_height)\n\n if args.cam != -1:\n print(\"Using webcam \" + str(args.cam))\n self.vdo = cv2.VideoCapture(args.cam)\n else:\n self.vdo = cv2.VideoCapture()\n self.detector = build_detector(cfg, use_cuda=use_cuda)\n self.deepsort = build_tracker(cfg, use_cuda=use_cuda)\n self.class_names = self.detector.class_names\n print(\"Class name: \", self.class_names)\n\n def __enter__(self):\n if self.args.cam != -1:\n ret, frame = self.vdo.read()\n assert ret, \"Error: Camera error\"\n self.im_width = frame.shape[0]\n self.im_height = frame.shape[1]\n\n else:\n assert os.path.isfile(self.video_path), \"Path error\"\n self.vdo.open(self.video_path)\n self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))\n assert self.vdo.isOpened()\n\n if self.args.save_path:\n os.makedirs(self.args.save_path, exist_ok=True)\n\n # path of saved video and results\n self.save_video_path = os.path.join(self.args.save_path, self.video_name + \"_results.avi\")\n self.save_results_path = os.path.join(self.args.save_path, self.video_name + \"_results.txt\")\n\n # create video writer\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n self.writer = cv2.VideoWriter(self.save_video_path, fourcc, 20, (self.im_width, self.im_height))\n\n # logging\n self.logger.info(\"Save results to {}\".format(self.args.save_path))\n\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n if exc_type:\n print(exc_type, exc_value, exc_traceback)\n\n def run(self):\n results = []\n idx_frame = 0\n while self.vdo.grab():\n idx_frame += 1\n if idx_frame % self.args.frame_interval:\n continue\n\n start = time.time()\n _, ori_im = self.vdo.retrieve()\n im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)\n\n # do detection\n bbox_xywh, cls_conf, cls_ids = self.detector(im)\n\n # select person class\n mask = cls_ids < 7\n\n bbox_xywh = bbox_xywh[mask]\n # bbox dilation just in case bbox too small, delete this line if using a better pedestrian detector\n bbox_xywh[:, 3:] *= 1.2\n cls_conf = cls_conf[mask]\n\n # do tracking\n outputs = self.deepsort.update(bbox_xywh, cls_conf, im)\n\n # draw boxes for visualization\n if len(outputs) > 0:\n bbox_tlwh = []\n bbox_xyxy = outputs[:, :4]\n identities = outputs[:, -1]\n ori_im = draw_boxes(ori_im, bbox_xyxy, identities)\n\n for bb_xyxy in bbox_xyxy:\n bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(bb_xyxy))\n\n results.append((idx_frame - 1, bbox_tlwh, identities))\n\n end = time.time()\n\n if self.args.display:\n cv2.imshow(\"test\", ori_im)\n cv2.waitKey(1)\n\n if self.args.save_path:\n self.writer.write(ori_im)\n\n # save results\n write_results(self.save_results_path, results, 'mot')\n\n # logging\n 
self.logger.info(\"time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}\" \\\n .format(end - start, 1 / (end - start), bbox_xywh.shape[0], len(outputs)))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"VIDEO_PATH\", type=str)\n parser.add_argument(\"--config_detection\", type=str, default=\"./configs/yolov3.yaml\")\n parser.add_argument(\"--config_deepsort\", type=str, default=\"./configs/deep_sort.yaml\")\n # parser.add_argument(\"--ignore_display\", dest=\"display\", action=\"store_false\", default=True)\n parser.add_argument(\"--display\", action=\"store_true\")\n parser.add_argument(\"--frame_interval\", type=int, default=1)\n parser.add_argument(\"--display_width\", type=int, default=800)\n parser.add_argument(\"--display_height\", type=int, default=600)\n parser.add_argument(\"--save_path\", type=str, default=\"./output/\")\n parser.add_argument(\"--cpu\", dest=\"use_cuda\", action=\"store_false\", default=True)\n parser.add_argument(\"--camera\", action=\"store\", dest=\"cam\", type=int, default=\"-1\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n cfg = get_config()\n cfg.merge_from_file(args.config_detection)\n cfg.merge_from_file(args.config_deepsort)\n\n with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:\n vdo_trk.run()\n",
"import os\nimport time\nimport math\nimport torch\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport struct # get_image_size\nimport imghdr # get_image_size\n\n\ndef sigmoid(x):\n return 1.0 / (math.exp(-x) + 1.)\n\n\ndef softmax(x):\n x = torch.exp(x - torch.max(x))\n x /= x.sum()\n return x\n\n\ndef bbox_iou(box1, box2, x1y1x2y2=True):\n if x1y1x2y2:\n x1_min = min(box1[0], box2[0])\n x2_max = max(box1[2], box2[2])\n y1_min = min(box1[1], box2[1])\n y2_max = max(box1[3], box2[3])\n w1, h1 = box1[2] - box1[0], box1[3] - box1[1]\n w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n else:\n w1, h1 = box1[2], box1[3]\n w2, h2 = box2[2], box2[3]\n x1_min = min(box1[0] - w1 / 2.0, box2[0] - w2 / 2.0)\n x2_max = max(box1[0] + w1 / 2.0, box2[0] + w2 / 2.0)\n y1_min = min(box1[1] - h1 / 2.0, box2[1] - h2 / 2.0)\n y2_max = max(box1[1] + h1 / 2.0, box2[1] + h2 / 2.0)\n\n w_union = x2_max - x1_min\n h_union = y2_max - y1_min\n w_cross = w1 + w2 - w_union\n h_cross = h1 + h2 - h_union\n carea = 0\n if w_cross <= 0 or h_cross <= 0:\n return 0.0\n\n area1 = w1 * h1\n area2 = w2 * h2\n carea = w_cross * h_cross\n uarea = area1 + area2 - carea\n return float(carea / uarea)\n\n\ndef multi_bbox_ious(boxes1, boxes2, x1y1x2y2=True):\n if x1y1x2y2:\n x1_min = torch.min(boxes1[0], boxes2[0])\n x2_max = torch.max(boxes1[2], boxes2[2])\n y1_min = torch.min(boxes1[1], boxes2[1])\n y2_max = torch.max(boxes1[3], boxes2[3])\n w1, h1 = boxes1[2] - boxes1[0], boxes1[3] - boxes1[1]\n w2, h2 = boxes2[2] - boxes2[0], boxes2[3] - boxes2[1]\n else:\n w1, h1 = boxes1[2], boxes1[3]\n w2, h2 = boxes2[2], boxes2[3]\n x1_min = torch.min(boxes1[0] - w1 / 2.0, boxes2[0] - w2 / 2.0)\n x2_max = torch.max(boxes1[0] + w1 / 2.0, boxes2[0] + w2 / 2.0)\n y1_min = torch.min(boxes1[1] - h1 / 2.0, boxes2[1] - h2 / 2.0)\n y2_max = torch.max(boxes1[1] + h1 / 2.0, boxes2[1] + h2 / 2.0)\n\n w_union = x2_max - x1_min\n h_union = y2_max - y1_min\n w_cross = w1 + w2 - w_union\n h_cross = h1 + h2 - h_union\n mask = (((w_cross <= 0) + (h_cross <= 0)) > 0)\n area1 = w1 * h1\n area2 = w2 * h2\n carea = w_cross * h_cross\n carea[mask] = 0\n uarea = area1 + area2 - carea\n return carea / uarea\n\n\nfrom nms import boxes_nms\n\n\ndef post_process(boxes, num_classes, conf_thresh=0.01, nms_thresh=0.45, obj_thresh=0.3):\n batch_size = boxes.size(0)\n\n # nms\n results_boxes = []\n for batch_id in range(batch_size):\n processed_boxes = []\n for cls_id in range(num_classes):\n mask = (boxes[batch_id, :, -1] == cls_id) * (boxes[batch_id, :, 4] > obj_thresh)\n masked_boxes = boxes[batch_id, mask]\n\n keep = boxes_nms(masked_boxes[:, :4], masked_boxes[:, 5], nms_thresh)\n\n nmsed_boxes = masked_boxes[keep, :]\n\n processed_boxes.append(nmsed_boxes)\n processed_boxes = torch.cat(processed_boxes, dim=0)\n\n results_boxes.append(processed_boxes)\n\n return results_boxes\n\n\ndef xywh_to_xyxy(boxes_xywh):\n boxes_xyxy = boxes_xywh.copy()\n boxes_xyxy[:, 0] = boxes_xywh[:, 0] - boxes_xywh[:, 2] / 2.\n boxes_xyxy[:, 0] = boxes_xywh[:, 0] - boxes_xywh[:, 2] / 2.\n boxes_xyxy[:, 0] = boxes_xywh[:, 0] - boxes_xywh[:, 2] / 2.\n boxes_xyxy[:, 0] = boxes_xywh[:, 0] - boxes_xywh[:, 2] / 2.\n\n return boxes_xyxy\n\n\ndef xyxy_to_xywh(boxes_xyxy):\n if isinstance(boxes_xyxy, torch.Tensor):\n boxes_xywh = boxes_xyxy.clone()\n elif isinstance(boxes_xyxy, np.ndarray):\n boxes_xywh = boxes_xyxy.copy()\n\n boxes_xywh[:, 0] = (boxes_xyxy[:, 0] + boxes_xyxy[:, 2]) / 2.\n boxes_xywh[:, 1] = (boxes_xyxy[:, 1] + boxes_xyxy[:, 3]) / 2.\n boxes_xywh[:, 2] = boxes_xyxy[:, 2] - 
boxes_xyxy[:, 0]\n boxes_xywh[:, 3] = boxes_xyxy[:, 3] - boxes_xyxy[:, 1]\n\n return boxes_xywh\n\n\ndef nms(boxes, nms_thresh):\n if len(boxes) == 0:\n return boxes\n\n det_confs = torch.zeros(len(boxes))\n for i in range(len(boxes)):\n det_confs[i] = boxes[i][4]\n\n _, sortIds = torch.sort(det_confs, descending=True)\n out_boxes = []\n for i in range(len(boxes)):\n box_i = boxes[sortIds[i]]\n if box_i[4] > 0:\n out_boxes.append(box_i)\n for j in range(i + 1, len(boxes)):\n box_j = boxes[sortIds[j]]\n if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:\n # print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))\n box_j[4] = 0\n return out_boxes\n\n\ndef convert2cpu(gpu_matrix):\n return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)\n\n\ndef convert2cpu_long(gpu_matrix):\n return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)\n\n\ndef get_all_boxes(output, conf_thresh, num_classes, only_objectness=1, validation=False, use_cuda=True):\n # total number of inputs (batch size)\n # first element (x) for first tuple (x, anchor_mask, num_anchor)\n batchsize = output[0]['x'].data.size(0)\n\n all_boxes = []\n for i in range(len(output)):\n pred, anchors, num_anchors = output[i]['x'].data, output[i]['a'], output[i]['n'].item()\n boxes = get_region_boxes(pred, conf_thresh, num_classes, anchors, num_anchors, \\\n only_objectness=only_objectness, validation=validation, use_cuda=use_cuda)\n\n all_boxes.append(boxes)\n return torch.cat(all_boxes, dim=1)\n\n\ndef get_region_boxes(output, obj_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False,\n use_cuda=True):\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n anchors = anchors.to(device)\n anchor_step = anchors.size(0) // num_anchors\n if output.dim() == 3:\n output = output.unsqueeze(0)\n batch = output.size(0)\n assert (output.size(1) == (5 + num_classes) * num_anchors)\n h = output.size(2)\n w = output.size(3)\n cls_anchor_dim = batch * num_anchors * h * w\n\n # all_boxes = []\n output = output.view(batch * num_anchors, 5 + num_classes, h * w).transpose(0, 1).contiguous().view(5 + num_classes,\n cls_anchor_dim)\n\n grid_x = torch.linspace(0, w - 1, w).repeat(batch * num_anchors, h, 1).view(cls_anchor_dim).to(device)\n grid_y = torch.linspace(0, h - 1, h).repeat(w, 1).t().repeat(batch * num_anchors, 1, 1).view(cls_anchor_dim).to(\n device)\n ix = torch.LongTensor(range(0, 2)).to(device)\n anchor_w = anchors.view(num_anchors, anchor_step).index_select(1, ix[0]).repeat(1, batch, h * w).view(\n cls_anchor_dim)\n anchor_h = anchors.view(num_anchors, anchor_step).index_select(1, ix[1]).repeat(1, batch, h * w).view(\n cls_anchor_dim)\n\n xs, ys = torch.sigmoid(output[0]) + grid_x, torch.sigmoid(output[1]) + grid_y\n ws, hs = torch.exp(output[2]) * anchor_w.detach(), torch.exp(output[3]) * anchor_h.detach()\n det_confs = torch.sigmoid(output[4])\n\n # by ysyun, dim=1 means input is 2D or even dimension else dim=0\n cls_confs = torch.nn.Softmax(dim=1)(output[5:5 + num_classes].transpose(0, 1)).detach()\n cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)\n cls_max_confs = cls_max_confs.view(-1)\n cls_max_ids = cls_max_ids.view(-1).float()\n\n # sz_hw = h*w\n # sz_hwa = sz_hw*num_anchors\n # det_confs = convert2cpu(det_confs)\n # cls_max_confs = convert2cpu(cls_max_confs)\n # cls_max_ids = convert2cpu_long(cls_max_ids)\n # xs, ys = convert2cpu(xs), convert2cpu(ys)\n # ws, hs = convert2cpu(ws), convert2cpu(hs)\n\n cls_confs = det_confs * cls_max_confs\n\n # boxes = [xs/w, ys/h, ws/w, hs/h, 
det_confs, cls_confs, cls_max_ids]\n xs, ys, ws, hs = xs / w, ys / h, ws / w, hs / h\n x1, y1, x2, y2 = torch.clamp_min(xs - ws / 2., 0.), torch.clamp_min(ys - hs / 2., 0.), torch.clamp_max(xs + ws / 2.,\n 1.), torch.clamp_max(\n ys + hs / 2., 1.)\n boxes = [x1, y1, x2, y2, det_confs, cls_confs, cls_max_ids]\n boxes = list(map(lambda x: x.view(batch, -1), boxes))\n boxes = torch.stack(boxes, dim=2)\n\n # for b in range(batch):\n # boxes = []\n # for cy in range(h):\n # for cx in range(w):\n # for i in range(num_anchors):\n # ind = b*sz_hwa + i*sz_hw + cy*w + cx\n # det_conf = det_confs[ind]\n # if only_objectness:\n # conf = det_confs[ind]\n # else:\n # conf = det_confs[ind] * cls_max_confs[ind]\n\n # if conf > conf_thresh:\n # bcx = xs[ind]\n # bcy = ys[ind]\n # bw = ws[ind]\n # bh = hs[ind]\n # cls_max_conf = cls_max_confs[ind]\n # cls_max_id = cls_max_ids[ind]\n # box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]\n\n # boxes.append(box)\n # all_boxes.append(boxes)\n return boxes\n\n\n# def get_all_boxes(output, conf_thresh, num_classes, only_objectness=1, validation=False, use_cuda=True):\n# # total number of inputs (batch size)\n# # first element (x) for first tuple (x, anchor_mask, num_anchor)\n# tot = output[0]['x'].data.size(0)\n# all_boxes = [[] for i in range(tot)]\n# for i in range(len(output)):\n# pred, anchors, num_anchors = output[i]['x'].data, output[i]['a'], output[i]['n'].item()\n# b = get_region_boxes(pred, conf_thresh, num_classes, anchors, num_anchors, \\\n# only_objectness=only_objectness, validation=validation, use_cuda=use_cuda)\n# for t in range(tot):\n# all_boxes[t] += b[t]\n# return all_boxes\n\n# def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False, use_cuda=True):\n# device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n# anchors = anchors.to(device)\n# anchor_step = anchors.size(0)//num_anchors\n# if output.dim() == 3:\n# output = output.unsqueeze(0)\n# batch = output.size(0)\n# assert(output.size(1) == (5+num_classes)*num_anchors)\n# h = output.size(2)\n# w = output.size(3)\n# cls_anchor_dim = batch*num_anchors*h*w\n\n# t0 = time.time()\n# all_boxes = []\n# output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, cls_anchor_dim)\n\n# grid_x = torch.linspace(0, w-1, w).repeat(batch*num_anchors, h, 1).view(cls_anchor_dim).to(device)\n# grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(cls_anchor_dim).to(device)\n# ix = torch.LongTensor(range(0,2)).to(device)\n# anchor_w = anchors.view(num_anchors, anchor_step).index_select(1, ix[0]).repeat(1, batch, h*w).view(cls_anchor_dim)\n# anchor_h = anchors.view(num_anchors, anchor_step).index_select(1, ix[1]).repeat(1, batch, h*w).view(cls_anchor_dim)\n\n# xs, ys = torch.sigmoid(output[0]) + grid_x, torch.sigmoid(output[1]) + grid_y\n# ws, hs = torch.exp(output[2]) * anchor_w.detach(), torch.exp(output[3]) * anchor_h.detach()\n# det_confs = torch.sigmoid(output[4])\n\n# # by ysyun, dim=1 means input is 2D or even dimension else dim=0\n# cls_confs = torch.nn.Softmax(dim=1)(output[5:5+num_classes].transpose(0,1)).detach()\n# cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)\n# cls_max_confs = cls_max_confs.view(-1)\n# cls_max_ids = cls_max_ids.view(-1)\n# t1 = time.time()\n\n# sz_hw = h*w\n# sz_hwa = sz_hw*num_anchors\n# det_confs = convert2cpu(det_confs)\n# cls_max_confs = convert2cpu(cls_max_confs)\n# cls_max_ids = convert2cpu_long(cls_max_ids)\n# xs, 
ys = convert2cpu(xs), convert2cpu(ys)\n# ws, hs = convert2cpu(ws), convert2cpu(hs)\n# if validation:\n# cls_confs = convert2cpu(cls_confs.view(-1, num_classes))\n\n# t2 = time.time()\n# for b in range(batch):\n# boxes = []\n# for cy in range(h):\n# for cx in range(w):\n# for i in range(num_anchors):\n# ind = b*sz_hwa + i*sz_hw + cy*w + cx\n# det_conf = det_confs[ind]\n# if only_objectness:\n# conf = det_confs[ind]\n# else:\n# conf = det_confs[ind] * cls_max_confs[ind]\n\n# if conf > conf_thresh:\n# bcx = xs[ind]\n# bcy = ys[ind]\n# bw = ws[ind]\n# bh = hs[ind]\n# cls_max_conf = cls_max_confs[ind]\n# cls_max_id = cls_max_ids[ind]\n# box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]\n# if (not only_objectness) and validation:\n# for c in range(num_classes):\n# tmp_conf = cls_confs[ind][c]\n# if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:\n# box.append(tmp_conf)\n# box.append(c)\n# boxes.append(box)\n# all_boxes.append(boxes)\n# t3 = time.time()\n# if False:\n# print('---------------------------------')\n# print('matrix computation : %f' % (t1-t0))\n# print(' gpu to cpu : %f' % (t2-t1))\n# print(' boxes filter : %f' % (t3-t2))\n# print('---------------------------------')\n# return all_boxes\n\ndef plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):\n import cv2\n colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]])\n\n def get_color(c, x, max_val):\n ratio = float(x) / max_val * 5\n i = int(math.floor(ratio))\n j = int(math.ceil(ratio))\n ratio -= i\n r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]\n return int(r * 255)\n\n width = img.shape[1]\n height = img.shape[0]\n for i in range(len(boxes)):\n box = boxes[i]\n x1 = int(round((box[0] - box[2] / 2.0) * width))\n y1 = int(round((box[1] - box[3] / 2.0) * height))\n x2 = int(round((box[0] + box[2] / 2.0) * width))\n y2 = int(round((box[1] + box[3] / 2.0) * height))\n\n if color:\n rgb = color\n else:\n rgb = (255, 0, 0)\n if len(box) >= 7 and class_names:\n cls_conf = box[5]\n cls_id = box[6]\n # print('%s: %f' % (class_names[cls_id], cls_conf))\n classes = len(class_names)\n offset = cls_id * 123457 % classes\n red = get_color(2, offset, classes)\n green = get_color(1, offset, classes)\n blue = get_color(0, offset, classes)\n if color is None:\n rgb = (red, green, blue)\n img = cv2.putText(img, class_names[cls_id], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)\n img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)\n if savename:\n print(\"save plot results to %s\" % savename)\n cv2.imwrite(savename, img)\n return img\n\n\ndef plot_boxes(img, boxes, savename=None, class_names=None):\n colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]])\n\n def get_color(c, x, max_val):\n ratio = float(x) / max_val * 5\n i = int(math.floor(ratio))\n j = int(math.ceil(ratio))\n ratio -= i\n r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]\n return int(r * 255)\n\n width = img.width\n height = img.height\n draw = ImageDraw.Draw(img)\n print(\"%d box(es) is(are) found\" % len(boxes))\n for i in range(len(boxes)):\n box = boxes[i]\n x1 = (box[0] - box[2] / 2.0) * width\n y1 = (box[1] - box[3] / 2.0) * height\n x2 = (box[0] + box[2] / 2.0) * width\n y2 = (box[1] + box[3] / 2.0) * height\n\n rgb = (255, 0, 0)\n if len(box) >= 7 and class_names:\n cls_conf = box[5]\n cls_id = box[6]\n print('%s: %f' % (class_names[cls_id], cls_conf))\n classes = len(class_names)\n offset = cls_id * 123457 % classes\n red = 
get_color(2, offset, classes)\n green = get_color(1, offset, classes)\n blue = get_color(0, offset, classes)\n rgb = (red, green, blue)\n draw.text((x1, y1), class_names[cls_id], fill=rgb)\n draw.rectangle([x1, y1, x2, y2], outline=rgb)\n if savename:\n print(\"save plot results to %s\" % savename)\n img.save(savename)\n return img\n\n\ndef read_truths(lab_path):\n if not os.path.exists(lab_path):\n return np.array([])\n if os.path.getsize(lab_path):\n truths = np.loadtxt(lab_path)\n truths = truths.reshape(truths.size // 5, 5) # to avoid single truth problem\n return truths\n else:\n return np.array([])\n\n\ndef read_truths_args(lab_path, min_box_scale):\n truths = read_truths(lab_path)\n new_truths = []\n for i in range(truths.shape[0]):\n if truths[i][3] < min_box_scale:\n continue\n new_truths.append([truths[i][0], truths[i][1], truths[i][2], truths[i][3], truths[i][4]])\n return np.array(new_truths)\n\n\ndef load_class_names(namesfile):\n class_names = []\n with open(namesfile, 'r', encoding='utf8') as fp:\n lines = fp.readlines()\n for line in lines:\n class_names.append(line.strip())\n return class_names\n\n\ndef image2torch(img):\n if isinstance(img, Image.Image):\n width = img.width\n height = img.height\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))\n img = img.view(height, width, 3).transpose(0, 1).transpose(0, 2).contiguous()\n img = img.view(1, 3, height, width)\n img = img.float().div(255.0)\n elif type(img) == np.ndarray: # cv2 image\n img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)\n else:\n print(\"unknown image type\")\n exit(-1)\n return img\n\n\ndef do_detect(model, img, conf_thresh, nms_thresh, use_cuda=True):\n model.eval()\n t0 = time.time()\n img = image2torch(img)\n t1 = time.time()\n\n img = img.to(torch.device(\"cuda\" if use_cuda else \"cpu\"))\n t2 = time.time()\n\n out_boxes = model(img)\n boxes = get_all_boxes(out_boxes, conf_thresh, model.num_classes, use_cuda=use_cuda)[0]\n\n t3 = time.time()\n boxes = nms(boxes, nms_thresh)\n t4 = time.time()\n\n if False:\n print('-----------------------------------')\n print(' image to tensor : %f' % (t1 - t0))\n print(' tensor to cuda : %f' % (t2 - t1))\n print(' predict : %f' % (t3 - t2))\n print(' nms : %f' % (t4 - t3))\n print(' total : %f' % (t4 - t0))\n print('-----------------------------------')\n return boxes\n\n\ndef read_data_cfg(datacfg):\n options = dict()\n options['gpus'] = '0,1,2,3'\n options['num_workers'] = '10'\n with open(datacfg) as fp:\n lines = fp.readlines()\n\n for line in lines:\n line = line.strip()\n if line == '':\n continue\n key, value = line.split('=')\n key = key.strip()\n value = value.strip()\n options[key] = value\n return options\n\n\ndef scale_bboxes(bboxes, width, height):\n import copy\n dets = copy.deepcopy(bboxes)\n for i in range(len(dets)):\n dets[i][0] = dets[i][0] * width\n dets[i][1] = dets[i][1] * height\n dets[i][2] = dets[i][2] * width\n dets[i][3] = dets[i][3] * height\n return dets\n\n\ndef file_lines(thefilepath):\n count = 0\n thefile = open(thefilepath, 'rb')\n while True:\n buffer = thefile.read(8192 * 1024)\n if not buffer:\n break\n count += buffer.count(b'\\n')\n thefile.close()\n return count\n\n\ndef get_image_size(fname):\n \"\"\"\n Determine the image type of fhandle and return its size.\n from draco\n \"\"\"\n with open(fname, 'rb') as fhandle:\n head = fhandle.read(24)\n if len(head) != 24:\n return\n if imghdr.what(fname) == 'png':\n check = struct.unpack('>i', head[4:8])[0]\n if check != 0x0d0a1a0a:\n 
return\n width, height = struct.unpack('>ii', head[16:24])\n elif imghdr.what(fname) == 'gif':\n width, height = struct.unpack('<HH', head[6:10])\n elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':\n try:\n fhandle.seek(0) # Read 0xff next\n size = 2\n ftype = 0\n while not 0xc0 <= ftype <= 0xcf:\n fhandle.seek(size, 1)\n byte = fhandle.read(1)\n while ord(byte) == 0xff:\n byte = fhandle.read(1)\n ftype = ord(byte)\n size = struct.unpack('>H', fhandle.read(2))[0] - 2\n # We are at a SOFn block\n fhandle.seek(1, 1) # Skip `precision' byte.\n height, width = struct.unpack('>HH', fhandle.read(4))\n except Exception: # IGNORE:W0703\n return\n else:\n return\n return width, height\n\n\ndef logging(message):\n print('%s %s' % (time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()), message))\n"
] | [
[
"torch.cuda.is_available"
],
[
"torch.nn.Softmax",
"torch.sigmoid",
"torch.linspace",
"torch.max",
"torch.clamp_max",
"torch.cat",
"torch.min",
"torch.exp",
"torch.FloatTensor",
"torch.sort",
"torch.clamp_min",
"torch.device",
"numpy.array",
"numpy.loadtxt",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
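A note on the image2torch helper in the entry above: it maps an HWC uint8 array to a normalized 1x3xHxW float tensor. A minimal self-contained check of that convention, using a fabricated cv2-style image:

import numpy as np
import torch

img = np.random.randint(0, 256, (416, 416, 3), dtype=np.uint8)  # fake HWC image
t = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
assert t.shape == (1, 3, 416, 416)
assert 0.0 <= float(t.min()) and float(t.max()) <= 1.0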
GandalfSaxe/letomes | [
"5f73a4066fcf69260cb538c105acf898b22e756d"
] | [
"code/cudasim/cuda_rocketry.py"
] | [
"from orbsim.r3b_2d.simulators import run_sim\nfrom multiprocessing import Pool\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# import pygmo as pg\n# from pygmo import algorithm\nimport os\nimport sys\nfrom orbsim.r3b_2d.simulators import run_sim\n\n# from orbsim.plotting import orbitplot2d, orbitplot_non_inertial\nfrom orbsim.r3b_2d.analyticals import (\n ensure_bounds,\n random_disjoint_intervals,\n collapse_intervals,\n)\nimport time\nfrom numba import jit, njit\nimport math\nfrom math import pi\nfrom scipy.stats import rankdata\n\n# from ctypes import cdll\nfrom ctypes import *\n\ncudasim = cdll.LoadLibrary(\"./libcudasim.so\")\n\npi8 = pi / 8\npi4 = pi / 4\npi2 = pi / 2\ntau = 2 * pi\n\n\ndef evolve(psis, bounds, nIterations, nIndividuals, nJitter, maxDuration, maxSteps):\n init_sigma = 0.2 # spread\n init_alpha = 0.3 # learningrate\n sigma, alpha = init_sigma, init_alpha\n # sigma = np.ones(nIndividuals) * init_sigma\n # alpha = np.ones(nIndividuals) * init_alpha\n allscores=[]\n winners = []\n intermediate_winners = []\n bounds_list = bounds.values()\n np.random.seed(0)\n for _ in range(nIterations):\n\n \"\"\"\n make list of all paths to integrate\n \"\"\"\n jitter = []\n for _ in range(nIndividuals):\n noise = np.random.randn(nJitter, 3)\n halfway = int(noise.shape[0]/2)\n for i in range(halfway):\n noise[halfway+i] = -1*noise[i]\n jitter.append(noise)\n jitter = np.array(jitter)\n jitter = np.array([sigma * jitt for idx, jitt in enumerate(jitter)])\n jitter = jitter.reshape(nJitter, nIndividuals, 3)\n jitter[0] *= 0 # Make sure all set individuals are evaluated without jitter\n points = jitter + psis\n points = points.reshape(nIndividuals * nJitter, 3)\n for i, pt in enumerate(points):\n points[i] = ensure_bounds(pt, bounds_list)\n points = points.reshape(nJitter, nIndividuals, 3)\n successes = np.zeros(nIndividuals * nJitter, dtype=bool)\n scores = np.zeros(nIndividuals * nJitter)\n\n \"\"\"\n cudasim.integrate\n \n Input:\n nIndividuals Number of individuals (size of population)\n nJitter Number of random jitter points\n maxSteps Maximum number of steps of integration algorithm\n maxDuration Maximum t (in days) of integration algorithm\n inArray 1D input array of doubles; size is 3 x nIndividuals \n\n Output:\n successArray 1D ouput array of bools; size is 1 x nIndividuals\n scoreArray 1D ouput array of doubles; size is 1 x nIndividuals\n \n \"\"\"\n cudasim.integrate.restype = None\n cudasim.integrate.argtypes = [\n c_int,\n c_int,\n c_double,\n c_int,\n POINTER(c_double),\n POINTER(c_bool),\n POINTER(c_double),\n ]\n inArray = points.ctypes.data_as(POINTER(c_double))\n successArray = successes.ctypes.data_as(POINTER(c_bool))\n scoreArray = scores.ctypes.data_as(POINTER(c_double))\n cudasim.integrate(\n nIndividuals,\n nJitter,\n maxDuration,\n int(maxSteps),\n inArray,\n successArray,\n scoreArray,\n )\n\n print(\"successes=\", successes.sum())\n points = points.reshape(nIndividuals * nJitter, 3)\n for i, _ in enumerate(scores):\n scores[i] += points[i][2] # add burn dv\n if not successes[i]:\n scores[i] += 1\n scores[i] *= 10\n\n \"\"\"transform scores -- ranking\"\"\"\n scores = scores.reshape(nIndividuals, nJitter)\n ranked_scores = np.array(\n [rankdata(-1 * sig_eps, method=\"ordinal\") for sig_eps in scores]\n )\n for rscores in ranked_scores:\n rsum = rscores.sum()\n rscores = [\n rscore / rsum for rscore in rscores\n ] # make scores sum to 1\n # ranked_scores = -1 * ranked_scores\n\n steps = np.zeros([nIndividuals, 3])\n jitter = jitter.transpose(1, 0, 2)\n 
steps = np.array(\n [\n np.dot(ranked_scores[idx], jitter[idx]) * sigma**2 * alpha\n for idx in range(len(steps))\n ]\n )\n\n \"\"\"report winners\"\"\"\n points = points.reshape(nIndividuals, nJitter, 3)\n scores = scores.reshape(nIndividuals, nJitter)\n successes = successes.reshape(nIndividuals, nJitter)\n for idx, psi in enumerate(psis):\n allscores.append(f\"{scores[idx][0]} \")\n if successes[idx][0]:\n winners.append(str([idx, psi, scores[idx][0]]) + \"\\n\")\n for jdx, succ in enumerate(\n successes[idx][1:]\n ): # all but the first value, since the first value is the individual itself\n if succ:\n intermediate_winners.append(\n \" -- \"\n + str([idx, points[idx][jdx + 1], scores[idx][jdx + 1]])\n + \"\\n\"\n )\n allscores.append(\"\\n\")\n psis += steps\n\n scoresfile = open('cuda_moon_scores.txt', 'w')\n scoresfile.writelines(allscores)\n scoresfile.close()\n logfile = open(f\"cudaES.log\", \"w\")\n logfile.writelines(winners)\n logfile.writelines(intermediate_winners)\n logfile.close()\n\n\ndef initialize_psis(n, bounds):\n psis = [[random_disjoint_intervals(bound) for bound in bounds] for _ in range(n)]\n return psis\n\n\nif __name__ == \"__main__\":\n nIterations = 300\n nIndividuals = 1024\n nJitter = 32\n maxDuration = 100\n maxSteps = 1e7\n bounds = {\n \"pos\": np.array([[0, 1 * tau]]),\n \"ang\": np.array([[0, 1 * tau / 16], [tau / 2 - tau / 16, tau / 2]]),\n \"burn\": np.array([[3.1, 3.15]]),\n }\n psis = initialize_psis(nIndividuals, bounds.values())\n # pop.set_x(0, [-2.277654673852600, 0.047996554429844, 3.810000000000000])\n # pop.set_x(1, [-0.138042744751570, -0.144259374836607, 3.127288444444444])\n # pop.set_x(2, [-2.086814820119193, -0.000122173047640, 3.111181716545691])\n # print(pop)\n psis[0] = [4.005_530_633_326_986, 0.047_996_554_429_844, 3.810_000_000_000_000]\n evolve(psis, bounds, nIterations, nIndividuals, nJitter, maxDuration, maxSteps)\n"
] | [
[
"numpy.dot",
"numpy.random.seed",
"scipy.stats.rankdata",
"numpy.random.randn",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
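A minimal, self-contained numpy sketch of the rank-weighted evolution-strategy step used in the entry above (mirrored jitter around each candidate, rank-transformed scores, a step along the score-weighted noise). The toy cost function and all constants are stand-ins, and the scaling is simplified relative to the CUDA version:

import numpy as np
from scipy.stats import rankdata

def es_step(psi, cost, n_jitter=8, sigma=0.2, alpha=0.3):
    noise = np.random.randn(n_jitter, psi.size)
    noise[n_jitter // 2:] = -noise[:n_jitter // 2]  # mirrored sampling
    noise[0] = 0.0  # evaluate the unperturbed candidate as well
    scores = np.array([cost(psi + sigma * eps) for eps in noise])
    ranks = rankdata(-scores, method="ordinal")  # worst cost gets rank 1
    weights = ranks / ranks.sum()  # low-cost directions get the largest weights
    return psi + alpha * sigma ** 2 * (weights @ noise)

psi = np.zeros(3)
for _ in range(500):
    psi = es_step(psi, lambda p: float(np.sum((p - 1.0) ** 2)))
print(psi)  # drifts toward the minimum at [1, 1, 1]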
frankstratton/eps | [
"37e8b2f739df68db9d49e66852e294c110b8bf8a"
] | [
"test/support/python/naive_bayes.py"
] | [
"import pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn2pmml import sklearn2pmml\nfrom sklearn2pmml.decoration import ContinuousDomain, CategoricalDomain\nfrom sklearn2pmml.pipeline import PMMLPipeline\nfrom sklearn2pmml.feature_extraction.text import Splitter\nfrom sklearn_pandas import DataFrameMapper\n\ndata = pd.read_csv(\"test/support/mpg.csv\")\n\nnumeric_features = [\"displ\", \"year\", \"cyl\"]\ncategorical_features = [\"class\"]\ntext_features = []\n\nmapper = DataFrameMapper(\n [(numeric_features, [ContinuousDomain()])] +\n [([f], [CategoricalDomain(), OneHotEncoder()]) for f in categorical_features] +\n [(f, [CategoricalDomain(), CountVectorizer(tokenizer=Splitter())]) for f in text_features]\n)\n\npipeline = PMMLPipeline([\n (\"mapper\", mapper),\n (\"model\", GaussianNB())\n])\npipeline.fit(data, data[\"drv\"])\n\nsklearn2pmml(pipeline, \"test/support/python/naive_bayes.pmml\")\n\nprint(pipeline.predict(data[:10]))\n"
] | [
[
"pandas.read_csv",
"sklearn.naive_bayes.GaussianNB",
"sklearn.preprocessing.OneHotEncoder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
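The entry above wires numeric passthrough, one-hot categorical encoding, and GaussianNB into a PMML-exportable pipeline. A minimal sketch of the same preprocessing pattern in plain scikit-learn (no sklearn2pmml dependency), with a small fabricated frame standing in for mpg.csv:

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

data = pd.DataFrame({
    "displ": [1.8, 2.0, 3.5, 5.7], "year": [1999, 2008, 2008, 1999],
    "cyl": [4, 4, 6, 8], "class": ["compact", "compact", "suv", "suv"],
    "drv": ["f", "f", "4", "r"],
})
pre = ColumnTransformer(
    [("num", "passthrough", ["displ", "year", "cyl"]),
     ("cat", OneHotEncoder(handle_unknown="ignore"), ["class"])],
    sparse_threshold=0.0,  # GaussianNB needs dense input
)
model = Pipeline([("pre", pre), ("nb", GaussianNB())])
model.fit(data, data["drv"])
print(model.predict(data))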
EM-AutoML/AutoDL-Projects | [
"8ff416fe5d6cb1b310b885fe376e6f2790fbda14"
] | [
"exps/algos/R_EA.py"
] | [
"##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #\n##################################################################\n# Regularized Evolution for Image Classifier Architecture Search #\n##################################################################\nimport os, sys, time, glob, random, argparse\nimport numpy as np, collections\nfrom copy import deepcopy\nimport torch\nimport torch.nn as nn\nfrom pathlib import Path\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom config_utils import load_config, dict2config, configure2str\nfrom datasets import get_datasets, SearchDataset\nfrom procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler\nfrom utils import get_model_infos, obtain_accuracy\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom nas_201_api import NASBench201API as API\nfrom models import CellStructure, get_search_spaces\n\n\nclass Model(object):\n\n def __init__(self):\n self.arch = None\n self.accuracy = None\n \n def __str__(self):\n \"\"\"Prints a readable version of this bitstring.\"\"\"\n return '{:}'.format(self.arch)\n \n\n# This function is to mimic the training and evaluatinig procedure for a single architecture `arch`.\n# The time_cost is calculated as the total training time for a few (e.g., 12 epochs) plus the evaluation time for one epoch.\n# For use_converged_LR = True, the architecture is trained for 12 epochs, with LR being decaded from 0.1 to 0.\n# In this case, the LR schedular is converged.\n# For use_converged_LR = False, the architecture is planed to be trained for 200 epochs, but we early stop its procedure.\n# \ndef train_and_eval(arch, nas_bench, extra_info, dataname='cifar10-valid', use_converged_LR=True):\n if use_converged_LR and nas_bench is not None:\n arch_index = nas_bench.query_index_by_arch( arch )\n assert arch_index >= 0, 'can not find this arch : {:}'.format(arch)\n info = nas_bench.get_more_info(arch_index, dataname, None, True)\n valid_acc, time_cost = info['valid-accuracy'], info['train-all-time'] + info['valid-per-time']\n #_, valid_acc = info.get_metrics('cifar10-valid', 'x-valid' , 25, True) # use the validation accuracy after 25 training epochs\n elif not use_converged_LR and nas_bench is not None:\n # Please use `use_converged_LR=False` for cifar10 only.\n # It did return values for cifar100 and ImageNet16-120, but it has some potential issues. 
(Please email me for more details)\n arch_index, nepoch = nas_bench.query_index_by_arch( arch ), 25\n assert arch_index >= 0, 'can not find this arch : {:}'.format(arch)\n xoinfo = nas_bench.get_more_info(arch_index, 'cifar10-valid', None, True)\n xocost = nas_bench.get_cost_info(arch_index, 'cifar10-valid', False)\n info = nas_bench.get_more_info(arch_index, dataname, nepoch, False, True) # use the validation accuracy after 25 training epochs, which is used in our ICLR submission (not the camera ready).\n cost = nas_bench.get_cost_info(arch_index, dataname, False)\n # The following codes are used to estimate the time cost.\n # When we build NAS-Bench-201, architectures are trained on different machines and we can not use that time record.\n # When we create checkpoints for converged_LR, we run all experiments on 1080Ti, and thus the time for each architecture can be fairly compared.\n nums = {'ImageNet16-120-train': 151700, 'ImageNet16-120-valid': 3000,\n 'cifar10-valid-train' : 25000, 'cifar10-valid-valid' : 25000,\n 'cifar100-train' : 50000, 'cifar100-valid' : 5000}\n estimated_train_cost = xoinfo['train-per-time'] / nums['cifar10-valid-train'] * nums['{:}-train'.format(dataname)] / xocost['latency'] * cost['latency'] * nepoch\n estimated_valid_cost = xoinfo['valid-per-time'] / nums['cifar10-valid-valid'] * nums['{:}-valid'.format(dataname)] / xocost['latency'] * cost['latency']\n try:\n valid_acc, time_cost = info['valid-accuracy'], estimated_train_cost + estimated_valid_cost\n except:\n valid_acc, time_cost = info['est-valid-accuracy'], estimated_train_cost + estimated_valid_cost\n else:\n # train a model from scratch.\n raise ValueError('NOT IMPLEMENT YET')\n return valid_acc, time_cost\n\n\ndef random_architecture_func(max_nodes, op_names):\n # return a random architecture\n def random_architecture():\n genotypes = []\n for i in range(1, max_nodes):\n xlist = []\n for j in range(i):\n node_str = '{:}<-{:}'.format(i, j)\n op_name = random.choice( op_names )\n xlist.append((op_name, j))\n genotypes.append( tuple(xlist) )\n return CellStructure( genotypes )\n return random_architecture\n\n\ndef mutate_arch_func(op_names):\n \"\"\"Computes the architecture for a child of the given parent architecture.\n The parent architecture is cloned and mutated to produce the child architecture. The child architecture is mutated by randomly switch one operation to another.\n \"\"\"\n def mutate_arch_func(parent_arch):\n child_arch = deepcopy( parent_arch )\n node_id = random.randint(0, len(child_arch.nodes)-1)\n node_info = list( child_arch.nodes[node_id] )\n snode_id = random.randint(0, len(node_info)-1)\n xop = random.choice( op_names )\n while xop == node_info[snode_id][0]:\n xop = random.choice( op_names )\n node_info[snode_id] = (xop, node_info[snode_id][1])\n child_arch.nodes[node_id] = tuple( node_info )\n return child_arch\n return mutate_arch_func\n\n\ndef regularized_evolution(cycles, population_size, sample_size, time_budget, random_arch, mutate_arch, nas_bench, extra_info, dataname):\n \"\"\"Algorithm for regularized evolution (i.e. aging evolution).\n \n Follows \"Algorithm 1\" in Real et al. 
\"Regularized Evolution for Image\n Classifier Architecture Search\".\n \n Args:\n cycles: the number of cycles the algorithm should run for.\n population_size: the number of individuals to keep in the population.\n sample_size: the number of individuals that should participate in each tournament.\n time_budget: the upper bound of searching cost\n\n Returns:\n history: a list of `Model` instances, representing all the models computed\n during the evolution experiment.\n \"\"\"\n population = collections.deque()\n history, total_time_cost = [], 0 # Not used by the algorithm, only used to report results.\n\n # Initialize the population with random models.\n while len(population) < population_size:\n model = Model()\n model.arch = random_arch()\n model.accuracy, time_cost = train_and_eval(model.arch, nas_bench, extra_info)\n population.append(model)\n history.append(model)\n total_time_cost += time_cost\n\n # Carry out evolution in cycles. Each cycle produces a model and removes\n # another.\n #while len(history) < cycles:\n while total_time_cost < time_budget:\n # Sample randomly chosen models from the current population.\n start_time, sample = time.time(), []\n while len(sample) < sample_size:\n # Inefficient, but written this way for clarity. In the case of neural\n # nets, the efficiency of this line is irrelevant because training neural\n # nets is the rate-determining step.\n candidate = random.choice(list(population))\n sample.append(candidate)\n\n # The parent is the best model in the sample.\n parent = max(sample, key=lambda i: i.accuracy)\n\n # Create the child model and store it.\n child = Model()\n child.arch = mutate_arch(parent.arch)\n total_time_cost += time.time() - start_time\n child.accuracy, time_cost = train_and_eval(child.arch, nas_bench, extra_info)\n if total_time_cost + time_cost > time_budget: # return\n return history, total_time_cost\n else:\n total_time_cost += time_cost\n population.append(child)\n history.append(child)\n\n # Remove the oldest model.\n population.popleft()\n return history, total_time_cost\n\n\ndef main(xargs, nas_bench):\n assert torch.cuda.is_available(), 'CUDA is not available.'\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.set_num_threads( xargs.workers )\n prepare_seed(xargs.rand_seed)\n logger = prepare_logger(args)\n\n assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'\n if xargs.dataset == 'cifar10':\n dataname = 'cifar10-valid'\n else:\n dataname = xargs.dataset\n if xargs.data_path is not None:\n train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)\n split_Fpath = 'configs/nas-benchmark/cifar-split.txt'\n cifar_split = load_config(split_Fpath, None, None)\n train_split, valid_split = cifar_split.train, cifar_split.valid\n logger.log('Load split file from {:}'.format(split_Fpath))\n config_path = 'configs/nas-benchmark/algos/R-EA.config'\n config = load_config(config_path, {'class_num': class_num, 'xshape': xshape}, logger)\n # To split data\n train_data_v2 = deepcopy(train_data)\n train_data_v2.transform = valid_data.transform\n valid_data = train_data_v2\n search_data = SearchDataset(xargs.dataset, train_data, train_split, valid_split)\n # data loader\n train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split) , num_workers=xargs.workers, pin_memory=True)\n valid_loader = 
torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split), num_workers=xargs.workers, pin_memory=True)\n logger.log('||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(train_loader), len(valid_loader), config.batch_size))\n logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))\n extra_info = {'config': config, 'train_loader': train_loader, 'valid_loader': valid_loader}\n else:\n config_path = 'configs/nas-benchmark/algos/R-EA.config'\n config = load_config(config_path, None, logger)\n logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))\n extra_info = {'config': config, 'train_loader': None, 'valid_loader': None}\n\n search_space = get_search_spaces('cell', xargs.search_space_name)\n random_arch = random_architecture_func(xargs.max_nodes, search_space)\n mutate_arch = mutate_arch_func(search_space)\n #x =random_arch() ; y = mutate_arch(x)\n x_start_time = time.time()\n logger.log('{:} use nas_bench : {:}'.format(time_string(), nas_bench))\n logger.log('-'*30 + ' start searching with the time budget of {:} s'.format(xargs.time_budget))\n history, total_cost = regularized_evolution(xargs.ea_cycles, xargs.ea_population, xargs.ea_sample_size, xargs.time_budget, random_arch, mutate_arch, nas_bench if args.ea_fast_by_api else None, extra_info, dataname)\n logger.log('{:} regularized_evolution finish with history of {:} arch with {:.1f} s (real-cost={:.2f} s).'.format(time_string(), len(history), total_cost, time.time()-x_start_time))\n best_arch = max(history, key=lambda i: i.accuracy)\n best_arch = best_arch.arch\n logger.log('{:} best arch is {:}'.format(time_string(), best_arch))\n \n info = nas_bench.query_by_arch( best_arch )\n if info is None: logger.log('Did not find this architecture : {:}.'.format(best_arch))\n else : logger.log('{:}'.format(info))\n logger.log('-'*100)\n logger.close()\n return logger.log_dir, nas_bench.query_index_by_arch( best_arch )\n \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"Regularized Evolution Algorithm\")\n parser.add_argument('--data_path', type=str, help='Path to dataset')\n parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')\n # channels and number-of-cells\n parser.add_argument('--search_space_name', type=str, help='The search space name.')\n parser.add_argument('--max_nodes', type=int, help='The maximum number of nodes.')\n parser.add_argument('--channel', type=int, help='The number of channels.')\n parser.add_argument('--num_cells', type=int, help='The number of cells in one stage.')\n parser.add_argument('--ea_cycles', type=int, help='The number of cycles in EA.')\n parser.add_argument('--ea_population', type=int, help='The population size in EA.')\n parser.add_argument('--ea_sample_size', type=int, help='The sample size in EA.')\n parser.add_argument('--ea_fast_by_api', type=int, help='Use our API to speed up the experiments or not.')\n parser.add_argument('--time_budget', type=int, help='The total time cost budge for searching (in seconds).')\n # log\n parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\n parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.')\n parser.add_argument('--arch_nas_dataset', type=str, help='The path to load the architecture dataset 
(tiny-nas-benchmark).')\n parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')\n parser.add_argument('--rand_seed', type=int, default=-1, help='manual seed')\n args = parser.parse_args()\n #if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)\n args.ea_fast_by_api = args.ea_fast_by_api > 0\n\n if args.arch_nas_dataset is None or not os.path.isfile(args.arch_nas_dataset):\n nas_bench = None\n else:\n print ('{:} build NAS-Benchmark-API from {:}'.format(time_string(), args.arch_nas_dataset))\n nas_bench = API(args.arch_nas_dataset)\n if args.rand_seed < 0:\n save_dir, all_indexes, num = None, [], 500\n for i in range(num):\n print ('{:} : {:03d}/{:03d}'.format(time_string(), i, num))\n args.rand_seed = random.randint(1, 100000)\n save_dir, index = main(args, nas_bench)\n all_indexes.append( index )\n torch.save(all_indexes, save_dir / 'results.pth')\n else:\n main(args, nas_bench)\n"
] | [
[
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.set_num_threads",
"torch.cuda.is_available",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
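A self-contained toy version of the aging-evolution loop in the entry above, searching over bit-strings instead of cell architectures; the fitness function stands in for train_and_eval:

import collections
import random

def fitness(arch):  # stand-in for train_and_eval
    return sum(arch)

def mutate(arch):
    child = list(arch)
    i = random.randrange(len(child))
    child[i] ^= 1  # flip one randomly chosen "operation"
    return child

population, history = collections.deque(), []
while len(population) < 10:  # initialize with random models
    arch = [random.randint(0, 1) for _ in range(16)]
    population.append(arch); history.append(arch)
while len(history) < 200:  # evolve in cycles
    sample = [random.choice(list(population)) for _ in range(3)]
    parent = max(sample, key=fitness)  # tournament selection
    child = mutate(parent)
    population.append(child); history.append(child)
    population.popleft()  # age out the oldest model
print(max(history, key=fitness))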
VuongLong/DANCE_W | [
"8a7dc39a16908bb4726ed57049c6a7d6698a76bc"
] | [
"models/algorithms.py"
] | [
"import torch\nfrom torch.autograd import Function\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef op_copy(optimizer):\n for param_group in optimizer.param_groups:\n param_group['lr0'] = param_group['lr']\n return optimizer\n\ndef lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):\n decay = (1 + gamma * iter_num / max_iter) ** (-power)\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr0'] * decay\n param_group['weight_decay'] = 1e-3\n param_group['momentum'] = 0.9\n param_group['nesterov'] = True\n return optimizer\n \n\ndef init_weights(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:\n nn.init.kaiming_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.zeros_(m.bias)\n elif classname.find('Linear') != -1:\n nn.init.xavier_normal_(m.weight)\n nn.init.zeros_(m.bias)\n\n\nclass KanNet(nn.Module):\n def __init__(self, output=1, bottleneck_dim=256, type=\"linear\"):\n super(KanNet, self).__init__()\n self.type = type\n if type == 'wn':\n self.fc = weightNorm(nn.Linear(bottleneck_dim, output), name=\"weight\")\n self.fc.apply(init_weights)\n else:\n self.fc = nn.Linear(bottleneck_dim, output)\n self.fc.apply(init_weights)\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\n def get_weight(self):\n return self.fc.weight\n\n def get_bias(self):\n return self.fc.bias"
] | [
[
"torch.nn.init.xavier_normal_",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.init.zeros_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
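A quick smoke test for the KanNet head defined in the entry above; it assumes the class from models/algorithms.py is in scope, and the batch of bottleneck features is fabricated:

import torch

net = KanNet(output=1, bottleneck_dim=256)  # default 'linear' head
x = torch.randn(4, 256)
print(net(x).shape)            # torch.Size([4, 1])
print(net.get_weight().shape)  # torch.Size([1, 256])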
hoxmark/Deep_reinforcement_active_learning | [
"7458916d6f75c7fbfcfd4bc81763ab5ba16208ad"
] | [
"selection_strategies/download_graphs/cnn/cnn_sim_umich_n_del.py"
] | [
"from pprint import pprint\nfrom visdom import Visdom\nimport pathlib\nimport json\nimport sys\nimport matplotlib.pyplot as plt\n\ndef download_env(env):\n vis = Visdom('http://logserver.duckdns.org', port=5010)\n data = vis.get_window_data(env=env)\n d = json.loads(data)\n \n n_deleted = []\n test_acc_avg = []\n \n for key in d:\n try:\n #1 for MR 0 for UMICH\n x = list(d[key][\"content\"][\"data\"][1][\"x\"])\n y = list(d[key][\"content\"][\"data\"][1][\"y\"]) \n if 'n-deleted' in key:\n n_deleted = (x,y)\n\n #1 for MR 0 for UMICH\n x = list(d[key][\"content\"][\"data\"][1][\"x\"])\n y = list(d[key][\"content\"][\"data\"][1][\"y\"]) \n if 'test-acc-avg' in key:\n test_acc_avg = (x,y)\n except:\n pass\n\n\n return n_deleted, test_acc_avg\n\nif __name__ == \"__main__\":\n\n source = [ \"SS_bjornhox_11-07-18_14:22_UMICH_cnn_sim_0.08_28ef\",\n \"SS_bjornhox_11-07-18_14:34_UMICH_cnn_sim_0.12_2366\",\n \"SS_bjornhox_11-07-18_14:34_UMICH_cnn_sim_0.14_2f39\"]\n\n legend = [\"0.08\", \"0.12\", \"0.14\"]\n path = './results/'\n\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n\n n_deleted = []\n test_acc_avg = []\n \n \n # for i in sys.argv[1:]:\n # legend.append(i.split(\"_\")[7])\n # legend.append(i.split(\"_\")[6])\n # legend.append(i.split(\"_\")[8])\n\n for i in range(0, len(source)): \n env = source[i]\n res1, res2 = download_env(env)\n n_deleted.append(res1)\n test_acc_avg.append(res2)\n\n plt.figure(1)\n plt.axis([0,250,0,1100])\n plt.subplot(111)\n\n plt.xlabel(\"Amount of labeled data\")\n plt.ylabel(\"Number of deleted samples\")\n\n new_plot = []\n\n for i in range(0,len(n_deleted)):\n # print(test_acc_avg[i])\n # print(n_deleted[i])\n # # new = (test_acc_avg[i][0][0:8], n_deleted[i][1][0:8])\n new = (test_acc_avg[i][0][0:15], n_deleted[i][1][0:15])\n\n new[0].insert(0,0) \n new[1].insert(0,0)\n new_plot.append(new)\n # print(new)\n # print(\"---\")\n # quit()\n \n \n\n plt.plot(*new_plot[0], dashes=[4, 2], color='#9467bd')\n plt.plot(*new_plot[1], color='#1f77b4')\n plt.plot(*new_plot[2], dashes=[6, 2], color='#17becf')\n\n plt.legend(legend,\n loc='center right')\n plt.savefig('results/CNN_UMICH_N_DEL.png' , dpi=600)\n plt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lauritowal/plainFDM_public | [
"3d47352e5aad15b0c632488b048b470d0528c652"
] | [
"plain_fdm/lfz_controlFDM/pid_regler.py"
] | [
"import numpy as np\nimport logging\n\nfrom plain_fdm.servicesFDM.utils import Utils\n\nlogging.basicConfig(filename='pid_regler.log', filemode='w', level=logging.DEBUG)\n\n\n# siehe auch Drohne-Code Unity\nclass PidRegler(object):\n\n def __init__(self):\n self.utils = Utils()\n self.kpaElevator = 110\n self.kdaElevator = 4.5\n self.kpaAileron = 100.0\n self.kdaAileron = 10\n\n \n # Headline: Aileron -->\n def get_aileron_command(self, heading_reference, heading, roll_angle, roll_angle_rate, delta_aileron):\n roll_angle_reference = self._outerLoopAileron(heading_reference, heading)\n return self._innerLoopAileron(roll_angle_reference, roll_angle, roll_angle_rate, delta_aileron)\n\n def _outerLoopAileron(self, heading_reference, heading):\n\n logging.debug(\"heading_reference: %s\", heading_reference)\n logging.debug(\"heading: %s\", heading)\n\n heading_difference = np.deg2rad(self.utils.normalize_angle(heading_reference - heading))\n logging.debug(\"heading_difference: %s (rad)\", heading_difference)\n\n logging.debug(\"heading_reference: %s (degree)\", np.rad2deg(heading_difference))\n\n heading_roll_angle_reference = heading_difference * 1.0 #Vorsteuerung als P-Regler\n return heading_roll_angle_reference\n\n # innerLoop: heading_roll->Aileron\n def _innerLoopAileron(self, roll_angle_reference, roll_angle, roll_angle_rate, delta_aileron):\n\n logging.debug(\"roll_angle_reference: %s\", roll_angle_reference)\n logging.debug(\"roll_angle: %s\", roll_angle)\n\n diff_rollAngle = roll_angle_reference - roll_angle\n\n logging.debug(\"diff_rollAngle: %s\", diff_rollAngle)\n\n #if np.rad2deg(rollAngle_Current) < -2:\n # print(\"jetzt\")\n AileronCommand = (diff_rollAngle * self.kpaAileron - roll_angle_rate * self.kdaAileron)\n AileronCommand = AileronCommand + delta_aileron\n AileronCommand = np.deg2rad(np.clip(AileronCommand, -1, 1) * (-15))\n\n logging.debug(\"AileronCommand: %s (in degrees)\", AileronCommand)\n\n return AileronCommand\n \n # Headline: Elevator\n def getElevatorCommand(self, TASReference, TASCurrent, pitchAngleCurrent, pitchAngleRateCurrent, elevatorCurrent):\n pitchAngleReference = self._outerLoopElevator(TASReference, TASCurrent)\n elevatorCommand = self._innerLoopElevator(pitchAngleReference, pitchAngleCurrent, pitchAngleRateCurrent, elevatorCurrent)\n return elevatorCommand\n\n def _outerLoopElevator(self, TASReference, TASCurrent):\n pitchAngleReference = (TASCurrent - TASReference) * 1.0 #Vorsteuerung als P-Regler\n return pitchAngleReference\n\n # innerLoop: Pitch->Elevator\n def _innerLoopElevator(self, pitchAngleReference, pitchAngleCurrent, pitchAngleRateCurrent, elevatorCurrent):\n diffPitchAngle = pitchAngleReference - pitchAngleCurrent\n elevatorCommand = np.clip(diffPitchAngle * self.kpaElevator - pitchAngleRateCurrent * self.kdaElevator, -1, 1)\n elevatorCommand = elevatorCommand + elevatorCurrent\n elevatorCommand = np.deg2rad(np.clip(elevatorCommand, -1, 1) * (-20))\n return elevatorCommand\n\n def __difference_yaw_angle(self, heading_reference, heading_current):\n # keep values between 0 and 360 degrees\n heading_reference = heading_reference % 360\n heading_current = heading_current % 360\n\n logging.debug(\"heading_reference mod 360: %s\", heading_reference)\n logging.debug(\"heading_current mod 360: %s\", heading_current)\n\n heading_difference = heading_reference - heading_current\n\n logging.debug(\"heading_difference: %s\", heading_difference)\n\n normalized = self.utils.normalize_angle(heading_difference)\n\n logging.debug(\"normalized: %s\", 
normalized)\n\n return normalized\n\n\ndef main():\n pid = PidRegler()\n print(pid.getElevatorCommand(70,60,0,0,0))\n\nif __name__ == '__main__':\n main()"
] | [
[
"numpy.rad2deg",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
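The inner loops in the PID entry above share one structure: command = error * kp - rate * kd, clipped to the actuator range. A self-contained toy loop showing that structure on a one-dimensional attitude model; the gains and the crude dynamics are made up for illustration:

import numpy as np

kp, kd, dt = 1.2, 0.4, 0.02
angle, rate, target = np.deg2rad(10.0), 0.0, 0.0
for _ in range(500):
    cmd = np.clip((target - angle) * kp - rate * kd, -1, 1)
    rate += 5.0 * cmd * dt  # crude actuator/inertia model
    angle += rate * dt
print(np.rad2deg(angle))    # settles near the 0-degree target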
xiaoyili/elasticdl | [
"93e58c42eb5e2ef14661469777d0224884d7bf1d",
"93e58c42eb5e2ef14661469777d0224884d7bf1d"
] | [
"elasticdl/python/common/model_handler.py",
"elasticdl/python/tests/lr_scheduler_test.py"
] | [
"import abc\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom elasticdl.python.common.constants import DistributionStrategy\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.common.save_utils import CheckpointSaver\nfrom elasticdl.python.elasticdl.layers.embedding import Embedding\nfrom elasticdl.python.keras.layers import SparseEmbedding\nfrom elasticdl.python.ps.embedding_table import EmbeddingTable\n\n\ndef _get_trained_params_from_checkpoint(checkpoint_dir):\n \"\"\"Get parameters from a checkpoint directory saved by ElasticDL\"\"\"\n parameters = CheckpointSaver.restore_params_from_checkpoint(\n checkpoint_dir, 0, 1\n )\n\n trained_params = parameters.non_embedding_params\n for name, table in parameters.embedding_params.items():\n # The name of variable in a tf.keras.layers.Embedding layer is\n # \"{layer_name}/embeddings:0\"\n var_name = name + \"/embeddings:0\"\n trained_params[var_name] = table\n return trained_params\n\n\ndef _convert_embedding_table_to_numpy_array(embedding_table, embedding_shape):\n \"\"\"Convert an embedding table to a np.ndarray which can be assigned\n to trainable weights in keras embedding layers.\n\n Args:\n embedding_table: A `EmbeddingTable` instance.\n embedding_shape: a tuple with two elements\n\n Returns:\n A np.ndarray\n \"\"\"\n embedding_ids = list(embedding_table.embedding_vectors.keys())\n embedding_values = list(embedding_table.embedding_vectors.values())\n embedding_weights = np.zeros(embedding_shape)\n embedding_weights[embedding_ids] = embedding_values\n return embedding_weights\n\n\ndef _need_partition_embedding(layer):\n \"\"\"The embedding layer will be partitioned on multiple\n PS instances if the memory of the layer.train_weights is\n bigger than 2MB.\n \"\"\"\n EMBEDDING_SIZE_THRESHOLD_FOR_PARTITION = 2 * 1024 * 1024 # 2MB\n FLOAT32_BYTES = 4\n weights_memory = layer.input_dim * layer.output_dim * FLOAT32_BYTES\n return weights_memory > EMBEDDING_SIZE_THRESHOLD_FOR_PARTITION\n\n\nclass ModelHandler(metaclass=abc.ABCMeta):\n \"\"\"Generate the model to train in ElasticDL for different distributed\n strategies and export trained model in ElasticDL to SavedModel.\n \"\"\"\n\n @abc.abstractmethod\n def get_model_to_train(self, model):\n \"\"\"Generate a model to train in ElasticDL.\n\n Args:\n model: A native keras model instance.\n\n Returns:\n A keras model instance for ElasticDL training.\n \"\"\"\n\n @abc.abstractmethod\n def get_model_to_export(self, model, dataset):\n \"\"\"Get the model which can be exported a SavedModel\n by tf.saved_model.save.\n\n Args:\n model: A keras model instance trained by ElasticDL and\n it may contains `elasticdl.layers.Embedding` layers.\n dataset: A `tf.data.Dataset` instance which has the same outputs as\n the training dataset.\n\n Returns:\n A keras model instance trained by ElasticDL.\n \"\"\"\n\n @classmethod\n def get_model_handler(\n cls, distribution_strategy=None, checkpoint_dir=None\n ):\n \"\"\"Create a model handler to process the model for the\n distributed strategy.\n\n Args:\n distribution_strategy (string): distribution strategy name\n checkpoint_dir: Checkpoint directory to save model parametes\n during training.\n\n Return:\n ModelHandler subclass instance.\n \"\"\"\n if distribution_strategy == DistributionStrategy.PARAMETER_SERVER:\n return ParameterServerModelHandler(checkpoint_dir=checkpoint_dir)\n elif distribution_strategy == DistributionStrategy.ALLREDUCE:\n logger.warning(\n \"Allreduce distribution strategy is not supported yet. 
\"\n \"Switching to use the default distribution strategy.\"\n )\n return DefaultModelHandler()\n\n\nclass DefaultModelHandler(ModelHandler):\n \"\"\"Return the origin model to train and export.\"\"\"\n\n def get_model_to_train(self, model):\n return model\n\n def get_model_to_export(self, model, dataset):\n \"\"\"\n Get model with inputs and trained parameters to export.\n \"\"\"\n if not model.inputs:\n model._build_model_with_inputs(inputs=dataset, targets=None)\n return model\n\n\nclass ParameterServerModelHandler(ModelHandler):\n \"\"\"Model handler for parameter server strategy.\n For training, The handler will replace `tf.keras.layers.Embedding`\n layers with`elasticdl.layers.Embedding` for training.\n For saving model, the handler will restore Keras model definition and\n pull trained parameters from parameter server(s) for the model.\n \"\"\"\n\n def __init__(self, checkpoint_dir=None):\n \"\"\"\n Arguments:\n checkpoint_dir: A checkpoint directory to save all model\n parameters during training.\n \"\"\"\n self._checkpoint_dir = checkpoint_dir\n\n def get_model_to_train(self, model):\n \"\"\"Replace the tf.keras.layers.Embedding layer in the model with\n an elasticdl.layers.Embedding layer in ParameterServerStrategy.\n \"\"\"\n if type(model) == tf.keras.Sequential or model._is_graph_network:\n model = self._clone_model_with_edl_embedding(model)\n else:\n model = self._replace_attr_with_edl_embedding(model)\n return model\n\n def get_model_to_export(self, model, dataset):\n \"\"\"Get the model which can be exported to a SavedModel by\n `tf.saved_model.save`.\n \"\"\"\n model = self._restore_keras_model_def(model)\n if not model.inputs:\n # build model to add inputs and outputs that\n # can be consumed by tf-serving\n model._build_model_with_inputs(inputs=dataset, targets=None)\n\n checkpoint_dir = CheckpointSaver.get_valid_lastest_version_dir(\n self._checkpoint_dir\n )\n if checkpoint_dir is None:\n logger.warning(\"No available checkpoint to export model\")\n return model\n\n trained_params = _get_trained_params_from_checkpoint(checkpoint_dir)\n for var in model.trainable_variables:\n if isinstance(trained_params[var.name], EmbeddingTable):\n embedding_params = _convert_embedding_table_to_numpy_array(\n trained_params[var.name], var.shape\n )\n var.assign(embedding_params)\n else:\n var.assign(trained_params[var.name].numpy())\n return model\n\n def _restore_keras_model_def(self, model):\n \"\"\"Restore Keras model definition by replacing\n `elasticdl.layers.Embedding` layers with\n `tf.keras.layers.Embedding` layers.\n \"\"\"\n # clear keras model session to avoid clutter from old models/layers.\n tf.keras.backend.clear_session()\n if (\n isinstance(model, tf.keras.models.Model)\n and not model._is_graph_network\n ):\n model = self._replace_attr_with_keras_embedding(model)\n else:\n model = self._clone_model_with_keras_embedding(model)\n return model\n\n @staticmethod\n def _clone_model_with_edl_embedding(model):\n \"\"\"Clone a new model and replace keras embedding layers including\n `tf.keras.layers.Embedding` and `SparseEmbedding` with\n `elasticdl.layers.Embedding`\n \"\"\"\n\n def _clone_function(layer):\n if type(layer) in [\n tf.keras.layers.Embedding,\n SparseEmbedding,\n ] and _need_partition_embedding(layer):\n logger.debug(\n \"Replace {} with {}\".format(layer.name, Embedding)\n )\n # ElasticDL embedding only accept a string type initializer\n init = tf.keras.initializers.serialize(\n layer.embeddings_initializer\n )[\"class_name\"]\n\n if type(layer) == 
tf.keras.layers.Embedding:\n embedding_layer = Embedding(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=init,\n mask_zero=layer.mask_zero,\n input_length=layer.input_length,\n name=layer.name,\n )\n else:\n embedding_layer = Embedding(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=init,\n name=layer.name,\n combiner=layer.combiner,\n )\n return embedding_layer\n return layer\n\n return tf.keras.models.clone_model(\n model, clone_function=_clone_function\n )\n\n @staticmethod\n def _clone_model_with_keras_embedding(model):\n \"\"\"Clone a new model and replace the `elasticdl.layers.Embedding`\n layers with `tf.keras.layers.Embedding` or `SparseEmbedding` layers\n \"\"\"\n\n def _clone_function(layer):\n if type(layer) == Embedding:\n logger.info(\n \"Replace embedding layer with \"\n \"elasticdl.layers.Embedding\"\n )\n # The combiner is not None only for SparseEmbedding,\n if layer.combiner is not None:\n embedding_layer = SparseEmbedding(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=layer.embeddings_initializer,\n name=layer.name,\n combiner=layer.combiner,\n )\n else:\n embedding_layer = tf.keras.layers.Embedding(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=layer.embeddings_initializer,\n mask_zero=layer.mask_zero,\n input_length=layer.input_length,\n name=layer.name,\n )\n return embedding_layer\n return layer\n\n return tf.keras.models.clone_model(\n model, clone_function=_clone_function\n )\n\n @staticmethod\n def _replace_attr_with_edl_embedding(model):\n \"\"\"Replace the keras embedding attributes in the model with\n `elasticdl.layers.Embedding` layers.\n \"\"\"\n for name, value in model.__dict__.items():\n if type(\n value\n ) == tf.keras.layers.Embedding and _need_partition_embedding(\n value\n ):\n logger.info(\n \"Replace {} layer with \"\n \"elasticdl.layers.Embedding\".format(value)\n )\n initializer_name = tf.keras.initializers.serialize(\n value.embeddings_initializer\n )[\"class_name\"]\n embedding_layer = Embedding(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n embeddings_initializer=initializer_name,\n mask_zero=value.mask_zero,\n input_length=value.input_length,\n )\n setattr(model, name, embedding_layer)\n elif type(value) == SparseEmbedding and _need_partition_embedding(\n value\n ):\n logger.info(\n \"Replace {} layer with \"\n \"elasticdl.layers.Embedding\".format(value)\n )\n embedding_layer = Embedding(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n embeddings_initializer=initializer_name,\n combiner=value.combiner,\n )\n setattr(model, name, embedding_layer)\n return model\n\n @staticmethod\n def _replace_attr_with_keras_embedding(model):\n \"\"\"Replace the elasticdl.layers.Embedding attributes in the model\n with `tf.keras.layers.Embedding` or `SparseEmbedding` layers.\n \"\"\"\n for name, value in model.__dict__.items():\n if type(value) == Embedding:\n # The combiner is not None only for SparseEmbedding,\n if value.combiner is not None:\n logger.info(\"Replace elasticdl with SparseEmbedding\")\n embedding_layer = SparseEmbedding(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n embeddings_initializer=value.embeddings_initializer,\n combiner=value.combiner,\n )\n else:\n logger.info(\n \"Replace elasticdl with \", \"tf.kerasl.layers.Embedding\"\n )\n embedding_layer = tf.keras.layers.Embedding(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n 
embeddings_initializer=value.embeddings_initializer,\n mask_zero=value.mask_zero,\n input_length=value.input_length,\n )\n setattr(model, name, embedding_layer)\n return model\n",
"import time\nimport unittest\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport tensorflow as tf\n\nfrom elasticdl.python.common.lr_scheduler import add_lr_scheduler_to_optimizer\n\n\ndef lr_scheduler_func(model_version):\n if model_version < 1:\n return 1\n elif model_version < 2:\n return 0.5\n else:\n return 0.1\n\n\nclass LearningRateTest(unittest.TestCase):\n @staticmethod\n def get_lr(lr_scheduler, opt, model_version):\n lr_scheduler.set_model_version(model_version)\n # sleep 1s to wait that all threads are in this method call\n time.sleep(1)\n return opt.learning_rate\n\n @staticmethod\n def apply_gradients_with_scheduler(\n lr_scheduler, opt, model_version, variables, grads\n ):\n grads_and_vars = zip(grads, variables)\n lr_scheduler.set_model_version(model_version)\n # sleep 1s to wait that all threads are in this method call\n time.sleep(1)\n opt.apply_gradients(grads_and_vars)\n return [v.numpy() for v in variables]\n\n def test_lr_scheduler(self):\n opt = tf.optimizers.SGD(0.1)\n lr_scheduler = add_lr_scheduler_to_optimizer(opt, lr_scheduler_func)\n\n model_versions = [0, 1, 2]\n counts = len(model_versions)\n executor = ThreadPoolExecutor(max_workers=counts)\n tasks = [\n executor.submit(self.get_lr, lr_scheduler, opt, v)\n for v in model_versions\n ]\n results = [tasks[i].result() for i in range(counts)]\n for i in range(counts):\n self.assertAlmostEqual(\n results[i], lr_scheduler_func(model_versions[i])\n )\n\n variables = []\n grads = []\n original_values = [1.2, 0.8]\n grad_values = [0.2, 0.1]\n\n for i in range(counts):\n variables.append([tf.Variable(v) for v in original_values])\n grads.append([tf.convert_to_tensor(g) for g in grad_values])\n\n tasks = [\n executor.submit(\n self.apply_gradients_with_scheduler,\n lr_scheduler,\n opt,\n model_versions[i],\n variables[i],\n grads[i],\n )\n for i in range(counts)\n ]\n results = [tasks[i].result() for i in range(counts)]\n place = 5\n for i in range(0, counts):\n i_diff = [\n original_values[j] - results[i][j]\n for j in range(len(original_values))\n ]\n for j in range(len(original_values)):\n # variable value change ratio equals the learning rate ratio\n # for SGD without momentum\n self.assertAlmostEqual(\n i_diff[j],\n grad_values[j] * lr_scheduler_func(model_versions[i]),\n place,\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"tensorflow.keras.models.clone_model",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.backend.clear_session",
"numpy.zeros"
],
[
"tensorflow.Variable",
"tensorflow.convert_to_tensor",
"tensorflow.optimizers.SGD"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
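The lr_scheduler test in the entry above checks that the effective SGD step scales with a model-version-dependent learning rate. A minimal single-threaded sketch of the same idea, without the elasticdl wrapper (the schedule function mirrors the test's):

import tensorflow as tf

def lr_for_version(v):
    return 1.0 if v < 1 else 0.5 if v < 2 else 0.1

opt = tf.optimizers.SGD(0.1)
var = tf.Variable(1.2)
grad = tf.convert_to_tensor(0.2)
for version in (0, 1, 2):
    opt.learning_rate = lr_for_version(version)   # swap LR before the step
    opt.apply_gradients([(grad, var)])
    print(version, round(float(var.numpy()), 4))  # 1.0, 0.9, 0.88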
BitBottleneck/cvprreview | [
"5beff8aa6948cfb1665301f4ece1769175fd546f"
] | [
"transfer_learning.py"
] | [
"# -*-coding:utf-8-*-\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.python import pywrap_tensorflow\nimport os\n\n# The save address of the weight of baseline quantization.\nFILE_PATH_old = \"/home/zxc/Liu/Bit-Bottleneck-ResNet/logs_Bit_Bottleneck/old/model.ckpt\"\n# The new address used to save the transfer weight for new model.\nFILE_PATH_new = \"/home/zxc/Liu/Bit-Bottleneck-ResNet/logs_Bit_Bottleneck/model.ckpt\"\n# The save address of the weight of new model which is inserted Bit Bottleneck layers.\nOUTPUT_FILE = \"/home/zxc/Bit-Bottleneck-ResNet/logs_Bit_Bottleneck/new/\"\n\nold_data = []\nold_name = []\n\nnew_data = []\nnew_name = []\n\n# Read the baseline quantization weights.\nfor var_name_old, _ in tf.contrib.framework.list_variables(FILE_PATH_old):\n var_old = tf.contrib.framework.load_variable(FILE_PATH_old, var_name_old)\n old_data.append(var_old)\n old_name.append(var_name_old)\n\n# Read the weights of new model.\nfor var_name_new, _ in tf.contrib.framework.list_variables(FILE_PATH_new):\n var_new = tf.contrib.framework.load_variable(FILE_PATH_new, var_name_new)\n new_data.append(var_new)\n new_name.append(var_name_new)\n\n\ntransform_variable_list = []\n# If the name of variable is same , then use the old value to replace the new value.\nfor i in range(0, len(new_name)):\n for j in range(0, len(old_name)):\n if new_name[i] == old_name[j]:\n new_data[i] = old_data[j]\n print(new_name[i])\n rename = new_name[i]\n redata = new_data[i]\n # the variable of Variable_1 and Variable are int32 type, Others are float32 type\n if rename.find('Variable_1') != -1 or rename.find('Variable') != -1:\n renamed_var = tf.Variable(redata, name=rename, dtype=tf.int32)\n else:\n renamed_var = tf.Variable(redata, name=rename, dtype=tf.float32)\n transform_variable_list.append(renamed_var)\n\n\ndef save(saver, sess, logdir):\n model_name = 'model.ckpt'\n checkpoint_path = os.path.join(logdir, model_name)\n\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n saver.save(sess, checkpoint_path, write_meta_graph=False)\n print('The weights have been converted to {}.'.format(checkpoint_path))\n\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(var_list=transform_variable_list, write_version=1)\n save(saver, sess, OUTPUT_FILE)\nprint(\"It's finished!\")\n\n\n\n\n\n\n"
] | [
[
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.framework.load_variable",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.contrib.framework.list_variables"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
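The transfer script above relies on tf.contrib.framework, which is TF1-only. The same inspect-and-load pattern is available through the non-contrib checkpoint utilities; the checkpoint path here is a hypothetical stand-in:

import tensorflow as tf

CKPT = "/tmp/old/model.ckpt"
for name, shape in tf.train.list_variables(CKPT):
    value = tf.train.load_variable(CKPT, name)
    print(name, shape, value.dtype)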
tefavidal/py-pde | [
"427be3f2f4b096775f46111cd5a5d05af50e94bc",
"427be3f2f4b096775f46111cd5a5d05af50e94bc"
] | [
"pde/solvers/scipy.py",
"pde/pdes/base.py"
] | [
"\"\"\"\nDefines a solver using :mod:`scipy.integrate`\n \n.. codeauthor:: David Zwicker <[email protected]> \n\"\"\"\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom ..fields.base import FieldBase\nfrom ..pdes.base import PDEBase\nfrom .base import SolverBase\n\n\nclass ScipySolver(SolverBase):\n \"\"\"class for solving partial differential equations using scipy\n\n This class is a thin wrapper around :func:`scipy.integrate.solve_ivp`. In\n particular, it supports all the methods implemented by this function.\n \"\"\"\n\n name = \"scipy\"\n\n def __init__(self, pde: PDEBase, backend: str = \"auto\", **kwargs):\n r\"\"\"\n Args:\n pde (:class:`~pde.pdes.base.PDEBase`):\n The instance describing the pde that needs to be solved\n backend (str):\n Determines how the function is created. Accepted values are\n 'numpy` and 'numba'. Alternatively, 'auto' lets the code decide\n for the most optimal backend.\n **kwargs:\n All extra arguments are forwarded to\n :func:`scipy.integrate.solve_ivp`.\n \"\"\"\n super().__init__(pde)\n self.backend = backend\n self.solver_params = kwargs\n\n def make_stepper(\n self, state: FieldBase, dt: float = None\n ) -> Callable[[FieldBase, float, float], float]:\n \"\"\"return a stepper function\n\n Args:\n state (:class:`~pde.fields.FieldBase`):\n An example for the state from which the grid and other information can\n be extracted.\n dt (float):\n Initial time step for the simulation. If `None`, the solver will choose\n a suitable initial value.\n\n Returns:\n Function that can be called to advance the `state` from time\n `t_start` to time `t_end`.\n \"\"\"\n if self.pde.is_sde:\n raise RuntimeError(\"Cannot use scipy stepper for a stochastic equation\")\n\n from scipy import integrate\n\n shape = state.data.shape\n self.info[\"dt\"] = dt\n self.info[\"steps\"] = 0\n self.info[\"stochastic\"] = False\n\n # obtain function for evaluating the right hand side\n rhs = self._make_pde_rhs(state, backend=self.backend)\n\n def rhs_helper(t: float, state_flat: np.ndarray) -> np.ndarray:\n \"\"\"helper function to provide the correct call convention\"\"\"\n return rhs(state_flat.reshape(shape), t).flat # type: ignore\n\n def stepper(state: FieldBase, t_start: float, t_end: float) -> float:\n \"\"\"use scipy.integrate.odeint to advance `state` from `t_start` to\n `t_end`\"\"\"\n if dt is not None:\n self.solver_params[\"first_step\"] = min(t_end - t_start, dt)\n\n sol = integrate.solve_ivp(\n rhs_helper,\n t_span=(t_start, t_end),\n y0=np.ravel(state.data),\n t_eval=[t_end], # only store necessary\n **self.solver_params,\n )\n self.info[\"steps\"] += sol.nfev\n state.data[:] = sol.y.reshape(shape)\n return sol.t[0] # type: ignore\n\n if dt:\n self._logger.info(\n f\"Initialized {self.__class__.__name__} stepper with dt=%g\", dt\n )\n else:\n self._logger.info(f\"Initialized {self.__class__.__name__} stepper\")\n return stepper\n",
"\"\"\"\nBase classes\n \n.. codeauthor:: David Zwicker <[email protected]> \n\"\"\"\n\nimport logging\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Optional # @UnusedImport\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Tuple, Union\n\nimport numpy as np\n\nfrom ..fields import FieldCollection\nfrom ..fields.base import FieldBase\nfrom ..tools.numba import jit\nfrom ..tools.typing import ArrayLike\nfrom ..trackers.base import TrackerCollectionDataType\n\nif TYPE_CHECKING:\n from ..solvers.controller import TRangeType # @UnusedImport\n\n\nclass PDEBase(metaclass=ABCMeta):\n \"\"\"base class for solving partial differential equations\n\n Custom PDEs can be implemented by specifying their evolution rate. In the\n simple case of deterministic PDEs, the methods\n :meth:`PDEBase.evolution_rate` and :meth:`PDEBase._make_pde_rhs_numba` need\n to be overwritten for the `numpy` and `numba` backend, respectively.\n \"\"\"\n\n check_implementation: bool = True\n \"\"\" bool: Flag determining whether (some) numba-compiled functions should be\n checked against their numpy counter-parts. This can help with implementing a\n correct compiled version for a PDE class. \"\"\"\n\n cache_rhs: bool = False\n \"\"\" bool: Flag indicating whether the right hand side of the equation should be\n cached. If True, the same implementation is used in subsequent calls to `solve`.\n Note that this might lead to wrong results if the parameters of the PDE were changed\n after the first call. This option is thus disabled by default and should be used\n with care. \n \"\"\"\n\n explicit_time_dependence: Optional[bool] = None\n \"\"\" bool: Flag indicating whether the right hand side of the PDE has an\n explicit time dependence. \"\"\"\n\n complex_valued: bool = False\n \"\"\" bool: Flag indicating whether the right hand side is a complex-valued PDE, which\n requires all involved variables to be of complex type \"\"\"\n\n def __init__(self, noise: ArrayLike = 0, rng: np.random.Generator = None):\n \"\"\"\n Args:\n noise (float or :class:`~numpy.ndarray`):\n Magnitude of the additive Gaussian white noise that is supported for all\n PDEs by default. If set to zero, a deterministic partial differential\n equation will be solved. Different noise magnitudes can be supplied for\n each field in coupled PDEs.\n rng (:class:`~numpy.random.Generator`):\n Random number generator (default: :func:`~numpy.random.default_rng()`).\n Note that this random number generator is only used for numpy function,\n while compiled numba code is unaffected.\n\n Note:\n If more complicated noise structures are required, the methods\n :meth:`PDEBase.noise_realization` and\n :meth:`PDEBase._make_noise_realization_numba` need to be overwritten\n for the `numpy` and `numba` backend, respectively.\n \"\"\"\n self._logger = logging.getLogger(self.__class__.__name__)\n self._cache: Dict[str, Any] = {}\n self.noise = np.asanyarray(noise)\n if rng is None:\n self.rng = np.random.default_rng()\n else:\n self.rng = rng\n\n @property\n def is_sde(self) -> bool:\n \"\"\"flag indicating whether this is a stochastic differential equation\n\n The :class:`BasePDF` class supports additive Gaussian white noise, whose\n magnitude is controlled by the `noise` property. 
In this case, `is_sde`\n is `True` if `self.noise != 0`.\n \"\"\"\n # check for self.noise, in case __init__ is not called in a subclass\n return hasattr(self, \"noise\") and np.any(self.noise != 0) # type: ignore\n\n def make_modify_after_step(self, state: FieldBase) -> Callable[[np.ndarray], float]:\n \"\"\"returns a function that can be called to modify a state\n\n This function is applied to the state after each integration step when\n an explicit stepper is used. The default behavior is to not change the\n state.\n\n Args:\n state (:class:`~pde.fields.FieldBase`):\n An example for the state from which the grid and other information can\n be extracted\n\n Returns:\n Function that can be applied to a state to modify it and which\n returns a measure for the corrections applied to the state\n \"\"\"\n\n def modify_after_step(state_data: np.ndarray) -> float:\n \"\"\"no-op function\"\"\"\n return 0\n\n return modify_after_step\n\n @abstractmethod\n def evolution_rate(self, state: FieldBase, t: float = 0) -> FieldBase:\n pass\n\n def _make_pde_rhs_numba(\n self, state: FieldBase\n ) -> Callable[[np.ndarray, float], np.ndarray]:\n \"\"\"create a compiled function for evaluating the right hand side\"\"\"\n raise NotImplementedError\n\n def _make_pde_rhs_numba_cached(\n self, state: FieldBase\n ) -> Callable[[np.ndarray, float], np.ndarray]:\n \"\"\"create a compiled function for evaluating the right hand side\n\n This method implements caching and checking of the actual method, which is\n defined by overwriting the method `_make_pde_rhs_numba`.\n \"\"\"\n check_implementation = self.check_implementation\n\n if self.cache_rhs:\n # support caching of the right hand side\n grid_state = state.grid.state_serialized\n if self._cache.get(\"pde_rhs_numba_state\") == grid_state:\n # cache was successful\n self._logger.info(\"Use compiled rhs from cache\")\n check_implementation = False # skip checking to save time\n else:\n # cache was not hit\n self._logger.info(\"Write compiled rhs to cache\")\n self._cache[\"pde_rhs_numba_state\"] = grid_state\n self._cache[\"pde_rhs_numba\"] = self._make_pde_rhs_numba(state)\n rhs = self._cache[\"pde_rhs_numba\"]\n\n else:\n # caching was skipped\n rhs = self._make_pde_rhs_numba(state)\n\n if check_implementation:\n # obtain and check result from the numpy implementation\n res_numpy = self.evolution_rate(state.copy(), 0.0).data\n if not np.all(np.isfinite(res_numpy)):\n self._logger.warning(\n \"The numpy implementation of the PDE returned non-finite values.\"\n )\n\n # obtain and check result from the numba implementation\n test_state = state.copy()\n res_numba = rhs(test_state.data, 0.0)\n if not np.all(np.isfinite(res_numba)):\n self._logger.warning(\n \"The numba implementation of the PDE returned non-finite values.\"\n )\n\n # compare the two implementations\n msg = (\n \"The numba compiled implementation of the right hand side is not \"\n \"compatible with the numpy implementation. 
This check can be disabled \"\n \"by setting the class attribute `check_implementation` to `False`.\"\n )\n np.testing.assert_allclose(\n res_numba, res_numpy, err_msg=msg, rtol=1e-7, atol=1e-7, equal_nan=True\n )\n return rhs # type: ignore\n\n def make_pde_rhs(\n self, state: FieldBase, backend: str = \"auto\"\n ) -> Callable[[np.ndarray, float], np.ndarray]:\n \"\"\"return a function for evaluating the right hand side of the PDE\n\n Args:\n state (:class:`~pde.fields.FieldBase`):\n An example for the state from which the grid and other\n information can be extracted\n backend (str): Determines how the function is created. Accepted\n values are 'numpy' and 'numba'. Alternatively, 'auto' lets the\n code decide for the most optimal backend.\n\n Returns:\n Function determining the right hand side of the PDE\n \"\"\"\n if backend == \"auto\":\n try:\n rhs = self._make_pde_rhs_numba_cached(state)\n except NotImplementedError:\n backend = \"numpy\"\n else:\n rhs._backend = \"numba\" # type: ignore\n\n if backend == \"numba\":\n rhs = self._make_pde_rhs_numba_cached(state)\n rhs._backend = \"numba\" # type: ignore\n\n elif backend == \"numpy\":\n state = state.copy()\n\n def evolution_rate_numpy(state_data: np.ndarray, t: float) -> np.ndarray:\n \"\"\"evaluate the rhs given only a state without the grid\"\"\"\n state.data = state_data\n return self.evolution_rate(state, t).data\n\n rhs = evolution_rate_numpy\n rhs._backend = \"numpy\" # type: ignore\n\n elif backend != \"auto\":\n raise ValueError(\n f\"Unknown backend `{backend}`. Possible values are ['auto', 'numpy', \"\n \"'numba']\"\n )\n\n return rhs\n\n def noise_realization(\n self, state: FieldBase, t: float = 0, label: str = \"Noise realization\"\n ) -> FieldBase:\n \"\"\"returns a realization for the noise\n\n Args:\n state (:class:`~pde.fields.ScalarField`):\n The scalar field describing the concentration distribution\n t (float):\n The current time point\n label (str):\n The label for the returned field\n\n Returns:\n :class:`~pde.fields.ScalarField`:\n Scalar field containing a realization of the noise\n \"\"\"\n if self.is_sde:\n result = state.copy(label=label)\n\n if np.isscalar(self.noise) or self.noise.size == 1:\n # a single noise value is given for all fields\n result.data = self.rng.normal(scale=self.noise, size=state.data.shape)\n\n elif isinstance(state, FieldCollection):\n # different noise strengths, assuming one for each field\n for f, n in zip(result, np.broadcast_to(self.noise, len(state))): # type: ignore\n if n == 0:\n f.data = 0\n else:\n f.data = self.rng.normal(scale=n, size=f.data.shape)\n\n else:\n # different noise strengths, but a single field\n raise RuntimeError(\n f\"Multiple noise strengths were given for the single field {state}\"\n )\n\n else:\n # no noise\n result = state.copy(label=label)\n result.data[:] = 0\n\n return result\n\n def _make_noise_realization_numba(\n self, state: FieldBase\n ) -> Callable[[np.ndarray, float], np.ndarray]:\n \"\"\"return a function for evaluating the noise term of the PDE\n\n Args:\n state (:class:`~pde.fields.FieldBase`):\n An example for the state from which the grid and other\n information can be extracted\n\n Returns:\n Function computing a realization of the noise\n \"\"\"\n if self.is_sde:\n data_shape = state.data.shape\n\n if np.isscalar(self.noise) or self.noise.size == 1:\n # a single noise value is given for all fields\n noise_strength = float(self.noise)\n\n @jit\n def noise_realization(state_data: np.ndarray, t: float) -> np.ndarray:\n 
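# Note: under numba this helper draws from np.random directly, so it is\n # not affected by the `rng` attribute, which (as noted in __init__)\n # only applies to the numpy backend.\n 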
\"\"\"helper function returning a noise realization\"\"\"\n return noise_strength * np.random.randn(*data_shape)\n\n elif isinstance(state, FieldCollection):\n # different noise strengths, assuming one for each field\n noise_strengths = np.empty(data_shape[0])\n noise_arr = np.broadcast_to(self.noise, len(state))\n for i, noise in enumerate(noise_arr):\n noise_strengths[state._slices[i]] = noise\n\n @jit\n def noise_realization(state_data: np.ndarray, t: float) -> np.ndarray:\n \"\"\"helper function returning a noise realization\"\"\"\n out = np.random.randn(*data_shape)\n for i in range(data_shape[0]):\n # TODO: Avoid creating random numbers when noise_strengths == 0\n out[i] *= noise_strengths[i]\n return out\n\n else:\n # different noise strengths, but a single field\n raise RuntimeError(\n f\"Multiple noise strengths were given for the single field {state}\"\n )\n\n else:\n\n @jit\n def noise_realization(state_data: np.ndarray, t: float) -> None:\n \"\"\"helper function returning a noise realization\"\"\"\n return None\n\n return noise_realization # type: ignore\n\n def _make_sde_rhs_numba(\n self, state: FieldBase\n ) -> Callable[[np.ndarray, float], Tuple[np.ndarray, np.ndarray]]:\n \"\"\"return a function for evaluating the noise term of the PDE\n\n Args:\n state (:class:`~pde.fields.FieldBase`):\n An example for the state from which the grid and other\n information can be extracted\n\n Returns:\n Function determining the right hand side of the PDE\n \"\"\"\n evolution_rate = self._make_pde_rhs_numba_cached(state)\n noise_realization = self._make_noise_realization_numba(state)\n\n @jit\n def sde_rhs(state_data: np.ndarray, t: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"compiled helper function returning a noise realization\"\"\"\n return (evolution_rate(state_data, t), noise_realization(state_data, t))\n\n return sde_rhs # type: ignore\n\n def _make_sde_rhs_numba_cached(\n self, state: FieldBase\n ) -> Callable[[np.ndarray, float], Tuple[np.ndarray, np.ndarray]]:\n \"\"\"create a compiled function for evaluating the noise term of the PDE\n\n This method implements caching and checking of the actual method, which is\n defined by overwriting the method `_make_pde_rhs_numba`.\n \"\"\"\n if self.cache_rhs:\n # support caching of the noise term\n grid_state = state.grid.state_serialized\n if self._cache.get(\"sde_rhs_numba_state\") == grid_state:\n # cache was successful\n self._logger.info(\"Use compiled noise term from cache\")\n else:\n # cache was not hit\n self._logger.info(\"Write compiled noise term to cache\")\n self._cache[\"sde_rhs_numba_state\"] = grid_state\n self._cache[\"sde_rhs_numba\"] = self._make_sde_rhs_numba(state)\n sde_rhs = self._cache[\"sde_rhs_numba\"]\n\n else:\n # caching was skipped\n sde_rhs = self._make_sde_rhs_numba(state)\n\n return sde_rhs # type: ignore\n\n def make_sde_rhs(\n self, state: FieldBase, backend: str = \"auto\"\n ) -> Callable[[np.ndarray, float], Tuple[np.ndarray, np.ndarray]]:\n \"\"\"return a function for evaluating the right hand side of the SDE\n\n Args:\n state (:class:`~pde.fields.FieldBase`):\n An example for the state from which the grid and other\n information can be extracted\n backend (str): Determines how the function is created. Accepted\n values are 'python` and 'numba'. 
Alternatively, 'auto' lets the\n code decide for the most optimal backend.\n\n Returns:\n Function determining the deterministic part of the right hand side\n of the PDE together with a noise realization.\n \"\"\"\n if backend == \"auto\":\n try:\n sde_rhs = self._make_sde_rhs_numba_cached(state)\n except NotImplementedError:\n backend = \"numpy\"\n else:\n sde_rhs._backend = \"numba\" # type: ignore\n return sde_rhs\n\n if backend == \"numba\":\n sde_rhs = self._make_sde_rhs_numba_cached(state)\n sde_rhs._backend = \"numba\" # type: ignore\n\n elif backend == \"numpy\":\n state = state.copy()\n\n def sde_rhs(\n state_data: np.ndarray, t: float\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"evaluate the rhs given only a state without the grid\"\"\"\n state.data = state_data\n return (\n self.evolution_rate(state, t).data,\n self.noise_realization(state, t).data,\n )\n\n sde_rhs._backend = \"numpy\" # type: ignore\n\n else:\n raise ValueError(f\"Unknown backend `{backend}`\")\n\n return sde_rhs\n\n def solve(\n self,\n state: FieldBase,\n t_range: \"TRangeType\",\n dt: float = None,\n tracker: TrackerCollectionDataType = [\"progress\", \"consistency\"],\n method: str = \"auto\",\n ret_info: bool = False,\n **kwargs,\n ) -> Union[FieldBase, Tuple[FieldBase, Dict[str, Any]]]:\n \"\"\"convenience method for solving the partial differential equation\n\n The method constructs a suitable solver\n (:class:`~pde.solvers.base.SolverBase`) and controller\n (:class:`~pde.controller.Controller`) to advance the state over the\n temporal range specified by `t_range`. To obtain full flexibility, it is\n advisable to construct these classes explicitly.\n\n Args:\n state (:class:`~pde.fields.base.FieldBase`):\n The initial state (which also defines the spatial grid)\n t_range (float or tuple):\n Sets the time range for which the PDE is solved. This should typically\n be a tuple of two numbers, `(t_start, t_end)`, specifying the initial\n and final time of the simulation. If only a single value is given, it is\n interpreted as `t_end` and the time range is assumed to be `(0, t_end)`.\n dt (float):\n Time step of the chosen stepping scheme. If `None`, a default value\n based on the stepper will be chosen. In particular, if\n `method == 'auto'`, the :class:`~pde.solvers.ScipySolver` with an\n automatic, adaptive time step is used. This is a flexible choice, but\n might be slower than using a fixed time step.\n tracker:\n Defines a tracker that process the state of the simulation at specified\n time intervals. A tracker is either an instance of\n :class:`~pde.trackers.base.TrackerBase` or a string, which identifies a\n tracker. All possible identifiers can be obtained by calling\n :func:`~pde.trackers.base.get_named_trackers`. Multiple trackers can be\n specified as a list. The default value is `['progress', 'consistency']`,\n which displays a progress bar and checks the state for consistency,\n aborting the simulation when not-a-number values appear. More general\n trackers are defined in :mod:`~pde.trackers`, where all options are\n explained in detail. In particular, the interval at which the tracker is\n evaluated can be chosen when creating a tracker object explicitly.\n method (:class:`~pde.solvers.base.SolverBase` or str):\n Specifies a method for solving the differential equation. This\n can either be an instance of\n :class:`~pde.solvers.base.SolverBase` or a descriptive name\n like 'explicit' or 'scipy'. 
The valid names are given by\n :meth:`pde.solvers.base.SolverBase.registered_solvers`.\n ret_info (bool):\n Flag determining whether diagnostic information about the solver\n process should be returned.\n **kwargs:\n Additional keyword arguments are forwarded to the solver class chosen\n with the `method` argument.\n\n Returns:\n :class:`~pde.fields.base.FieldBase`:\n The state at the final time point. In the case `ret_info == True`, a\n tuple with the final state and a dictionary with additional\n information is returned.\n \"\"\"\n from ..solvers.base import SolverBase\n\n if method == \"auto\":\n method = \"scipy\" if dt is None else \"explicit\"\n\n # create solver\n if callable(method):\n solver = method(pde=self, **kwargs)\n if not isinstance(solver, SolverBase):\n self._logger.warn(\n \"Solver is not an instance of `SolverBase`. Specified wrong method?\"\n )\n else:\n solver = SolverBase.from_name(method, pde=self, **kwargs)\n\n # create controller\n from ..solvers import Controller\n\n controller = Controller(solver, t_range=t_range, tracker=tracker)\n\n # run the simulation\n final_state = controller.run(state, dt)\n\n if ret_info:\n info = controller.diagnostics.copy()\n info[\"controller\"].pop(\"solver_class\") # remove redundant information\n return final_state, info\n else:\n return final_state\n\n\ndef expr_prod(factor: float, expression: str) -> str:\n \"\"\"helper function for building an expression with an (optional) pre-factor\n\n Args:\n factor (float): The value of the prefactor\n expression (str): The remaining expression\n\n Returns:\n str: The expression with the factor appended if necessary\n \"\"\"\n if factor == 0:\n return \"0\"\n elif factor == 1:\n return expression\n elif factor == -1:\n return \"-\" + expression\n else:\n return f\"{factor:g} * {expression}\"\n"
] | [
[
"numpy.ravel"
],
[
"numpy.isfinite",
"numpy.empty",
"numpy.asanyarray",
"numpy.random.randn",
"numpy.any",
"numpy.isscalar",
"numpy.testing.assert_allclose",
"numpy.random.default_rng"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bintulab/storm-analysis | [
"71ae493cbd17ddb97938d0ae2032d97a0eaa76b2",
"71ae493cbd17ddb97938d0ae2032d97a0eaa76b2",
"71ae493cbd17ddb97938d0ae2032d97a0eaa76b2"
] | [
"storm_analysis/spliner/measure_psf_utils.py",
"storm_analysis/multi_plane/analysis_io.py",
"storm_analysis/diagnostics/spliner_2d/configure.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nVarious utility functions for PSF measurement. Basically\ntrying to consolidate/improve what is common between the \nseveral different scripts that do this.\n\nHazen 03/18\n\"\"\"\nimport numpy\nimport scipy\nimport scipy.ndimage\n\nimport storm_analysis.sa_library.imagecorrelation as imgCorr\n\n\nclass ZScaler(object):\n \"\"\"\n Used in PSF measurement to convert a floating point z value into\n a z index.\n \"\"\"\n def __init__(self, z_range, z_step):\n super(ZScaler, self).__init__()\n\n assert(z_range > 0.0), \"The z range must be positive.\"\n assert(z_step > 0.0), \"The z step must be positive.\"\n assert(z_range >= z_step), \"The z range must be greater than or equal to the step size.\"\n \n # Assert that the z_step size is a multiple of the z_range.\n assert ((int(z_range*1.0e+3) % int(z_step*1.0e+3)) == 0), \"The z range must be a multiple of the z step.\"\n\n self.z_mid = int(round(z_range/z_step))\n self.z_max = 2 * self.z_mid + 1\n self.z_step = z_step\n\n def convert(self, z):\n return int(round(z/self.z_step) + self.z_mid)\n\n def getMaxZ(self):\n return self.z_max\n \n def inRange(self, zi):\n return ((zi > -1) and (zi < self.z_max))\n\n\ndef alignPSFs(psfs, max_xy = 2, max_z = 2, max_reps = 10, verbose = True):\n \"\"\"\n Align multiple PSFs in x,y,z.\n\n psfs - A list of PSFs, each of these has shape (nz, nxy, nxy).\n max_xy - The maximum expected alignment error xy in pixels.\n max_z - The maximum expected alignment error in z in z steps.\n max_reps - Maximum number of cycles of refinement.\n verbose - Verbose, or not.\n\n Returns the average PSF after alignment.\n \"\"\"\n\n # Create working list for aligned PSFs.\n aligned_psfs = []\n for i in range(len(psfs)):\n aligned_psfs.append(psfs[i])\n\n starting_score = psfCorrelation(aligned_psfs)\n \n # Repeat aligning a PSF to the average of all the other PSFs.\n for i in range(max_reps):\n moving = False\n for j in range(len(psfs)):\n\n # Compute average of all the PSFs except the current PSF.\n sum_psf = averagePSF(aligned_psfs, skip = j)\n\n # Align the current PSF to the average PSF and update\n # the list of aligned PSFs.\n #\n psf_aligner = imgCorr.Align3DProductNewtonCG(sum_psf,\n xy_margin = max_xy,\n z_margin = max_z)\n\n psf_aligner.setOtherImage(aligned_psfs[j])\n\n [aligned_psfs[j], q_score, disp] = psf_aligner.align()\n\n # Check if the PSF was translated.\n if not numpy.allclose(numpy.zeros(disp.size), disp, atol = 1.0e-3):\n moving = True\n \n if verbose:\n print(i, j, q_score, disp)\n\n current_score = psfCorrelation(aligned_psfs)\n \n # Print current score.\n if verbose:\n print(\"Quality score: {0:.6f}\".format(current_score/starting_score))\n print()\n \n # Stop if the PSFs are no longer being adjusted.\n if not moving:\n break\n \n i += 1\n\n # Compute average of aligned PSFs.\n return [averagePSF(aligned_psfs), current_score/starting_score]\n\n\ndef averagePSF(psfs, skip = -1):\n \"\"\"\n Compute average of a list of PSFs.\n \"\"\"\n n_psfs = 0\n average_psf = numpy.zeros_like(psfs[0])\n for i in range(len(psfs)):\n if (i == skip):\n continue\n average_psf += psfs[i]\n n_psfs += 1\n\n return average_psf/float(n_psfs)\n\n\ndef extractAOI(frame, aoi_size, xf, yf, zoom = 1):\n \"\"\"\n Extract AOI for PSF measurements.\n\n frame - An image.\n aoi_size - 1/2 the AOI size in pixels.\n xf - AOI x offset in pixels.\n yf - AOI y offset in pixels.\n zoom - Zoom factor, default is 2.0.\n \"\"\"\n xi = int(xf)\n yi = int(yf)\n\n sx = xi - aoi_size\n ex = xi + aoi_size\n sy = yi - 
aoi_size\n ey = yi + aoi_size\n \n # Check that the slice is inside the image.\n assert (sx >= 0), \"X position is too small ({0:d}).\".format(sx)\n assert (sy >= 0), \"Y position is too small ({0:d}).\".format(sy)\n assert (ex <= frame.shape[0]), \"X position is too large ({0:d}).\".format(ex)\n assert (ey <= frame.shape[1]), \"Y position is too large ({0:d}).\".format(ey)\n\n # Slice.\n im_slice = frame[sx:ex,sy:ey]\n\n # Zoom and center.\n if(zoom != 1):\n im_slice_up = scipy.ndimage.interpolation.zoom(im_slice, zoom)\n else:\n im_slice_up = im_slice\n \n im_slice_up = scipy.ndimage.interpolation.shift(im_slice_up, (-zoom*(xf-xi), -zoom*(yf-yi)), mode='nearest')\n\n return im_slice_up\n\n \ndef makeZIndexArray(z_offsets, z_range, z_step):\n \"\"\"\n Create the array that specifies which slice the image at\n a particular z offset should be added to. If the image \n should not be added to any slice then z_index will have\n the value of -1.\n\n Note: The bins are centered on the z_step.\n\n All units are in microns.\n\n z_offsets - The different z offsets, an array of shape\n (N,2) as contained for example in z_offsets.txt\n file.\n z_range - The range the PSF will cover (+- z_range).\n z_step - The z step size.\n \"\"\"\n assert(len(z_offsets.shape) == 2), \"Z offsets must have shape (N,2).\"\n assert(z_offsets.shape[1] == 2), \"Z offsets must have shape (N,2).\"\n\n z_sclr = ZScaler(z_range, z_step)\n z_index = numpy.zeros(z_offsets.shape[0], dtype = numpy.int) - 1\n for i in range(z_offsets.shape[0]):\n if (z_offsets[i][0] < 1.0e-6):\n continue\n zi = z_sclr.convert(z_offsets[i][1])\n if z_sclr.inRange(zi):\n z_index[i] = zi\n \n #if (z_offsets[i][1] > (-z_range - 0.5*z_step)) and (z_offsets[i][1] < (z_range + 0.5*z_step)):\n\n assert(numpy.max(z_index) > -0.5), \"No valid frames for PSF measurement.\"\n \n return z_index\n\n\ndef meanEdge(psf_slice):\n \"\"\"\n Return the mean of the boundary pixels of a PSF slice.\n \"\"\"\n edge = numpy.concatenate((psf_slice[0,:],\n psf_slice[-1,:],\n psf_slice[:,0],\n psf_slice[:,-1]))\n return numpy.mean(edge)\n\n\ndef measureSinglePSFBeads(frame_reader, z_index, aoi_size, x, y, drift_xy = None, zoom = 1):\n \"\"\"\n Measures a single PSF from a PSF z stack movie that you\n might take using beads.\n\n frame_reader - A sa_library.analysis_io.FrameReader like object.\n z_index - Z slice in the PSF for each frame, as returned for\n example by makeZIndexArray().\n aoi_size - Size of the PSF AOI.\n x - Bead center position in x.\n y - Bead center position in y.\n drift_xy - An array containing x,y drift information. This should\n have a shape of (N,2). The x drift is the first entry and\n the y drift is the second entry.\n zoom - Amount to magnify the final PSF by. 
Must be an integer.\n\n Returns - [psf, samples per z section]\n \"\"\"\n if drift_xy is not None:\n assert(drift_xy.shape[0] == z_index.size), \"XY drift must have the same number of points as z_index.\"\n assert(drift_xy.shape[1] == 2), \"XY drift can only have an x and a y offset for each frame.\"\n\n assert(isinstance(aoi_size, int)), \"PSF AOI must be an integer.\"\n assert(isinstance(zoom, int)), \"Zoom must be an integer.\"\n\n z_size = numpy.max(z_index) + 1\n\n psf = numpy.zeros((z_size, 2*aoi_size*zoom, 2*aoi_size*zoom))\n samples = numpy.zeros(z_size, dtype = numpy.int)\n for i in range(z_index.size):\n\n\n # Ignore frames with 'bad' z index.\n if(z_index[i] < 0):\n continue\n\n # Load the frame.\n frame = frame_reader.loadAFrame(i)\n\n # Figure out where to slice.\n xf = x\n yf = y\n\n # Apply drift correction (if specified).\n if drift_xy is not None:\n xf += drift_xy[i,0]\n yf += drift_xy[i,1]\n\n # Extract AOI.\n im_slice_up = extractAOI(frame, aoi_size, xf, yf, zoom = zoom)\n\n # Update accumulators.\n zi = z_index[i]\n psf[zi,:,:] += im_slice_up\n samples[zi] += 1\n\n return [psf, samples]\n\n\ndef psfCorrelation(psfs):\n \"\"\"\n Calculate the correlation score of the PSFs, this is just the\n sum of the product of all the PSFs.\n \"\"\"\n product = numpy.copy(psfs[0])\n for i in range(1,len(psfs)):\n product = product * psfs[i]\n product = product/float(len(psfs))\n return numpy.sum(product)\n\n\ndef psfSharpness(psf):\n \"\"\"\n Calculates how 'sharp' the PSF is as defined here by how large \n the mean frequency component is. The idea is that a better average\n PSF will be less blurred out, so it will have more power in\n the larger frequencies.\n \"\"\"\n psd = numpy.abs(numpy.fft.fftn(psf))**2\n\n k1 = numpy.abs(numpy.fft.fftfreq(psf.shape[0]))\n k2 = numpy.abs(numpy.fft.fftfreq(psf.shape[1]))\n k3 = numpy.abs(numpy.fft.fftfreq(psf.shape[2]))\n\n # Ignore the highest frequencies as these are mostly pixel noise.\n k1[(k1 > 0.4)] = 0\n k2[(k2 > 0.4)] = 0\n k3[(k3 > 0.4)] = 0\n\n [m_k1, m_k2, m_k3] = numpy.meshgrid(k1, k2, k3, indexing = 'ij')\n return numpy.mean(psd * m_k1 * m_k2 * m_k3)\n\n\ndef smoothPSF(psf, xy_sigma = 0.5, z_sigma = 0.5):\n \"\"\"\n Apply gaussian smoothing to a PSF.\n \"\"\"\n return scipy.ndimage.filters.gaussian_filter(psf,\n [z_sigma, xy_sigma, xy_sigma],\n mode = \"nearest\")\n\n \ndef sumPSF(psfs):\n \"\"\"\n Compute sum of a list of PSFs.\n \"\"\"\n sum_psf = numpy.zeros_like(psfs[0])\n for psf in psfs:\n sum_psf += psf\n\n return sum_psf\n",
"#!/usr/bin/env python\n\"\"\"\nAnalysis IO specialized for multiplane fitting.\n\nHazen 09/17\n\"\"\"\nimport numpy\nimport os\nimport sys\n\nfrom xml.etree import ElementTree\n\nimport storm_analysis.sa_library.analysis_io as analysisIO\nimport storm_analysis.sa_library.sa_h5py as saH5Py\n\nimport storm_analysis.multi_plane.mp_utilities as mpUtil\n\n\nclass MPDataWriter(analysisIO.DataWriter):\n \"\"\"\n Data writer specialized for multi-plane data.\n \"\"\"\n def __init__(self, parameters = None, sa_type = None, **kwds):\n super(MPDataWriter, self).__init__(**kwds)\n\n self.movie_info_set = False\n self.offsets = []\n\n # Figure out how many planes there are.\n self.n_planes = len(mpUtil.getExtAttrs(parameters))\n\n # Save frame offsets for each plane.\n for offset in mpUtil.getOffsetAttrs(parameters):\n self.offsets.append(parameters.getAttr(offset)) \n\n # Figure out where to start if the analysis file already exists.\n if os.path.exists(self.filename):\n print(\"Existing analysis file found. Restarting from last analyzed frame.\")\n self.h5 = saH5Py.SAH5Py(filename = self.filename)\n\n self.movie_info_set = True\n \n # Find the last frame that we analyzed.\n i = self.h5.getMovieLength()\n while (i > 0):\n if self.h5.isAnalyzed(i):\n break\n i -= 1\n self.start_frame = i\n\n # Otherwise start from the beginning.\n else:\n self.h5 = saH5Py.SAH5Py(filename = self.filename,\n is_existing = False,\n sa_type = sa_type)\n \n # Save analysis parameters.\n etree = parameters.toXMLElementTree(False)\n if (sys.version_info > (3, 0)):\n self.h5.addMetadata(ElementTree.tostring(etree, 'unicode'))\n else:\n self.h5.addMetadata(ElementTree.tostring(etree, 'ISO-8859-1'))\n\n # Save pixel size.\n self.h5.setPixelSize(parameters.getAttr(\"pixel_size\"))\n \n # Adjust starting frame based on channel 0 offset.\n if (self.offsets[0] != 0):\n assert(self.offsets[0] > 0), \"Channel 0 offset cannot be negative.\"\n self.start_frame = self.offsets[0]\n print(\"Adjusted start frame to\", self.start_frame, \"based on channel 0 offset.\")\n\n self.h5.setAnalysisFinished(False)\n\n def addPeaks(self, peaks, movie_reader):\n assert(len(peaks) == self.n_planes)\n super(MPDataWriter, self).addPeaks(peaks[0], movie_reader)\n\n if not self.movie_info_set:\n self.h5.addMovieInformation(movie_reader)\n self.movie_info_set = True\n \n for i in range(len(peaks)):\n self.h5.addLocalizations(peaks[i],\n movie_reader.getCurrentFrameNumber(),\n channel = i)\n\n def close(self, finished):\n self.h5.setAnalysisFinished(finished)\n self.h5.close(verbose = True)\n\n \nclass MPMovieReader(object):\n \"\"\"\n analysisIO.MovieReader like object for multi-plane data. This is\n primarily designed to be used in the standard storm-analysis \n analysis pipeline but has some additional functionality to make\n it easier to use in other modules that need to be able to manipulate\n these sorts of movies.\n \"\"\"\n def __init__(self, base_name = None, parameters = None, **kwds):\n super(MPMovieReader, self).__init__(**kwds)\n\n self.backgrounds = []\n self.bg_estimators = []\n self.cur_frame = 0\n self.frames = []\n self.max_frame = 0\n self.offsets = []\n self.parameters = parameters\n self.planes = []\n\n #\n # Load the movies and offsets for each plane/channel. 
At present\n # multiplane expects the sCMOS camera calibration data.\n #\n calib_name = mpUtil.getCalibrationAttrs(parameters)\n for i, ext in enumerate(mpUtil.getExtAttrs(parameters)):\n movie_name = base_name + parameters.getAttr(ext)\n self.planes.append(analysisIO.FrameReaderSCMOS(parameters = parameters,\n movie_file = movie_name,\n calibration_file = parameters.getAttr(calib_name[i])))\n\n for offset in mpUtil.getOffsetAttrs(parameters):\n self.offsets.append(parameters.getAttr(offset))\n\n print(\"Found data for\", len(self.planes), \"planes.\")\n\n [self.movie_x, self.movie_y, self.movie_l] = self.planes[0].filmSize()\n self.movie_l -= self.offsets[0]\n\n # Check if the movies for the other channels (adjusted for their offsets)\n # are shorter than the movie for channel 0.\n #\n for i in range(1, len(self.planes)):\n [px, py, pl] = self.planes[i].filmSize()\n pl -= self.offsets[i]\n if (pl < self.movie_l):\n self.movie_l = pl\n\n # Assert that all the movies are the same size, at least in x,y.\n for i in range(1, len(self.planes)):\n assert(self.movie_x == self.planes[i].filmSize()[0])\n assert(self.movie_y == self.planes[i].filmSize()[1])\n\n def close(self):\n for plane in self.planes:\n plane.close()\n \n def getBackground(self, plane):\n if (len(self.backgrounds) > 0):\n return self.backgrounds[plane]\n else:\n return None\n\n def getCurrentFrameNumber(self):\n return self.cur_frame\n\n def getFilmSize(self):\n return [self.movie_x, self.movie_y, self.movie_l]\n \n def getFrame(self, plane):\n \"\"\"\n This returns a particular one of the currently loaded frames.\n \"\"\"\n return self.frames[plane]\n\n def getFrames(self, frame_number):\n \"\"\"\n This loads all the frames for the specified frame_number, corrects\n them for gain and offset (self.planes is a list of analysisIO.FrameReader\n objects) and returns them as a list.\n \"\"\"\n frames = []\n for i, plane in enumerate(self.planes):\n frames.append(plane.loadAFrame(frame_number + self.offsets[i]))\n return frames\n\n def getMovieL(self):\n return self.movie_l\n \n def getMovieX(self):\n return self.movie_x\n\n def getMovieY(self):\n return self.movie_y\n\n def getNPlanes(self):\n return len(self.planes)\n \n def hashID(self):\n return self.planes[0].hashID()\n\n def nextFrame(self):\n self.cur_frame += 1\n if (self.cur_frame < self.max_frame):\n\n # Update background estimate.\n self.backgrounds = []\n for i, bg_estimator in enumerate(self.bg_estimators):\n self.backgrounds.append(bg_estimator.estimateBG(self.cur_frame + self.offsets[i]))\n\n # Load planes & remove all values less than 1.0 as we are doing MLE fitting.\n self.frames = []\n frames = self.getFrames(self.cur_frame)\n for frame in frames:\n mask = (frame < 1.0)\n if numpy.count_nonzero(mask):\n frame[mask] = 1.0\n self.frames.append(frame)\n\n return True\n else:\n return False\n\n def setup(self, start_frame):\n\n # Figure out where to start.\n self.cur_frame = start_frame\n if self.parameters.hasAttr(\"start_frame\"):\n if (self.parameters.getAttr(\"start_frame\") > self.cur_frame):\n if (self.parameters.getAttr(\"start_frame\") < self.movie_l):\n self.cur_frame = self.parameters.getAttr(\"start_frame\") - 1\n \n # Figure out where to stop.\n self.max_frame = self.movie_l\n\n # If the user specified a max frame then just use it and\n # assume that they knew what they were doing.\n if self.parameters.hasAttr(\"max_frame\"):\n if (self.parameters.getAttr(\"max_frame\") > 0):\n if (self.parameters.getAttr(\"max_frame\") < self.movie_l):\n self.max_frame = 
self.parameters.getAttr(\"max_frame\")\n\n # Configure background estimator, if any.\n #\n # FIXME: Use of a background estimator has not been tested.\n #\n if (self.parameters.getAttr(\"static_background_estimate\", 0) > 0):\n print(\"Using static background estimator.\")\n s_size = self.parameters.getAttr(\"static_background_estimate\")\n for i in range(len(self.planes)):\n bg_est = static_background.StaticBGEstimator(self.planes[i],\n start_frame = self.cur_frame + self.offsets[i],\n sample_size = s_size)\n self.bg_estimators.append(bg_est)\n",
"#!/usr/bin/env python\n\"\"\"\nConfigure folder for Spliner testing.\n\nHazen 09/17\n\"\"\"\nimport argparse\nimport inspect\nimport numpy\nimport os\n\nimport storm_analysis\nimport storm_analysis.sa_library.parameters as parameters\nimport storm_analysis.sa_library.sa_h5py as saH5Py\n\nimport storm_analysis.simulator.background as background\nimport storm_analysis.simulator.camera as camera\nimport storm_analysis.simulator.emitters_on_grid as emittersOnGrid\nimport storm_analysis.simulator.emitters_uniform_random as emittersUniformRandom\nimport storm_analysis.simulator.photophysics as photophysics\nimport storm_analysis.simulator.psf as psf\nimport storm_analysis.simulator.simulate as simulate\n\nimport storm_analysis.spliner.measure_psf as measurePSF\nimport storm_analysis.spliner.psf_to_spline as psfToSpline\n\nimport storm_analysis.diagnostics.spliner_2d.settings as settings\n\n\ndef testingParameters(cal_file = None):\n \"\"\"\n Create a Spliner parameters object.\n \"\"\"\n params = parameters.ParametersSpliner()\n\n params.setAttr(\"max_frame\", \"int\", -1) \n params.setAttr(\"start_frame\", \"int\", -1) \n\n params.setAttr(\"background_sigma\", \"float\", 8.0)\n\n if cal_file is not None:\n params.setAttr(\"camera_calibration\", \"filename\", cal_file)\n else:\n params.setAttr(\"camera_gain\", \"float\", settings.camera_gain)\n params.setAttr(\"camera_offset\", \"float\", settings.camera_offset)\n\n params.setAttr(\"find_max_radius\", \"int\", 5)\n params.setAttr(\"iterations\", \"int\", settings.iterations)\n params.setAttr(\"no_fitting\", \"int\", 0)\n params.setAttr(\"pixel_size\", \"float\", settings.pixel_size)\n params.setAttr(\"sigma\", \"float\", 1.5)\n params.setAttr(\"spline\", \"filename\", \"psf.spline\")\n params.setAttr(\"threshold\", \"float\", 6.0)\n\n # Don't do tracking.\n params.setAttr(\"descriptor\", \"string\", \"1\")\n params.setAttr(\"radius\", \"float\", \"0.0\")\n\n # Don't do drift-correction.\n params.setAttr(\"d_scale\", \"int\", 2)\n params.setAttr(\"drift_correction\", \"int\", 0)\n params.setAttr(\"frame_step\", \"int\", 500)\n params.setAttr(\"z_correction\", \"int\", 0)\n\n # 'peak_locations' testing.\n if hasattr(settings, \"peak_locations\") and (settings.peak_locations is not None):\n params.setAttr(\"peak_locations\", \"filename\", settings.peak_locations)\n \n return params\n\n\ndef configure(no_splines, cal_file = None):\n\n # Create sCMOS calibration file if requested.\n #\n if cal_file is not None:\n offset = numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset\n variance = numpy.ones((settings.y_size, settings.x_size))\n gain = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain\n rqe = numpy.ones((settings.y_size, settings.x_size))\n numpy.save(cal_file, [offset, variance, gain, rqe, 2])\n \n # Create parameters file for analysis.\n #\n print(\"Creating XML file.\")\n params = testingParameters(cal_file = cal_file)\n params.toXMLFile(\"spliner.xml\")\n\n # Create localization on a grid file.\n #\n print(\"Creating gridded localization.\")\n emittersOnGrid.emittersOnGrid(\"grid_list.hdf5\",\n settings.nx,\n settings.ny,\n 1.5,\n 20,\n 0.0,\n 0.0)\n \n # Create randomly located localizations file.\n #\n print(\"Creating random localization.\")\n emittersUniformRandom.emittersUniformRandom(\"random_list.hdf5\",\n 1.0,\n settings.margin,\n settings.x_size,\n settings.y_size,\n 0.0)\n \n # Create sparser grid for PSF measurement.\n #\n print(\"Creating data for PSF measurement.\")\n 
emittersOnGrid.emittersOnGrid(\"sparse_list.hdf5\",\n 6,\n 3,\n 1.5,\n 40,\n 0.0,\n 0.0)\n \n if no_splines:\n return\n\n # Create beads.txt file for spline measurement.\n #\n with saH5Py.SAH5Py(\"sparse_list.hdf5\") as h5:\n locs = h5.getLocalizations()\n numpy.savetxt(\"beads.txt\", numpy.transpose(numpy.vstack((locs['x'], locs['y']))))\n \n # Create simulated data for PSF measurement.\n #\n bg_f = lambda s, x, y, i3 : background.UniformBackground(s, x, y, i3, photons = 10)\n cam_f = lambda s, x, y, i3 : camera.Ideal(s, x, y, i3, 100.)\n pp_f = lambda s, x, y, i3 : photophysics.AlwaysOn(s, x, y, i3, 20000.0)\n psf_f = lambda s, x, y, i3 : psf.GaussianPSF(s, x, y, i3, settings.pixel_size)\n\n sim = simulate.Simulate(background_factory = bg_f,\n camera_factory = cam_f,\n photophysics_factory = pp_f,\n psf_factory = psf_f,\n dither = True,\n x_size = settings.x_size,\n y_size = settings.y_size)\n \n sim.simulate(\"spline_2d.tif\", \"sparse_list.hdf5\", 5)\n\n # Measure the PSF.\n #\n print(\"Measuring PSF.\")\n psf_name = \"psf.psf\"\n measurePSF.measurePSF(\"spline_2d.tif\",\n \"na\",\n \"sparse_list.hdf5\",\n psf_name,\n want2d = True,\n aoi_size = int(settings.spline_size + 1),\n pixel_size = settings.pixel_size * 1.0e-3)\n\n # Measure the Spline.\n #\n if True:\n print(\"Measuring Spline.\")\n psfToSpline.psfToSpline(psf_name, \"psf.spline\", settings.spline_size)\n\n\nif (__name__ == \"__main__\"):\n parser = argparse.ArgumentParser(description = 'Spline diagnostics configuration.')\n\n parser.add_argument('--no-splines', dest='no_splines', action='store_true', default = False)\n\n args = parser.parse_args()\n \n configure(args.no_splines)\n"
] | [
[
"scipy.ndimage.interpolation.zoom",
"numpy.fft.fftfreq",
"numpy.fft.fftn",
"numpy.concatenate",
"numpy.max",
"numpy.copy",
"numpy.mean",
"numpy.zeros_like",
"scipy.ndimage.interpolation.shift",
"scipy.ndimage.filters.gaussian_filter",
"numpy.meshgrid",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.count_nonzero"
],
[
"numpy.vstack",
"numpy.zeros",
"numpy.save",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.10",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
javabrett/pandas | [
"7b92e7ee47fa3024aa5bc4fb3518717157c88dcc"
] | [
"pandas/core/base.py"
] | [
"\"\"\"\nBase and utility classes for pandas objects.\n\"\"\"\nimport builtins\nfrom collections import OrderedDict\nimport textwrap\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.lib as lib\nfrom pandas.compat import PYPY\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype,\n is_datetimelike, is_extension_array_dtype, is_extension_type, is_list_like,\n is_object_dtype, is_scalar, is_timedelta64_ns_dtype)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import algorithms, common as com\nfrom pandas.core.accessor import DirNamesMixin\nfrom pandas.core.arrays import ExtensionArray\nimport pandas.core.nanops as nanops\n\n_shared_docs = dict()\n_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',\n unique='IndexOpsMixin', duplicated='IndexOpsMixin')\n\n\nclass StringMixin:\n \"\"\"\n Implements string methods so long as object defines a `__str__` method.\n \"\"\"\n # side note - this could be made into a metaclass if more than one\n # object needs\n\n # ----------------------------------------------------------------------\n # Formatting\n\n def __str__(self):\n \"\"\"\n Return a string representation for a particular Object\n \"\"\"\n raise AbstractMethodError(self)\n\n def __repr__(self):\n \"\"\"\n Return a string representation for a particular object.\n \"\"\"\n return str(self)\n\n\nclass PandasObject(StringMixin, DirNamesMixin):\n\n \"\"\"baseclass for various pandas objects\"\"\"\n\n @property\n def _constructor(self):\n \"\"\"class constructor (for this class it's just `__class__`\"\"\"\n return self.__class__\n\n def __str__(self):\n \"\"\"\n Return a string representation for a particular object.\n \"\"\"\n # Should be overwritten by base classes\n return object.__repr__(self)\n\n def _reset_cache(self, key=None):\n \"\"\"\n Reset cached properties. If ``key`` is passed, only clears that key.\n \"\"\"\n if getattr(self, '_cache', None) is None:\n return\n if key is None:\n self._cache.clear()\n else:\n self._cache.pop(key, None)\n\n def __sizeof__(self):\n \"\"\"\n Generates the total memory usage for an object that returns\n either a value or Series of values\n \"\"\"\n if hasattr(self, 'memory_usage'):\n mem = self.memory_usage(deep=True)\n if not is_scalar(mem):\n mem = mem.sum()\n return int(mem)\n\n # no memory_usage attribute, so fall back to\n # object's 'sizeof'\n return super().__sizeof__()\n\n\nclass NoNewAttributesMixin:\n \"\"\"Mixin which prevents adding new attributes.\n\n Prevents additional attributes via xxx.attribute = \"something\" after a\n call to `self.__freeze()`. Mainly used to prevent the user from using\n wrong attributes on a accessor (`Series.cat/.str/.dt`).\n\n If you really want to add a new attribute at a later time, you need to use\n `object.__setattr__(self, key, value)`.\n \"\"\"\n\n def _freeze(self):\n \"\"\"Prevents setting additional attributes\"\"\"\n object.__setattr__(self, \"__frozen\", True)\n\n # prevent adding any attribute via s.xxx.new_attribute = ...\n def __setattr__(self, key, value):\n # _cache is used by a decorator\n # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)\n # because\n # 1.) 
getattr is false for attributes that raise errors\n # 2.) cls.__dict__ doesn't traverse into base classes\n if (getattr(self, \"__frozen\", False) and not\n (key == \"_cache\" or\n key in type(self).__dict__ or\n getattr(self, key, None) is not None)):\n raise AttributeError(\"You cannot add any new attribute '{key}'\".\n format(key=key))\n object.__setattr__(self, key, value)\n\n\nclass GroupByError(Exception):\n pass\n\n\nclass DataError(GroupByError):\n pass\n\n\nclass SpecificationError(GroupByError):\n pass\n\n\nclass SelectionMixin:\n \"\"\"\n mixin implementing the selection & aggregation interface on a group-like\n object sub-classes need to define: obj, exclusions\n \"\"\"\n _selection = None\n _internal_names = ['_cache', '__setstate__']\n _internal_names_set = set(_internal_names)\n\n _builtin_table = OrderedDict((\n (builtins.sum, np.sum),\n (builtins.max, np.max),\n (builtins.min, np.min),\n ))\n\n _cython_table = OrderedDict((\n (builtins.sum, 'sum'),\n (builtins.max, 'max'),\n (builtins.min, 'min'),\n (np.all, 'all'),\n (np.any, 'any'),\n (np.sum, 'sum'),\n (np.nansum, 'sum'),\n (np.mean, 'mean'),\n (np.nanmean, 'mean'),\n (np.prod, 'prod'),\n (np.nanprod, 'prod'),\n (np.std, 'std'),\n (np.nanstd, 'std'),\n (np.var, 'var'),\n (np.nanvar, 'var'),\n (np.median, 'median'),\n (np.nanmedian, 'median'),\n (np.max, 'max'),\n (np.nanmax, 'max'),\n (np.min, 'min'),\n (np.nanmin, 'min'),\n (np.cumprod, 'cumprod'),\n (np.nancumprod, 'cumprod'),\n (np.cumsum, 'cumsum'),\n (np.nancumsum, 'cumsum'),\n ))\n\n @property\n def _selection_name(self):\n \"\"\"\n return a name for myself; this would ideally be called\n the 'name' property, but we cannot conflict with the\n Series.name property which can be set\n \"\"\"\n if self._selection is None:\n return None # 'result'\n else:\n return self._selection\n\n @property\n def _selection_list(self):\n if not isinstance(self._selection, (list, tuple, ABCSeries,\n ABCIndexClass, np.ndarray)):\n return [self._selection]\n return self._selection\n\n @cache_readonly\n def _selected_obj(self):\n\n if self._selection is None or isinstance(self.obj, ABCSeries):\n return self.obj\n else:\n return self.obj[self._selection]\n\n @cache_readonly\n def ndim(self):\n return self._selected_obj.ndim\n\n @cache_readonly\n def _obj_with_exclusions(self):\n if self._selection is not None and isinstance(self.obj,\n ABCDataFrame):\n return self.obj.reindex(columns=self._selection_list)\n\n if len(self.exclusions) > 0:\n return self.obj.drop(self.exclusions, axis=1)\n else:\n return self.obj\n\n def __getitem__(self, key):\n if self._selection is not None:\n raise IndexError('Column(s) {selection} already selected'\n .format(selection=self._selection))\n\n if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,\n np.ndarray)):\n if len(self.obj.columns.intersection(key)) != len(key):\n bad_keys = list(set(key).difference(self.obj.columns))\n raise KeyError(\"Columns not found: {missing}\"\n .format(missing=str(bad_keys)[1:-1]))\n return self._gotitem(list(key), ndim=2)\n\n elif not getattr(self, 'as_index', False):\n if key not in self.obj.columns:\n raise KeyError(\"Column not found: {key}\".format(key=key))\n return self._gotitem(key, ndim=2)\n\n else:\n if key not in self.obj:\n raise KeyError(\"Column not found: {key}\".format(key=key))\n return self._gotitem(key, ndim=1)\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n 
requested ndim of result\n subset : object, default None\n subset to act on\n\n \"\"\"\n raise AbstractMethodError(self)\n\n def aggregate(self, func, *args, **kwargs):\n raise AbstractMethodError(self)\n\n agg = aggregate\n\n def _try_aggregate_string_function(self, arg, *args, **kwargs):\n \"\"\"\n if arg is a string, then try to operate on it:\n - try to find a function (or attribute) on ourselves\n - try to find a numpy function\n - raise\n\n \"\"\"\n assert isinstance(arg, str)\n\n f = getattr(self, arg, None)\n if f is not None:\n if callable(f):\n return f(*args, **kwargs)\n\n # people may try to aggregate on a non-callable attribute\n # but don't let them think they can pass args to it\n assert len(args) == 0\n assert len([kwarg for kwarg in kwargs\n if kwarg not in ['axis', '_level']]) == 0\n return f\n\n f = getattr(np, arg, None)\n if f is not None:\n return f(self, *args, **kwargs)\n\n raise ValueError(\"{arg} is an unknown string function\".format(arg=arg))\n\n def _aggregate(self, arg, *args, **kwargs):\n \"\"\"\n provide an implementation for the aggregators\n\n Parameters\n ----------\n arg : string, dict, function\n *args : args to pass on to the function\n **kwargs : kwargs to pass on to the function\n\n Returns\n -------\n tuple of result, how\n\n Notes\n -----\n how can be a string describe the required post-processing, or\n None if not required\n \"\"\"\n is_aggregator = lambda x: isinstance(x, (list, tuple, dict))\n is_nested_renamer = False\n\n _axis = kwargs.pop('_axis', None)\n if _axis is None:\n _axis = getattr(self, 'axis', 0)\n _level = kwargs.pop('_level', None)\n\n if isinstance(arg, str):\n return self._try_aggregate_string_function(arg, *args,\n **kwargs), None\n\n if isinstance(arg, dict):\n\n # aggregate based on the passed dict\n if _axis != 0: # pragma: no cover\n raise ValueError('Can only pass dict with axis=0')\n\n obj = self._selected_obj\n\n def nested_renaming_depr(level=4):\n # deprecation of nested renaming\n # GH 15931\n warnings.warn(\n (\"using a dict with renaming \"\n \"is deprecated and will be removed in a future \"\n \"version\"),\n FutureWarning, stacklevel=level)\n\n # if we have a dict of any non-scalars\n # eg. 
{'A' : ['mean']}, normalize all to\n # be list-likes\n if any(is_aggregator(x) for x in arg.values()):\n new_arg = OrderedDict()\n for k, v in arg.items():\n if not isinstance(v, (tuple, list, dict)):\n new_arg[k] = [v]\n else:\n new_arg[k] = v\n\n # the keys must be in the columns\n # for ndim=2, or renamers for ndim=1\n\n # ok for now, but deprecated\n # {'A': { 'ra': 'mean' }}\n # {'A': { 'ra': ['mean'] }}\n # {'ra': ['mean']}\n\n # not ok\n # {'ra' : { 'A' : 'mean' }}\n if isinstance(v, dict):\n is_nested_renamer = True\n\n if k not in obj.columns:\n msg = ('cannot perform renaming for {key} with a '\n 'nested dictionary').format(key=k)\n raise SpecificationError(msg)\n nested_renaming_depr(4 + (_level or 0))\n\n elif isinstance(obj, ABCSeries):\n nested_renaming_depr()\n elif (isinstance(obj, ABCDataFrame) and\n k not in obj.columns):\n raise KeyError(\n \"Column '{col}' does not exist!\".format(col=k))\n\n arg = new_arg\n\n else:\n # deprecation of renaming keys\n # GH 15931\n keys = list(arg.keys())\n if (isinstance(obj, ABCDataFrame) and\n len(obj.columns.intersection(keys)) != len(keys)):\n nested_renaming_depr()\n\n from pandas.core.reshape.concat import concat\n\n def _agg_1dim(name, how, subset=None):\n \"\"\"\n aggregate a 1-dim with how\n \"\"\"\n colg = self._gotitem(name, ndim=1, subset=subset)\n if colg.ndim != 1:\n raise SpecificationError(\"nested dictionary is ambiguous \"\n \"in aggregation\")\n return colg.aggregate(how, _level=(_level or 0) + 1)\n\n def _agg_2dim(name, how):\n \"\"\"\n aggregate a 2-dim with how\n \"\"\"\n colg = self._gotitem(self._selection, ndim=2,\n subset=obj)\n return colg.aggregate(how, _level=None)\n\n def _agg(arg, func):\n \"\"\"\n run the aggregations over the arg with func\n return an OrderedDict\n \"\"\"\n result = OrderedDict()\n for fname, agg_how in arg.items():\n result[fname] = func(fname, agg_how)\n return result\n\n # set the final keys\n keys = list(arg.keys())\n result = OrderedDict()\n\n # nested renamer\n if is_nested_renamer:\n result = list(_agg(arg, _agg_1dim).values())\n\n if all(isinstance(r, dict) for r in result):\n\n result, results = OrderedDict(), result\n for r in results:\n result.update(r)\n keys = list(result.keys())\n\n else:\n\n if self._selection is not None:\n keys = None\n\n # some selection on the object\n elif self._selection is not None:\n\n sl = set(self._selection_list)\n\n # we are a Series like object,\n # but may have multiple aggregations\n if len(sl) == 1:\n\n result = _agg(arg, lambda fname,\n agg_how: _agg_1dim(self._selection, agg_how))\n\n # we are selecting the same set as we are aggregating\n elif not len(sl - set(keys)):\n\n result = _agg(arg, _agg_1dim)\n\n # we are a DataFrame, with possibly multiple aggregations\n else:\n\n result = _agg(arg, _agg_2dim)\n\n # no selection\n else:\n\n try:\n result = _agg(arg, _agg_1dim)\n except SpecificationError:\n\n # we are aggregating expecting all 1d-returns\n # but we have 2d\n result = _agg(arg, _agg_2dim)\n\n # combine results\n\n def is_any_series():\n # return a boolean if we have *any* nested series\n return any(isinstance(r, ABCSeries) for r in result.values())\n\n def is_any_frame():\n # return a boolean if we have *any* nested series\n return any(isinstance(r, ABCDataFrame)\n for r in result.values())\n\n if isinstance(result, list):\n return concat(result, keys=keys, axis=1, sort=True), True\n\n elif is_any_frame():\n # we have a dict of DataFrames\n # return a MI DataFrame\n\n return concat([result[k] for k in keys],\n keys=keys, axis=1), 
True\n\n elif isinstance(self, ABCSeries) and is_any_series():\n\n # we have a dict of Series\n # return a MI Series\n try:\n result = concat(result)\n except TypeError:\n # we want to give a nice error here if\n # we have non-same sized objects, so\n # we don't automatically broadcast\n\n raise ValueError(\"cannot perform both aggregation \"\n \"and transformation operations \"\n \"simultaneously\")\n\n return result, True\n\n # fall thru\n from pandas import DataFrame, Series\n try:\n result = DataFrame(result)\n except ValueError:\n\n # we have a dict of scalars\n result = Series(result,\n name=getattr(self, 'name', None))\n\n return result, True\n elif is_list_like(arg):\n # we require a list, but not an 'str'\n return self._aggregate_multiple_funcs(arg,\n _level=_level,\n _axis=_axis), None\n else:\n result = None\n\n f = self._is_cython_func(arg)\n if f and not args and not kwargs:\n return getattr(self, f)(), None\n\n # caller can react\n return result, True\n\n def _aggregate_multiple_funcs(self, arg, _level, _axis):\n from pandas.core.reshape.concat import concat\n\n if _axis != 0:\n raise NotImplementedError(\"axis other than 0 is not supported\")\n\n if self._selected_obj.ndim == 1:\n obj = self._selected_obj\n else:\n obj = self._obj_with_exclusions\n\n results = []\n keys = []\n\n # degenerate case\n if obj.ndim == 1:\n for a in arg:\n try:\n colg = self._gotitem(obj.name, ndim=1, subset=obj)\n results.append(colg.aggregate(a))\n\n # make sure we find a good name\n name = com.get_callable_name(a) or a\n keys.append(name)\n except (TypeError, DataError):\n pass\n except SpecificationError:\n raise\n\n # multiples\n else:\n for index, col in enumerate(obj):\n try:\n colg = self._gotitem(col, ndim=1,\n subset=obj.iloc[:, index])\n results.append(colg.aggregate(arg))\n keys.append(col)\n except (TypeError, DataError):\n pass\n except ValueError:\n # cannot aggregate\n continue\n except SpecificationError:\n raise\n\n # if we are empty\n if not len(results):\n raise ValueError(\"no results\")\n\n try:\n return concat(results, keys=keys, axis=1, sort=False)\n except TypeError:\n\n # we are concatting non-NDFrame objects,\n # e.g. 
a list of scalars\n\n from pandas.core.dtypes.cast import is_nested_object\n from pandas import Series\n result = Series(results, index=keys, name=self.name)\n if is_nested_object(result):\n raise ValueError(\"cannot combine transform and \"\n \"aggregation operations\")\n return result\n\n def _shallow_copy(self, obj=None, obj_type=None, **kwargs):\n \"\"\"\n return a new object with the replacement attributes\n \"\"\"\n if obj is None:\n obj = self._selected_obj.copy()\n if obj_type is None:\n obj_type = self._constructor\n if isinstance(obj, obj_type):\n obj = obj.obj\n for attr in self._attributes:\n if attr not in kwargs:\n kwargs[attr] = getattr(self, attr)\n return obj_type(obj, **kwargs)\n\n def _is_cython_func(self, arg):\n \"\"\"\n if we define an internal function for this argument, return it\n \"\"\"\n return self._cython_table.get(arg)\n\n def _is_builtin_func(self, arg):\n \"\"\"\n if we define an builtin function for this argument, return it,\n otherwise return the arg\n \"\"\"\n return self._builtin_table.get(arg, arg)\n\n\nclass IndexOpsMixin:\n \"\"\" common ops mixin to support a unified interface / docs for Series /\n Index\n \"\"\"\n\n # ndarray compatibility\n __array_priority__ = 1000\n\n def transpose(self, *args, **kwargs):\n \"\"\"\n Return the transpose, which is by definition self.\n\n Returns\n -------\n %(klass)s\n \"\"\"\n nv.validate_transpose(args, kwargs)\n return self\n\n T = property(transpose, doc=\"Return the transpose, which is by \"\n \"definition self.\")\n\n @property\n def _is_homogeneous_type(self):\n \"\"\"\n Whether the object has a single dtype.\n\n By definition, Series and Index are always considered homogeneous.\n A MultiIndex may or may not be homogeneous, depending on the\n dtypes of the levels.\n\n See Also\n --------\n DataFrame._is_homogeneous_type\n MultiIndex._is_homogeneous_type\n \"\"\"\n return True\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple of the shape of the underlying data.\n \"\"\"\n return self._values.shape\n\n @property\n def ndim(self):\n \"\"\"\n Number of dimensions of the underlying data, by definition 1.\n \"\"\"\n return 1\n\n def item(self):\n \"\"\"\n Return the first element of the underlying data as a python scalar.\n\n Returns\n -------\n scalar\n The first element of %(klass)s.\n \"\"\"\n return self.values.item()\n\n @property\n def data(self):\n \"\"\"\n Return the data pointer of the underlying data.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.data is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self.values.data\n\n @property\n def itemsize(self):\n \"\"\"\n Return the size of the dtype of the item of the underlying data.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.itemsize is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self._ndarray_values.itemsize\n\n @property\n def nbytes(self):\n \"\"\"\n Return the number of bytes in the underlying data.\n \"\"\"\n return self._values.nbytes\n\n @property\n def strides(self):\n \"\"\"\n Return the strides of the underlying data.\n\n .. 
deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.strides is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self._ndarray_values.strides\n\n @property\n def size(self):\n \"\"\"\n Return the number of elements in the underlying data.\n \"\"\"\n return len(self._values)\n\n @property\n def flags(self):\n \"\"\"\n Return the ndarray.flags for the underlying data.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.flags is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self.values.flags\n\n @property\n def base(self):\n \"\"\"\n Return the base object if the memory of the underlying data is shared.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.base is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self.values.base\n\n @property\n def array(self) -> ExtensionArray:\n \"\"\"\n The ExtensionArray of the data backing this Series or Index.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n ExtensionArray\n An ExtensionArray of the values stored within. For extension\n types, this is the actual array. For NumPy native types, this\n is a thin (no copy) wrapper around :class:`numpy.ndarray`.\n\n ``.array`` differs ``.values`` which may require converting the\n data to a different form.\n\n See Also\n --------\n Index.to_numpy : Similar method that always returns a NumPy array.\n Series.to_numpy : Similar method that always returns a NumPy array.\n\n Notes\n -----\n This table lays out the different array types for each extension\n dtype within pandas.\n\n ================== =============================\n dtype array type\n ================== =============================\n category Categorical\n period PeriodArray\n interval IntervalArray\n IntegerNA IntegerArray\n datetime64[ns, tz] DatetimeArray\n ================== =============================\n\n For any 3rd-party extension types, the array type will be an\n ExtensionArray.\n\n For all remaining dtypes ``.array`` will be a\n :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray\n stored within. If you absolutely need a NumPy array (possibly with\n copying / coercing data), then use :meth:`Series.to_numpy` instead.\n\n Examples\n --------\n\n For regular NumPy types like int, and float, a PandasArray\n is returned.\n\n >>> pd.Series([1, 2, 3]).array\n <PandasArray>\n [1, 2, 3]\n Length: 3, dtype: int64\n\n For extension types, like Categorical, the actual ExtensionArray\n is returned\n\n >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))\n >>> ser.array\n [a, b, a]\n Categories (2, object): [a, b]\n \"\"\"\n # As a mixin, we depend on the mixing class having _values.\n # Special mixin syntax may be developed in the future:\n # https://github.com/python/typing/issues/246\n result = self._values # type: ignore\n\n if is_datetime64_ns_dtype(result.dtype):\n from pandas.arrays import DatetimeArray\n result = DatetimeArray(result)\n elif is_timedelta64_ns_dtype(result.dtype):\n from pandas.arrays import TimedeltaArray\n result = TimedeltaArray(result)\n\n elif not is_extension_array_dtype(result.dtype):\n from pandas.core.arrays.numpy_ import PandasArray\n result = PandasArray(result)\n\n return result\n\n def to_numpy(self, dtype=None, copy=False):\n \"\"\"\n A NumPy ndarray representing the values in this Series or Index.\n\n .. 
versionadded:: 0.24.0\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`\n copy : bool, default False\n Whether to ensure that the returned value is a not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.array : Get the actual data stored within.\n Index.array : Get the actual data stored within.\n DataFrame.to_numpy : Similar method for DataFrame.\n\n Notes\n -----\n The returned array will be the same up to equality (values equal\n in `self` will be equal in the returned array; likewise for values\n that are not equal). When `self` contains an ExtensionArray, the\n dtype may be different. For example, for a category-dtype Series,\n ``to_numpy()`` will return a NumPy array and the categorical dtype\n will be lost.\n\n For NumPy dtypes, this will be a reference to the actual data stored\n in this Series or Index (assuming ``copy=False``). Modifying the result\n in place will modify the data stored in the Series or Index (not that\n we recommend doing that).\n\n For extension types, ``to_numpy()`` *may* require copying data and\n coercing the result to a NumPy type (possibly object), which may be\n expensive. When you need a no-copy reference to the underlying data,\n :attr:`Series.array` should be used instead.\n\n This table lays out the different dtypes and default return types of\n ``to_numpy()`` for various dtypes within pandas.\n\n ================== ================================\n dtype array type\n ================== ================================\n category[T] ndarray[T] (same dtype as input)\n period ndarray[object] (Periods)\n interval ndarray[object] (Intervals)\n IntegerNA ndarray[object]\n datetime64[ns] datetime64[ns]\n datetime64[ns, tz] ndarray[object] (Timestamps)\n ================== ================================\n\n Examples\n --------\n >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))\n >>> ser.to_numpy()\n array(['a', 'b', 'a'], dtype=object)\n\n Specify the `dtype` to control how datetime-aware data is represented.\n Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`\n objects, each with the correct ``tz``.\n\n >>> ser = pd.Series(pd.date_range('2000', periods=2, tz=\"CET\"))\n >>> ser.to_numpy(dtype=object)\n array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),\n Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],\n dtype=object)\n\n Or ``dtype='datetime64[ns]'`` to return an ndarray of native\n datetime64 values. The values are converted to UTC and the timezone\n info is dropped.\n\n >>> ser.to_numpy(dtype=\"datetime64[ns]\")\n ... 
# doctest: +ELLIPSIS\n array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],\n dtype='datetime64[ns]')\n \"\"\"\n if is_datetime64tz_dtype(self.dtype) and dtype is None:\n # note: this is going to change very soon.\n # I have a WIP PR making this unnecessary, but it's\n # a bit out of scope for the DatetimeArray PR.\n dtype = \"object\"\n\n result = np.asarray(self._values, dtype=dtype)\n # TODO(GH-24345): Avoid potential double copy\n if copy:\n result = result.copy()\n return result\n\n @property\n def _ndarray_values(self) -> np.ndarray:\n \"\"\"\n The data as an ndarray, possibly losing information.\n\n The expectation is that this is cheap to compute, and is primarily\n used for interacting with our indexers.\n\n - categorical -> codes\n \"\"\"\n if is_extension_array_dtype(self):\n return self.array._ndarray_values\n # As a mixin, we depend on the mixing class having values.\n # Special mixin syntax may be developed in the future:\n # https://github.com/python/typing/issues/246\n return self.values # type: ignore\n\n @property\n def empty(self):\n return not self.size\n\n def max(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the maximum value of the Index.\n\n Parameters\n ----------\n axis : int, optional\n For compatibility with NumPy. Only 0 or None are allowed.\n skipna : bool, default True\n\n Returns\n -------\n scalar\n Maximum value.\n\n See Also\n --------\n Index.min : Return the minimum value in an Index.\n Series.max : Return the maximum value in a Series.\n DataFrame.max : Return the maximum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.max()\n 3\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.max()\n 'c'\n\n For a MultiIndex, the maximum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.max()\n ('b', 2)\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_max(args, kwargs)\n return nanops.nanmax(self._values, skipna=skipna)\n\n def argmax(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return an ndarray of the maximum argument indexer.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series\n skipna : bool, default True\n\n Returns\n -------\n numpy.ndarray\n Indices of the maximum values.\n\n See Also\n --------\n numpy.ndarray.argmax\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_argmax_with_skipna(skipna, args, kwargs)\n return nanops.nanargmax(self._values, skipna=skipna)\n\n def min(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the minimum value of the Index.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series\n skipna : bool, default True\n\n Returns\n -------\n scalar\n Minimum value.\n\n See Also\n --------\n Index.max : Return the maximum value of the object.\n Series.min : Return the minimum value in a Series.\n DataFrame.min : Return the minimum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.min()\n 1\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.min()\n 'a'\n\n For a MultiIndex, the minimum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.min()\n ('a', 1)\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_min(args, kwargs)\n return nanops.nanmin(self._values, skipna=skipna)\n\n def argmin(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return a ndarray of the minimum argument indexer.\n\n Parameters\n 
----------\n axis : {None}\n Dummy argument for consistency with Series\n skipna : bool, default True\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n numpy.ndarray.argmin\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_argmax_with_skipna(skipna, args, kwargs)\n return nanops.nanargmin(self._values, skipna=skipna)\n\n def tolist(self):\n \"\"\"\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n list\n\n See Also\n --------\n numpy.ndarray.tolist\n \"\"\"\n if is_datetimelike(self._values):\n return [com.maybe_box_datetimelike(x) for x in self._values]\n elif is_extension_array_dtype(self._values):\n return list(self._values)\n else:\n return self._values.tolist()\n\n to_list = tolist\n\n def __iter__(self):\n \"\"\"\n Return an iterator of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n iterator\n \"\"\"\n # We are explicity making element iterators.\n if is_datetimelike(self._values):\n return map(com.maybe_box_datetimelike, self._values)\n elif is_extension_array_dtype(self._values):\n return iter(self._values)\n else:\n return map(self._values.item, range(self._values.size))\n\n @cache_readonly\n def hasnans(self):\n \"\"\"\n Return if I have any nans; enables various perf speedups.\n \"\"\"\n return bool(isna(self).any())\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n \"\"\" perform the reduction type operation if we can \"\"\"\n func = getattr(self, name, None)\n if func is None:\n raise TypeError(\"{klass} cannot perform the operation {op}\".format(\n klass=self.__class__.__name__, op=name))\n return func(skipna=skipna, **kwds)\n\n def _map_values(self, mapper, na_action=None):\n \"\"\"\n An internal function that maps values using the input\n correspondence (which can be a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n The input correspondence object\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping function\n\n Returns\n -------\n Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n\n \"\"\"\n\n # we can fastpath dict/Series to an efficient map\n # as we know that we are not going to have to yield\n # python types\n if isinstance(mapper, dict):\n if hasattr(mapper, '__missing__'):\n # If a dictionary subclass defines a default value method,\n # convert mapper to a lookup function (GH #15999).\n dict_with_default = mapper\n mapper = lambda x: dict_with_default[x]\n else:\n # Dictionary does not have a default. 
Thus it's safe to\n # convert to an Series for efficiency.\n # we specify the keys here to handle the\n # possibility that they are tuples\n from pandas import Series\n mapper = Series(mapper)\n\n if isinstance(mapper, ABCSeries):\n # Since values were input this means we came from either\n # a dict or a series and mapper should be an index\n if is_categorical_dtype(self._values):\n # use the built in categorical series mapper which saves\n # time by mapping the categories instead of all values\n return self._values.map(mapper)\n if is_extension_type(self.dtype):\n values = self._values\n else:\n values = self.values\n\n indexer = mapper.index.get_indexer(values)\n new_values = algorithms.take_1d(mapper._values, indexer)\n\n return new_values\n\n # we must convert to python types\n if is_extension_type(self.dtype):\n values = self._values\n if na_action is not None:\n raise NotImplementedError\n map_f = lambda values, f: values.map(f)\n else:\n values = self.astype(object)\n values = getattr(values, 'values', values)\n if na_action == 'ignore':\n def map_f(values, f):\n return lib.map_infer_mask(values, f,\n isna(values).view(np.uint8))\n else:\n map_f = lib.map_infer\n\n # mapper is a function\n new_values = map_f(values, mapper)\n\n return new_values\n\n def value_counts(self, normalize=False, sort=True, ascending=False,\n bins=None, dropna=True):\n \"\"\"\n Return a Series containing counts of unique values.\n\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : boolean, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : boolean, default True\n Sort by frequencies.\n ascending : boolean, default False\n Sort in ascending order.\n bins : integer, optional\n Rather than count values, group them into half-open bins,\n a convenience for ``pd.cut``, only works with numeric data.\n dropna : boolean, default True\n Don't include counts of NaN.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.count: Number of non-NA elements in a DataFrame.\n\n Examples\n --------\n >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])\n >>> index.value_counts()\n 3.0 2\n 4.0 1\n 2.0 1\n 1.0 1\n dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])\n >>> s.value_counts(normalize=True)\n 3.0 0.4\n 4.0 0.2\n 2.0 0.2\n 1.0 0.2\n dtype: float64\n\n **bins**\n\n Bins can be useful for going from a continuous variable to a\n categorical variable; instead of counting unique\n apparitions of values, divide the index in the specified\n number of half-open bins.\n\n >>> s.value_counts(bins=3)\n (2.0, 3.0] 2\n (0.996, 2.0] 2\n (3.0, 4.0] 1\n dtype: int64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> s.value_counts(dropna=False)\n 3.0 2\n NaN 1\n 4.0 1\n 2.0 1\n 1.0 1\n dtype: int64\n \"\"\"\n from pandas.core.algorithms import value_counts\n result = value_counts(self, sort=sort, ascending=ascending,\n normalize=normalize, bins=bins, dropna=dropna)\n return result\n\n def unique(self):\n values = self._values\n\n if hasattr(values, 'unique'):\n\n result = values.unique()\n else:\n from pandas.core.algorithms import unique1d\n result = unique1d(values)\n\n return result\n\n def 
nunique(self, dropna=True):\n \"\"\"\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NaN in the count.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 3, 5, 7, 7])\n >>> s\n 0 1\n 1 3\n 2 5\n 3 7\n 4 7\n dtype: int64\n\n >>> s.nunique()\n 4\n \"\"\"\n uniqs = self.unique()\n n = len(uniqs)\n if dropna and isna(uniqs).any():\n n -= 1\n return n\n\n @property\n def is_unique(self):\n \"\"\"\n Return boolean if values in the object are unique.\n\n Returns\n -------\n bool\n \"\"\"\n return self.nunique(dropna=False) == len(self)\n\n @property\n def is_monotonic(self):\n \"\"\"\n Return boolean if values in the object are\n monotonic_increasing.\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n bool\n \"\"\"\n from pandas import Index\n return Index(self).is_monotonic\n\n is_monotonic_increasing = is_monotonic\n\n @property\n def is_monotonic_decreasing(self):\n \"\"\"\n Return boolean if values in the object are\n monotonic_decreasing.\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n bool\n \"\"\"\n from pandas import Index\n return Index(self).is_monotonic_decreasing\n\n def memory_usage(self, deep=False):\n \"\"\"\n Memory usage of the values\n\n Parameters\n ----------\n deep : bool\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption\n\n Returns\n -------\n bytes used\n\n See Also\n --------\n numpy.ndarray.nbytes\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False or if used on PyPy\n \"\"\"\n if hasattr(self.array, 'memory_usage'):\n return self.array.memory_usage(deep=deep)\n\n v = self.array.nbytes\n if deep and is_object_dtype(self) and not PYPY:\n v += lib.memory_usage_of_objects(self.array)\n return v\n\n @Substitution(\n values='', order='', size_hint='',\n sort=textwrap.dedent(\"\"\"\\\n sort : boolean, default False\n Sort `uniques` and shuffle `labels` to maintain the\n relationship.\n \"\"\"))\n @Appender(algorithms._shared_docs['factorize'])\n def factorize(self, sort=False, na_sentinel=-1):\n return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)\n\n _shared_docs['searchsorted'] = (\n \"\"\"\n Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted %(klass)s `self` such that, if the\n corresponding elements in `value` were inserted before the indices,\n the order of `self` would be preserved.\n\n Parameters\n ----------\n value : array_like\n Values to insert into `self`.\n side : {'left', 'right'}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `self`).\n sorter : 1-D array_like, optional\n Optional array of integer indices that sort `self` into ascending\n order. They are typically the result of ``np.argsort``.\n\n Returns\n -------\n int or array of int\n A scalar or array of insertion points with the\n same shape as `value`.\n\n .. 
versionchanged :: 0.24.0\n If `value` is a scalar, an int is now always returned.\n Previously, scalar inputs returned an 1-item array for\n :class:`Series` and :class:`Categorical`.\n\n See Also\n --------\n numpy.searchsorted\n\n Notes\n -----\n Binary search is used to find the required insertion points.\n\n Examples\n --------\n\n >>> x = pd.Series([1, 2, 3])\n >>> x\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> x.searchsorted(4)\n 3\n\n >>> x.searchsorted([0, 4])\n array([0, 3])\n\n >>> x.searchsorted([1, 3], side='left')\n array([0, 2])\n\n >>> x.searchsorted([1, 3], side='right')\n array([1, 3])\n\n >>> x = pd.Categorical(['apple', 'bread', 'bread',\n 'cheese', 'milk'], ordered=True)\n [apple, bread, bread, cheese, milk]\n Categories (4, object): [apple < bread < cheese < milk]\n\n >>> x.searchsorted('bread')\n 1\n\n >>> x.searchsorted(['bread'], side='right')\n array([3])\n \"\"\")\n\n @Substitution(klass='Index')\n @Appender(_shared_docs['searchsorted'])\n def searchsorted(self, value, side='left', sorter=None):\n return algorithms.searchsorted(self._values, value,\n side=side, sorter=sorter)\n\n def drop_duplicates(self, keep='first', inplace=False):\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if isinstance(self, ABCIndexClass):\n if self.is_unique:\n return self._shallow_copy()\n\n duplicated = self.duplicated(keep=keep)\n result = self[np.logical_not(duplicated)]\n if inplace:\n return self._update_inplace(result)\n else:\n return result\n\n def duplicated(self, keep='first'):\n from pandas.core.algorithms import duplicated\n if isinstance(self, ABCIndexClass):\n if self.is_unique:\n return np.zeros(len(self), dtype=np.bool)\n return duplicated(self, keep=keep)\n else:\n return self._constructor(duplicated(self, keep=keep),\n index=self.index).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # abstracts\n\n def _update_inplace(self, result, verify_is_copy=True, **kwargs):\n raise AbstractMethodError(self)\n"
] | [
[
"pandas.arrays.DatetimeArray",
"pandas.core.dtypes.common.is_datetime64_ns_dtype",
"pandas.util._validators.validate_bool_kwarg",
"pandas.Series",
"pandas.core.common.get_callable_name",
"numpy.asarray",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.is_datetimelike",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.DataFrame",
"pandas.core.nanops.nanmin",
"pandas.core.dtypes.common.is_timedelta64_ns_dtype",
"pandas.util._decorators.Substitution",
"pandas.errors.AbstractMethodError",
"pandas.Index",
"pandas.core.algorithms.factorize",
"pandas.core.nanops.nanargmin",
"pandas.core.arrays.numpy_.PandasArray",
"pandas.core.algorithms.searchsorted",
"pandas.core.dtypes.cast.is_nested_object",
"pandas.core.dtypes.common.is_categorical_dtype",
"numpy.logical_not",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.core.common.maybe_box_datetimelike",
"pandas.compat.numpy.function.validate_transpose",
"pandas.core.reshape.concat.concat",
"pandas.core.nanops.nanargmax",
"pandas.compat.numpy.function.validate_argmax_with_skipna",
"pandas.compat.numpy.function.validate_minmax_axis",
"pandas.core.algorithms.take_1d",
"pandas.core.dtypes.common.is_scalar",
"pandas.compat.numpy.function.validate_max",
"pandas.arrays.TimedeltaArray",
"pandas.core.algorithms.unique1d",
"pandas.core.dtypes.common.is_extension_type",
"pandas.core.algorithms.value_counts",
"pandas.compat.numpy.function.validate_min",
"pandas.core.dtypes.common.is_object_dtype",
"pandas._libs.lib.memory_usage_of_objects",
"pandas.core.dtypes.missing.isna",
"pandas.core.nanops.nanmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
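The pandas ``base.py`` excerpt above documents the 0.24-era split between ``.array`` (the backing ExtensionArray, returned without conversion or copy) and ``.to_numpy()`` (a materialised ndarray that may copy and coerce). A minimal sketch of the difference, using only the public API shown in those docstrings:

import pandas as pd

ser = pd.Series(pd.Categorical(['a', 'b', 'a']))

backing = ser.array   # the Categorical itself: no conversion, no copy
arr = ser.to_numpy()  # materialised ndarray: the categorical dtype is lost

print(type(backing).__name__)  # Categorical
print(arr.dtype)               # object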
arangoml/dgl | [
"d135058f9986fadcbdf6aa1011a00c3ad45a8ce3",
"d135058f9986fadcbdf6aa1011a00c3ad45a8ce3",
"d135058f9986fadcbdf6aa1011a00c3ad45a8ce3"
] | [
"tutorials/basics/2_basics.py",
"apps/life_sci/dgllife/model/model_zoo/jtnn/jtnn_enc.py",
"tutorials/models/1_gnn/4_rgcn.py"
] | [
"\"\"\"\r\n.. currentmodule:: dgl\r\n\r\nDGLGraph and Node/edge Features\r\n===============================\r\n\r\n**Author**: `Minjie Wang <https://jermainewang.github.io/>`_, Quan Gan, Yu Gai,\r\nZheng Zhang\r\n\r\nIn this tutorial, you learn how to create a graph and how to read and write node and edge representations.\r\n\"\"\"\r\n\r\n###############################################################################\r\n# Creating a graph\r\n# ----------------\r\n# The design of :class:`DGLGraph` was influenced by other graph libraries. You \r\n# can create a graph from networkx and convert it into a :class:`DGLGraph` and \r\n# vice versa.\r\n\r\nimport networkx as nx\r\nimport dgl\r\n\r\ng_nx = nx.petersen_graph()\r\ng_dgl = dgl.DGLGraph(g_nx)\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.subplot(121)\r\nnx.draw(g_nx, with_labels=True)\r\nplt.subplot(122)\r\nnx.draw(g_dgl.to_networkx(), with_labels=True)\r\n\r\nplt.show()\r\n\r\n\r\n###############################################################################\r\n# The examples here show the same graph, except that :class:`DGLGraph` is always directional.\r\n#\r\n# You can also create a graph by calling the DGL interface.\r\n# \r\n# In the next example, you build a star graph. :class:`DGLGraph` nodes are a consecutive range of\r\n# integers between 0 and :func:`number_of_nodes() <DGLGraph.number_of_nodes>`\r\n# and can grow by calling :func:`add_nodes <DGLGraph.add_nodes>`.\r\n# :class:`DGLGraph` edges are in order of their additions. Note that\r\n# edges are accessed in much the same way as nodes, with one extra feature: *edge broadcasting*.\r\n\r\nimport dgl\r\nimport torch as th\r\n\r\ng = dgl.DGLGraph()\r\ng.add_nodes(10)\r\n# A couple edges one-by-one\r\nfor i in range(1, 4):\r\n g.add_edge(i, 0)\r\n# A few more with a paired list\r\nsrc = list(range(5, 8)); dst = [0]*3\r\ng.add_edges(src, dst)\r\n# finish with a pair of tensors\r\nsrc = th.tensor([8, 9]); dst = th.tensor([0, 0])\r\ng.add_edges(src, dst)\r\n\r\n# Edge broadcasting will do star graph in one go!\r\ng.clear(); g.add_nodes(10)\r\nsrc = th.tensor(list(range(1, 10)));\r\ng.add_edges(src, 0)\r\n\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\nnx.draw(g.to_networkx(), with_labels=True)\r\nplt.show()\r\n\r\n\r\n###############################################################################\r\n# Assigning a feature\r\n# -------------------\r\n# You can also assign features to nodes and edges of a :class:`DGLGraph`. The\r\n# features are represented as dictionary of names (strings) and tensors,\r\n# called **fields**.\r\n#\r\n# The following code snippet assigns each node a vector (len=3).\r\n#\r\n# .. note::\r\n#\r\n# DGL aims to be framework-agnostic, and currently it supports PyTorch and\r\n# MXNet tensors. The following examples use PyTorch only.\r\n\r\nimport dgl\r\nimport torch as th\r\n\r\nx = th.randn(10, 3)\r\ng.ndata['x'] = x\r\n\r\n\r\n###############################################################################\r\n# :func:`ndata <DGLGraph.ndata>` is a syntax sugar to access the state of all nodes. 
\r\n# States are stored\r\n# in a container ``data`` that hosts a user-defined dictionary.\r\n\r\nprint(g.ndata['x'] == g.nodes[:].data['x'])\r\n\r\n# Access node set with integer, list, or integer tensor\r\ng.nodes[0].data['x'] = th.zeros(1, 3)\r\ng.nodes[[0, 1, 2]].data['x'] = th.zeros(3, 3)\r\ng.nodes[th.tensor([0, 1, 2])].data['x'] = th.zeros(3, 3)\r\n\r\n\r\n###############################################################################\r\n# Assigning edge features is similar to that of node features,\r\n# except that you can also do it by specifying endpoints of the edges.\r\n\r\ng.edata['w'] = th.randn(9, 2)\r\n\r\n# Access edge set with IDs in integer, list, or integer tensor\r\ng.edges[1].data['w'] = th.randn(1, 2)\r\ng.edges[[0, 1, 2]].data['w'] = th.zeros(3, 2)\r\ng.edges[th.tensor([0, 1, 2])].data['w'] = th.zeros(3, 2)\r\n\r\n# You can also access the edges by giving endpoints\r\ng.edges[1, 0].data['w'] = th.ones(1, 2) # edge 1 -> 0\r\ng.edges[[1, 2, 3], [0, 0, 0]].data['w'] = th.ones(3, 2) # edges [1, 2, 3] -> 0\r\n\r\n\r\n###############################################################################\r\n# After assignments, each node or edge field will be associated with a scheme\r\n# containing the shape and data type (dtype) of its field value.\r\n\r\nprint(g.node_attr_schemes())\r\ng.ndata['x'] = th.zeros((10, 4))\r\nprint(g.node_attr_schemes())\r\n\r\n\r\n###############################################################################\r\n# You can also remove node or edge states from the graph. This is particularly\r\n# useful to save memory during inference.\r\n\r\ng.ndata.pop('x')\r\ng.edata.pop('w')\r\n\r\n\r\n###############################################################################\r\n# Working with multigraphs\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~\r\n# Many graph applications need parallel edges. To enable this, construct :class:`DGLGraph`\r\n# with ``multigraph=True``.\r\n\r\ng_multi = dgl.DGLGraph(multigraph=True)\r\ng_multi.add_nodes(10)\r\ng_multi.ndata['x'] = th.randn(10, 2)\r\n\r\ng_multi.add_edges(list(range(1, 10)), 0)\r\ng_multi.add_edge(1, 0) # two edges on 1->0\r\n\r\ng_multi.edata['w'] = th.randn(10, 2)\r\ng_multi.edges[1].data['w'] = th.zeros(1, 2)\r\nprint(g_multi.edges())\r\n\r\n\r\n###############################################################################\r\n# An edge in a multigraph cannot be uniquely identified by its incident nodes\r\n# :math:`u` and :math:`v`; to query their edge IDs, use the ``edge_id`` interface.\r\n\r\neid_10 = g_multi.edge_id(1, 0)\r\ng_multi.edges[eid_10].data['w'] = th.ones(len(eid_10), 2)\r\nprint(g_multi.edata['w'])\r\n\r\n\r\n###############################################################################\r\n# .. note::\r\n#\r\n#    * Nodes and edges can be added but not removed.\r\n#    * Updating a feature with a different scheme raises the risk of errors on\r\n#      individual nodes (or a node subset).\r\n\r\n\r\n###############################################################################\r\n# Next steps\r\n# ----------\r\n# In the :doc:`next tutorial <3_pagerank>` you learn the\r\n# DGL message passing interface by implementing PageRank.\r\n",
"# pylint: disable=C0111, C0103, E1101, W0611, W0612\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nimport dgl.function as DGLF\nfrom dgl import batch, bfs_edges_generator\n\nfrom .nnutils import GRUUpdate, cuda\n\nMAX_NB = 8\n\ndef level_order(forest, roots):\n edges = bfs_edges_generator(forest, roots)\n _, leaves = forest.find_edges(edges[-1])\n edges_back = bfs_edges_generator(forest, roots, reverse=True)\n yield from reversed(edges_back)\n yield from edges\n\nenc_tree_msg = [DGLF.copy_src(src='m', out='m'),\n DGLF.copy_src(src='rm', out='rm')]\nenc_tree_reduce = [DGLF.sum(msg='m', out='s'),\n DGLF.sum(msg='rm', out='accum_rm')]\nenc_tree_gather_msg = DGLF.copy_edge(edge='m', out='m')\nenc_tree_gather_reduce = DGLF.sum(msg='m', out='m')\n\nclass EncoderGatherUpdate(nn.Module):\n def __init__(self, hidden_size):\n nn.Module.__init__(self)\n self.hidden_size = hidden_size\n\n self.W = nn.Linear(2 * hidden_size, hidden_size)\n\n def forward(self, nodes):\n x = nodes.data['x']\n m = nodes.data['m']\n return {\n 'h': torch.relu(self.W(torch.cat([x, m], 1))),\n }\n\nclass DGLJTNNEncoder(nn.Module):\n def __init__(self, vocab, hidden_size, embedding=None):\n nn.Module.__init__(self)\n self.hidden_size = hidden_size\n self.vocab_size = vocab.size()\n self.vocab = vocab\n\n if embedding is None:\n self.embedding = nn.Embedding(self.vocab_size, hidden_size)\n else:\n self.embedding = embedding\n\n self.enc_tree_update = GRUUpdate(hidden_size)\n self.enc_tree_gather_update = EncoderGatherUpdate(hidden_size)\n\n def forward(self, mol_trees):\n mol_tree_batch = batch(mol_trees)\n\n # Build line graph to prepare for belief propagation\n mol_tree_batch_lg = mol_tree_batch.line_graph(\n backtracking=False, shared=True)\n\n return self.run(mol_tree_batch, mol_tree_batch_lg)\n\n def run(self, mol_tree_batch, mol_tree_batch_lg):\n # Since tree roots are designated to 0. In the batched graph we can\n # simply find the corresponding node ID by looking at node_offset\n node_offset = np.cumsum([0] + mol_tree_batch.batch_num_nodes)\n root_ids = node_offset[:-1]\n n_nodes = mol_tree_batch.number_of_nodes()\n n_edges = mol_tree_batch.number_of_edges()\n\n # Assign structure embeddings to tree nodes\n mol_tree_batch.ndata.update({\n 'x': self.embedding(mol_tree_batch.ndata['wid']),\n 'h': cuda(torch.zeros(n_nodes, self.hidden_size)),\n })\n\n # Initialize the intermediate variables according to Eq (4)-(8).\n # Also initialize the src_x and dst_x fields.\n # TODO: context?\n mol_tree_batch.edata.update({\n 's': cuda(torch.zeros(n_edges, self.hidden_size)),\n 'm': cuda(torch.zeros(n_edges, self.hidden_size)),\n 'r': cuda(torch.zeros(n_edges, self.hidden_size)),\n 'z': cuda(torch.zeros(n_edges, self.hidden_size)),\n 'src_x': cuda(torch.zeros(n_edges, self.hidden_size)),\n 'dst_x': cuda(torch.zeros(n_edges, self.hidden_size)),\n 'rm': cuda(torch.zeros(n_edges, self.hidden_size)),\n 'accum_rm': cuda(torch.zeros(n_edges, self.hidden_size)),\n })\n\n # Send the source/destination node features to edges\n mol_tree_batch.apply_edges(\n func=lambda edges: {\n 'src_x': edges.src['x'], 'dst_x': edges.dst['x']},\n )\n\n # Message passing\n # I exploited the fact that the reduce function is a sum of incoming\n # messages, and the uncomputed messages are zero vectors. 
Essentially,\n # we can always compute s_ij as the sum of incoming m_ij, no matter\n # if m_ij is actually computed or not.\n for eid in level_order(mol_tree_batch, root_ids):\n #eid = mol_tree_batch.edge_ids(u, v)\n mol_tree_batch_lg.pull(\n eid,\n enc_tree_msg,\n enc_tree_reduce,\n self.enc_tree_update,\n )\n\n # Readout\n mol_tree_batch.update_all(\n enc_tree_gather_msg,\n enc_tree_gather_reduce,\n self.enc_tree_gather_update,\n )\n\n root_vecs = mol_tree_batch.nodes[root_ids].data['h']\n\n return mol_tree_batch, root_vecs\n",
"\"\"\"\r\n.. _model-rgcn:\r\n\r\nRelational graph convolutional network\r\n================================================\r\n\r\n**Author:** Lingfan Yu, Mufei Li, Zheng Zhang\r\n\r\nIn this tutorial, you learn how to implement a relational graph convolutional\r\nnetwork (R-GCN). This type of network is one effort to generalize GCN \r\nto handle different relationships between entities in a knowledge base. To \r\nlearn more about the research behind R-GCN, see `Modeling Relational Data with Graph Convolutional\r\nNetworks <https://arxiv.org/pdf/1703.06103.pdf>`_ \r\n\r\nThe straightforward graph convolutional network (GCN) and \r\n`DGL tutorial <http://doc.dgl.ai/tutorials/index.html>`_) exploits\r\nstructural information of a dataset (that is, the graph connectivity) in order to\r\nimprove the extraction of node representations. Graph edges are left as\r\nuntyped.\r\n\r\nA knowledge graph is made up of a collection of triples in the form\r\nsubject, relation, object. Edges thus encode important information and\r\nhave their own embeddings to be learned. Furthermore, there may exist\r\nmultiple edges among any given pair.\r\n\r\n\"\"\"\r\n###############################################################################\r\n# A brief introduction to R-GCN\r\n# ---------------------------\r\n# In *statistical relational learning* (SRL), there are two fundamental\r\n# tasks:\r\n#\r\n# - **Entity classification** - Where you assign types and categorical\r\n# properties to entities.\r\n# - **Link prediction** - Where you recover missing triples.\r\n#\r\n# In both cases, missing information is expected to be recovered from the \r\n# neighborhood structure of the graph. For example, the R-GCN\r\n# paper cited earlier provides the following example. Knowing that Mikhail Baryshnikov was educated at the Vaganova Academy\r\n# implies both that Mikhail Baryshnikov should have the label person, and\r\n# that the triple (Mikhail Baryshnikov, lived in, Russia) must belong to the\r\n# knowledge graph.\r\n#\r\n# R-GCN solves these two problems using a common graph convolutional network. It's \r\n# extended with multi-edge encoding to compute embedding of the entities, but\r\n# with different downstream processing.\r\n#\r\n# - Entity classification is done by attaching a softmax classifier at the\r\n# final embedding of an entity (node). Training is through loss of standard\r\n# cross-entropy.\r\n# - Link prediction is done by reconstructing an edge with an autoencoder\r\n# architecture, using a parameterized score function. Training uses negative\r\n# sampling.\r\n#\r\n# This tutorial focuses on the first task, entity classification, to show how to generate entity\r\n# representation. `Complete\r\n# code <https://github.com/dmlc/dgl/tree/rgcn/examples/pytorch/rgcn>`_\r\n# for both tasks is found in the DGL Github repository.\r\n#\r\n# Key ideas of R-GCN\r\n# -------------------\r\n# Recall that in GCN, the hidden representation for each node :math:`i` at\r\n# :math:`(l+1)^{th}` layer is computed by:\r\n#\r\n# .. math:: h_i^{l+1} = \\sigma\\left(\\sum_{j\\in N_i}\\frac{1}{c_i} W^{(l)} h_j^{(l)}\\right)~~~~~~~~~~(1)\\\\\r\n#\r\n# where :math:`c_i` is a normalization constant.\r\n#\r\n# The key difference between R-GCN and GCN is that in R-GCN, edges can\r\n# represent different relations. In GCN, weight :math:`W^{(l)}` in equation\r\n# :math:`(1)` is shared by all edges in layer :math:`l`. 
In contrast, in\r\n# R-GCN, different edge types use different weights and only edges of the\r\n# same relation type :math:`r` are associated with the same projection weight\r\n# :math:`W_r^{(l)}`.\r\n#\r\n# So the hidden representation of entities in :math:`(l+1)^{th}` layer in\r\n# R-GCN can be formulated as the following equation:\r\n#\r\n# .. math:: h_i^{l+1} = \\sigma\\left(W_0^{(l)}h_i^{(l)}+\\sum_{r\\in R}\\sum_{j\\in N_i^r}\\frac{1}{c_{i,r}}W_r^{(l)}h_j^{(l)}\\right)~~~~~~~~~~(2)\\\\\r\n#\r\n# where :math:`N_i^r` denotes the set of neighbor indices of node :math:`i`\r\n# under relation :math:`r\\in R` and :math:`c_{i,r}` is a normalization\r\n# constant. In entity classification, the R-GCN paper uses\r\n# :math:`c_{i,r}=|N_i^r|`.\r\n#\r\n# The problem of applying the above equation directly is the rapid growth of\r\n# the number of parameters, especially with highly multi-relational data. In\r\n# order to reduce model parameter size and prevent overfitting, the original\r\n# paper proposes to use basis decomposition.\r\n#\r\n# .. math:: W_r^{(l)}=\\sum\\limits_{b=1}^B a_{rb}^{(l)}V_b^{(l)}~~~~~~~~~~(3)\\\\\r\n#\r\n# Therefore, the weight :math:`W_r^{(l)}` is a linear combination of basis\r\n# transformation :math:`V_b^{(l)}` with coefficients :math:`a_{rb}^{(l)}`.\r\n# The number of bases :math:`B` is much smaller than the number of relations\r\n# in the knowledge base.\r\n#\r\n# .. note::\r\n# Another weight regularization, block-decomposition, is implemented in\r\n# the `link prediction <link-prediction_>`_.\r\n#\r\n# Implement R-GCN in DGL\r\n# ----------------------\r\n#\r\n# An R-GCN model is composed of several R-GCN layers. The first R-GCN layer\r\n# also serves as input layer and takes in features (for example, description texts)\r\n# that are associated with node entity and project to hidden space. In this tutorial,\r\n# we only use the entity ID as an entity feature.\r\n#\r\n# R-GCN layers\r\n# ~~~~~~~~~~~~\r\n#\r\n# For each node, an R-GCN layer performs the following steps:\r\n#\r\n# - Compute outgoing message using node representation and weight matrix\r\n# associated with the edge type (message function)\r\n# - Aggregate incoming messages and generate new node representations (reduce\r\n# and apply function)\r\n#\r\n# The following code is the definition of an R-GCN hidden layer.\r\n#\r\n# .. note::\r\n# Each relation type is associated with a different weight. 
Therefore,\r\n# the full weight matrix has three dimensions: relation, input_feature,\r\n# output_feature.\r\n#\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom dgl import DGLGraph\r\nimport dgl.function as fn\r\nfrom functools import partial\r\n\r\nclass RGCNLayer(nn.Module):\r\n def __init__(self, in_feat, out_feat, num_rels, num_bases=-1, bias=None,\r\n activation=None, is_input_layer=False):\r\n super(RGCNLayer, self).__init__()\r\n self.in_feat = in_feat\r\n self.out_feat = out_feat\r\n self.num_rels = num_rels\r\n self.num_bases = num_bases\r\n self.bias = bias\r\n self.activation = activation\r\n self.is_input_layer = is_input_layer\r\n\r\n # sanity check\r\n if self.num_bases <= 0 or self.num_bases > self.num_rels:\r\n self.num_bases = self.num_rels\r\n\r\n # weight bases in equation (3)\r\n self.weight = nn.Parameter(torch.Tensor(self.num_bases, self.in_feat,\r\n self.out_feat))\r\n if self.num_bases < self.num_rels:\r\n # linear combination coefficients in equation (3)\r\n self.w_comp = nn.Parameter(torch.Tensor(self.num_rels, self.num_bases))\r\n\r\n # add bias\r\n if self.bias:\r\n self.bias = nn.Parameter(torch.Tensor(out_feat))\r\n\r\n # init trainable parameters\r\n nn.init.xavier_uniform_(self.weight,\r\n gain=nn.init.calculate_gain('relu'))\r\n if self.num_bases < self.num_rels:\r\n nn.init.xavier_uniform_(self.w_comp,\r\n gain=nn.init.calculate_gain('relu'))\r\n if self.bias:\r\n nn.init.xavier_uniform_(self.bias,\r\n gain=nn.init.calculate_gain('relu'))\r\n\r\n def forward(self, g):\r\n if self.num_bases < self.num_rels:\r\n # generate all weights from bases (equation (3))\r\n weight = self.weight.view(self.in_feat, self.num_bases, self.out_feat)\r\n weight = torch.matmul(self.w_comp, weight).view(self.num_rels,\r\n self.in_feat, self.out_feat)\r\n else:\r\n weight = self.weight\r\n\r\n if self.is_input_layer:\r\n def message_func(edges):\r\n # for input layer, matrix multiply can be converted to be\r\n # an embedding lookup using source node id\r\n embed = weight.view(-1, self.out_feat)\r\n index = edges.data['rel_type'] * self.in_feat + edges.src['id']\r\n return {'msg': embed[index] * edges.data['norm']}\r\n else:\r\n def message_func(edges):\r\n w = weight[edges.data['rel_type']]\r\n msg = torch.bmm(edges.src['h'].unsqueeze(1), w).squeeze()\r\n msg = msg * edges.data['norm']\r\n return {'msg': msg}\r\n\r\n def apply_func(nodes):\r\n h = nodes.data['h']\r\n if self.bias:\r\n h = h + self.bias\r\n if self.activation:\r\n h = self.activation(h)\r\n return {'h': h}\r\n\r\n g.update_all(message_func, fn.sum(msg='msg', out='h'), apply_func)\r\n\r\n\r\n###############################################################################\r\n# Full R-GCN model defined\r\n# ~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\nclass Model(nn.Module):\r\n def __init__(self, num_nodes, h_dim, out_dim, num_rels,\r\n num_bases=-1, num_hidden_layers=1):\r\n super(Model, self).__init__()\r\n self.num_nodes = num_nodes\r\n self.h_dim = h_dim\r\n self.out_dim = out_dim\r\n self.num_rels = num_rels\r\n self.num_bases = num_bases\r\n self.num_hidden_layers = num_hidden_layers\r\n\r\n # create rgcn layers\r\n self.build_model()\r\n\r\n # create initial features\r\n self.features = self.create_features()\r\n\r\n def build_model(self):\r\n self.layers = nn.ModuleList()\r\n # input to hidden\r\n i2h = self.build_input_layer()\r\n self.layers.append(i2h)\r\n # hidden to hidden\r\n for _ in range(self.num_hidden_layers):\r\n h2h = self.build_hidden_layer()\r\n 
self.layers.append(h2h)\r\n # hidden to output\r\n h2o = self.build_output_layer()\r\n self.layers.append(h2o)\r\n\r\n # initialize feature for each node\r\n def create_features(self):\r\n features = torch.arange(self.num_nodes)\r\n return features\r\n\r\n def build_input_layer(self):\r\n return RGCNLayer(self.num_nodes, self.h_dim, self.num_rels, self.num_bases,\r\n activation=F.relu, is_input_layer=True)\r\n\r\n def build_hidden_layer(self):\r\n return RGCNLayer(self.h_dim, self.h_dim, self.num_rels, self.num_bases,\r\n activation=F.relu)\r\n\r\n def build_output_layer(self):\r\n return RGCNLayer(self.h_dim, self.out_dim, self.num_rels, self.num_bases,\r\n activation=partial(F.softmax, dim=1))\r\n\r\n def forward(self, g):\r\n if self.features is not None:\r\n g.ndata['id'] = self.features\r\n for layer in self.layers:\r\n layer(g)\r\n return g.ndata.pop('h')\r\n\r\n###############################################################################\r\n# Handle dataset\r\n# ~~~~~~~~~~~~~~~~\r\n# This tutorial uses Institute for Applied Informatics and Formal Description Methods (AIFB) dataset from R-GCN paper.\r\n\r\n# load graph data\r\nfrom dgl.contrib.data import load_data\r\nimport numpy as np\r\ndata = load_data(dataset='aifb')\r\nnum_nodes = data.num_nodes\r\nnum_rels = data.num_rels\r\nnum_classes = data.num_classes\r\nlabels = data.labels\r\ntrain_idx = data.train_idx\r\n# split training and validation set\r\nval_idx = train_idx[:len(train_idx) // 5]\r\ntrain_idx = train_idx[len(train_idx) // 5:]\r\n\r\n# edge type and normalization factor\r\nedge_type = torch.from_numpy(data.edge_type)\r\nedge_norm = torch.from_numpy(data.edge_norm).unsqueeze(1)\r\n\r\nlabels = torch.from_numpy(labels).view(-1)\r\n\r\n###############################################################################\r\n# Create graph and model\r\n# ~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# configurations\r\nn_hidden = 16 # number of hidden units\r\nn_bases = -1 # use number of relations as number of bases\r\nn_hidden_layers = 0 # use 1 input layer, 1 output layer, no hidden layer\r\nn_epochs = 25 # epochs to train\r\nlr = 0.01 # learning rate\r\nl2norm = 0 # L2 norm coefficient\r\n\r\n# create graph\r\ng = DGLGraph()\r\ng.add_nodes(num_nodes)\r\ng.add_edges(data.edge_src, data.edge_dst)\r\ng.edata.update({'rel_type': edge_type, 'norm': edge_norm})\r\n\r\n# create model\r\nmodel = Model(len(g),\r\n n_hidden,\r\n num_classes,\r\n num_rels,\r\n num_bases=n_bases,\r\n num_hidden_layers=n_hidden_layers)\r\n\r\n###############################################################################\r\n# Training loop\r\n# ~~~~~~~~~~~~~~~~\r\n\r\n# optimizer\r\noptimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2norm)\r\n\r\nprint(\"start training...\")\r\nmodel.train()\r\nfor epoch in range(n_epochs):\r\n optimizer.zero_grad()\r\n logits = model.forward(g)\r\n loss = F.cross_entropy(logits[train_idx], labels[train_idx])\r\n loss.backward()\r\n\r\n optimizer.step()\r\n\r\n train_acc = torch.sum(logits[train_idx].argmax(dim=1) == labels[train_idx])\r\n train_acc = train_acc.item() / len(train_idx)\r\n val_loss = F.cross_entropy(logits[val_idx], labels[val_idx])\r\n val_acc = torch.sum(logits[val_idx].argmax(dim=1) == labels[val_idx])\r\n val_acc = val_acc.item() / len(val_idx)\r\n print(\"Epoch {:05d} | \".format(epoch) +\r\n \"Train Accuracy: {:.4f} | Train Loss: {:.4f} | \".format(\r\n train_acc, loss.item()) +\r\n \"Validation Accuracy: {:.4f} | Validation loss: {:.4f}\".format(\r\n val_acc, 
val_loss.item()))\r\n\r\n###############################################################################\r\n# .. _link-prediction:\r\n#\r\n# The second task, link prediction\r\n# --------------------------------\r\n# So far, you have seen how to use DGL to implement entity classification with an \r\n# R-GCN model. In the knowledge base setting, representation generated by\r\n# R-GCN can be used to uncover potential relationships between nodes. In the \r\n# R-GCN paper, the authors feed the entity representations generated by R-GCN\r\n# into the `DistMult <https://arxiv.org/pdf/1412.6575.pdf>`_ prediction model\r\n# to predict possible relationships.\r\n#\r\n# The implementation is similar to that presented here, but with an extra DistMult layer\r\n# stacked on top of the R-GCN layers. You can find the complete\r\n# implementation of link prediction with R-GCN in our `Github Python code example\r\n# <https://github.com/dmlc/dgl/blob/master/examples/pytorch/rgcn/link_predict.py>`_.\r\n"
] | [
[
"torch.ones",
"torch.zeros",
"torch.randn",
"torch.tensor",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
],
[
"torch.nn.Module.__init__",
"torch.cat",
"torch.zeros",
"numpy.cumsum",
"torch.nn.Embedding",
"torch.nn.Linear"
],
[
"torch.nn.init.calculate_gain",
"torch.Tensor",
"torch.nn.ModuleList",
"torch.nn.functional.cross_entropy",
"torch.from_numpy",
"torch.matmul",
"torch.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
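The R-GCN tutorial in the record above reduces parameter count via basis decomposition (its equation (3)): each relation weight :math:`W_r^{(l)}` is a linear combination :math:`\sum_b a_{rb}^{(l)} V_b^{(l)}` of shared bases. A minimal sketch of that combination in plain PyTorch; the sizes are invented toy values, and the einsum is an equivalent of the tutorial's view/matmul reshuffle, not its literal code:

import torch

num_rels, num_bases, in_feat, out_feat = 4, 2, 8, 8  # toy sizes
V = torch.randn(num_bases, in_feat, out_feat)        # shared bases V_b
a = torch.randn(num_rels, num_bases)                 # coefficients a_rb

# Equation (3): W_r = sum_b a_rb * V_b, one projection matrix per relation
W = torch.einsum('rb,bio->rio', a, V)                # [num_rels, in_feat, out_feat]
assert W.shape == (num_rels, in_feat, out_feat)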
Bertinus/causal_cell_embedding | [
"417b55749130fc7b7832fd3ee4c49feff4a04593"
] | [
"ai/causalcell/training.py"
] | [
"import ai.causalcell.utils.configuration as configuration\nimport ai.causalcell.datasets.synthetic_dataset as sd\nimport logging\nimport numpy as np\nimport torch\nimport random\nimport os\nimport copy\nimport dill as pickle\nimport skopt\nfrom collections import OrderedDict\n\n# from ai.causalcell.datasets.synthetic_dataset import global_graph\n\n_LOG = logging.getLogger(__name__)\n\n\ndef set_seed(seed, cuda=False):\n \"\"\"\n Fix the seed for numpy, python random, and pytorch.\n \"\"\"\n print('pytorch/random seed: {}'.format(seed))\n\n # Numpy, python, pytorch (cpu), pytorch (gpu).\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n if cuda:\n torch.cuda.manual_seed_all(seed)\n\n\ndef save_results(results, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n # Save best model\n output_name = \"best_model_{}.pth.tar\".format(results[\"exp_id\"])\n torch.save(results[\"best_model\"].state_dict(), os.path.join(output_dir, output_name))\n\n # Save last model\n output_name = \"last_model_{}.pth.tar\".format(results[\"exp_id\"])\n torch.save(results[\"last_model\"].state_dict(), os.path.join(output_dir, output_name))\n\n # Save the rest of the results dictionary\n del results[\"best_model\"]\n del results[\"last_model\"]\n output_name = \"results_{}.pkl\".format(results[\"exp_id\"])\n with open(os.path.join(output_dir, output_name), 'wb') as f:\n pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef train_epoch(model, device, train_loader, epoch):\n\n model.train()\n\n all_loss, all_losses = [], None\n\n for batch_idx, data in enumerate(train_loader):\n\n x, fingerprint, compound, line = data\n x = x.to(device)\n fingerprint = fingerprint.to(device)\n\n # Expected to return a dictionary of outputs.\n loss, losses = model.forward_backward_update(x, fingerprint, compound, line, device=device)\n\n if all_losses is None:\n all_losses = {i: [losses[i].detach().cpu().item()] for i in losses.keys()}\n else:\n for i in losses.keys():\n all_losses[i].append(losses[i].detach().cpu().item())\n\n all_loss.append(loss.detach())\n\n all_loss = float(torch.mean(torch.tensor(all_loss)).detach().numpy())\n print('epoch {} Mean train loss: {:.4f}'.format(epoch, all_loss))\n\n return all_loss, all_losses\n\n\ndef evaluate_epoch(model, device, data_loader, epoch):\n \"\"\"Evaluates a given model on given data.\"\"\"\n model.eval()\n all_loss, all_losses = [], None\n\n with torch.no_grad():\n for batch_idx, data in enumerate(data_loader):\n\n x, fingerprint, compound, line = data\n x = x.to(device)\n fingerprint = fingerprint.to(device)\n\n # Expected to return a dictionary of outputs.\n loss, losses = model.forward_loss(x, fingerprint, compound, line, device=device)\n\n if all_losses is None:\n all_losses = {i: [losses[i].detach().cpu().item()] for i in losses.keys()}\n else:\n for i in losses.keys():\n all_losses[i].append(losses[i].detach().cpu().item())\n\n # Sum up batch loss.\n loss = sum(losses.values())\n all_loss.append(loss)\n\n all_loss = float(torch.mean(torch.tensor(all_loss)).detach().numpy())\n print('epoch {} Mean valid loss: {:.4f}'.format(epoch, all_loss))\n\n return all_loss, all_losses\n\n\ndef train(cfg):\n \"\"\"\n Trains a model on a dataset given the supplied configuration.\n save is by default True and will result in the model's performance being\n saved to a handy pickle file, as well as the best-performing model being\n saved. 
Set this to False when doing an outer loop of hyperparameter\n optimization.\n \"\"\"\n exp_name = cfg['experiment_name']\n exp_id = cfg['exp_id']\n n_epochs = cfg['n_epochs']\n seed = cfg['seed']\n output_dir = os.path.join('results', cfg['experiment_name'])\n early_stopping = cfg['early_stopping']\n patience_max = cfg['patience_max']\n patience = 0\n\n set_seed(seed)\n\n # dataloader\n valid_loader = configuration.setup_dataloader(cfg, 'valid')\n train_loader = configuration.setup_dataloader(cfg, 'train')\n\n device = 'cuda' if cfg['cuda'] else 'cpu'\n model = configuration.setup_model(cfg).to(device)\n\n print('model: \\n{}'.format(model))\n\n best_valid_loss = np.inf\n best_model, best_epoch = None, None\n all_train_losses, all_valid_losses = [], []\n\n for epoch in range(n_epochs):\n\n train_loss, train_losses = train_epoch(model=model, device=device, train_loader=train_loader,\n epoch=epoch)\n\n valid_loss, valid_losses = evaluate_epoch(model=model, device=device, data_loader=valid_loader, epoch=epoch)\n\n all_train_losses.append(train_losses)\n all_valid_losses.append(valid_losses)\n\n if valid_loss < best_valid_loss:\n best_model = copy.deepcopy(model)\n best_epoch = epoch\n best_valid_loss = valid_loss\n else:\n patience += 1\n if early_stopping and patience > patience_max:\n break\n\n results = {\"exp_name\": exp_name,\n \"config\": cfg,\n \"data_graph\": sd.global_graph,\n \"seed\": seed,\n \"exp_id\": exp_id,\n \"n_envs_in_split\": {\"train\": train_loader.batch_sampler.n_envs_in_split,\n \"valid\": valid_loader.batch_sampler.n_envs_in_split},\n \"n_samples_in_split\": {\"train\": train_loader.batch_sampler.n_samples,\n \"valid\": valid_loader.batch_sampler.n_samples},\n \"losses\": {\"train\": all_train_losses, \"valid\": all_valid_losses},\n \"best_epoch\": best_epoch,\n \"best_model\": best_model.to('cpu'),\n \"last_model\": model.to('cpu')}\n\n save_results(results, output_dir)\n"
] | [
[
"numpy.random.seed",
"torch.manual_seed",
"torch.tensor",
"torch.no_grad",
"torch.cuda.manual_seed_all"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
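In ``train()`` above, ``patience`` only ever grows: it is never reset when validation improves, so the loop stops once ``patience_max`` non-improving epochs have accumulated in total. The conventional early-stopping pattern resets the counter on each new best; a minimal sketch of that variant (the loss sequence is invented):

best_valid, patience, patience_max = float('inf'), 0, 2

for epoch, valid_loss in enumerate([1.0, 0.8, 0.9, 0.85, 0.7, 0.9, 0.95, 1.1]):
    if valid_loss < best_valid:
        best_valid = valid_loss
        patience = 0                 # reset on improvement
        # snapshot the model here (e.g. copy.deepcopy), as train() does
    else:
        patience += 1
        if patience > patience_max:  # three non-improving epochs in a row
            break

print('stopped after epoch', epoch)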
ahstarwab/nn-gev_torch | [
"f7849e55230322fd5bfb4da81efc72875e2e76da"
] | [
"nn_models.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nclass BLSTMMaskEstimator(nn.Module):\n def __init__(self, input_dim=513, hidden_dim=512, num_layers=1, dropout=0.3, bidirectional=True):\n super(BLSTMMaskEstimator, self).__init__()\n self.dropout = dropout\n # blstm_layer = SequenceBLSTM(513, 256, normalized=True)\n self.blstm_layer = nn.LSTM(input_dim, 256, num_layers, dropout=dropout, bidirectional=bidirectional)\n # relu_1 = SequenceLinear(256, 513, normalized=True)\n self.relu_1 = nn.Linear(hidden_dim, input_dim)\n # relu_2 = SequenceLinear(513, 513, normalized=True)\n self.relu_2 = nn.Linear(input_dim, input_dim)\n # noise_mask_estimate = SequenceLinear(513, 513, normalized=True)\n self.noise_mask_estimate = nn.Linear(input_dim, input_dim)\n # speech_mask_estimate = SequenceLinear(513, 513, normalized=True)\n self.speech_mask_estimate = nn.Linear(input_dim, input_dim)\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, Y):\n \n Y = Y.reshape(-1, 1, Y.shape[-1]) #[seq_len X 1 X input_dim]\n blstm, _ = self.blstm_layer(Y)\n \n relu_1 = self.relu_1(blstm)#, dropout=self.dropout)\n #TODO\n #Need torch.clamp(relu_1, min=0, max=1)?\n relu_2 = self.relu_2(relu_1)#, dropout=self.dropout)\n #TODO\n #Need torch.clamp(relu_2, min=0, max=1)\n X_mask = self.sigmoid(self.speech_mask_estimate(relu_2))\n N_mask = self.sigmoid(self.noise_mask_estimate(relu_2))\n \n\n return X_mask, N_mask\n\nclass SimpleFWMaskEstimator(nn.Module):\n def __init__(self, input_dim=513, hidden_dim=1024, output_dim = 513):\n super(SimpleFWMaskEstimator, self).__init__()\n self.relu_1 = nn.Linear(input_dim, hidden_dim)\n self.noise_mask_estimate = nn.Linear(hidden_dim, output_dim)\n self.speech_mask_estimate = nn.Linear(hidden_dim, output_dim)\n\n def forward(self, Y):\n relu_1 = self.relu_1(Y)\n #TODO\n #Need torch.clamp(relu_1, min=0, max=1)\n X_mask = nn.Sigmoid(self.speech_mask_estimate(relu_1))\n N_mask = nn.Sigmoid(self.noise_mask_estimate(relu_1))\n \n\n return X_mask, N_mask,"
] | [
[
"torch.nn.Linear",
"torch.nn.Sigmoid",
"torch.nn.LSTM"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
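The ``BLSTMMaskEstimator`` above reshapes its input to ``[seq_len, 1, input_dim]``, runs a 256-unit bidirectional LSTM, and emits sigmoid speech/noise masks with the input's width. A minimal smoke test; the ``nn_models`` import assumes the file path given in this record, and the frame count is arbitrary:

import torch
from nn_models import BLSTMMaskEstimator  # module name taken from file_path above

est = BLSTMMaskEstimator()         # defaults: 513 STFT bins per frame
Y = torch.randn(100, 513)          # 100 frames of toy spectral features
X_mask, N_mask = est(Y)
print(X_mask.shape, N_mask.shape)  # torch.Size([100, 1, 513]) each, values in (0, 1)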
tomsnail/opencv_tf_py | [
"cf9aa7fa250546564cff56aa33b5a39991b0d8f1",
"cf9aa7fa250546564cff56aa33b5a39991b0d8f1",
"cf9aa7fa250546564cff56aa33b5a39991b0d8f1",
"cf9aa7fa250546564cff56aa33b5a39991b0d8f1",
"cf9aa7fa250546564cff56aa33b5a39991b0d8f1"
] | [
"learn/C01/01/Convolution.py",
"nets/nets_factory_test.py",
"share/rebot/seq2seq.py",
"tensorflow/c01/t11/cnn/train.py",
"tensorflow/c01/t11/cnn/text_cnn.py"
] | [
"import cv2\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread('images/1.jpg',cv2.IMREAD_GRAYSCALE)\nkernel = [[1,1,1],[0,0,0],[-1,-1,-1]]\ndest = signal.convolve2d(img,kernel)\nplt.imshow(dest,cmap='gray')\nplt.show()",
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for nets.re_inception.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nets import nets_factory\n\nslim = tf.contrib.slim\n\n\nclass NetworksTest(tf.test.TestCase):\n\n def testGetNetworkFn(self):\n batch_size = 5\n num_classes = 1000\n for net in nets_factory.networks_map:\n with self.test_session():\n net_fn = nets_factory.get_network_fn(net, num_classes)\n # Most networks use 224 as their default_image_size\n image_size = getattr(net_fn, 'default_image_size', 224)\n inputs = tf.random_uniform((batch_size, image_size, image_size, 3))\n logits, end_points = net_fn(inputs)\n self.assertTrue(isinstance(logits, tf.Tensor))\n self.assertTrue(isinstance(end_points, dict))\n self.assertEqual(logits.get_shape().as_list()[0], batch_size)\n self.assertEqual(logits.get_shape().as_list()[-1], num_classes)\n\n def testGetNetworkFnArgScope(self):\n batch_size = 5\n num_classes = 10\n net = 'cifarnet'\n with self.test_session(use_gpu=True):\n net_fn = nets_factory.get_network_fn(net, num_classes)\n image_size = getattr(net_fn, 'default_image_size', 224)\n with slim.arg_scope([slim.model_variable, slim.variable],\n device='/CPU:0'):\n inputs = tf.random_uniform((batch_size, image_size, image_size, 3))\n net_fn(inputs)\n weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'CifarNet/conv1')[0]\n self.assertDeviceEqual('/CPU:0', weights.device)\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Library for creating sequence-to-sequence models in TensorFlow.\n\nSequence-to-sequence recurrent neural networks can learn complex functions\nthat map input sequences to output sequences. These models yield very good\nresults on a number of tasks, such as speech recognition, parsing, machine\ntranslation, or even constructing automated replies to emails.\n\nBefore using this module, it is recommended to read the TensorFlow tutorial\non sequence-to-sequence models. It explains the basic concepts of this module\nand shows an end-to-end example of how to build a translation model.\n https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html\n\nHere is an overview of functions available in this module. They all use\na very similar interface, so after reading the above tutorial and using\none of them, others should be easy to substitute.\n\n* Full sequence-to-sequence models.\n - basic_rnn_seq2seq: The most basic RNN-RNN model.\n - tied_rnn_seq2seq: The basic model with tied encoder and decoder weights.\n - embedding_rnn_seq2seq: The basic model with input embedding.\n - embedding_tied_rnn_seq2seq: The tied model with input embedding.\n - embedding_attention_seq2seq: Advanced model with input embedding and\n the neural attention mechanism; recommended for complex tasks.\n\n* Multi-task sequence-to-sequence models.\n - one2many_rnn_seq2seq: The embedding model with multiple decoders.\n\n* Decoders (when you write your own encoder, you can use these to decode;\n e.g., if you want to write a model that generates captions for images).\n - rnn_decoder: The basic decoder based on a pure RNN.\n - attention_decoder: A decoder that uses the attention mechanism.\n\n* Losses.\n - sequence_loss: Loss for a sequence model returning average log-perplexity.\n - sequence_loss_by_example: As above, but not averaging over all examples.\n\n* model_with_buckets: A convenience function to create models with bucketing\n (see the tutorial above for an explanation of why and how to use it).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\n# We disable pylint because we need python3 compatibility.\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nfrom tensorflow.contrib.rnn.python.ops import core_rnn_cell\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops 
import variable_scope\nfrom tensorflow.python.util import nest\n\n# TODO(ebrevdo): Remove once _linear is fully deprecated.\nlinear = core_rnn_cell._linear # pylint: disable=protected-access\n\n\ndef _extract_argmax_and_embed(embedding,\n output_projection=None,\n update_embedding=True):\n \"\"\"Get a loop_function that extracts the previous symbol and embeds it.\n\n Args:\n embedding: embedding tensor for symbols.\n output_projection: None or a pair (W, B). If provided, each fed previous\n output will first be multiplied by W and added B.\n update_embedding: Boolean; if False, the gradients will not propagate\n through the embeddings.\n\n Returns:\n A loop function.\n \"\"\"\n\n def loop_function(prev, _):\n if output_projection is not None:\n prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])\n prev_symbol = math_ops.argmax(prev, 1)\n # Note that gradients will not propagate through the second parameter of\n # embedding_lookup.\n emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)\n if not update_embedding:\n emb_prev = array_ops.stop_gradient(emb_prev)\n return emb_prev\n\n return loop_function\n\n\ndef rnn_decoder(decoder_inputs,\n initial_state,\n cell,\n loop_function=None,\n scope=None):\n \"\"\"RNN decoder for the sequence-to-sequence model.\n\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor with shape [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to the i-th output\n in order to generate the i+1-st input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http://arxiv.org/abs/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n scope: VariableScope for the created subgraph; defaults to \"rnn_decoder\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing generated outputs.\n state: The state of each cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n (Note that in some cases, like basic RNN cell or GRU cell, outputs and\n states can be the same. 
They are different for LSTM cells though.)\n \"\"\"\n with variable_scope.variable_scope(scope or \"rnn_decoder\"):\n state = initial_state\n outputs = []\n prev = None\n for i, inp in enumerate(decoder_inputs):\n if loop_function is not None and prev is not None:\n with variable_scope.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i)\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n output, state = cell(inp, state)\n outputs.append(output)\n if loop_function is not None:\n prev = output\n return outputs, state\n\n\ndef basic_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n dtype=dtypes.float32,\n scope=None):\n \"\"\"Basic RNN sequence-to-sequence model.\n\n This model first runs an RNN to encode encoder_inputs into a state vector,\n then runs decoder, initialized with the last encoder state, on decoder_inputs.\n Encoder and decoder use the same RNN cell type, but don't share parameters.\n\n Args:\n encoder_inputs: A list of 2D Tensors [batch_size x input_size].\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.\n dtype: The dtype of the initial state of the RNN cell (default: tf.float32).\n scope: VariableScope for the created subgraph; default: \"basic_rnn_seq2seq\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing the generated outputs.\n state: The state of each decoder cell in the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n \"\"\"\n with variable_scope.variable_scope(scope or \"basic_rnn_seq2seq\"):\n enc_cell = copy.deepcopy(cell)\n _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)\n return rnn_decoder(decoder_inputs, enc_state, cell)\n\n\ndef tied_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n loop_function=None,\n dtype=dtypes.float32,\n scope=None):\n \"\"\"RNN sequence-to-sequence model with tied encoder and decoder parameters.\n\n This model first runs an RNN to encode encoder_inputs into a state vector, and\n then runs decoder, initialized with the last encoder state, on decoder_inputs.\n Encoder and decoder use the same RNN cell and share parameters.\n\n Args:\n encoder_inputs: A list of 2D Tensors [batch_size x input_size].\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to i-th output\n in order to generate i+1-th input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol), see rnn_decoder for details.\n dtype: The dtype of the initial state of the rnn cell (default: tf.float32).\n scope: VariableScope for the created subgraph; default: \"tied_rnn_seq2seq\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing the generated outputs.\n state: The state of each decoder cell in each time-step. 
This is a list\n with length len(decoder_inputs) -- one item for each time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n \"\"\"\n with variable_scope.variable_scope(\"combined_tied_rnn_seq2seq\"):\n scope = scope or \"tied_rnn_seq2seq\"\n _, enc_state = rnn.static_rnn(\n cell, encoder_inputs, dtype=dtype, scope=scope)\n variable_scope.get_variable_scope().reuse_variables()\n return rnn_decoder(\n decoder_inputs,\n enc_state,\n cell,\n loop_function=loop_function,\n scope=scope)\n\n\ndef embedding_rnn_decoder(decoder_inputs,\n initial_state,\n cell,\n num_symbols,\n embedding_size,\n output_projection=None,\n feed_previous=False,\n update_embedding_for_previous=True,\n scope=None):\n \"\"\"RNN decoder with embedding and a pure-decoding option.\n\n Args:\n decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).\n initial_state: 2D Tensor [batch_size x cell.state_size].\n cell: tf.nn.rnn_cell.RNNCell defining the cell function.\n num_symbols: Integer, how many symbols come into the embedding.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_symbols] and B has\n shape [num_symbols]; if provided and feed_previous=True, each fed\n previous output will first be multiplied by W and added B.\n feed_previous: Boolean; if True, only the first of decoder_inputs will be\n used (the \"GO\" symbol), and all other decoder inputs will be generated by:\n next = embedding_lookup(embedding, argmax(previous_output)),\n In effect, this implements a greedy decoder. It can also be used\n during training to emulate http://arxiv.org/abs/1506.03099.\n If False, decoder_inputs are used as given (the standard decoder case).\n update_embedding_for_previous: Boolean; if False and feed_previous=True,\n only the embedding for the first symbol of decoder_inputs (the \"GO\"\n symbol) will be updated by back propagation. Embeddings for the symbols\n generated from the decoder itself remain unchanged. This parameter has\n no effect if feed_previous=False.\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_rnn_decoder\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors. The\n output is of shape [batch_size x cell.output_size] when\n output_projection is not None (and represents the dense representation\n of predicted tokens). It is of shape [batch_size x num_decoder_symbols]\n when output_projection is None.\n state: The state of each decoder cell in each time-step. 
This is a list\n with length len(decoder_inputs) -- one item for each time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n\n Raises:\n ValueError: When output_projection has the wrong shape.\n \"\"\"\n with variable_scope.variable_scope(scope or \"embedding_rnn_decoder\") as scope:\n if output_projection is not None:\n dtype = scope.dtype\n proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)\n proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])\n proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)\n proj_biases.get_shape().assert_is_compatible_with([num_symbols])\n\n embedding = variable_scope.get_variable(\"embedding\",\n [num_symbols, embedding_size])\n loop_function = _extract_argmax_and_embed(\n embedding, output_projection,\n update_embedding_for_previous) if feed_previous else None\n emb_inp = (embedding_ops.embedding_lookup(embedding, i)\n for i in decoder_inputs)\n return rnn_decoder(\n emb_inp, initial_state, cell, loop_function=loop_function)\n\n\ndef embedding_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n num_encoder_symbols,\n num_decoder_symbols,\n embedding_size,\n output_projection=None,\n feed_previous=False,\n dtype=None,\n scope=None):\n \"\"\"Embedding RNN sequence-to-sequence model.\n\n This model first embeds encoder_inputs by a newly created embedding (of shape\n [num_encoder_symbols x input_size]). Then it runs an RNN to encode\n embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs\n by another newly created embedding (of shape [num_decoder_symbols x\n input_size]). Then it runs RNN decoder, initialized with the last\n encoder state, on embedded decoder_inputs.\n\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.\n num_encoder_symbols: Integer; number of symbols on the encoder side.\n num_decoder_symbols: Integer; number of symbols on the decoder side.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_decoder_symbols] and B has\n shape [num_decoder_symbols]; if provided and feed_previous=True, each\n fed previous output will first be multiplied by W and added B.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first\n of decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype of the initial state for both the encoder and encoder\n rnn cells (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_rnn_seq2seq\"\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors. The\n output is of shape [batch_size x cell.output_size] when\n output_projection is not None (and represents the dense representation\n of predicted tokens). It is of shape [batch_size x num_decoder_symbols]\n when output_projection is None.\n state: The state of each decoder cell in each time-step. 
This is a list\n with length len(decoder_inputs) -- one item for each time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n \"\"\"\n with variable_scope.variable_scope(scope or \"embedding_rnn_seq2seq\") as scope:\n if dtype is not None:\n scope.set_dtype(dtype)\n else:\n dtype = scope.dtype\n\n # Encoder.\n encoder_cell = copy.deepcopy(cell)\n encoder_cell = core_rnn_cell.EmbeddingWrapper(\n encoder_cell,\n embedding_classes=num_encoder_symbols,\n embedding_size=embedding_size)\n _, encoder_state = rnn.static_rnn(encoder_cell, encoder_inputs, dtype=dtype)\n\n # Decoder.\n if output_projection is None:\n cell = core_rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)\n\n if isinstance(feed_previous, bool):\n return embedding_rnn_decoder(\n decoder_inputs,\n encoder_state,\n cell,\n num_decoder_symbols,\n embedding_size,\n output_projection=output_projection,\n feed_previous=feed_previous)\n\n # If feed_previous is a Tensor, we construct 2 graphs and use cond.\n def decoder(feed_previous_bool):\n reuse = None if feed_previous_bool else True\n with variable_scope.variable_scope(\n variable_scope.get_variable_scope(), reuse=reuse):\n outputs, state = embedding_rnn_decoder(\n decoder_inputs,\n encoder_state,\n cell,\n num_decoder_symbols,\n embedding_size,\n output_projection=output_projection,\n feed_previous=feed_previous_bool,\n update_embedding_for_previous=False)\n state_list = [state]\n if nest.is_sequence(state):\n state_list = nest.flatten(state)\n return outputs + state_list\n\n outputs_and_state = control_flow_ops.cond(feed_previous,\n lambda: decoder(True),\n lambda: decoder(False))\n outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.\n state_list = outputs_and_state[outputs_len:]\n state = state_list[0]\n if nest.is_sequence(encoder_state):\n state = nest.pack_sequence_as(\n structure=encoder_state, flat_sequence=state_list)\n return outputs_and_state[:outputs_len], state\n\n\ndef embedding_tied_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n num_symbols,\n embedding_size,\n num_decoder_symbols=None,\n output_projection=None,\n feed_previous=False,\n dtype=None,\n scope=None):\n \"\"\"Embedding RNN sequence-to-sequence model with tied (shared) parameters.\n\n This model first embeds encoder_inputs by a newly created embedding (of shape\n [num_symbols x input_size]). Then it runs an RNN to encode embedded\n encoder_inputs into a state vector. Next, it embeds decoder_inputs using\n the same embedding. Then it runs RNN decoder, initialized with the last\n encoder state, on embedded decoder_inputs. The decoder output is over symbols\n from 0 to num_decoder_symbols - 1 if num_decoder_symbols is none; otherwise it\n is over 0 to num_symbols - 1.\n\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.\n num_symbols: Integer; number of symbols for both encoder and decoder.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n num_decoder_symbols: Integer; number of output symbols for decoder. If\n provided, the decoder output is over symbols 0 to num_decoder_symbols - 1.\n Otherwise, decoder output is over symbols 0 to num_symbols - 1. 
Note that\n this assumes that the vocabulary is set up such that the first\n num_decoder_symbols of num_symbols are part of decoding.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_symbols] and B has\n shape [num_symbols]; if provided and feed_previous=True, each\n fed previous output will first be multiplied by W and added B.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first\n of decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype to use for the initial RNN states (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_tied_rnn_seq2seq\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_symbols] containing the generated\n outputs where output_symbols = num_decoder_symbols if\n num_decoder_symbols is not None otherwise output_symbols = num_symbols.\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n\n Raises:\n ValueError: When output_projection has the wrong shape.\n \"\"\"\n with variable_scope.variable_scope(\n scope or \"embedding_tied_rnn_seq2seq\", dtype=dtype) as scope:\n dtype = scope.dtype\n\n if output_projection is not None:\n proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)\n proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])\n proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)\n proj_biases.get_shape().assert_is_compatible_with([num_symbols])\n\n embedding = variable_scope.get_variable(\n \"embedding\", [num_symbols, embedding_size], dtype=dtype)\n\n emb_encoder_inputs = [\n embedding_ops.embedding_lookup(embedding, x) for x in encoder_inputs\n ]\n emb_decoder_inputs = [\n embedding_ops.embedding_lookup(embedding, x) for x in decoder_inputs\n ]\n\n output_symbols = num_symbols\n if num_decoder_symbols is not None:\n output_symbols = num_decoder_symbols\n if output_projection is None:\n cell = core_rnn_cell.OutputProjectionWrapper(cell, output_symbols)\n\n if isinstance(feed_previous, bool):\n loop_function = _extract_argmax_and_embed(embedding, output_projection,\n True) if feed_previous else None\n return tied_rnn_seq2seq(\n emb_encoder_inputs,\n emb_decoder_inputs,\n cell,\n loop_function=loop_function,\n dtype=dtype)\n\n # If feed_previous is a Tensor, we construct 2 graphs and use cond.\n def decoder(feed_previous_bool):\n loop_function = _extract_argmax_and_embed(\n embedding, output_projection, False) if feed_previous_bool else None\n reuse = None if feed_previous_bool else True\n with variable_scope.variable_scope(\n variable_scope.get_variable_scope(), reuse=reuse):\n outputs, state = tied_rnn_seq2seq(\n emb_encoder_inputs,\n emb_decoder_inputs,\n cell,\n loop_function=loop_function,\n dtype=dtype)\n state_list = [state]\n if nest.is_sequence(state):\n state_list = nest.flatten(state)\n return outputs + state_list\n\n outputs_and_state = control_flow_ops.cond(feed_previous,\n lambda: decoder(True),\n lambda: decoder(False))\n outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.\n state_list = outputs_and_state[outputs_len:]\n state = state_list[0]\n # Calculate zero-state to know 
it's structure.\n static_batch_size = encoder_inputs[0].get_shape()[0]\n for inp in encoder_inputs[1:]:\n static_batch_size.merge_with(inp.get_shape()[0])\n batch_size = static_batch_size.value\n if batch_size is None:\n batch_size = array_ops.shape(encoder_inputs[0])[0]\n zero_state = cell.zero_state(batch_size, dtype)\n if nest.is_sequence(zero_state):\n state = nest.pack_sequence_as(\n structure=zero_state, flat_sequence=state_list)\n return outputs_and_state[:outputs_len], state\n\n\ndef attention_decoder(decoder_inputs,\n initial_state,\n attention_states,\n cell,\n output_size=None,\n num_heads=1,\n loop_function=None,\n dtype=None,\n scope=None,\n initial_state_attention=False):\n \"\"\"RNN decoder with attention for the sequence-to-sequence model.\n\n In this context \"attention\" means that, during decoding, the RNN can look up\n information in the additional tensor attention_states, and it does this by\n focusing on a few entries from the tensor. This model has proven to yield\n especially good results in a number of sequence-to-sequence tasks. This\n implementation is based on http://arxiv.org/abs/1412.7449 (see below for\n details). It is recommended for complex sequence-to-sequence tasks.\n\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor [batch_size x cell.state_size].\n attention_states: 3D Tensor [batch_size x attn_length x attn_size].\n cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.\n output_size: Size of the output vectors; if None, we use cell.output_size.\n num_heads: Number of attention heads that read from attention_states.\n loop_function: If not None, this function will be applied to i-th output\n in order to generate i+1-th input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http://arxiv.org/abs/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n dtype: The dtype to use for the RNN initial state (default: tf.float32).\n scope: VariableScope for the created subgraph; default: \"attention_decoder\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states -- useful when we wish to resume decoding from a previously\n stored decoder state and attention states.\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors of\n shape [batch_size x output_size]. 
These represent the generated outputs.\n Output i is computed from input i (which is either the i-th element\n of decoder_inputs or loop_function(output {i-1}, i)) as follows.\n First, we run the cell on a combination of the input and previous\n attention masks:\n cell_output, new_state = cell(linear(input, prev_attn), prev_state).\n Then, we calculate new attention masks:\n new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))\n and then we calculate the output:\n output = linear(cell_output, new_attn).\n state: The state of each decoder cell the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n\n Raises:\n ValueError: when num_heads is not positive, there are no inputs, shapes\n of attention_states are not set, or input size cannot be inferred\n from the input.\n \"\"\"\n if not decoder_inputs:\n raise ValueError(\"Must provide at least 1 input to attention decoder.\")\n if num_heads < 1:\n raise ValueError(\"With less than 1 heads, use a non-attention decoder.\")\n if attention_states.get_shape()[2].value is None:\n raise ValueError(\"Shape[2] of attention_states must be known: %s\" %\n attention_states.get_shape())\n if output_size is None:\n output_size = cell.output_size\n\n with variable_scope.variable_scope(\n scope or \"attention_decoder\", dtype=dtype) as scope:\n dtype = scope.dtype\n\n batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.\n attn_length = attention_states.get_shape()[1].value\n if attn_length is None:\n attn_length = array_ops.shape(attention_states)[1]\n attn_size = attention_states.get_shape()[2].value\n\n # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.\n hidden = array_ops.reshape(attention_states,\n [-1, attn_length, 1, attn_size])\n hidden_features = []\n v = []\n attention_vec_size = attn_size # Size of query vectors for attention.\n for a in xrange(num_heads):\n k = variable_scope.get_variable(\"AttnW_%d\" % a,\n [1, 1, attn_size, attention_vec_size])\n hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], \"SAME\"))\n v.append(\n variable_scope.get_variable(\"AttnV_%d\" % a, [attention_vec_size]))\n\n state = initial_state\n\n def attention(query):\n \"\"\"Put attention masks on hidden using hidden_features and query.\"\"\"\n ds = [] # Results of attention reads will be stored here.\n if nest.is_sequence(query): # If the query is a tuple, flatten it.\n query_list = nest.flatten(query)\n for q in query_list: # Check that ndims == 2 if specified.\n ndims = q.get_shape().ndims\n if ndims:\n assert ndims == 2\n query = array_ops.concat(query_list, 1)\n for a in xrange(num_heads):\n with variable_scope.variable_scope(\"Attention_%d\" % a):\n y = linear(query, attention_vec_size, True)\n y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y),\n [2, 3])\n a = nn_ops.softmax(s)\n # Now calculate the attention-weighted vector d.\n d = math_ops.reduce_sum(\n array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])\n ds.append(array_ops.reshape(d, [-1, attn_size]))\n return ds\n\n outputs = []\n prev = None\n batch_attn_size = array_ops.stack([batch_size, attn_size])\n attns = [\n array_ops.zeros(\n batch_attn_size, dtype=dtype) for _ in xrange(num_heads)\n ]\n for a in attns: # Ensure the second shape of attention vectors is set.\n a.set_shape([None, attn_size])\n if initial_state_attention:\n attns = attention(initial_state)\n for 
i, inp in enumerate(decoder_inputs):\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n # If loop_function is set, we use it instead of decoder_inputs.\n if loop_function is not None and prev is not None:\n with variable_scope.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i)\n # Merge input and previous attentions into one vector of the right size.\n input_size = inp.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from input: %s\" % inp.name)\n x = linear([inp] + attns, input_size, True)\n # Run the RNN.\n cell_output, state = cell(x, state)\n # Run the attention mechanism.\n if i == 0 and initial_state_attention:\n with variable_scope.variable_scope(\n variable_scope.get_variable_scope(), reuse=True):\n attns = attention(state)\n else:\n attns = attention(state)\n\n with variable_scope.variable_scope(\"AttnOutputProjection\"):\n output = linear([cell_output] + attns, output_size, True)\n if loop_function is not None:\n prev = output\n outputs.append(output)\n\n return outputs, state\n\n\ndef embedding_attention_decoder(decoder_inputs,\n initial_state,\n attention_states,\n cell,\n num_symbols,\n embedding_size,\n num_heads=1,\n output_size=None,\n output_projection=None,\n feed_previous=False,\n update_embedding_for_previous=True,\n dtype=None,\n scope=None,\n initial_state_attention=False):\n \"\"\"RNN decoder with embedding and attention and a pure-decoding option.\n\n Args:\n decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).\n initial_state: 2D Tensor [batch_size x cell.state_size].\n attention_states: 3D Tensor [batch_size x attn_length x attn_size].\n cell: tf.nn.rnn_cell.RNNCell defining the cell function.\n num_symbols: Integer, how many symbols come into the embedding.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n num_heads: Number of attention heads that read from attention_states.\n output_size: Size of the output vectors; if None, use output_size.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_symbols] and B has shape\n [num_symbols]; if provided and feed_previous=True, each fed previous\n output will first be multiplied by W and added B.\n feed_previous: Boolean; if True, only the first of decoder_inputs will be\n used (the \"GO\" symbol), and all other decoder inputs will be generated by:\n next = embedding_lookup(embedding, argmax(previous_output)),\n In effect, this implements a greedy decoder. It can also be used\n during training to emulate http://arxiv.org/abs/1506.03099.\n If False, decoder_inputs are used as given (the standard decoder case).\n update_embedding_for_previous: Boolean; if False and feed_previous=True,\n only the embedding for the first symbol of decoder_inputs (the \"GO\"\n symbol) will be updated by back propagation. Embeddings for the symbols\n generated from the decoder itself remain unchanged. 
This parameter has\n no effect if feed_previous=False.\n dtype: The dtype to use for the RNN initial states (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_attention_decoder\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states -- useful when we wish to resume decoding from a previously\n stored decoder state and attention states.\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing the generated outputs.\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n\n Raises:\n ValueError: When output_projection has the wrong shape.\n \"\"\"\n if output_size is None:\n output_size = cell.output_size\n if output_projection is not None:\n proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)\n proj_biases.get_shape().assert_is_compatible_with([num_symbols])\n\n with variable_scope.variable_scope(\n scope or \"embedding_attention_decoder\", dtype=dtype) as scope:\n\n embedding = variable_scope.get_variable(\"embedding\",\n [num_symbols, embedding_size])\n loop_function = _extract_argmax_and_embed(\n embedding, output_projection,\n update_embedding_for_previous) if feed_previous else None\n emb_inp = [\n embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs\n ]\n return attention_decoder(\n emb_inp,\n initial_state,\n attention_states,\n cell,\n output_size=output_size,\n num_heads=num_heads,\n loop_function=loop_function,\n initial_state_attention=initial_state_attention)\n\n\ndef embedding_attention_seq2seq(encoder_inputs,\n decoder_inputs,\n encoder_cell,\n cell,\n num_encoder_symbols,\n num_decoder_symbols,\n embedding_size,\n num_heads=1,\n output_projection=None,\n feed_previous=False,\n dtype=None,\n scope=None,\n initial_state_attention=False):\n \"\"\"Embedding sequence-to-sequence model with attention.\n\n This model first embeds encoder_inputs by a newly created embedding (of shape\n [num_encoder_symbols x input_size]). Then it runs an RNN to encode\n embedded encoder_inputs into a state vector. It keeps the outputs of this\n RNN at every step to use for attention later. Next, it embeds decoder_inputs\n by another newly created embedding (of shape [num_decoder_symbols x\n input_size]). 
Then it runs attention decoder, initialized with the last\n encoder state, on embedded decoder_inputs and attending to encoder outputs.\n\n Warning: when output_projection is None, the size of the attention vectors\n and variables will be made proportional to num_decoder_symbols, can be large.\n\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.\n num_encoder_symbols: Integer; number of symbols on the encoder side.\n num_decoder_symbols: Integer; number of symbols on the decoder side.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n num_heads: Number of attention heads that read from attention_states.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_decoder_symbols] and B has\n shape [num_decoder_symbols]; if provided and feed_previous=True, each\n fed previous output will first be multiplied by W and added B.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first\n of decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype of the initial RNN state (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_attention_seq2seq\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states.\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x num_decoder_symbols] containing the generated\n outputs.\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n \"\"\"\n with variable_scope.variable_scope(\n scope or \"embedding_attention_seq2seq\", dtype=dtype) as scope:\n dtype = scope.dtype\n # Encoder.\n #encoder_cell = copy.deepcopy(cell)\n encoder_cell = core_rnn_cell.EmbeddingWrapper(\n encoder_cell,\n embedding_classes=num_encoder_symbols,\n embedding_size=embedding_size)\n encoder_outputs, encoder_state = rnn.static_rnn(\n encoder_cell, encoder_inputs, dtype=dtype)\n\n # First calculate a concatenation of encoder outputs to put attention on.\n top_states = [\n array_ops.reshape(e, [-1, 1, cell.output_size]) for e in encoder_outputs\n ]\n attention_states = array_ops.concat(top_states, 1)\n\n # Decoder.\n output_size = None\n if output_projection is None:\n cell = core_rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)\n output_size = num_decoder_symbols\n\n if isinstance(feed_previous, bool):\n return embedding_attention_decoder(\n decoder_inputs,\n encoder_state,\n attention_states,\n cell,\n num_decoder_symbols,\n embedding_size,\n num_heads=num_heads,\n output_size=output_size,\n output_projection=output_projection,\n feed_previous=feed_previous,\n initial_state_attention=initial_state_attention)\n\n # If feed_previous is a Tensor, we construct 2 graphs and use cond.\n def decoder(feed_previous_bool):\n reuse = None if feed_previous_bool else True\n with variable_scope.variable_scope(\n variable_scope.get_variable_scope(), reuse=reuse):\n outputs, state = embedding_attention_decoder(\n decoder_inputs,\n encoder_state,\n 
attention_states,\n cell,\n num_decoder_symbols,\n embedding_size,\n num_heads=num_heads,\n output_size=output_size,\n output_projection=output_projection,\n feed_previous=feed_previous_bool,\n update_embedding_for_previous=False,\n initial_state_attention=initial_state_attention)\n state_list = [state]\n if nest.is_sequence(state):\n state_list = nest.flatten(state)\n return outputs + state_list\n\n outputs_and_state = control_flow_ops.cond(feed_previous,\n lambda: decoder(True),\n lambda: decoder(False))\n outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.\n state_list = outputs_and_state[outputs_len:]\n state = state_list[0]\n if nest.is_sequence(encoder_state):\n state = nest.pack_sequence_as(\n structure=encoder_state, flat_sequence=state_list)\n return outputs_and_state[:outputs_len], state\n\n\ndef one2many_rnn_seq2seq(encoder_inputs,\n decoder_inputs_dict,\n enc_cell,\n dec_cells_dict,\n num_encoder_symbols,\n num_decoder_symbols_dict,\n embedding_size,\n feed_previous=False,\n dtype=None,\n scope=None):\n \"\"\"One-to-many RNN sequence-to-sequence model (multi-task).\n\n This is a multi-task sequence-to-sequence model with one encoder and multiple\n decoders. Reference to multi-task sequence-to-sequence learning can be found\n here: http://arxiv.org/abs/1511.06114\n\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs_dict: A dictionary mapping decoder name (string) to\n the corresponding decoder_inputs; each decoder_inputs is a list of 1D\n Tensors of shape [batch_size]; num_decoders is defined as\n len(decoder_inputs_dict).\n enc_cell: tf.nn.rnn_cell.RNNCell defining the encoder cell function and\n size.\n dec_cells_dict: A dictionary mapping encoder name (string) to an\n instance of tf.nn.rnn_cell.RNNCell.\n num_encoder_symbols: Integer; number of symbols on the encoder side.\n num_decoder_symbols_dict: A dictionary mapping decoder name (string) to an\n integer specifying number of symbols for the corresponding decoder;\n len(num_decoder_symbols_dict) must be equal to num_decoders.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of\n decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype of the initial state for both the encoder and encoder\n rnn cells (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"one2many_rnn_seq2seq\"\n\n Returns:\n A tuple of the form (outputs_dict, state_dict), where:\n outputs_dict: A mapping from decoder name (string) to a list of the same\n length as decoder_inputs_dict[name]; each element in the list is a 2D\n Tensors with shape [batch_size x num_decoder_symbol_list[name]]\n containing the generated outputs.\n state_dict: A mapping from decoder name (string) to the final state of the\n corresponding decoder RNN; it is a 2D Tensor of shape\n [batch_size x cell.state_size].\n\n Raises:\n TypeError: if enc_cell or any of the dec_cells are not instances of RNNCell.\n ValueError: if len(dec_cells) != len(decoder_inputs_dict).\n \"\"\"\n outputs_dict = {}\n state_dict = {}\n\n if not isinstance(enc_cell, rnn_cell_impl.RNNCell):\n raise TypeError(\"enc_cell is not an RNNCell: %s\" % type(enc_cell))\n if set(dec_cells_dict) != set(decoder_inputs_dict):\n raise 
ValueError(\"keys of dec_cells_dict != keys of decodre_inputs_dict\")\n for dec_cell in dec_cells_dict.values():\n if not isinstance(dec_cell, rnn_cell_impl.RNNCell):\n raise TypeError(\"dec_cell is not an RNNCell: %s\" % type(dec_cell))\n\n with variable_scope.variable_scope(\n scope or \"one2many_rnn_seq2seq\", dtype=dtype) as scope:\n dtype = scope.dtype\n\n # Encoder.\n enc_cell = core_rnn_cell.EmbeddingWrapper(\n enc_cell,\n embedding_classes=num_encoder_symbols,\n embedding_size=embedding_size)\n _, encoder_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)\n\n # Decoder.\n for name, decoder_inputs in decoder_inputs_dict.items():\n num_decoder_symbols = num_decoder_symbols_dict[name]\n dec_cell = dec_cells_dict[name]\n\n with variable_scope.variable_scope(\"one2many_decoder_\" + str(\n name)) as scope:\n dec_cell = core_rnn_cell.OutputProjectionWrapper(\n dec_cell, num_decoder_symbols)\n if isinstance(feed_previous, bool):\n outputs, state = embedding_rnn_decoder(\n decoder_inputs,\n encoder_state,\n dec_cell,\n num_decoder_symbols,\n embedding_size,\n feed_previous=feed_previous)\n else:\n # If feed_previous is a Tensor, we construct 2 graphs and use cond.\n def filled_embedding_rnn_decoder(feed_previous):\n \"\"\"The current decoder with a fixed feed_previous parameter.\"\"\"\n # pylint: disable=cell-var-from-loop\n reuse = None if feed_previous else True\n vs = variable_scope.get_variable_scope()\n with variable_scope.variable_scope(vs, reuse=reuse):\n outputs, state = embedding_rnn_decoder(\n decoder_inputs,\n encoder_state,\n dec_cell,\n num_decoder_symbols,\n embedding_size,\n feed_previous=feed_previous)\n # pylint: enable=cell-var-from-loop\n state_list = [state]\n if nest.is_sequence(state):\n state_list = nest.flatten(state)\n return outputs + state_list\n\n outputs_and_state = control_flow_ops.cond(\n feed_previous, lambda: filled_embedding_rnn_decoder(True),\n lambda: filled_embedding_rnn_decoder(False))\n # Outputs length is the same as for decoder inputs.\n outputs_len = len(decoder_inputs)\n outputs = outputs_and_state[:outputs_len]\n state_list = outputs_and_state[outputs_len:]\n state = state_list[0]\n if nest.is_sequence(encoder_state):\n state = nest.pack_sequence_as(\n structure=encoder_state, flat_sequence=state_list)\n outputs_dict[name] = outputs\n state_dict[name] = state\n\n return outputs_dict, state_dict\n\n\ndef sequence_loss_by_example(logits,\n targets,\n weights,\n average_across_timesteps=True,\n softmax_loss_function=None,\n name=None):\n \"\"\"Weighted cross-entropy loss for a sequence of logits (per example).\n\n Args:\n logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].\n targets: List of 1D batch-sized int32 Tensors of the same length as logits.\n weights: List of 1D batch-sized float-Tensors of the same length as logits.\n average_across_timesteps: If set, divide the returned cost by the total\n label weight.\n softmax_loss_function: Function (labels, logits) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n **Note that to avoid confusion, it is required for the function to accept\n named arguments.**\n name: Optional name for this operation, default: \"sequence_loss_by_example\".\n\n Returns:\n 1D batch-sized float Tensor: The log-perplexity for each sequence.\n\n Raises:\n ValueError: If len(logits) is different from len(targets) or len(weights).\n \"\"\"\n if len(targets) != len(logits) or len(weights) != len(logits):\n raise ValueError(\"Lengths of logits, weights, and 
targets must be the same \"\n \"%d, %d, %d.\" % (len(logits), len(weights), len(targets)))\n with ops.name_scope(name, \"sequence_loss_by_example\",\n logits + targets + weights):\n log_perp_list = []\n for logit, target, weight in zip(logits, targets, weights):\n if softmax_loss_function is None:\n # TODO(irving,ebrevdo): This reshape is needed because\n # sequence_loss_by_example is called with scalars sometimes, which\n # violates our general scalar strictness policy.\n target = array_ops.reshape(target, [-1])\n crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=target, logits=logit)\n else:\n crossent = softmax_loss_function(labels=target, logits=logit)\n log_perp_list.append(crossent * weight)\n log_perps = math_ops.add_n(log_perp_list)\n if average_across_timesteps:\n total_size = math_ops.add_n(weights)\n total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.\n log_perps /= total_size\n return log_perps\n\n\ndef sequence_loss(logits,\n targets,\n weights,\n average_across_timesteps=True,\n average_across_batch=True,\n softmax_loss_function=None,\n name=None):\n \"\"\"Weighted cross-entropy loss for a sequence of logits, batch-collapsed.\n\n Args:\n logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].\n targets: List of 1D batch-sized int32 Tensors of the same length as logits.\n weights: List of 1D batch-sized float-Tensors of the same length as logits.\n average_across_timesteps: If set, divide the returned cost by the total\n label weight.\n average_across_batch: If set, divide the returned cost by the batch size.\n softmax_loss_function: Function (labels, logits) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n **Note that to avoid confusion, it is required for the function to accept\n named arguments.**\n name: Optional name for this operation, defaults to \"sequence_loss\".\n\n Returns:\n A scalar float Tensor: The average log-perplexity per symbol (weighted).\n\n Raises:\n ValueError: If len(logits) is different from len(targets) or len(weights).\n \"\"\"\n with ops.name_scope(name, \"sequence_loss\", logits + targets + weights):\n cost = math_ops.reduce_sum(\n sequence_loss_by_example(\n logits,\n targets,\n weights,\n average_across_timesteps=average_across_timesteps,\n softmax_loss_function=softmax_loss_function))\n if average_across_batch:\n batch_size = array_ops.shape(targets[0])[0]\n return cost / math_ops.cast(batch_size, cost.dtype)\n else:\n return cost\n\n\ndef model_with_buckets(encoder_inputs,\n decoder_inputs,\n targets,\n weights,\n buckets,\n seq2seq,\n softmax_loss_function=None,\n per_example_loss=False,\n name=None):\n \"\"\"Create a sequence-to-sequence model with support for bucketing.\n\n The seq2seq argument is a function that defines a sequence-to-sequence model,\n e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(\n x, y, rnn_cell.GRUCell(24))\n\n Args:\n encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.\n decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.\n targets: A list of 1D batch-sized int32 Tensors (desired output sequence).\n weights: List of 1D batch-sized float-Tensors to weight the targets.\n buckets: A list of pairs of (input size, output size) for each bucket.\n seq2seq: A sequence-to-sequence model function; it takes 2 input that\n agree with encoder_inputs and decoder_inputs, and returns a pair\n consisting of outputs and states (as, e.g., basic_rnn_seq2seq).\n softmax_loss_function: Function 
(labels, logits) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n **Note that to avoid confusion, it is required for the function to accept\n named arguments.**\n per_example_loss: Boolean. If set, the returned loss will be a batch-sized\n tensor of losses for each sequence in the batch. If unset, it will be\n a scalar with the averaged loss from all examples.\n name: Optional name for this operation, defaults to \"model_with_buckets\".\n\n Returns:\n A tuple of the form (outputs, losses), where:\n outputs: The outputs for each bucket. Its j'th element consists of a list\n of 2D Tensors. The shape of output tensors can be either\n [batch_size x output_size] or [batch_size x num_decoder_symbols]\n depending on the seq2seq model used.\n losses: List of scalar Tensors, representing losses for each bucket, or,\n if per_example_loss is set, a list of 1D batch-sized float Tensors.\n\n Raises:\n ValueError: If length of encoder_inputs, targets, or weights is smaller\n than the largest (last) bucket.\n \"\"\"\n if len(encoder_inputs) < buckets[-1][0]:\n raise ValueError(\"Length of encoder_inputs (%d) must be at least that of la\"\n \"st bucket (%d).\" % (len(encoder_inputs), buckets[-1][0]))\n if len(targets) < buckets[-1][1]:\n raise ValueError(\"Length of targets (%d) must be at least that of last \"\n \"bucket (%d).\" % (len(targets), buckets[-1][1]))\n if len(weights) < buckets[-1][1]:\n raise ValueError(\"Length of weights (%d) must be at least that of last \"\n \"bucket (%d).\" % (len(weights), buckets[-1][1]))\n\n all_inputs = encoder_inputs + decoder_inputs + targets + weights\n losses = []\n outputs = []\n with ops.name_scope(name, \"model_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets):\n with variable_scope.variable_scope(\n variable_scope.get_variable_scope(), reuse=True if j > 0 else None):\n bucket_outputs, _ = seq2seq(encoder_inputs[:bucket[0]],\n decoder_inputs[:bucket[1]])\n outputs.append(bucket_outputs)\n if per_example_loss:\n losses.append(\n sequence_loss_by_example(\n outputs[-1],\n targets[:bucket[1]],\n weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n else:\n losses.append(\n sequence_loss(\n outputs[-1],\n targets[:bucket[1]],\n weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n\n return outputs, losses",
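The first code string in this record is a lightly modified copy of TensorFlow's legacy tf.contrib seq2seq library; note that, unlike stock TensorFlow, this capture's embedding_attention_seq2seq takes a separate encoder_cell argument. As a minimal, hedged sketch of how these functions are typically wired together under TensorFlow 1.x — the bucket sizes, cell sizes, vocabulary sizes, and placeholder names below are invented for illustration, not taken from the record:

# Sketch only: assumes TensorFlow 1.x and that embedding_attention_seq2seq
# and model_with_buckets from the module above are importable in scope.
import tensorflow as tf

buckets = [(5, 10), (10, 15)]  # (encoder length, decoder length) per bucket
num_enc_symbols, num_dec_symbols, emb_size = 100, 100, 32

enc_inputs = [tf.placeholder(tf.int32, [None], name="enc%d" % i)
              for i in range(buckets[-1][0])]
dec_inputs = [tf.placeholder(tf.int32, [None], name="dec%d" % i)
              for i in range(buckets[-1][1])]
# Targets are the decoder inputs shifted by one; the final slot is padding.
targets = dec_inputs[1:] + [tf.placeholder(tf.int32, [None], name="last_tgt")]
weights = [tf.placeholder(tf.float32, [None], name="w%d" % i)
           for i in range(buckets[-1][1])]

def seq2seq_f(encoder_inputs, decoder_inputs):
    # This capture expects distinct encoder and decoder cells.
    return embedding_attention_seq2seq(
        encoder_inputs, decoder_inputs,
        encoder_cell=tf.nn.rnn_cell.GRUCell(64),
        cell=tf.nn.rnn_cell.GRUCell(64),
        num_encoder_symbols=num_enc_symbols,
        num_decoder_symbols=num_dec_symbols,
        embedding_size=emb_size)

# One set of outputs/losses per bucket, with variables shared across buckets.
outputs, losses = model_with_buckets(
    enc_inputs, dec_inputs, targets, weights, buckets, seq2seq_f)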
"\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport data_helpers\nfrom text_cnn import TextCNN\nfrom tensorflow.contrib import learn\n\n\n# Parameters\n# ==================================================\n\n# Data loading params\n# validation数据集占比\ntf.flags.DEFINE_float(\"dev_sample_percentage\", .1, \"Percentage of the training data to use for validation\")\n# 正样本\ntf.flags.DEFINE_string(\"positive_data_file\", \"./data/rt-polaritydata/rt-polarity.pos\", \"Data source for the positive data.\")\n# 负样本\ntf.flags.DEFINE_string(\"negative_data_file\", \"./data/rt-polaritydata/rt-polarity.neg\", \"Data source for the negative data.\")\n\n# Model Hyperparameters\n# 词向量长度\ntf.flags.DEFINE_integer(\"embedding_dim\", 128, \"Dimensionality of character embedding (default: 128)\")\n# 卷积核大小\ntf.flags.DEFINE_string(\"filter_sizes\", \"3,4,5\", \"Comma-separated filter sizes (default: '3,4,5')\")\n# 每一种卷积核个数\ntf.flags.DEFINE_integer(\"num_filters\", 64, \"Number of filters per filter size (default: 128)\")\n# dropout参数\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 0.5, \"Dropout keep probability (default: 0.5)\")\n# l2正则化参数\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.0005, \"L2 regularization lambda (default: 0.0)\")\n\n# Training parameters\n# 批次大小\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\n# 迭代周期\ntf.flags.DEFINE_integer(\"num_epochs\", 20, \"Number of training epochs (default: 200)\")\n# 多少step测试一次\ntf.flags.DEFINE_integer(\"evaluate_every\", 100, \"Evaluate model on dev set after this many steps (default: 100)\")\n# 多少step保存一次模型\ntf.flags.DEFINE_integer(\"checkpoint_every\", 100, \"Save model after this many steps (default: 100)\")\n# 最多保存多少个模型\ntf.flags.DEFINE_integer(\"num_checkpoints\", 5, \"Number of checkpoints to store (default: 5)\")\n# Misc Parameters\n# tensorFlow 会自动选择一个存在并且支持的设备来运行 operation\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\n# 获取你的 operations 和 Tensor 被指派到哪个设备上运行\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n# flags解析\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\n\n# 打印所有参数\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n print(\"{}={}\".format(attr.upper(), value))\nprint(\"\")\n\n\n# Data Preparation\n# ==================================================\n\n# Load data\nprint(\"Loading data...\")\nx_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)\n\n# Build vocabulary\n# 一行数据最多的词汇数\nmax_document_length = max([len(x.split(\" \")) for x in x_text])\nvocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)\nx = np.array(list(vocab_processor.fit_transform(x_text)))\nprint(\"x_shape:\",x.shape)\nprint(\"y_shape:\",y.shape)\n\n# Randomly shuffle data\nnp.random.seed(10)\nshuffle_indices = np.random.permutation(np.arange(len(y)))\nx_shuffled = x[shuffle_indices]\ny_shuffled = y[shuffle_indices]\n\n# Split train/test set\n# TODO: This is very crude, should use cross-validation\n# 数据集切分为两部分\ndev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))\nx_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]\ny_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]\nprint(\"Vocabulary Size: {:d}\".format(len(vocab_processor.vocabulary_)))\nprint(\"Train/Dev split: {:d}/{:d}\".format(len(y_train), 
len(y_dev)))\n\nprint(\"x:\",x_train[0:5])\nprint(\"y:\",y_train[0:5])\n\n\n# Training\n# ==================================================\n\nwith tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n cnn = TextCNN(\n sequence_length=x_train.shape[1],\n num_classes=y_train.shape[1],\n vocab_size=len(vocab_processor.vocabulary_),\n embedding_size=FLAGS.embedding_dim,\n filter_sizes=list(map(int, FLAGS.filter_sizes.split(\",\"))),\n num_filters=FLAGS.num_filters,\n l2_reg_lambda=FLAGS.l2_reg_lambda)\n\n # Define Training procedure\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(1e-3)\n # Compute the gradients\n grads_and_vars = optimizer.compute_gradients(cnn.loss)\n # Apply the computed gradients to the variables; this is the second half of minimize(),\n # returning an Operation that applies the given gradients and increments global_step\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n # Keep track of gradient values and sparsity (optional)\n # Record the gradient values of the variables\n grad_summaries = []\n for g, v in grads_and_vars:\n if g is not None:\n grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name), g)\n sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n grad_summaries.append(grad_hist_summary)\n grad_summaries.append(sparsity_summary)\n grad_summaries_merged = tf.summary.merge(grad_summaries)\n\n # Output directory for models and summaries\n # Define the output directory\n timestamp = str(int(time.time()))\n out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n print(\"Writing to {}\\n\".format(out_dir))\n\n # Summaries for loss and accuracy\n loss_summary = tf.summary.scalar(\"loss\", cnn.loss)\n acc_summary = tf.summary.scalar(\"accuracy\", cnn.accuracy)\n\n # Train Summaries\n train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])\n train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n # Dev summaries\n dev_summary_op = tf.summary.merge([loss_summary, acc_summary])\n dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n\n # Checkpoint directory. 
TensorFlow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n # Save the model, keeping at most 5 checkpoints\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)\n\n # Write vocabulary\n vocab_processor.save(os.path.join(out_dir, \"vocab\"))\n\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n\n def train_step(x_batch, y_batch):\n \"\"\"\n A single training step\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n if (step%10==0):\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)\n\n def dev_step(x_batch, y_batch, writer=None):\n \"\"\"\n Evaluates model on a dev set\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: 1.0\n }\n step, summaries, loss, accuracy = sess.run(\n [global_step, dev_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n if writer:\n writer.add_summary(summaries, step)\n\n # Generate batches\n batches = data_helpers.batch_iter(\n list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)\n # Training loop. For each batch...\n for batch in batches:\n x_batch, y_batch = zip(*batch)\n train_step(x_batch, y_batch)\n current_step = tf.train.global_step(sess, global_step)\n # Evaluate\n if current_step % FLAGS.evaluate_every == 0:\n print(\"\\nEvaluation:\")\n dev_step(x_dev, y_dev, writer=dev_summary_writer)\n print(\"\")\n # Save the model\n if current_step % FLAGS.checkpoint_every == 0:\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))",
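The training script above relies on data_helpers.batch_iter, which is not included in this record. A hypothetical sketch of such a generator, inferred only from the call site batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs); the shuffling policy is an assumption:

# Hypothetical stand-in for data_helpers.batch_iter; not from this record.
import numpy as np

def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """Yield successive batches over the data for num_epochs epochs."""
    data = np.array(data, dtype=object)
    num_batches_per_epoch = (len(data) - 1) // batch_size + 1
    for _ in range(num_epochs):
        order = np.random.permutation(len(data)) if shuffle else np.arange(len(data))
        shuffled = data[order]
        for batch_num in range(num_batches_per_epoch):
            start = batch_num * batch_size
            yield shuffled[start:start + batch_size]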
"#coding:utf-8\nimport tensorflow as tf\nimport numpy as np\nimport pickle\n\n\nclass TextCNN(object):\n \"\"\"\n A CNN for text classification.\n Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.\n \"\"\"\n # sequence_length-最长词汇数\n # num_classes-分类数\n # vocab_size-总词汇数\n # embedding_size-词向量长度\n # filter_sizes-卷积核尺寸3,4,5\n # num_filters-卷积核数量\n # l2_reg_lambda-l2正则化系数\n def __init__(\n self, sequence_length, num_classes, vocab_size,\n embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):\n\n # Placeholders for input, output and dropout\n self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name=\"input_x\")\n self.input_y = tf.placeholder(tf.float32, [None, num_classes], name=\"input_y\")\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n\n # Keeping track of l2 regularization loss (optional)\n l2_loss = tf.constant(0.0)\n\n # Embedding layer\n with tf.device('/cpu:0'), tf.name_scope(\"embedding\"):\n self.W = tf.Variable(\n tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),\n name=\"W\")\n # [batch_size, sequence_length, embedding_size]\n self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)\n # 添加一个维度,[batch_size, sequence_length, embedding_size, 1]\n self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)\n\n # Create a convolution + maxpool layer for each filter size\n \n pooled_outputs = []\n for i, filter_size in enumerate(filter_sizes):\n with tf.name_scope(\"conv-maxpool-%s\" % filter_size):\n # Convolution Layer\n filter_shape = [filter_size, embedding_size, 1, num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W\")\n b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name=\"b\")\n conv = tf.nn.conv2d(\n self.embedded_chars_expanded,\n W,\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n name=\"conv\")\n # Apply nonlinearity\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name=\"relu\")\n # Maxpooling over the outputs\n pooled = tf.nn.max_pool(\n h,\n ksize=[1, sequence_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1],\n padding='VALID',\n name=\"pool\")\n pooled_outputs.append(pooled)\n\n # Combine all the pooled features\n num_filters_total = num_filters * len(filter_sizes)\n self.h_pool = tf.concat(pooled_outputs, 3)\n # 把池化层输出变成一维向量\n self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])\n \n # Add dropout\n with tf.name_scope(\"dropout\"):\n self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)\n\n # Final (unnormalized) scores and predictions\n with tf.name_scope(\"output\"):\n W = tf.get_variable(\n \"W\",\n shape=[num_filters_total, num_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name=\"b\")\n l2_loss += tf.nn.l2_loss(W)\n l2_loss += tf.nn.l2_loss(b)\n self.scores = tf.nn.softmax(tf.nn.xw_plus_b(self.h_drop, W, b, name=\"scores\"))\n self.predictions = tf.argmax(self.scores, 1, name=\"predictions\")\n\n # CalculateMean cross-entropy loss\n with tf.name_scope(\"loss\"):\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)\n self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss\n\n # Accuracy\n with tf.name_scope(\"accuracy\"):\n correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, \"float\"), name=\"accuracy\")\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"scipy.signal.convolve2d"
],
[
"tensorflow.get_collection",
"tensorflow.random_uniform",
"tensorflow.test.main"
],
[
"tensorflow.contrib.rnn.python.ops.core_rnn_cell.OutputProjectionWrapper",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.tanh",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.nn_ops.softmax",
"tensorflow.python.ops.array_ops.stop_gradient",
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.contrib.rnn.python.ops.core_rnn_cell.EmbeddingWrapper",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.embedding_ops.embedding_lookup",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.nn_ops.conv2d",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.ops.rnn.static_rnn",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.nn_ops.xw_plus_b",
"tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits",
"tensorflow.python.util.nest.flatten"
],
[
"tensorflow.flags.DEFINE_boolean",
"tensorflow.Graph",
"tensorflow.train.global_step",
"tensorflow.summary.FileWriter",
"tensorflow.summary.scalar",
"numpy.random.seed",
"tensorflow.Variable",
"tensorflow.flags.DEFINE_string",
"tensorflow.global_variables",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.nn.zero_fraction",
"tensorflow.Session",
"tensorflow.flags.DEFINE_float",
"tensorflow.train.AdamOptimizer",
"tensorflow.contrib.learn.preprocessing.VocabularyProcessor",
"tensorflow.summary.merge",
"tensorflow.flags.DEFINE_integer"
],
[
"tensorflow.device",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.concat",
"tensorflow.nn.max_pool",
"tensorflow.cast",
"tensorflow.nn.l2_loss",
"tensorflow.nn.conv2d",
"tensorflow.name_scope",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.argmax",
"tensorflow.nn.dropout",
"tensorflow.nn.xw_plus_b",
"tensorflow.truncated_normal",
"tensorflow.placeholder",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.bias_add",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.random_uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
fire-suppression-abm/mesa | [
"8498eea3e5d4a739aee3b003107a0e7de59c5026",
"8498eea3e5d4a739aee3b003107a0e7de59c5026"
] | [
"mesa/datacollection.py",
"examples/bank_reserves/batch_run.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nMesa Data Collection Module\n===========================\n\nDataCollector is meant to provide a simple, standard way to collect data\ngenerated by a Mesa model. It collects three types of data: model-level data,\nagent-level data, and tables.\n\nA DataCollector is instantiated with two dictionaries of reporter names and\nassociated variable names or functions for each, one for model-level data and\none for agent-level data; a third dictionary provides table names and columns.\nVariable names are converted into functions which retrieve attributes of that\nname.\n\nWhen the collect() method is called, each model-level function is called, with\nthe model as the argument, and the results associated with the relevant\nvariable. Then the agent-level functions are called on each agent in the model\nscheduler.\n\nAdditionally, other objects can write directly to tables by passing in an\nappropriate dictionary object for a table row.\n\nThe DataCollector then stores the data it collects in dictionaries:\n * model_vars maps each reporter to a list of its values\n * tables maps each table to a dictionary, with each column as a key with a\n list as its value.\n * _agent_records maps each model step to a list of each agents id\n and its values.\n\nFinally, DataCollector can create a pandas DataFrame from each collection.\n\nThe default DataCollector here makes several assumptions:\n * The model has a schedule object called 'schedule'\n * The schedule has an agent list called agents\n * For collecting agent-level variables, agents must have a unique_id\n\n\"\"\"\nfrom functools import partial\nimport itertools\nfrom operator import attrgetter\nimport pandas as pd\n\n\nclass DataCollector:\n \"\"\" Class for collecting data generated by a Mesa model.\n\n A DataCollector is instantiated with dictionaries of names of model- and\n agent-level variables to collect, associated with attribute names or\n functions which actually collect them. When the collect(...) method is\n called, it collects these attributes and executes these functions one by\n one and stores the results.\n\n \"\"\"\n\n model = None\n\n def __init__(self, model_reporters=None, agent_reporters=None, tables=None):\n \"\"\" Instantiate a DataCollector with lists of model and agent reporters.\n\n Both model_reporters and agent_reporters accept a dictionary mapping a\n variable name to either an attribute name, or a method.\n For example, if there was only one model-level reporter for number of\n agents, it might look like:\n {\"agent_count\": lambda m: m.schedule.get_agent_count() }\n If there was only one agent-level reporter (e.g. the agent's energy),\n it might look like this:\n {\"energy\": \"energy\"}\n or like this:\n {\"energy\": lambda a: a.energy}\n\n The tables arg accepts a dictionary mapping names of tables to lists of\n columns. 
For example, if we want to allow agents to write their age\n when they are destroyed (to keep track of lifespans), it might look\n like:\n {\"Lifespan\": [\"unique_id\", \"age\"]}\n\n Args:\n model_reporters: Dictionary of reporter names and attributes/funcs\n agent_reporters: Dictionary of reporter names and attributes/funcs.\n tables: Dictionary of table names to lists of column names.\n\n Notes:\n If you want to pickle your model you must not use lambda functions.\n If your model includes a large number of agents, you should *only*\n use attribute names for the agent reporter, it will be much faster.\n \"\"\"\n self.model_reporters = {}\n self.agent_reporters = {}\n\n self.model_vars = {}\n self._agent_records = {}\n self.tables = {}\n\n if model_reporters is not None:\n for name, reporter in model_reporters.items():\n self._new_model_reporter(name, reporter)\n\n if agent_reporters is not None:\n for name, reporter in agent_reporters.items():\n self._new_agent_reporter(name, reporter)\n\n if tables is not None:\n for name, columns in tables.items():\n self._new_table(name, columns)\n\n def _new_model_reporter(self, name, reporter):\n \"\"\" Add a new model-level reporter to collect.\n\n Args:\n name: Name of the model-level variable to collect.\n reporter: Attribute string, or function object that returns the\n variable when given a model instance.\n \"\"\"\n if type(reporter) is str:\n reporter = partial(self._getattr, reporter)\n self.model_reporters[name] = reporter\n self.model_vars[name] = []\n\n def _new_agent_reporter(self, name, reporter):\n \"\"\" Add a new agent-level reporter to collect.\n\n Args:\n name: Name of the agent-level variable to collect.\n reporter: Attribute string, or function object that returns the\n variable when given a model instance.\n\n \"\"\"\n if type(reporter) is str:\n attribute_name = reporter\n reporter = partial(self._getattr, reporter)\n reporter.attribute_name = attribute_name\n self.agent_reporters[name] = reporter\n\n def _new_table(self, table_name, table_columns):\n \"\"\" Add a new table that objects can write to.\n\n Args:\n table_name: Name of the new table.\n table_columns: List of columns to add to the table.\n\n \"\"\"\n new_table = {column: [] for column in table_columns}\n self.tables[table_name] = new_table\n\n def _record_agents(self, model):\n \"\"\" Record agents data in a mapping of functions and agents. \"\"\"\n rep_funcs = self.agent_reporters.values()\n if all([hasattr(rep, 'attribute_name') for rep in rep_funcs]):\n prefix = ['model.schedule.steps', 'unique_id']\n attributes = [func.attribute_name for func in rep_funcs]\n get_reports = attrgetter(*prefix + attributes)\n else:\n def get_reports(agent):\n prefix = (agent.model.schedule.steps, agent.unique_id)\n reports = tuple(rep(agent) for rep in rep_funcs)\n return prefix + reports\n agent_records = map(get_reports, model.schedule.agents)\n return agent_records\n\n def collect(self, model):\n \"\"\" Collect all the data for the given model object. 
\"\"\"\n if self.model_reporters:\n for var, reporter in self.model_reporters.items():\n self.model_vars[var].append(reporter(model))\n\n if self.agent_reporters:\n agent_records = self._record_agents(model)\n self._agent_records[model.schedule.steps] = list(agent_records)\n\n def add_table_row(self, table_name, row, ignore_missing=False):\n \"\"\" Add a row dictionary to a specific table.\n\n Args:\n table_name: Name of the table to append a row to.\n row: A dictionary of the form {column_name: value...}\n ignore_missing: If True, fill any missing columns with Nones;\n if False, throw an error if any columns are missing\n\n \"\"\"\n if table_name not in self.tables:\n raise Exception(\"Table does not exist.\")\n\n for column in self.tables[table_name]:\n if column in row:\n self.tables[table_name][column].append(row[column])\n elif ignore_missing:\n self.tables[table_name][column].append(None)\n else:\n raise Exception(\"Could not insert row with missing column\")\n\n @staticmethod\n def _getattr(name, object):\n \"\"\" Turn around arguments of getattr to make it partially callable.\"\"\"\n return getattr(object, name, None)\n\n def get_model_vars_dataframe(self):\n \"\"\" Create a pandas DataFrame from the model variables.\n\n The DataFrame has one column for each model variable, and the index is\n (implicitly) the model tick.\n\n \"\"\"\n return pd.DataFrame(self.model_vars)\n\n def get_agent_vars_dataframe(self):\n \"\"\" Create a pandas DataFrame from the agent variables.\n\n The DataFrame has one column for each variable, with two additional\n columns for tick and agent_id.\n\n \"\"\"\n all_records = itertools.chain.from_iterable(\n self._agent_records.values())\n rep_names = [rep_name for rep_name in self.agent_reporters]\n\n df = pd.DataFrame.from_records(\n data=all_records,\n columns=[\"Step\", \"AgentID\"] + rep_names,\n )\n df = df.set_index([\"Step\", \"AgentID\"])\n return df\n\n def get_table_dataframe(self, table_name):\n \"\"\" Create a pandas DataFrame from a particular table.\n\n Args:\n table_name: The name of the table to convert.\n\n \"\"\"\n if table_name not in self.tables:\n raise Exception(\"No such table.\")\n return pd.DataFrame(self.tables[table_name])\n",
"\"\"\"\nThe following code was adapted from the Bank Reserves model included in Netlogo\nModel information can be found at: http://ccl.northwestern.edu/netlogo/models/BankReserves\nAccessed on: November 2, 2017\nAuthor of NetLogo code:\n Wilensky, U. (1998). NetLogo Bank Reserves model.\n http://ccl.northwestern.edu/netlogo/models/BankReserves.\n Center for Connected Learning and Computer-Based Modeling,\n Northwestern University, Evanston, IL.\n\nThis version of the model has a BatchRunner at the bottom. This\nis for collecting data on parameter sweeps. It is not meant to\nbe run with run.py, since run.py starts up a server for visualization, which\nisn't necessary for the BatchRunner. To run a parameter sweep, call\nbatch_run.py in the command line.\n\nThe BatchRunner is set up to collect step by step data of the model. It does\nthis by collecting the DataCollector object in a model_reporter (i.e. the\nDataCollector is collecting itself every step).\n\nThe end result of the batch run will be a csv file created in the same\ndirectory from which Python was run. The csv file will contain the data from\nevery step of every run.\n\"\"\"\n\nfrom bank_reserves.agents import Bank, Person\nimport itertools\nfrom mesa import Model\nfrom mesa.batchrunner import BatchRunner\nfrom mesa.space import MultiGrid\nfrom mesa.datacollection import DataCollector\nfrom mesa.time import RandomActivation\nimport numpy as np\nimport pandas as pd\n\n# Start of datacollector functions\n\n\ndef get_num_rich_agents(model):\n \"\"\"list of rich agents\"\"\"\n\n rich_agents = [a for a in model.schedule.agents if a.savings > model.rich_threshold]\n # return number of rich agents\n return len(rich_agents)\n\n\ndef get_num_poor_agents(model):\n \"\"\"list of poor agents\"\"\"\n\n poor_agents = [a for a in model.schedule.agents if a.loans > 10]\n # return number of poor agents\n return len(poor_agents)\n\n\ndef get_num_mid_agents(model):\n \"\"\"list of middle class agents\"\"\"\n\n mid_agents = [a for a in model.schedule.agents if\n a.loans < 10 and a.savings < model.rich_threshold]\n # return number of middle class agents\n return len(mid_agents)\n\n\ndef get_total_savings(model):\n \"\"\"list of amounts of all agents' savings\"\"\"\n\n agent_savings = [a.savings for a in model.schedule.agents]\n # return the sum of agents' savings\n return np.sum(agent_savings)\n\n\ndef get_total_wallets(model):\n \"\"\"list of amounts of all agents' wallets\"\"\"\n\n agent_wallets = [a.wallet for a in model.schedule.agents]\n # return the sum of all agents' wallets\n return np.sum(agent_wallets)\n\n\ndef get_total_money(model):\n \"\"\"sum of all agents' wallets\"\"\"\n\n wallet_money = get_total_wallets(model)\n # sum of all agents' savings\n savings_money = get_total_savings(model)\n # return sum of agents' wallets and savings for total money\n return wallet_money + savings_money\n\n\ndef get_total_loans(model):\n \"\"\"list of amounts of all agents' loans\"\"\"\n\n agent_loans = [a.loans for a in model.schedule.agents]\n # return sum of all agents' loans\n return np.sum(agent_loans)\n\n\ndef track_params(model):\n return (model.init_people,\n model.rich_threshold,\n model.reserve_percent)\n\n\ndef track_run(model):\n return model.uid\n\n\nclass BankReservesModel(Model):\n # id generator to track run number in batch run data\n id_gen = itertools.count(1)\n\n # grid height\n grid_h = 20\n # grid width\n grid_w = 20\n\n \"\"\"init parameters \"init_people\", \"rich_threshold\", and \"reserve_percent\"\n are all 
UserSettableParameters\"\"\"\n def __init__(self, height=grid_h, width=grid_w, init_people=2, rich_threshold=10,\n reserve_percent=50,):\n self.uid = next(self.id_gen)\n self.height = height\n self.width = width\n self.init_people = init_people\n self.schedule = RandomActivation(self)\n self.grid = MultiGrid(self.width, self.height, torus=True)\n # rich_threshold is the amount of savings a person needs to be considered \"rich\"\n self.rich_threshold = rich_threshold\n self.reserve_percent = reserve_percent\n # see datacollector functions above\n self.datacollector = DataCollector(model_reporters={\n \"Rich\": get_num_rich_agents,\n \"Poor\": get_num_poor_agents,\n \"Middle Class\": get_num_mid_agents,\n \"Savings\": get_total_savings,\n \"Wallets\": get_total_wallets,\n \"Money\": get_total_money,\n \"Loans\": get_total_loans,\n \"Model Params\": track_params,\n \"Run\": track_run},\n agent_reporters={\n \"Wealth\": lambda x: x.wealth})\n\n # create a single bank for the model\n self.bank = Bank(1, self, self.reserve_percent)\n\n # create people for the model according to number of people set by user\n for i in range(self.init_people):\n # set x coordinate as a random number within the width of the grid\n x = self.random.randrange(self.width)\n # set y coordinate as a random number within the height of the grid\n y = self.random.randrange(self.height)\n p = Person(i, (x, y), self, True, self.bank, self.rich_threshold)\n # place the Person object on the grid at coordinates (x, y)\n self.grid.place_agent(p, (x, y))\n # add the Person object to the model schedule\n self.schedule.add(p)\n\n self.running = True\n\n def step(self):\n # collect data\n self.datacollector.collect(self)\n # tell all the agents in the model to run their step function\n self.schedule.step()\n\n def run_model(self):\n for i in range(self.run_time):\n self.step()\n\n\n# parameter lists for each parameter to be tested in batch run\nbr_params = {\"init_people\": [25, 100, 150, 200],\n \"rich_threshold\": [5, 10, 15, 20],\n \"reserve_percent\": [0, 50, 100]}\n\nbr = BatchRunner(BankReservesModel,\n br_params,\n iterations=1,\n max_steps=1000,\n model_reporters={\"Data Collector\": lambda m: m.datacollector})\n\nif __name__ == '__main__':\n br.run_all()\n br_df = br.get_model_vars_dataframe()\n br_step_data = pd.DataFrame()\n for i in range(len(br_df[\"Data Collector\"])):\n if isinstance(br_df[\"Data Collector\"][i], DataCollector):\n i_run_data = br_df[\"Data Collector\"][i].get_model_vars_dataframe()\n br_step_data = br_step_data.append(i_run_data, ignore_index=True)\n br_step_data.to_csv(\"BankReservesModel_Step_Data.csv\")\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.DataFrame"
],
[
"numpy.sum",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
google-research/growneuron | [
"2ab6fe19f920b9f3b9cc9cf8ac39c8965967a5fe"
] | [
"growneuron/layers_test.py"
] | [
"# coding=utf-8\n# Copyright 2022 GradMax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for growneuron.layers.\"\"\"\nimport absl.testing.parameterized as parameterized\nimport growneuron.layers as glayers\nimport tensorflow as tf\n\n\nclass LayerTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.named_parameters(\n ('dense', tf.keras.layers.Dense(3), (3, 4)),\n ('batchnorm', tf.keras.layers.BatchNormalization(), (2, 4)),\n ('conv2d', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4))\n )\n def test_consistency(self, layer, input_shape):\n wrapped_layer = glayers.GrowLayer(layer)\n x = tf.random.uniform(input_shape)\n original_out = layer(x)\n new_out = wrapped_layer(x)\n self.assertAllEqual(original_out, new_out)\n\n @parameterized.named_parameters(\n ('dense', tf.keras.layers.Dense(3), (3, 4), 1),\n ('dense_5neuron', tf.keras.layers.Dense(3), (3, 4), 5),\n ('conv2d', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 1),\n ('conv2d_5neuron', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 5),\n )\n def test_add_neurons_incoming_zeros(self, layer, input_shape, n_new):\n wrapped_layer = glayers.GrowLayer(layer)\n x = tf.random.uniform(input_shape)\n original_out = wrapped_layer(x)\n old_output_shape = original_out.get_shape()\n n_neurons_old = old_output_shape[-1]\n wrapped_layer.add_neurons(n_new, new_weights='zeros', is_outgoing=False)\n new_out = wrapped_layer(x)\n # Check the output has the expected shape\n new_shape = old_output_shape[:-1] + [n_neurons_old+n_new]\n self.assertAllEqual(new_shape, new_out.get_shape())\n # Check the old neurons create same output\n self.assertAllClose(original_out, new_out[Ellipsis, :n_neurons_old])\n # Check the new neurons create zero output\n self.assertEqual(0, tf.math.count_nonzero(new_out[Ellipsis, n_neurons_old:]))\n new_weights, new_biases = wrapped_layer.get_weights()\n # Check the new weights are zero\n added_weights = new_weights[Ellipsis, n_neurons_old:]\n self.assertAllEqual(added_weights, tf.zeros_like(added_weights))\n # Check the new biases are zero\n added_biases = new_biases[n_neurons_old:]\n self.assertAllEqual(added_biases, tf.zeros_like(added_biases))\n\n @parameterized.named_parameters(\n ('dense', tf.keras.layers.Dense(3), (3, 4), 1),\n ('dense_5neuron', tf.keras.layers.Dense(3), (3, 4), 5),\n ('conv2d', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 1),\n ('conv2d_5neuron', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 5),\n )\n def test_add_neurons_outgoing_zeros(self, layer, input_shape, n_new):\n wrapped_layer = glayers.GrowLayer(layer)\n n_features = input_shape[-1]\n x = tf.random.uniform(input_shape)\n # New input after growing would have more features\n new_input_shape = input_shape[:-1] + (n_new,)\n new_x = tf.concat([x, tf.random.uniform(new_input_shape)], axis=-1)\n original_out = layer(x)\n old_weights, old_biases = wrapped_layer.get_weights()\n wrapped_layer.add_neurons(n_new, new_weights='zeros', is_outgoing=True)\n new_out = wrapped_layer(new_x)\n new_weights, new_biases = 
wrapped_layer.get_weights()\n print(new_weights, new_biases)\n # Output of the layer shouldn't change.\n self.assertAllClose(original_out, new_out)\n # Check biases are unchanged\n self.assertAllEqual(old_biases, new_biases)\n # Check the new weights are zero\n added_weights = new_weights[Ellipsis, n_features:, :]\n self.assertAllEqual(added_weights, tf.zeros_like(added_weights))\n # Check the old weights are same\n kept_weights = new_weights[Ellipsis, :n_features, :]\n self.assertAllEqual(old_weights, kept_weights)\n\n @parameterized.named_parameters(\n ('dense_kernel', 'dense', ('kernel',)),\n ('dense_bias', 'dense', ('bias',)),\n ('dense_activity', 'dense', ('activity',)),\n ('dense_all', 'dense', ('kernel', 'bias', 'activity')),\n ('conv2d_kernel', 'conv2d', ('kernel',)),\n ('conv2d_bias', 'conv2d', ('bias',)),\n ('conv2d_activity', 'conv2d', ('activity',)),\n ('conv2d_all', 'conv2d', ('kernel', 'bias', 'activity')),\n )\n def test_regularizer_incoming(self, layer_type, regularizer_types):\n reg_kwargs = {f'{r_type}_regularizer': tf.keras.regularizers.L2(0.1)\n for r_type in regularizer_types}\n print(reg_kwargs)\n if layer_type == 'dense':\n layer = tf.keras.layers.Dense(3, **reg_kwargs)\n input_shape = (3, 4)\n elif layer_type == 'conv2d':\n layer = tf.keras.layers.Conv2D(3, 3, **reg_kwargs)\n input_shape = (3, 5, 5, 4)\n else:\n raise ValueError('not supported')\n wrapped_layer = glayers.GrowLayer(layer)\n x = tf.random.uniform(input_shape)\n _ = wrapped_layer(x)\n old_losses = wrapped_layer.losses\n wrapped_layer.add_neurons(1, new_weights='zeros', is_outgoing=False)\n _ = wrapped_layer(x)\n new_losses = wrapped_layer.losses\n for old_loss, new_loss in zip(old_losses, new_losses):\n self.assertAllClose(old_loss, new_loss)\n\n @parameterized.named_parameters(\n ('dense_kernel', 'dense', ('kernel',)),\n ('dense_bias', 'dense', ('bias',)),\n ('dense_activity', 'dense', ('activity',)),\n ('dense_all', 'dense', ('kernel', 'bias', 'activity')),\n ('conv2d_kernel', 'conv2d', ('kernel',)),\n ('conv2d_bias', 'conv2d', ('bias',)),\n ('conv2d_activity', 'conv2d', ('activity',)),\n ('conv2d_all', 'conv2d', ('kernel', 'bias', 'activity')),\n ('bn_beta', 'bn', ('beta',)),\n )\n def test_regularizer_outgoing(self, layer_type, regularizer_types):\n reg_kwargs = {f'{r_type}_regularizer': tf.keras.regularizers.L2(0.1)\n for r_type in regularizer_types}\n print(reg_kwargs)\n if layer_type == 'dense':\n layer = tf.keras.layers.Dense(3, **reg_kwargs)\n input_shape = (3, 4)\n elif layer_type == 'conv2d':\n layer = tf.keras.layers.Conv2D(3, 3, **reg_kwargs)\n input_shape = (3, 5, 5, 4)\n elif layer_type == 'bn':\n layer = tf.keras.layers.BatchNormalization(**reg_kwargs)\n input_shape = (3, 4)\n else:\n raise ValueError('not supported')\n wrapped_layer = glayers.GrowLayer(layer)\n x = tf.random.uniform(input_shape)\n _ = wrapped_layer(x)\n old_losses = wrapped_layer.losses\n if layer_type == 'bn':\n wrapped_layer.add_neurons_identity(1)\n else:\n wrapped_layer.add_neurons(1, new_weights='zeros', is_outgoing=True)\n new_input_shape = input_shape[:-1] + (1,)\n new_x = tf.concat([x, tf.random.uniform(new_input_shape)], axis=-1)\n _ = wrapped_layer(new_x)\n new_losses = wrapped_layer.losses\n for old_loss, new_loss in zip(old_losses, new_losses):\n self.assertAllClose(old_loss, new_loss)\n\n @parameterized.named_parameters(\n ('2d_axis1', (4, 5), -1),\n ('3d_axis1', (3, 3, 1), -1),\n ('4d_axis1', (3, 3, 4, 5), -1),\n ('2d_axis2', (4, 5), -2),\n ('3d_axis2', (3, 3, 1), -2),\n ('4d_axis2', (3, 3, 4, 5), -2),\n 
)\n def test_norm_l2(self, shape, axis):\n tensor = tf.reshape(tf.range(tf.math.reduce_prod(shape),\n dtype=tf.float32), shape)\n calculated_norm = glayers.norm_l2(tensor, axis)\n if axis == -2:\n tensor = tf.einsum('...ij->...ji', tensor)\n # L2 norm should be 1 over axis 1\n flat_tensor = tf.reshape(tensor,\n [-1, tensor.shape[-1]])\n expected_norms = tf.norm(flat_tensor, axis=-2)\n self.assertAllClose(expected_norms, calculated_norm)\n pass\n\n @parameterized.named_parameters(\n ('2d_axis1', (4, 5), -1),\n ('3d_axis1', (3, 3, 1), -1),\n ('4d_axis1', (3, 3, 4, 5), -1),\n ('2d_axis2', (4, 5), -2),\n ('3d_axis2', (3, 3, 1), -2),\n ('4d_axis2', (3, 3, 4, 5), -2),\n )\n def test_normalize_l2(self, shape, axis):\n tensor = tf.reshape(tf.range(tf.math.reduce_prod(shape),\n dtype=tf.float32), shape)\n normalized_tensor = glayers.normalize_l2(tensor, axis)\n if axis == -2:\n normalized_tensor = tf.einsum('...ij->...ji', normalized_tensor)\n # L2 norm should be 1 over axis 1\n flat_tensor = tf.reshape(normalized_tensor,\n [-1, normalized_tensor.shape[-1]])\n norms = tf.norm(flat_tensor, axis=-2)\n self.assertAllClose(norms, tf.ones_like(norms))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.norm",
"tensorflow.math.count_nonzero",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.regularizers.L2",
"tensorflow.reshape",
"tensorflow.random.uniform",
"tensorflow.test.main",
"tensorflow.keras.layers.Conv2D",
"tensorflow.einsum",
"tensorflow.ones_like",
"tensorflow.zeros_like",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.math.reduce_prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mpeychev/disentangled-autoencoders | [
"2d1f18fe198486f29c74ba5606ffcadaff7055cf",
"2d1f18fe198486f29c74ba5606ffcadaff7055cf"
] | [
"src/base_autoencoder.py",
"src/fc_autoencoder.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport draw_util\nimport os\n\nclass Autoencoder(object):\n\n def partial_fit(self, targets):\n assert (self.is_training)\n if not self.is_denoising:\n self.sess.run(self.train_op, feed_dict={self.input_layer: targets,\n self.batch_size: [len(targets)]})\n else:\n inputs = draw_util.add_noise(targets) if self.is_denoising == 1 else \\\n draw_util.erase(targets)\n self.sess.run(self.train_op, feed_dict={self.input_layer: inputs,\n self.target_layer: targets, self.batch_size: [len(targets)]})\n\n def calc_reconstruction_loss(self, targets):\n if len(targets) == 40000:\n A = self.calc_reconstruction_loss(targets[:20000])\n B = self.calc_reconstruction_loss(targets[20000:])\n return (A + B) / 2.0\n if not self.is_denoising:\n return self.sess.run(self.reconstruction_loss, feed_dict={self.input_layer: targets,\n self.batch_size: [len(targets)]})\n else:\n inputs = draw_util.add_noise(targets) if self.is_denoising == 1 else \\\n draw_util.erase(targets)\n return self.sess.run(self.reconstruction_loss, feed_dict={self.input_layer: inputs,\n self.target_layer: targets, self.batch_size: [len(targets)]})\n\n def calc_kl_divergence(self, inputs):\n if len(inputs) == 40000:\n A = self.calc_kl_divergence(inputs[:20000])\n B = self.calc_kl_divergence(inputs[20000:])\n return (A + B) / 2.0\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n return self.sess.run(self.kl_divergence, feed_dict={self.input_layer: inputs,\n self.batch_size: [len(inputs)]})\n\n def calc_cost(self, targets):\n if len(targets) == 40000:\n A = self.calc_cost(targets[:20000])\n B = self.calc_cost(targets[20000:])\n return (A + B) / 2.0\n if not self.is_denoising:\n return self.sess.run(self.cost, feed_dict={self.input_layer: targets,\n self.batch_size: [len(targets)]})\n else:\n inputs = draw_util.add_noise(targets) if self.is_denoising == 1 else \\\n draw_util.erase(targets)\n return self.sess.run(self.cost, feed_dict={self.input_layer: inputs,\n self.target_layer: targets, self.batch_size: [len(targets)]})\n\n def get_code_dimension(self):\n return self.code_dimension\n\n def get_beta(self):\n return self.beta\n\n def get_output_layer(self, inputs, ignore_noise=False):\n if not ignore_noise:\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n return self.sess.run(self.output_layer, feed_dict={self.input_layer: inputs,\n self.batch_size: [len(inputs)]})\n\n def get_output_layer_from_code(self, code):\n return self.sess.run(self.output_layer, feed_dict={self.code: code})\n\n def get_code(self, inputs, ignore_noise=False):\n if not ignore_noise:\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n return self.sess.run(self.code, feed_dict={self.input_layer: inputs,\n self.batch_size: [len(inputs)]})\n\n def get_code_mean(self, inputs):\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n return self.sess.run(self.code_mean, feed_dict={self.input_layer: inputs,\n self.batch_size: [len(inputs)]})\n\n def get_code_variance(self, inputs):\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n code_log_sigma_sq = self.sess.run(self.code_log_sigma_sq, feed_dict = {\n self.input_layer: 
inputs, self.batch_size: [len(inputs)]})\n return np.exp(code_log_sigma_sq)\n\n def calc_reconstruction_accuracy(self, targets):\n inputs = targets if not self.is_denoising else (draw_util.add_noise(targets) if \\\n self.is_denoising == 1 else draw_util.erase(targets))\n predicted_images = self.get_output_layer(inputs)\n return np.mean(np.sqrt(np.sum(np.square(predicted_images - targets), axis=1)))\n\n def save_model(self):\n self.saver.save(self.sess, os.path.join(self.logs_dir, 'model'))\n\n def restore_model(self):\n self.saver.restore(self.sess, tf.train.latest_checkpoint(self.logs_dir))\n\n def close_session(self):\n self.sess.close()\n tf.reset_default_graph()\n",
"import os\nimport tensorflow as tf\nimport numpy as np\nimport util\nimport draw_util\nfrom base_autoencoder import Autoencoder\n\nclass FcAutoencoder(Autoencoder):\n\n def __init__(self,\n encoder_layers_size=[4096, 1200, 1200, 10],\n decoder_layers_size=[10, 1200, 1200, 1200, 4096],\n beta=None,\n encoder_activation_fn=tf.nn.relu,\n decoder_activation_fn=tf.tanh,\n learning_rate=None,\n seq_index=None,\n denoising=False):\n print('Construct fully connected autoencoder:')\n print('Encoder layers: {0}'.format(encoder_layers_size))\n print('Decoder layers: {0}'.format(decoder_layers_size))\n print('Encoder activation function: {0}'.format(encoder_activation_fn))\n print('Decoder activation function: {0}'.format(decoder_activation_fn))\n print('Beta = {0}'.format(beta))\n print('Seq index = {0}'.format(seq_index))\n print('Is trainable: {0}'.format(learning_rate is not None))\n print('Is denoising: {0}'.format(denoising))\n print('Learning rate: {0}'.format(learning_rate))\n print('Logs dir: {0}'.format(os.path.join(util.get_logs_dir(), seq_index)))\n\n self.encoder_layers_size = encoder_layers_size\n self.decoder_layers_size = decoder_layers_size\n self.code_dimension = encoder_layers_size[-1]\n self.beta = beta\n self.encoder_activation_fn = encoder_activation_fn\n self.decoder_activation_fn = decoder_activation_fn\n self.is_training = (learning_rate is not None)\n self.is_denoising = denoising\n self.optimizer = tf.train.AdamOptimizer(learning_rate) if self.is_training else None\n self.logs_dir = os.path.join(util.get_logs_dir(), seq_index)\n\n if self.is_training:\n util.prepare_dir(self.logs_dir)\n else:\n assert (os.path.exists(self.logs_dir))\n\n self.encoder_get_weights = util.get_weights_he \\\n if 'elu' in encoder_activation_fn.__name__ else util.get_weights_xavier\n self.decoder_get_weights = util.get_weights_he \\\n if 'elu' in decoder_activation_fn.__name__ else util.get_weights_xavier\n\n self._build_network()\n self._define_loss_function()\n\n init_op = tf.global_variables_initializer()\n self.merged_summary_op = tf.summary.merge_all()\n self.saver = tf.train.Saver(max_to_keep=1)\n\n self.sess = tf.InteractiveSession()\n self.sess.run(init_op)\n\n def _build_network(self):\n self._build_input()\n self._build_code(self._build_encoder())\n self._build_output(self._build_decoder())\n\n def _build_input(self):\n with tf.name_scope('Input'):\n self.input_layer = tf.placeholder(tf.float32, shape=[None, self.encoder_layers_size[0]],\n name='layer')\n self.target_layer = self.input_layer if not self.is_denoising else tf.placeholder(\n tf.float32, shape=[None, self.encoder_layers_size[0]], name='target_layer')\n self.batch_size = tf.placeholder(tf.int32, shape=[1], name='batch_size')\n\n def _build_encoder(self):\n last_layer = self.input_layer\n for i in range(1, len(self.encoder_layers_size) - 1):\n scope = 'EncoderHidden_' + str(i)\n with tf.name_scope(scope):\n W = self.encoder_get_weights((self.encoder_layers_size[i - 1],\n self.encoder_layers_size[i]))\n\n current_layer = self.encoder_activation_fn(\n util.batch_norm(tf.matmul(last_layer, W), self.is_training), name='layer')\n\n last_layer = current_layer\n return last_layer\n\n def _build_code(self, last_layer):\n with tf.name_scope('Code'):\n with tf.name_scope('mean'):\n mean_weights = util.get_weights_he((self.encoder_layers_size[-2],\n self.encoder_layers_size[-1]))\n mean_biases = util.get_bias(self.encoder_layers_size[-1])\n\n tf.summary.histogram('Code_mean_weights_summary', mean_weights)\n 
tf.summary.histogram('Code_mean_biases_summary', mean_biases)\n\n self.code_mean = tf.nn.bias_add(tf.matmul(last_layer, mean_weights), mean_biases,\n name='layer')\n\n with tf.name_scope('stddev'):\n stddev_weights = util.get_weights_he((self.encoder_layers_size[-2],\n self.encoder_layers_size[-1]))\n stddev_biases = util.get_bias(self.encoder_layers_size[-1])\n\n tf.summary.histogram('Code_stddev_weights_summary', stddev_weights)\n tf.summary.histogram('Code_stddev_biases_summary', stddev_biases)\n\n self.code_log_sigma_sq = tf.nn.bias_add(tf.matmul(last_layer, stddev_weights),\n stddev_biases, name='layer')\n\n epsilon = tf.random_normal([self.batch_size[0], self.encoder_layers_size[-1]])\n self.code = tf.add(self.code_mean,\n tf.multiply(tf.sqrt(tf.exp(self.code_log_sigma_sq)), epsilon), name='layer')\n\n def _build_decoder(self):\n last_layer = self.code\n for i in range(1, len(self.decoder_layers_size) - 1):\n scope = 'DecoderHidden_' + str(i)\n with tf.name_scope(scope):\n W = self.decoder_get_weights((self.decoder_layers_size[i - 1],\n self.decoder_layers_size[i]))\n\n current_layer = self.decoder_activation_fn(\n util.batch_norm(tf.matmul(last_layer, W), self.is_training), name='layer')\n\n last_layer = current_layer\n return last_layer\n\n def _build_output(self, last_layer):\n with tf.name_scope('Output'):\n W = util.get_weights_xavier((self.decoder_layers_size[-2],\n self.decoder_layers_size[-1]))\n b = util.get_bias(self.decoder_layers_size[-1])\n\n tf.summary.histogram('Output_weights_summary', W)\n tf.summary.histogram('Output_biases_summary', b)\n\n self.logits = tf.nn.bias_add(tf.matmul(last_layer, W), b, name='logits')\n self.output_layer = tf.sigmoid(self.logits, name='layer')\n\n def _define_loss_function(self):\n with tf.name_scope('CostFunction'):\n self.reconstruction_loss = tf.reduce_mean(tf.reduce_sum(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.logits, labels=self.target_layer),\n 1), name='reconstruction')\n self.kl_divergence = tf.constant(0.0, name='kl_divergence') if self.beta < 1e-3 else \\\n self.beta * tf.reduce_mean(0.5 * tf.reduce_sum(tf.square(self.code_mean)\n + tf.exp(self.code_log_sigma_sq) - self.code_log_sigma_sq - 1, 1),\n name='kl_divergence')\n self.cost = tf.add(self.reconstruction_loss, self.kl_divergence, name='cost')\n\n if self.is_training:\n self.train_op = self.optimizer.minimize(self.cost)\n\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('reconstruction_loss', self.reconstruction_loss)\n tf.summary.scalar('kl_divergence', self.kl_divergence)\n\n def get_summary(self, targets):\n if not self.is_denoising:\n return self.sess.run(self.merged_summary_op, feed_dict={self.input_layer: targets,\n self.batch_size: [len(targets)]})\n else:\n inputs = draw_util.add_noise(targets)\n return self.sess.run(self.merged_summary_op, feed_dict={self.input_layer: inputs,\n self.target_layer: targets, self.batch_size: [len(targets)]})\n"
] | [
[
"numpy.square",
"tensorflow.reset_default_graph",
"numpy.exp",
"tensorflow.train.latest_checkpoint"
],
[
"tensorflow.matmul",
"tensorflow.constant",
"tensorflow.InteractiveSession",
"tensorflow.summary.histogram",
"tensorflow.sigmoid",
"tensorflow.placeholder",
"tensorflow.exp",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.name_scope",
"tensorflow.train.AdamOptimizer",
"tensorflow.add",
"tensorflow.train.Saver",
"tensorflow.square",
"tensorflow.summary.scalar",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
lelloman/python-languagedetector | [
"2abfa582b2f2100399e6cf8f92bc65d68ba20dad"
] | [
"to_tensorflow_lite.py"
] | [
"#!/usr/bin/python\nfrom __future__ import print_function\nfrom common import *\nimport tensorflow.contrib.lite as tflite\n\nimport keras\n\na = keras.models.Sequential()\n\nmodel = load_model()\n\nfull_model_file_name = 'full_model.h5'\nmodel.save(full_model_file_name)\nconverter = tflite.TFLiteConverter.from_keras_model_file(full_model_file_name)\ntflite_model = converter.convert()\nopen(\"converted_model.tflite\", \"wb\").write(tflite_model)"
] | [
[
"tensorflow.contrib.lite.TFLiteConverter.from_keras_model_file"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Con-Mi/lambda-packs | [
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab",
"b23a8464abdd88050b83310e1d0e99c54dac28ab"
] | [
"Skimage_numpy/source/scipy/special/__init__.py",
"Sklearn_scipy_numpy/source/sklearn/tree/tree.py",
"Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py",
"Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/rasterio/plot.py",
"Keras_tensorflow_nightly/source2.7/tensorflow/contrib/layers/ops/gen_sparse_feature_cross_op.py",
"Skimage_numpy/source/scipy/optimize/_differentialevolution.py",
"Sklearn_scipy_numpy/source/sklearn/tree/export.py",
"Sklearn_scipy_numpy/source/sklearn/ensemble/gradient_boosting.py",
"Sklearn_scipy_numpy/source/sklearn/datasets/california_housing.py",
"Sklearn_scipy_numpy/source/sklearn/decomposition/setup.py",
"Sklearn_scipy_numpy/source/sklearn/datasets/lfw.py",
"Sklearn_scipy_numpy/source/sklearn/learning_curve.py",
"Sklearn_scipy_numpy/source/scipy/linalg/tests/test_decomp_update.py",
"Keras_tensorflow/source/tensorflow/python/framework/test_ops.py",
"Sklearn_scipy_numpy/source/sklearn/decomposition/tests/test_online_lda.py",
"Keras_tensorflow_nightly/source2.7/keras/engine/topology.py",
"Sklearn_scipy_numpy/source/sklearn/feature_extraction/tests/test_image.py"
] | [
"\"\"\"\n========================================\nSpecial functions (:mod:`scipy.special`)\n========================================\n\n.. module:: scipy.special\n\nNearly all of the functions below are universal functions and follow\nbroadcasting and automatic array-looping rules. Exceptions are noted.\n\nError handling\n==============\n\nErrors are handled by returning nans, or other appropriate values.\nSome of the special function routines will emit warnings when an error\noccurs. By default this is disabled. To enable such messages use\n``errprint(1)``, and to disable such messages use ``errprint(0)``.\n\nExample:\n\n >>> print scipy.special.bdtr(-1,10,0.3)\n >>> scipy.special.errprint(1)\n >>> print scipy.special.bdtr(-1,10,0.3)\n\n.. autosummary::\n :toctree: generated/\n\n errprint\n SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``\n\nAvailable functions\n===================\n\nAiry functions\n--------------\n\n.. autosummary::\n :toctree: generated/\n\n airy -- Airy functions and their derivatives.\n airye -- Exponentially scaled Airy functions\n ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)\n bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)\n itairy --\n \n\nElliptic Functions and Integrals\n--------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n ellipj -- Jacobian elliptic functions\n ellipk -- Complete elliptic integral of the first kind.\n ellipkm1 -- ellipkm1(x) == ellipk(1 - x)\n ellipkinc -- Incomplete elliptic integral of the first kind.\n ellipe -- Complete elliptic integral of the second kind.\n ellipeinc -- Incomplete elliptic integral of the second kind.\n\nBessel Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n jv -- Bessel function of real-valued order and complex argument.\n jn -- Alias for jv\n jve -- Exponentially scaled Bessel function.\n yn -- Bessel function of second kind (integer order).\n yv -- Bessel function of the second kind (real-valued order).\n yve -- Exponentially scaled Bessel function of the second kind.\n kn -- Modified Bessel function of the second kind (integer order).\n kv -- Modified Bessel function of the second kind (real order).\n kve -- Exponentially scaled modified Bessel function of the second kind.\n iv -- Modified Bessel function.\n ive -- Exponentially scaled modified Bessel function.\n hankel1 -- Hankel function of the first kind.\n hankel1e -- Exponentially scaled Hankel function of the first kind.\n hankel2 -- Hankel function of the second kind.\n hankel2e -- Exponentially scaled Hankel function of the second kind.\n\nThe following is not an universal function:\n\n.. autosummary::\n :toctree: generated/\n\n lmbda -- [+]Sequence of lambda functions with arbitrary order v.\n\nZeros of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.\n jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.\n jn_zeros -- [+]Zeros of Jn(x)\n jnp_zeros -- [+]Zeros of Jn'(x)\n yn_zeros -- [+]Zeros of Yn(x)\n ynp_zeros -- [+]Zeros of Yn'(x)\n y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)\n y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)\n y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')\n\nFaster versions of common Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. 
autosummary::\n :toctree: generated/\n\n j0 -- Bessel function of order 0.\n j1 -- Bessel function of order 1.\n y0 -- Bessel function of second kind of order 0.\n y1 -- Bessel function of second kind of order 1.\n i0 -- Modified Bessel function of order 0.\n i0e -- Exponentially scaled modified Bessel function of order 0.\n i1 -- Modified Bessel function of order 1.\n i1e -- Exponentially scaled modified Bessel function of order 1.\n k0 -- Modified Bessel function of the second kind of order 0.\n k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.\n k1 -- Modified Bessel function of the second kind of order 1.\n k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.\n\nIntegrals of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n itj0y0 -- Basic integrals of j0 and y0 from 0 to x.\n it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.\n iti0k0 -- Basic integrals of i0 and k0 from 0 to x.\n it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.\n besselpoly -- Integral of a Bessel function: Jv(2* a* x) * x[+]lambda from x=0 to 1.\n\nDerivatives of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n jvp -- Nth derivative of Jv(v,z)\n yvp -- Nth derivative of Yv(v,z)\n kvp -- Nth derivative of Kv(v,z)\n ivp -- Nth derivative of Iv(v,z)\n h1vp -- Nth derivative of H1v(v,z)\n h2vp -- Nth derivative of H2v(v,z)\n\nSpherical Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n spherical_jn -- Spherical Bessel function of the first kind, jn(z)\n spherical_yn -- Spherical Bessel function of the second kind, yn(z)\n spherical_in -- Modified spherical Bessel function of the first kind, in(z)\n spherical_kn -- Modified spherical Bessel function of the second kind, kn(z)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)\n sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)\n sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)\n sph_in -- [+]Sequence of spherical Bessel functions, in(z)\n sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)\n sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)\n\nRiccati-Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.\n riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.\n\nStruve Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n struve -- Struve function --- Hv(x)\n modstruve -- Modified Struve function --- Lv(x)\n itstruve0 -- Integral of H0(t) from 0 to x\n it2struve0 -- Integral of H0(t)/t from x to Inf.\n itmodstruve0 -- Integral of L0(t) from 0 to x.\n\n\nRaw Statistical Functions\n-------------------------\n\n.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.\n\n.. 
autosummary::\n :toctree: generated/\n\n bdtr -- Sum of terms 0 through k of the binomial pdf.\n bdtrc -- Sum of terms k+1 through n of the binomial pdf.\n bdtri -- Inverse of bdtr\n bdtrik --\n bdtrin --\n btdtr -- Integral from 0 to x of beta pdf.\n btdtri -- Quantiles of beta distribution\n btdtria --\n btdtrib --\n fdtr -- Integral from 0 to x of F pdf.\n fdtrc -- Integral from x to infinity under F pdf.\n fdtri -- Inverse of fdtrc\n fdtridfd -- \n gdtr -- Integral from 0 to x of gamma pdf.\n gdtrc -- Integral from x to infinity under gamma pdf.\n gdtria -- Inverse with respect to `a` of gdtr.\n gdtrib -- Inverse with respect to `b` of gdtr.\n gdtrix -- Inverse with respect to `x` of gdtr.\n nbdtr -- Sum of terms 0 through k of the negative binomial pdf.\n nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.\n nbdtri -- Inverse of nbdtr\n nbdtrik --\n nbdtrin --\n ncfdtr -- CDF of non-central t distribution.\n ncfdtridfd -- Find degrees of freedom (denominator) of noncentral F distribution.\n ncfdtridfn -- Find degrees of freedom (numerator) of noncentral F distribution.\n ncfdtri -- Inverse CDF of noncentral F distribution.\n ncfdtrinc -- Find noncentrality parameter of noncentral F distribution.\n nctdtr -- CDF of noncentral t distribution.\n nctdtridf -- Find degrees of freedom of noncentral t distribution.\n nctdtrit -- Inverse CDF of noncentral t distribution.\n nctdtrinc -- Find noncentrality parameter of noncentral t distribution.\n nrdtrimn -- Find mean of normal distribution from cdf and std.\n nrdtrisd -- Find std of normal distribution from cdf and mean.\n pdtr -- Sum of terms 0 through k of the Poisson pdf.\n pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.\n pdtri -- Inverse of pdtr\n pdtrik --\n stdtr -- Integral from -infinity to t of the Student-t pdf.\n stdtridf --\n stdtrit --\n chdtr -- Integral from 0 to x of the Chi-square pdf.\n chdtrc -- Integral from x to infnity of Chi-square pdf.\n chdtri -- Inverse of chdtrc.\n chdtriv --\n ndtr -- Integral from -infinity to x of standard normal pdf\n log_ndtr -- Logarithm of integral from -infinity to x of standard normal pdf\n ndtri -- Inverse of ndtr (quantiles)\n chndtr --\n chndtridf --\n chndtrinc --\n chndtrix --\n smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)\n smirnovi -- Inverse of smirnov.\n kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.\n kolmogi -- Inverse of kolmogorov\n tklmbda -- Tukey-Lambda CDF\n logit --\n expit --\n boxcox -- Compute the Box-Cox transformation.\n boxcox1p -- Compute the Box-Cox transformation of 1 + x.\n inv_boxcox -- Compute the inverse of the Box-Cox tranformation.\n inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + x.\n\n\nInformation Theory Functions\n----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n entr -- entr(x) = -x*log(x)\n rel_entr -- rel_entr(x, y) = x*log(x/y)\n kl_div -- kl_div(x, y) = x*log(x/y) - x + y\n huber -- Huber loss function.\n pseudo_huber -- Pseudo-Huber loss function.\n\n\nGamma and Related Functions\n---------------------------\n\n.. 
autosummary::\n :toctree: generated/\n\n gamma -- Gamma function.\n gammaln -- Log of the absolute value of the Gamma function.\n loggamma -- Principal branch of the logarithm of the Gamma function.\n gammasgn -- Sign of the gamma function.\n gammainc -- Incomplete gamma integral.\n gammaincinv -- Inverse of gammainc.\n gammaincc -- Complemented incomplete gamma integral.\n gammainccinv -- Inverse of gammaincc.\n beta -- Beta function.\n betaln -- Log of the absolute value of the beta function.\n betainc -- Incomplete beta integral.\n betaincinv -- Inverse of betainc.\n psi -- Logarithmic derivative of the gamma function.\n rgamma -- One divided by the gamma function.\n polygamma -- Nth derivative of psi function.\n multigammaln -- Log of the multivariate gamma.\n digamma -- Digamma function (derivative of the logarithm of gamma).\n poch -- The Pochhammer symbol (rising factorial).\n\n\nError Function and Fresnel Integrals\n------------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n erf -- Error function.\n erfc -- Complemented error function (1- erf(x))\n erfcx -- Scaled complemented error function exp(x**2)*erfc(x)\n erfi -- Imaginary error function, -i erf(i x)\n erfinv -- Inverse of error function\n erfcinv -- Inverse of erfc\n wofz -- Fadeeva function.\n dawsn -- Dawson's integral.\n fresnel -- Fresnel sine and cosine integrals.\n fresnel_zeros -- Complex zeros of both Fresnel integrals\n modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)\n modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n erf_zeros -- [+]Complex zeros of erf(z)\n fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals\n fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals\n\nLegendre Functions\n------------------\n\n.. autosummary::\n :toctree: generated/\n\n lpmv -- Associated Legendre Function of arbitrary non-negative degree v.\n sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n clpmn -- [+]Associated Legendre Function of the first kind for complex arguments.\n lpn -- [+]Legendre Functions (polynomials) of the first kind\n lqn -- [+]Legendre Functions of the second kind.\n lpmn -- [+]Associated Legendre Function of the first kind for real arguments.\n lqmn -- [+]Associated Legendre Function of the second kind.\n\nEllipsoidal Harmonics\n---------------------\n\n.. autosummary::\n :toctree: generated/\n\n ellip_harm -- Ellipsoidal harmonic E\n ellip_harm_2 -- Ellipsoidal harmonic F\n ellip_normal -- Ellipsoidal normalization constant\n\nOrthogonal polynomials\n----------------------\n\nThe following functions evaluate values of orthogonal polynomials:\n\n.. autosummary::\n :toctree: generated/\n\n assoc_laguerre\n eval_legendre\n eval_chebyt\n eval_chebyu\n eval_chebyc\n eval_chebys\n eval_jacobi\n eval_laguerre\n eval_genlaguerre\n eval_hermite\n eval_hermitenorm\n eval_gegenbauer\n eval_sh_legendre\n eval_sh_chebyt\n eval_sh_chebyu\n eval_sh_jacobi\n\nThe functions below, in turn, return the polynomial coefficients in\n:class:`~.orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`.\nThe :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns\nthe roots, weights, and total weights for the appropriate form of Gaussian\nquadrature. 
These are returned in an ``n x 3`` array with roots in the first\ncolumn, weights in the second column, and total weights in the final column.\nNote that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing\narithmetic, and lose information of the original orthogonal polynomial.\n\n.. autosummary::\n :toctree: generated/\n\n legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).\n chebyt -- [+]Chebyshev polynomial T_n(x)\n chebyu -- [+]Chebyshev polynomial U_n(x)\n chebyc -- [+]Chebyshev polynomial C_n(x)\n chebys -- [+]Chebyshev polynomial S_n(x)\n jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)\n laguerre -- [+]Laguerre polynomial, L_n(x)\n genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)\n hermite -- [+]Hermite polynomial H_n(x)\n hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)\n gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)\n sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)\n sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)\n sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)\n sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)\n\n.. warning::\n\n Computing values of high-order polynomials (around ``order > 20``) using\n polynomial coefficients is numerically unstable. To evaluate polynomial\n values, the ``eval_*`` functions should be used instead.\n\nRoots and weights for orthogonal polynomials\n\n.. autosummary::\n :toctree: generated/\n\n c_roots\n cg_roots\n h_roots\n he_roots\n j_roots\n js_roots\n l_roots\n la_roots\n p_roots\n ps_roots\n s_roots\n t_roots\n ts_roots\n u_roots\n us_roots\n\n\nHypergeometric Functions\n------------------------\n\n.. autosummary::\n :toctree: generated/\n\n hyp2f1 -- Gauss hypergeometric function (2F1)\n hyp1f1 -- Confluent hypergeometric function (1F1)\n hyperu -- Confluent hypergeometric function (U)\n hyp0f1 -- Confluent hypergeometric limit function (0F1)\n hyp2f0 -- Hypergeometric function (2F0)\n hyp1f2 -- Hypergeometric function (1F2)\n hyp3f0 -- Hypergeometric function (3F0)\n\n\nParabolic Cylinder Functions\n----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n pbdv -- Parabolic cylinder function Dv(x) and derivative.\n pbvv -- Parabolic cylinder function Vv(x) and derivative.\n pbwa -- Parabolic cylinder function W(a,x) and derivative.\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)\n pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)\n pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z\n\nMathieu and Related Functions\n-----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n mathieu_a -- Characteristic values for even solution (ce_m)\n mathieu_b -- Characteristic values for odd solution (se_m)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n mathieu_even_coef -- [+]sequence of expansion coefficients for even solution\n mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution\n\nThe following return both function and first derivative:\n\n.. 
autosummary::\n :toctree: generated/\n\n mathieu_cem -- Even Mathieu function\n mathieu_sem -- Odd Mathieu function\n mathieu_modcem1 -- Even modified Mathieu function of the first kind\n mathieu_modcem2 -- Even modified Mathieu function of the second kind\n mathieu_modsem1 -- Odd modified Mathieu function of the first kind\n mathieu_modsem2 -- Odd modified Mathieu function of the second kind\n\nSpheroidal Wave Functions\n-------------------------\n\n.. autosummary::\n :toctree: generated/\n\n pro_ang1 -- Prolate spheroidal angular function of the first kind\n pro_rad1 -- Prolate spheroidal radial function of the first kind\n pro_rad2 -- Prolate spheroidal radial function of the second kind\n obl_ang1 -- Oblate spheroidal angular function of the first kind\n obl_rad1 -- Oblate spheroidal radial function of the first kind\n obl_rad2 -- Oblate spheroidal radial function of the second kind\n pro_cv -- Compute characteristic value for prolate functions\n obl_cv -- Compute characteristic value for oblate functions\n pro_cv_seq -- Compute sequence of prolate characteristic values\n obl_cv_seq -- Compute sequence of oblate characteristic values\n\nThe following functions require pre-computed characteristic value:\n\n.. autosummary::\n :toctree: generated/\n\n pro_ang1_cv -- Prolate spheroidal angular function of the first kind\n pro_rad1_cv -- Prolate spheroidal radial function of the first kind\n pro_rad2_cv -- Prolate spheroidal radial function of the second kind\n obl_ang1_cv -- Oblate spheroidal angular function of the first kind\n obl_rad1_cv -- Oblate spheroidal radial function of the first kind\n obl_rad2_cv -- Oblate spheroidal radial function of the second kind\n\nKelvin Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n kelvin -- All Kelvin functions (order 0) and derivatives.\n kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives\n ber -- Kelvin function ber x\n bei -- Kelvin function bei x\n berp -- Derivative of Kelvin function ber x\n beip -- Derivative of Kelvin function bei x\n ker -- Kelvin function ker x\n kei -- Kelvin function kei x\n kerp -- Derivative of Kelvin function ker x\n keip -- Derivative of Kelvin function kei x\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n ber_zeros -- [+]Zeros of Kelvin function ber x\n bei_zeros -- [+]Zeros of Kelvin function bei x\n berp_zeros -- [+]Zeros of derivative of Kelvin function ber x\n beip_zeros -- [+]Zeros of derivative of Kelvin function bei x\n ker_zeros -- [+]Zeros of Kelvin function ker x\n kei_zeros -- [+]Zeros of Kelvin function kei x\n kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x\n keip_zeros -- [+]Zeros of derivative of Kelvin function kei x\n\nCombinatorics\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n comb -- [+]Combinations of N things taken k at a time, \"N choose k\"\n perm -- [+]Permutations of N things taken k at a time, \"k-permutations of N\"\n\nOther Special Functions\n-----------------------\n\n.. autosummary::\n :toctree: generated/\n\n agm -- Arithmetic-Geometric Mean\n bernoulli -- Bernoulli numbers\n binom -- Binomial coefficient.\n diric -- Dirichlet function (periodic sinc)\n euler -- Euler numbers\n expn -- Exponential integral.\n exp1 -- Exponential integral of order 1 (for complex argument)\n expi -- Another exponential integral -- Ei(x)\n factorial -- The factorial function, n! = special.gamma(n+1)\n factorial2 -- Double factorial, (n!)!\n factorialk -- [+](...((n!)!)!...)! 
where there are k '!'\n shichi -- Hyperbolic sine and cosine integrals.\n sici -- Integral of the sinc and \"cosinc\" functions.\n spence -- Spence's function, also known as the dilogarithm.\n lambertw -- Lambert W function\n zeta -- Riemann zeta function of two arguments.\n zetac -- Standard Riemann zeta function minus 1.\n\nConvenience Functions\n---------------------\n\n.. autosummary::\n :toctree: generated/\n\n cbrt -- Cube root.\n exp10 -- 10 raised to the x power.\n exp2 -- 2 raised to the x power.\n radian -- radian angle given degrees, minutes, and seconds.\n cosdg -- cosine of the angle given in degrees.\n sindg -- sine of the angle given in degrees.\n tandg -- tangent of the angle given in degrees.\n cotdg -- cotangent of the angle given in degrees.\n log1p -- log(1+x)\n expm1 -- exp(x)-1\n cosm1 -- cos(x)-1\n round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.\n xlogy -- x*log(y)\n xlog1py -- x*log1p(y)\n exprel -- (exp(x)-1)/x\n sinc -- sin(x)/x\n\n.. [+] in the description indicates a function which is not a universal\n.. function and does not follow broadcasting and automatic\n.. array-looping rules.\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom ._ufuncs import *\n\nfrom .basic import *\nfrom . import specfun\nfrom . import orthogonal\nfrom .orthogonal import *\nfrom .spfun_stats import multigammaln\nfrom ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal\nfrom .lambertw import lambertw\nfrom ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in,\n spherical_kn)\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n\nfrom numpy.dual import register_func\nregister_func('i0',i0)\ndel register_func\n\nfrom numpy.testing import Tester\ntest = Tester().test\n",
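A minimal usage sketch for the scipy.special entry above (illustrative only, not part of the dataset row; it assumes a SciPy version that still exposes the legacy `p_roots` name from the roots-and-weights list, later renamed `roots_legendre`). It contrasts the stable `eval_legendre` path with the coefficient-based `orthopoly1d` evaluation that the docstring warns about, and shows the Gauss-Legendre roots/weights convention:

import numpy as np
from scipy.special import eval_legendre, legendre, p_roots

x = np.linspace(-1.0, 1.0, 5)
n = 25  # high order: coefficient-based evaluation is unstable here

stable = eval_legendre(n, x)   # recurrence-based, numerically stable
poly = legendre(n)             # orthopoly1d built from polynomial coefficients
unstable = poly(x)             # loses accuracy for order > ~20, per the warning above

# Gauss-Legendre quadrature: 3 nodes integrate polynomials up to degree 5 exactly.
nodes, weights = p_roots(3)
integral = float((weights * nodes**4).sum())  # integral of x**4 over [-1, 1] == 2/5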
"\"\"\"\nThis module gathers tree-based methods, including decision, regression and\nrandomized trees. Single and multi-output problems are both handled.\n\"\"\"\n\n# Authors: Gilles Louppe <[email protected]>\n# Peter Prettenhofer <[email protected]>\n# Brian Holt <[email protected]>\n# Noel Dawe <[email protected]>\n# Satrajit Gosh <[email protected]>\n# Joly Arnaud <[email protected]>\n# Fares Hedayati <[email protected]>\n#\n# Licence: BSD 3 clause\n\nfrom __future__ import division\n\n\nimport numbers\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nimport numpy as np\nfrom scipy.sparse import issparse\n\nfrom ..base import BaseEstimator\nfrom ..base import ClassifierMixin\nfrom ..base import RegressorMixin\nfrom ..externals import six\nfrom ..feature_selection.from_model import _LearntSelectorMixin\nfrom ..utils import check_array\nfrom ..utils import check_random_state\nfrom ..utils import compute_sample_weight\nfrom ..utils.validation import NotFittedError\nfrom ..utils.multiclass import check_classification_targets\n\n\nfrom ._criterion import Criterion\nfrom ._splitter import Splitter\nfrom ._tree import DepthFirstTreeBuilder\nfrom ._tree import BestFirstTreeBuilder\nfrom ._tree import Tree\nfrom . import _tree, _splitter, _criterion\n\n__all__ = [\"DecisionTreeClassifier\",\n \"DecisionTreeRegressor\",\n \"ExtraTreeClassifier\",\n \"ExtraTreeRegressor\"]\n\n\n# =============================================================================\n# Types and constants\n# =============================================================================\n\nDTYPE = _tree.DTYPE\nDOUBLE = _tree.DOUBLE\n\nCRITERIA_CLF = {\"gini\": _criterion.Gini, \"entropy\": _criterion.Entropy}\nCRITERIA_REG = {\"mse\": _criterion.MSE, \"friedman_mse\": _criterion.FriedmanMSE}\n\nDENSE_SPLITTERS = {\"best\": _splitter.BestSplitter,\n \"random\": _splitter.RandomSplitter}\n\nSPARSE_SPLITTERS = {\"best\": _splitter.BestSparseSplitter,\n \"random\": _splitter.RandomSparseSplitter}\n\n# =============================================================================\n# Base decision tree\n# =============================================================================\n\n\nclass BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,\n _LearntSelectorMixin)):\n \"\"\"Base class for decision trees.\n\n Warning: This class should not be used directly.\n Use derived classes instead.\n \"\"\"\n\n @abstractmethod\n def __init__(self,\n criterion,\n splitter,\n max_depth,\n min_samples_split,\n min_samples_leaf,\n min_weight_fraction_leaf,\n max_features,\n max_leaf_nodes,\n random_state,\n class_weight=None,\n presort=False):\n self.criterion = criterion\n self.splitter = splitter\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.max_features = max_features\n self.random_state = random_state\n self.max_leaf_nodes = max_leaf_nodes\n self.class_weight = class_weight\n self.presort = presort\n\n self.n_features_ = None\n self.n_outputs_ = None\n self.classes_ = None\n self.n_classes_ = None\n\n self.tree_ = None\n self.max_features_ = None\n\n def fit(self, X, y, sample_weight=None, check_input=True,\n X_idx_sorted=None):\n \"\"\"Build a decision tree from the training set (X, y).\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape = [n_samples, n_features]\n The training input samples. 
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like, shape = [n_samples] or [n_samples, n_outputs]\n The target values (class labels in classification, real numbers in\n regression). In the regression case, use ``dtype=np.float64`` and\n ``order='C'`` for maximum efficiency.\n\n sample_weight : array-like, shape = [n_samples] or None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. In the case of\n classification, splits are also ignored if they would result in any\n single class carrying a negative weight in either child node.\n\n check_input : boolean, (default=True)\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n X_idx_sorted : array-like, shape = [n_samples, n_features], optional\n The indexes of the sorted training input samples. If many tree\n are grown on the same dataset, this allows the ordering to be\n cached between trees. If None, the data will be sorted here.\n Don't use this parameter unless you know what to do.\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n\n random_state = check_random_state(self.random_state)\n if check_input:\n X = check_array(X, dtype=DTYPE, accept_sparse=\"csc\")\n if issparse(X):\n X.sort_indices()\n\n if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:\n raise ValueError(\"No support for np.int64 index based \"\n \"sparse matrices\")\n\n # Determine output settings\n n_samples, self.n_features_ = X.shape\n is_classification = isinstance(self, ClassifierMixin)\n\n y = np.atleast_1d(y)\n expanded_class_weight = None\n\n if y.ndim == 1:\n # reshape is necessary to preserve the data contiguity against vs\n # [:, np.newaxis] that does not.\n y = np.reshape(y, (-1, 1))\n\n self.n_outputs_ = y.shape[1]\n\n if is_classification:\n check_classification_targets(y)\n y = np.copy(y)\n\n self.classes_ = []\n self.n_classes_ = []\n\n if self.class_weight is not None:\n y_original = np.copy(y)\n\n y_store_unique_indices = np.zeros(y.shape, dtype=np.int)\n for k in range(self.n_outputs_):\n classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)\n self.classes_.append(classes_k)\n self.n_classes_.append(classes_k.shape[0])\n y = y_store_unique_indices\n\n if self.class_weight is not None:\n expanded_class_weight = compute_sample_weight(\n self.class_weight, y_original)\n\n else:\n self.classes_ = [None] * self.n_outputs_\n self.n_classes_ = [1] * self.n_outputs_\n\n self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)\n\n if getattr(y, \"dtype\", None) != DOUBLE or not y.flags.contiguous:\n y = np.ascontiguousarray(y, dtype=DOUBLE)\n\n # Check parameters\n max_depth = ((2 ** 31) - 1 if self.max_depth is None\n else self.max_depth)\n max_leaf_nodes = (-1 if self.max_leaf_nodes is None\n else self.max_leaf_nodes)\n\n if isinstance(self.max_features, six.string_types):\n if self.max_features == \"auto\":\n if is_classification:\n max_features = max(1, int(np.sqrt(self.n_features_)))\n else:\n max_features = self.n_features_\n elif self.max_features == \"sqrt\":\n max_features = max(1, int(np.sqrt(self.n_features_)))\n elif self.max_features == \"log2\":\n max_features = max(1, int(np.log2(self.n_features_)))\n else:\n raise ValueError(\n 'Invalid value for max_features. 
Allowed string '\n 'values are \"auto\", \"sqrt\" or \"log2\".')\n elif self.max_features is None:\n max_features = self.n_features_\n elif isinstance(self.max_features, (numbers.Integral, np.integer)):\n max_features = self.max_features\n else: # float\n if self.max_features > 0.0:\n max_features = max(1, int(self.max_features * self.n_features_))\n else:\n max_features = 0\n\n self.max_features_ = max_features\n\n if len(y) != n_samples:\n raise ValueError(\"Number of labels=%d does not match \"\n \"number of samples=%d\" % (len(y), n_samples))\n if self.min_samples_split <= 0:\n raise ValueError(\"min_samples_split must be greater than zero.\")\n if self.min_samples_leaf <= 0:\n raise ValueError(\"min_samples_leaf must be greater than zero.\")\n if not 0 <= self.min_weight_fraction_leaf <= 0.5:\n raise ValueError(\"min_weight_fraction_leaf must in [0, 0.5]\")\n if max_depth <= 0:\n raise ValueError(\"max_depth must be greater than zero. \")\n if not (0 < max_features <= self.n_features_):\n raise ValueError(\"max_features must be in (0, n_features]\")\n if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):\n raise ValueError(\"max_leaf_nodes must be integral number but was \"\n \"%r\" % max_leaf_nodes)\n if -1 < max_leaf_nodes < 2:\n raise ValueError((\"max_leaf_nodes {0} must be either smaller than \"\n \"0 or larger than 1\").format(max_leaf_nodes))\n\n if sample_weight is not None:\n if (getattr(sample_weight, \"dtype\", None) != DOUBLE or\n not sample_weight.flags.contiguous):\n sample_weight = np.ascontiguousarray(\n sample_weight, dtype=DOUBLE)\n if len(sample_weight.shape) > 1:\n raise ValueError(\"Sample weights array has more \"\n \"than one dimension: %d\" %\n len(sample_weight.shape))\n if len(sample_weight) != n_samples:\n raise ValueError(\"Number of weights=%d does not match \"\n \"number of samples=%d\" %\n (len(sample_weight), n_samples))\n\n if expanded_class_weight is not None:\n if sample_weight is not None:\n sample_weight = sample_weight * expanded_class_weight\n else:\n sample_weight = expanded_class_weight\n\n # Set min_weight_leaf from min_weight_fraction_leaf\n if self.min_weight_fraction_leaf != 0. and sample_weight is not None:\n min_weight_leaf = (self.min_weight_fraction_leaf *\n np.sum(sample_weight))\n else:\n min_weight_leaf = 0.\n\n # Set min_samples_split sensibly\n min_samples_split = max(self.min_samples_split,\n 2 * self.min_samples_leaf)\n\n presort = self.presort\n # Allow presort to be 'auto', which means True if the dataset is dense,\n # otherwise it will be False.\n if self.presort == 'auto' and issparse(X):\n presort = False\n elif self.presort == 'auto':\n presort = True\n\n if presort is True and issparse(X):\n raise ValueError(\"Presorting is not supported for sparse matrices.\")\n\n # If multiple trees are built on the same dataset, we only want to\n # presort once. Splitters now can accept presorted indices if desired,\n # but do not handle any presorting themselves. 
Ensemble algorithms which\n # desire presorting must do presorting themselves and pass that matrix\n # into each tree.\n if X_idx_sorted is None and presort:\n X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),\n dtype=np.int32)\n\n if presort and X_idx_sorted.shape != X.shape:\n raise ValueError(\"The shape of X (X.shape = {}) doesn't match \"\n \"the shape of X_idx_sorted (X_idx_sorted\"\n \".shape = {})\".format(X.shape,\n X_idx_sorted.shape))\n\n # Build tree\n criterion = self.criterion\n if not isinstance(criterion, Criterion):\n if is_classification:\n criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,\n self.n_classes_)\n else:\n criterion = CRITERIA_REG[self.criterion](self.n_outputs_)\n\n SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS\n\n splitter = self.splitter\n if not isinstance(self.splitter, Splitter):\n splitter = SPLITTERS[self.splitter](criterion,\n self.max_features_,\n self.min_samples_leaf,\n min_weight_leaf,\n random_state,\n self.presort)\n\n self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)\n\n # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise\n if max_leaf_nodes < 0:\n builder = DepthFirstTreeBuilder(splitter, min_samples_split,\n self.min_samples_leaf,\n min_weight_leaf,\n max_depth)\n else:\n builder = BestFirstTreeBuilder(splitter, min_samples_split,\n self.min_samples_leaf,\n min_weight_leaf,\n max_depth,\n max_leaf_nodes)\n\n builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)\n\n if self.n_outputs_ == 1:\n self.n_classes_ = self.n_classes_[0]\n self.classes_ = self.classes_[0]\n\n return self\n\n def _validate_X_predict(self, X, check_input):\n \"\"\"Validate X whenever one tries to predict, apply, predict_proba\"\"\"\n if self.tree_ is None:\n raise NotFittedError(\"Estimator not fitted, \"\n \"call `fit` before exploiting the model.\")\n\n if check_input:\n X = check_array(X, dtype=DTYPE, accept_sparse=\"csr\")\n if issparse(X) and (X.indices.dtype != np.intc or\n X.indptr.dtype != np.intc):\n raise ValueError(\"No support for np.int64 index based \"\n \"sparse matrices\")\n\n n_features = X.shape[1]\n if self.n_features_ != n_features:\n raise ValueError(\"Number of features of the model must \"\n \" match the input. Model n_features is %s and \"\n \" input n_features is %s \"\n % (self.n_features_, n_features))\n\n return X\n\n def predict(self, X, check_input=True):\n \"\"\"Predict class or regression value for X.\n\n For a classification model, the predicted class for each sample in X is\n returned. For a regression model, the predicted value based on X is\n returned.\n\n Parameters\n ----------\n X : array-like or sparse matrix of shape = [n_samples, n_features]\n The input samples. 
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : boolean, (default=True)\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n y : array of shape = [n_samples] or [n_samples, n_outputs]\n The predicted classes, or the predict values.\n \"\"\"\n\n X = self._validate_X_predict(X, check_input)\n proba = self.tree_.predict(X)\n n_samples = X.shape[0]\n\n # Classification\n if isinstance(self, ClassifierMixin):\n if self.n_outputs_ == 1:\n return self.classes_.take(np.argmax(proba, axis=1), axis=0)\n\n else:\n predictions = np.zeros((n_samples, self.n_outputs_))\n\n for k in range(self.n_outputs_):\n predictions[:, k] = self.classes_[k].take(\n np.argmax(proba[:, k], axis=1),\n axis=0)\n\n return predictions\n\n # Regression\n else:\n if self.n_outputs_ == 1:\n return proba[:, 0]\n\n else:\n return proba[:, :, 0]\n\n def apply(self, X, check_input=True):\n \"\"\"\n Returns the index of the leaf that each sample is predicted as.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n X : array_like or sparse matrix, shape = [n_samples, n_features]\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : boolean, (default=True)\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n X_leaves : array_like, shape = [n_samples,]\n For each datapoint x in X, return the index of the leaf x\n ends up in. Leaves are numbered within\n ``[0; self.tree_.node_count)``, possibly with gaps in the\n numbering.\n \"\"\"\n X = self._validate_X_predict(X, check_input)\n return self.tree_.apply(X)\n\n @property\n def feature_importances_(self):\n \"\"\"Return the feature importances.\n\n The importance of a feature is computed as the (normalized) total\n reduction of the criterion brought by that feature.\n It is also known as the Gini importance.\n\n Returns\n -------\n feature_importances_ : array, shape = [n_features]\n \"\"\"\n if self.tree_ is None:\n raise NotFittedError(\"Estimator not fitted, call `fit` before\"\n \" `feature_importances_`.\")\n\n return self.tree_.compute_feature_importances()\n\n\n# =============================================================================\n# Public estimators\n# =============================================================================\n\nclass DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):\n \"\"\"A decision tree classifier.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : string, optional (default=\"gini\")\n The function to measure the quality of a split. Supported criteria are\n \"gini\" for the Gini impurity and \"entropy\" for the information gain.\n\n splitter : string, optional (default=\"best\")\n The strategy used to choose the split at each node. 
Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_features : int, float, string or None, optional (default=None)\n The number of features to consider when looking for the best split:\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a percentage and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=sqrt(n_features)`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n max_depth : int or None, optional (default=None)\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n Ignored if ``max_leaf_nodes`` is not None.\n\n min_samples_split : int, optional (default=2)\n The minimum number of samples required to split an internal node.\n\n min_samples_leaf : int, optional (default=1)\n The minimum number of samples required to be at a leaf node.\n\n min_weight_fraction_leaf : float, optional (default=0.)\n The minimum weighted fraction of the input samples required to be at a\n leaf node.\n\n max_leaf_nodes : int or None, optional (default=None)\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n If not None then ``max_depth`` will be ignored.\n\n class_weight : dict, list of dicts, \"balanced\" or None, optional (default=None)\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one. For\n multi-output problems, a list of dicts can be provided in the same\n order as the columns of y.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n For multi-output, the weights of each column of y will be multiplied.\n\n Note that these weights will be multiplied with sample_weight (passed\n through the fit method) if sample_weight is specified.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n presort : bool, optional (default=False)\n Whether to presort the data to speed up the finding of best splits in\n fitting. For the default settings of a decision tree on large\n datasets, setting this to true may slow down the training process.\n When using either a smaller dataset or a restricted depth, this may\n speed up the training.\n\n Attributes\n ----------\n classes_ : array of shape = [n_classes] or a list of such arrays\n The classes labels (single output problem),\n or a list of arrays of class labels (multi-output problem).\n\n feature_importances_ : array of shape = [n_features]\n The feature importances. The higher, the more important the\n feature. 
The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance [4]_.\n\n max_features_ : int,\n The inferred value of max_features.\n\n n_classes_ : int or list\n The number of classes (for single output problems),\n or a list containing the number of classes for each\n output (for multi-output problems).\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree object\n The underlying Tree object.\n\n See also\n --------\n DecisionTreeRegressor\n\n References\n ----------\n\n .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning\n\n .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, \"Classification\n and Regression Trees\", Wadsworth, Belmont, CA, 1984.\n\n .. [3] T. Hastie, R. Tibshirani and J. Friedman. \"Elements of Statistical\n Learning\", Springer, 2009.\n\n .. [4] L. Breiman, and A. Cutler, \"Random Forests\",\n http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm\n\n Examples\n --------\n >>> from sklearn.datasets import load_iris\n >>> from sklearn.cross_validation import cross_val_score\n >>> from sklearn.tree import DecisionTreeClassifier\n >>> clf = DecisionTreeClassifier(random_state=0)\n >>> iris = load_iris()\n >>> cross_val_score(clf, iris.data, iris.target, cv=10)\n ... # doctest: +SKIP\n ...\n array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,\n 0.93..., 0.93..., 1. , 0.93..., 1. ])\n \"\"\"\n def __init__(self,\n criterion=\"gini\",\n splitter=\"best\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=None,\n random_state=None,\n max_leaf_nodes=None,\n class_weight=None,\n presort=False):\n super(DecisionTreeClassifier, self).__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n class_weight=class_weight,\n random_state=random_state,\n presort=presort)\n\n def predict_proba(self, X, check_input=True):\n \"\"\"Predict class probabilities of the input samples X.\n\n The predicted class probability is the fraction of samples of the same\n class in a leaf.\n\n check_input : boolean, (default=True)\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Parameters\n ----------\n X : array-like or sparse matrix of shape = [n_samples, n_features]\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n p : array of shape = [n_samples, n_classes], or a list of n_outputs\n such arrays if n_outputs > 1.\n The class probabilities of the input samples. 
The order of the\n classes corresponds to that in the attribute `classes_`.\n \"\"\"\n X = self._validate_X_predict(X, check_input)\n proba = self.tree_.predict(X)\n\n if self.n_outputs_ == 1:\n proba = proba[:, :self.n_classes_]\n normalizer = proba.sum(axis=1)[:, np.newaxis]\n normalizer[normalizer == 0.0] = 1.0\n proba /= normalizer\n\n return proba\n\n else:\n all_proba = []\n\n for k in range(self.n_outputs_):\n proba_k = proba[:, k, :self.n_classes_[k]]\n normalizer = proba_k.sum(axis=1)[:, np.newaxis]\n normalizer[normalizer == 0.0] = 1.0\n proba_k /= normalizer\n all_proba.append(proba_k)\n\n return all_proba\n\n def predict_log_proba(self, X):\n \"\"\"Predict class log-probabilities of the input samples X.\n\n Parameters\n ----------\n X : array-like or sparse matrix of shape = [n_samples, n_features]\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n p : array of shape = [n_samples, n_classes], or a list of n_outputs\n such arrays if n_outputs > 1.\n The class log-probabilities of the input samples. The order of the\n classes corresponds to that in the attribute `classes_`.\n \"\"\"\n proba = self.predict_proba(X)\n\n if self.n_outputs_ == 1:\n return np.log(proba)\n\n else:\n for k in range(self.n_outputs_):\n proba[k] = np.log(proba[k])\n\n return proba\n\n\nclass DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):\n \"\"\"A decision tree regressor.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : string, optional (default=\"mse\")\n The function to measure the quality of a split. The only supported\n criterion is \"mse\" for the mean squared error, which is equal to\n variance reduction as feature selection criterion.\n\n splitter : string, optional (default=\"best\")\n The strategy used to choose the split at each node. Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_features : int, float, string or None, optional (default=None)\n The number of features to consider when looking for the best split:\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a percentage and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n max_depth : int or None, optional (default=None)\n The maximum depth of the tree. 
If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n Ignored if ``max_leaf_nodes`` is not None.\n\n min_samples_split : int, optional (default=2)\n The minimum number of samples required to split an internal node.\n\n min_samples_leaf : int, optional (default=1)\n The minimum number of samples required to be at a leaf node.\n\n min_weight_fraction_leaf : float, optional (default=0.)\n The minimum weighted fraction of the input samples required to be at a\n leaf node.\n\n max_leaf_nodes : int or None, optional (default=None)\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n If not None then ``max_depth`` will be ignored.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n presort : bool, optional (default=False)\n Whether to presort the data to speed up the finding of best splits in\n fitting. For the default settings of a decision tree on large\n datasets, setting this to true may slow down the training process.\n When using either a smaller dataset or a restricted depth, this may\n speed up the training.\n\n Attributes\n ----------\n feature_importances_ : array of shape = [n_features]\n The feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the\n (normalized) total reduction of the criterion brought\n by that feature. It is also known as the Gini importance [4]_.\n\n max_features_ : int,\n The inferred value of max_features.\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree object\n The underlying Tree object.\n\n See also\n --------\n DecisionTreeClassifier\n\n References\n ----------\n\n .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning\n\n .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, \"Classification\n and Regression Trees\", Wadsworth, Belmont, CA, 1984.\n\n .. [3] T. Hastie, R. Tibshirani and J. Friedman. \"Elements of Statistical\n Learning\", Springer, 2009.\n\n .. [4] L. Breiman, and A. Cutler, \"Random Forests\",\n http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm\n\n Examples\n --------\n >>> from sklearn.datasets import load_boston\n >>> from sklearn.cross_validation import cross_val_score\n >>> from sklearn.tree import DecisionTreeRegressor\n >>> boston = load_boston()\n >>> regressor = DecisionTreeRegressor(random_state=0)\n >>> cross_val_score(regressor, boston.data, boston.target, cv=10)\n ... 
# doctest: +SKIP\n ...\n array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,\n 0.07..., 0.29..., 0.33..., -1.42..., -1.77...])\n \"\"\"\n def __init__(self,\n criterion=\"mse\",\n splitter=\"best\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=None,\n random_state=None,\n max_leaf_nodes=None,\n presort=False):\n super(DecisionTreeRegressor, self).__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n random_state=random_state,\n presort=presort)\n\n\nclass ExtraTreeClassifier(DecisionTreeClassifier):\n \"\"\"An extremely randomized tree classifier.\n\n Extra-trees differ from classic decision trees in the way they are built.\n When looking for the best split to separate the samples of a node into two\n groups, random splits are drawn for each of the `max_features` randomly\n selected features and the best split among those is chosen. When\n `max_features` is set to 1, this amounts to building a totally random\n decision tree.\n\n Warning: Extra-trees should only be used within ensemble methods.\n\n Read more in the :ref:`User Guide <tree>`.\n\n See also\n --------\n ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor\n\n References\n ----------\n\n .. [1] P. Geurts, D. Ernst, and L. Wehenkel, \"Extremely randomized trees\",\n Machine Learning, 63(1), 3-42, 2006.\n \"\"\"\n def __init__(self,\n criterion=\"gini\",\n splitter=\"random\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n random_state=None,\n max_leaf_nodes=None,\n class_weight=None):\n super(ExtraTreeClassifier, self).__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n class_weight=class_weight,\n random_state=random_state)\n\n\nclass ExtraTreeRegressor(DecisionTreeRegressor):\n \"\"\"An extremely randomized tree regressor.\n\n Extra-trees differ from classic decision trees in the way they are built.\n When looking for the best split to separate the samples of a node into two\n groups, random splits are drawn for each of the `max_features` randomly\n selected features and the best split among those is chosen. When\n `max_features` is set to 1, this amounts to building a totally random\n decision tree.\n\n Warning: Extra-trees should only be used within ensemble methods.\n\n Read more in the :ref:`User Guide <tree>`.\n\n See also\n --------\n ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor\n\n References\n ----------\n\n .. [1] P. Geurts, D. Ernst, and L. 
Wehenkel, \"Extremely randomized trees\",\n Machine Learning, 63(1), 3-42, 2006.\n \"\"\"\n def __init__(self,\n criterion=\"mse\",\n splitter=\"random\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n random_state=None,\n max_leaf_nodes=None):\n super(ExtraTreeRegressor, self).__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n random_state=random_state)\n",
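A minimal sketch exercising the sklearn.tree API documented in the entry above, on hypothetical toy data (note the docstring examples import `sklearn.cross_validation`, which later releases renamed to `model_selection`):

import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])  # toy single-feature training set
y = np.array([0, 0, 1, 1])

clf = DecisionTreeClassifier(criterion="gini", max_depth=2, random_state=0)
clf.fit(X, y)

print(clf.predict([[1.5]]))        # predicted class label
print(clf.predict_proba([[1.5]]))  # fraction of same-class samples in the leaf
print(clf.apply([[1.5]]))          # index of the leaf the sample lands in
print(clf.feature_importances_)    # normalized total impurity reduction per feature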
"\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\nOriginal C++ source file: boosted_trees_ops.cc\n\"\"\"\n\nimport collections as _collections\nimport six as _six\n\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import errors as _errors\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n_boosted_trees_calculate_best_gains_per_feature_outputs = [\"node_ids_list\",\n \"gains_list\",\n \"thresholds_list\",\n \"left_node_contribs_list\",\n \"right_node_contribs_list\"]\n_BoostedTreesCalculateBestGainsPerFeatureOutput = _collections.namedtuple(\n \"BoostedTreesCalculateBestGainsPerFeature\",\n _boosted_trees_calculate_best_gains_per_feature_outputs)\n\n\ndef boosted_trees_calculate_best_gains_per_feature(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits, name=None):\n r\"\"\"Calculates gains for each feature and returns the best possible split information for the feature.\n\n The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\n It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\n In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\n The length of output lists are all of the same length, `num_features`.\n The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature.\n\n Args:\n node_id_range: A `Tensor` of type `int32`.\n A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).\n stats_summary_list: A list of at least 1 `Tensor` objects with type `float32`.\n A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. 
The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.\n l1: A `Tensor` of type `float32`.\n l1 regularization factor on leaf weights, per instance based.\n l2: A `Tensor` of type `float32`.\n l2 regularization factor on leaf weights, per instance based.\n tree_complexity: A `Tensor` of type `float32`.\n adjustment to the gain, per leaf based.\n min_node_weight: A `Tensor` of type `float32`.\n minimum avg of hessians required in a node before the node is considered for splitting.\n max_splits: An `int` that is `>= 1`.\n the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list).\n\n node_ids_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.\n gains_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.\n thresholds_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.\n left_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.\n right_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n if not isinstance(stats_summary_list, (list, tuple)):\n raise TypeError(\n \"Expected list for 'stats_summary_list' argument to \"\n \"'boosted_trees_calculate_best_gains_per_feature' Op, not %r.\" % stats_summary_list)\n _attr_num_features = len(stats_summary_list)\n max_splits = _execute.make_int(max_splits, \"max_splits\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesCalculateBestGainsPerFeature\",\n node_id_range=node_id_range, stats_summary_list=stats_summary_list,\n l1=l1, l2=l2, tree_complexity=tree_complexity,\n min_node_weight=min_node_weight, max_splits=max_splits, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"max_splits\", _op.get_attr(\"max_splits\"), \"num_features\",\n _op.get_attr(\"num_features\"))\n _execute.record_gradient(\n \"BoostedTreesCalculateBestGainsPerFeature\", _inputs_flat, _attrs, _result, name)\n _result = [_result[:_attr_num_features]] + _result[_attr_num_features:]\n _result = _result[:1] + [_result[1:1 + _attr_num_features]] + _result[1 + _attr_num_features:]\n _result = _result[:2] + [_result[2:2 + _attr_num_features]] + _result[2 + _attr_num_features:]\n _result = _result[:3] + [_result[3:3 + _attr_num_features]] + _result[3 + _attr_num_features:]\n _result = _result[:4] + [_result[4:]]\n _result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesCalculateBestGainsPerFeature\", name,\n _ctx._post_execution_callbacks, node_id_range, stats_summary_list, l1,\n l2, tree_complexity, min_node_weight, \"max_splits\", max_splits)\n _result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)\n return _result\n except _core._FallbackException:\n return boosted_trees_calculate_best_gains_per_feature_eager_fallback(\n node_id_range, stats_summary_list, l1, l2, 
tree_complexity,\n min_node_weight, max_splits=max_splits, name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_calculate_best_gains_per_feature_eager_fallback(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_calculate_best_gains_per_feature\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(stats_summary_list, (list, tuple)):\n raise TypeError(\n \"Expected list for 'stats_summary_list' argument to \"\n \"'boosted_trees_calculate_best_gains_per_feature' Op, not %r.\" % stats_summary_list)\n _attr_num_features = len(stats_summary_list)\n max_splits = _execute.make_int(max_splits, \"max_splits\")\n node_id_range = _ops.convert_to_tensor(node_id_range, _dtypes.int32)\n stats_summary_list = _ops.convert_n_to_tensor(stats_summary_list, _dtypes.float32)\n l1 = _ops.convert_to_tensor(l1, _dtypes.float32)\n l2 = _ops.convert_to_tensor(l2, _dtypes.float32)\n tree_complexity = _ops.convert_to_tensor(tree_complexity, _dtypes.float32)\n min_node_weight = _ops.convert_to_tensor(min_node_weight, _dtypes.float32)\n _inputs_flat = [node_id_range] + list(stats_summary_list) + [l1, l2, tree_complexity, min_node_weight]\n _attrs = (\"max_splits\", max_splits, \"num_features\", _attr_num_features)\n _result = _execute.execute(b\"BoostedTreesCalculateBestGainsPerFeature\",\n _attr_num_features + _attr_num_features +\n _attr_num_features + _attr_num_features +\n _attr_num_features, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"BoostedTreesCalculateBestGainsPerFeature\", _inputs_flat, _attrs, _result, name)\n _result = [_result[:_attr_num_features]] + _result[_attr_num_features:]\n _result = _result[:1] + [_result[1:1 + _attr_num_features]] + _result[1 + _attr_num_features:]\n _result = _result[:2] + [_result[2:2 + _attr_num_features]] + _result[2 + _attr_num_features:]\n _result = _result[:3] + [_result[3:3 + _attr_num_features]] + _result[3 + _attr_num_features:]\n _result = _result[:4] + [_result[4:]]\n _result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)\n return _result\n\n\ndef boosted_trees_create_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None):\n r\"\"\"Creates a tree ensemble model and returns a handle to it.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble resource to be created.\n stamp_token: A `Tensor` of type `int64`.\n Token to use as the initial value of the resource stamp.\n tree_ensemble_serialized: A `Tensor` of type `string`.\n Serialized proto of the tree ensemble.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesCreateEnsemble\",\n tree_ensemble_handle=tree_ensemble_handle, stamp_token=stamp_token,\n tree_ensemble_serialized=tree_ensemble_serialized, name=name)\n return _op\n _result = None\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesCreateEnsemble\", name, 
_ctx._post_execution_callbacks,\n tree_ensemble_handle, stamp_token, tree_ensemble_serialized)\n return _result\n except _core._FallbackException:\n return boosted_trees_create_ensemble_eager_fallback(\n tree_ensemble_handle, stamp_token, tree_ensemble_serialized,\n name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_create_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_create_ensemble\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)\n stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)\n tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string)\n _inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]\n _attrs = None\n _result = _execute.execute(b\"BoostedTreesCreateEnsemble\", 0,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _result = None\n return _result\n\n\ndef boosted_trees_deserialize_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None):\n r\"\"\"Deserializes a serialized tree ensemble config and replaces current tree\n\n ensemble.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble.\n stamp_token: A `Tensor` of type `int64`.\n Token to use as the new value of the resource stamp.\n tree_ensemble_serialized: A `Tensor` of type `string`.\n Serialized proto of the ensemble.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesDeserializeEnsemble\",\n tree_ensemble_handle=tree_ensemble_handle, stamp_token=stamp_token,\n tree_ensemble_serialized=tree_ensemble_serialized, name=name)\n return _op\n _result = None\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesDeserializeEnsemble\", name,\n _ctx._post_execution_callbacks, tree_ensemble_handle, stamp_token,\n tree_ensemble_serialized)\n return _result\n except _core._FallbackException:\n return boosted_trees_deserialize_ensemble_eager_fallback(\n tree_ensemble_handle, stamp_token, tree_ensemble_serialized,\n name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_deserialize_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_deserialize_ensemble\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)\n stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)\n tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string)\n _inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]\n 
_attrs = None\n _result = _execute.execute(b\"BoostedTreesDeserializeEnsemble\", 0,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _result = None\n return _result\n\n\ndef boosted_trees_ensemble_resource_handle_op(container=\"\", shared_name=\"\", name=None):\n r\"\"\"Creates a handle to a BoostedTreesEnsembleResource\n\n Args:\n container: An optional `string`. Defaults to `\"\"`.\n shared_name: An optional `string`. Defaults to `\"\"`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `resource`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n if container is None:\n container = \"\"\n container = _execute.make_str(container, \"container\")\n if shared_name is None:\n shared_name = \"\"\n shared_name = _execute.make_str(shared_name, \"shared_name\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesEnsembleResourceHandleOp\", container=container,\n shared_name=shared_name, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"container\", _op.get_attr(\"container\"), \"shared_name\",\n _op.get_attr(\"shared_name\"))\n _execute.record_gradient(\n \"BoostedTreesEnsembleResourceHandleOp\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesEnsembleResourceHandleOp\", name,\n _ctx._post_execution_callbacks, \"container\", container, \"shared_name\",\n shared_name)\n return _result\n except _core._FallbackException:\n return boosted_trees_ensemble_resource_handle_op_eager_fallback(\n container=container, shared_name=shared_name, name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_ensemble_resource_handle_op_eager_fallback(container=\"\", shared_name=\"\", name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_ensemble_resource_handle_op\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if container is None:\n container = \"\"\n container = _execute.make_str(container, \"container\")\n if shared_name is None:\n shared_name = \"\"\n shared_name = _execute.make_str(shared_name, \"shared_name\")\n _inputs_flat = []\n _attrs = (\"container\", container, \"shared_name\", shared_name)\n _result = _execute.execute(b\"BoostedTreesEnsembleResourceHandleOp\", 1,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"BoostedTreesEnsembleResourceHandleOp\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\n_boosted_trees_get_ensemble_states_outputs = [\"stamp_token\", \"num_trees\",\n \"num_finalized_trees\",\n \"num_attempted_layers\",\n \"last_layer_nodes_range\"]\n_BoostedTreesGetEnsembleStatesOutput = _collections.namedtuple(\n \"BoostedTreesGetEnsembleStates\",\n _boosted_trees_get_ensemble_states_outputs)\n\n\ndef boosted_trees_get_ensemble_states(tree_ensemble_handle, name=None):\n r\"\"\"Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (stamp_token, num_trees, 
num_finalized_trees, num_attempted_layers, last_layer_nodes_range).\n\n stamp_token: A `Tensor` of type `int64`.\n num_trees: A `Tensor` of type `int32`.\n num_finalized_trees: A `Tensor` of type `int32`.\n num_attempted_layers: A `Tensor` of type `int32`.\n last_layer_nodes_range: A `Tensor` of type `int32`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesGetEnsembleStates\",\n tree_ensemble_handle=tree_ensemble_handle, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = None\n _execute.record_gradient(\n \"BoostedTreesGetEnsembleStates\", _inputs_flat, _attrs, _result, name)\n _result = _BoostedTreesGetEnsembleStatesOutput._make(_result)\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesGetEnsembleStates\", name, _ctx._post_execution_callbacks,\n tree_ensemble_handle)\n _result = _BoostedTreesGetEnsembleStatesOutput._make(_result)\n return _result\n except _core._FallbackException:\n return boosted_trees_get_ensemble_states_eager_fallback(\n tree_ensemble_handle, name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_get_ensemble_states_eager_fallback(tree_ensemble_handle, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_get_ensemble_states\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)\n _inputs_flat = [tree_ensemble_handle]\n _attrs = None\n _result = _execute.execute(b\"BoostedTreesGetEnsembleStates\", 5,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"BoostedTreesGetEnsembleStates\", _inputs_flat, _attrs, _result, name)\n _result = _BoostedTreesGetEnsembleStatesOutput._make(_result)\n return _result\n\n\ndef boosted_trees_make_stats_summary(node_ids, gradients, hessians, bucketized_features_list, max_splits, num_buckets, name=None):\n r\"\"\"Makes the summary of accumulated stats for the batch.\n\n The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.\n\n Args:\n node_ids: A `Tensor` of type `int32`.\n int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.\n gradients: A `Tensor` of type `float32`.\n float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.\n hessians: A `Tensor` of type `float32`.\n float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.\n bucketized_features_list: A list of at least 1 `Tensor` objects with type `int32`.\n int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).\n max_splits: An `int` that is `>= 1`.\n int; the maximum number of splits possible in the whole tree.\n num_buckets: An `int` that is `>= 1`.\n int; equals to the maximum possible value of bucketized feature.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n if not isinstance(bucketized_features_list, (list, tuple)):\n raise TypeError(\n \"Expected list for 'bucketized_features_list' 
argument to \"\n \"'boosted_trees_make_stats_summary' Op, not %r.\" % bucketized_features_list)\n _attr_num_features = len(bucketized_features_list)\n max_splits = _execute.make_int(max_splits, \"max_splits\")\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesMakeStatsSummary\", node_ids=node_ids,\n gradients=gradients, hessians=hessians,\n bucketized_features_list=bucketized_features_list,\n max_splits=max_splits, num_buckets=num_buckets, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"max_splits\", _op.get_attr(\"max_splits\"), \"num_buckets\",\n _op.get_attr(\"num_buckets\"), \"num_features\",\n _op.get_attr(\"num_features\"))\n _execute.record_gradient(\n \"BoostedTreesMakeStatsSummary\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesMakeStatsSummary\", name, _ctx._post_execution_callbacks,\n node_ids, gradients, hessians, bucketized_features_list, \"max_splits\",\n max_splits, \"num_buckets\", num_buckets)\n return _result\n except _core._FallbackException:\n return boosted_trees_make_stats_summary_eager_fallback(\n node_ids, gradients, hessians, bucketized_features_list,\n max_splits=max_splits, num_buckets=num_buckets, name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_make_stats_summary_eager_fallback(node_ids, gradients, hessians, bucketized_features_list, max_splits, num_buckets, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_make_stats_summary\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(bucketized_features_list, (list, tuple)):\n raise TypeError(\n \"Expected list for 'bucketized_features_list' argument to \"\n \"'boosted_trees_make_stats_summary' Op, not %r.\" % bucketized_features_list)\n _attr_num_features = len(bucketized_features_list)\n max_splits = _execute.make_int(max_splits, \"max_splits\")\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\n node_ids = _ops.convert_to_tensor(node_ids, _dtypes.int32)\n gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)\n hessians = _ops.convert_to_tensor(hessians, _dtypes.float32)\n bucketized_features_list = _ops.convert_n_to_tensor(bucketized_features_list, _dtypes.int32)\n _inputs_flat = [node_ids, gradients, hessians] + list(bucketized_features_list)\n _attrs = (\"max_splits\", max_splits, \"num_buckets\", num_buckets,\n \"num_features\", _attr_num_features)\n _result = _execute.execute(b\"BoostedTreesMakeStatsSummary\", 1,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"BoostedTreesMakeStatsSummary\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef boosted_trees_predict(tree_ensemble_handle, bucketized_features, logits_dimension, name=None):\n r\"\"\"Runs multiple additive regression ensemble predictors on input instances and\n\n computes the logits. 
It is designed to be used during prediction.\n It traverses all the trees and calculates the final score for each instance.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n bucketized_features: A list of at least 1 `Tensor` objects with type `int32`.\n A list of rank 1 Tensors containing bucket id for each\n feature.\n logits_dimension: An `int`.\n scalar, dimension of the logits, to be used for partial logits\n shape.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n if not isinstance(bucketized_features, (list, tuple)):\n raise TypeError(\n \"Expected list for 'bucketized_features' argument to \"\n \"'boosted_trees_predict' Op, not %r.\" % bucketized_features)\n _attr_num_bucketized_features = len(bucketized_features)\n logits_dimension = _execute.make_int(logits_dimension, \"logits_dimension\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesPredict\", tree_ensemble_handle=tree_ensemble_handle,\n bucketized_features=bucketized_features,\n logits_dimension=logits_dimension, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"num_bucketized_features\",\n _op.get_attr(\"num_bucketized_features\"), \"logits_dimension\",\n _op.get_attr(\"logits_dimension\"))\n _execute.record_gradient(\n \"BoostedTreesPredict\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesPredict\", name, _ctx._post_execution_callbacks,\n tree_ensemble_handle, bucketized_features, \"logits_dimension\",\n logits_dimension)\n return _result\n except _core._FallbackException:\n return boosted_trees_predict_eager_fallback(\n tree_ensemble_handle, bucketized_features,\n logits_dimension=logits_dimension, name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_predict_eager_fallback(tree_ensemble_handle, bucketized_features, logits_dimension, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_predict\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(bucketized_features, (list, tuple)):\n raise TypeError(\n \"Expected list for 'bucketized_features' argument to \"\n \"'boosted_trees_predict' Op, not %r.\" % bucketized_features)\n _attr_num_bucketized_features = len(bucketized_features)\n logits_dimension = _execute.make_int(logits_dimension, \"logits_dimension\")\n tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)\n bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32)\n _inputs_flat = [tree_ensemble_handle] + list(bucketized_features)\n _attrs = (\"num_bucketized_features\", _attr_num_bucketized_features,\n \"logits_dimension\", logits_dimension)\n _result = _execute.execute(b\"BoostedTreesPredict\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"BoostedTreesPredict\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\n_boosted_trees_serialize_ensemble_outputs = [\"stamp_token\",\n \"tree_ensemble_serialized\"]\n_BoostedTreesSerializeEnsembleOutput = 
_collections.namedtuple(\n \"BoostedTreesSerializeEnsemble\",\n _boosted_trees_serialize_ensemble_outputs)\n\n\ndef boosted_trees_serialize_ensemble(tree_ensemble_handle, name=None):\n r\"\"\"Serializes the tree ensemble to a proto.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (stamp_token, tree_ensemble_serialized).\n\n stamp_token: A `Tensor` of type `int64`.\n tree_ensemble_serialized: A `Tensor` of type `string`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesSerializeEnsemble\",\n tree_ensemble_handle=tree_ensemble_handle, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = None\n _execute.record_gradient(\n \"BoostedTreesSerializeEnsemble\", _inputs_flat, _attrs, _result, name)\n _result = _BoostedTreesSerializeEnsembleOutput._make(_result)\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesSerializeEnsemble\", name, _ctx._post_execution_callbacks,\n tree_ensemble_handle)\n _result = _BoostedTreesSerializeEnsembleOutput._make(_result)\n return _result\n except _core._FallbackException:\n return boosted_trees_serialize_ensemble_eager_fallback(\n tree_ensemble_handle, name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_serialize_ensemble_eager_fallback(tree_ensemble_handle, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_serialize_ensemble\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)\n _inputs_flat = [tree_ensemble_handle]\n _attrs = None\n _result = _execute.execute(b\"BoostedTreesSerializeEnsemble\", 2,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"BoostedTreesSerializeEnsemble\", _inputs_flat, _attrs, _result, name)\n _result = _BoostedTreesSerializeEnsembleOutput._make(_result)\n return _result\n\n\n_boosted_trees_training_predict_outputs = [\"partial_logits\", \"tree_ids\",\n \"node_ids\"]\n_BoostedTreesTrainingPredictOutput = _collections.namedtuple(\n \"BoostedTreesTrainingPredict\", _boosted_trees_training_predict_outputs)\n\n\ndef boosted_trees_training_predict(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features, logits_dimension, name=None):\n r\"\"\"Runs multiple additive regression ensemble predictors on input instances and\n\n computes the update to cached logits. 
It is designed to be used during training.\n It traverses the trees starting from cached tree id and cached node id and\n calculates the updates to be pushed to the cache.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n cached_tree_ids: A `Tensor` of type `int32`.\n Rank 1 Tensor containing cached tree ids which is the starting\n tree of prediction.\n cached_node_ids: A `Tensor` of type `int32`.\n Rank 1 Tensor containing cached node id which is the starting\n node of prediction.\n bucketized_features: A list of at least 1 `Tensor` objects with type `int32`.\n A list of rank 1 Tensors containing bucket id for each\n feature.\n logits_dimension: An `int`.\n scalar, dimension of the logits, to be used for partial logits\n shape.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (partial_logits, tree_ids, node_ids).\n\n partial_logits: A `Tensor` of type `float32`.\n tree_ids: A `Tensor` of type `int32`.\n node_ids: A `Tensor` of type `int32`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n if not isinstance(bucketized_features, (list, tuple)):\n raise TypeError(\n \"Expected list for 'bucketized_features' argument to \"\n \"'boosted_trees_training_predict' Op, not %r.\" % bucketized_features)\n _attr_num_bucketized_features = len(bucketized_features)\n logits_dimension = _execute.make_int(logits_dimension, \"logits_dimension\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesTrainingPredict\",\n tree_ensemble_handle=tree_ensemble_handle,\n cached_tree_ids=cached_tree_ids, cached_node_ids=cached_node_ids,\n bucketized_features=bucketized_features,\n logits_dimension=logits_dimension, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"num_bucketized_features\",\n _op.get_attr(\"num_bucketized_features\"), \"logits_dimension\",\n _op.get_attr(\"logits_dimension\"))\n _execute.record_gradient(\n \"BoostedTreesTrainingPredict\", _inputs_flat, _attrs, _result, name)\n _result = _BoostedTreesTrainingPredictOutput._make(_result)\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesTrainingPredict\", name, _ctx._post_execution_callbacks,\n tree_ensemble_handle, cached_tree_ids, cached_node_ids,\n bucketized_features, \"logits_dimension\", logits_dimension)\n _result = _BoostedTreesTrainingPredictOutput._make(_result)\n return _result\n except _core._FallbackException:\n return boosted_trees_training_predict_eager_fallback(\n tree_ensemble_handle, cached_tree_ids, cached_node_ids,\n bucketized_features, logits_dimension=logits_dimension, name=name,\n ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_training_predict_eager_fallback(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features, logits_dimension, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_training_predict\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(bucketized_features, (list, tuple)):\n raise TypeError(\n \"Expected list for 'bucketized_features' argument to \"\n \"'boosted_trees_training_predict' Op, not %r.\" % bucketized_features)\n _attr_num_bucketized_features = 
len(bucketized_features)\n logits_dimension = _execute.make_int(logits_dimension, \"logits_dimension\")\n tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)\n cached_tree_ids = _ops.convert_to_tensor(cached_tree_ids, _dtypes.int32)\n cached_node_ids = _ops.convert_to_tensor(cached_node_ids, _dtypes.int32)\n bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32)\n _inputs_flat = [tree_ensemble_handle, cached_tree_ids, cached_node_ids] + list(bucketized_features)\n _attrs = (\"num_bucketized_features\", _attr_num_bucketized_features,\n \"logits_dimension\", logits_dimension)\n _result = _execute.execute(b\"BoostedTreesTrainingPredict\", 3,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"BoostedTreesTrainingPredict\", _inputs_flat, _attrs, _result, name)\n _result = _BoostedTreesTrainingPredictOutput._make(_result)\n return _result\n\n\ndef boosted_trees_update_ensemble(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, pruning_mode, name=None):\n r\"\"\"Updates the tree ensemble by either adding a layer to the last tree being grown\n\n or by starting a new tree.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the ensemble variable.\n feature_ids: A `Tensor` of type `int32`.\n Rank 1 tensor with ids for each feature. This is the real id of\n the feature that will be used in the split.\n node_ids: A list of `Tensor` objects with type `int32`.\n List of rank 1 tensors representing the nodes for which this feature\n has a split.\n gains: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.\n List of rank 1 tensors representing the gains for each of the feature's\n split.\n thresholds: A list with the same length as `node_ids` of `Tensor` objects with type `int32`.\n List of rank 1 tensors representing the thesholds for each of the\n feature's split.\n left_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.\n List of rank 2 tensors with left leaf contribs for each of\n the feature's splits. Will be added to the previous node values to constitute\n the values of the left nodes.\n right_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.\n List of rank 2 tensors with right leaf contribs for each\n of the feature's splits. Will be added to the previous node values to constitute\n the values of the right nodes.\n max_depth: A `Tensor` of type `int32`. 
Max depth of the tree to build.\n learning_rate: A `Tensor` of type `float32`.\n shrinkage const for each new tree.\n pruning_mode: An `int` that is `>= 0`.\n 0-No pruning, 1-Pre-pruning, 2-Post-pruning.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n if not isinstance(node_ids, (list, tuple)):\n raise TypeError(\n \"Expected list for 'node_ids' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % node_ids)\n _attr_num_features = len(node_ids)\n if not isinstance(gains, (list, tuple)):\n raise TypeError(\n \"Expected list for 'gains' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % gains)\n if len(gains) != _attr_num_features:\n raise ValueError(\n \"List argument 'gains' to 'boosted_trees_update_ensemble' Op with length %d \"\n \"must match length %d of argument 'node_ids'.\" %\n (len(gains), _attr_num_features))\n if not isinstance(thresholds, (list, tuple)):\n raise TypeError(\n \"Expected list for 'thresholds' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % thresholds)\n if len(thresholds) != _attr_num_features:\n raise ValueError(\n \"List argument 'thresholds' to 'boosted_trees_update_ensemble' Op with length %d \"\n \"must match length %d of argument 'node_ids'.\" %\n (len(thresholds), _attr_num_features))\n if not isinstance(left_node_contribs, (list, tuple)):\n raise TypeError(\n \"Expected list for 'left_node_contribs' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % left_node_contribs)\n if len(left_node_contribs) != _attr_num_features:\n raise ValueError(\n \"List argument 'left_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d \"\n \"must match length %d of argument 'node_ids'.\" %\n (len(left_node_contribs), _attr_num_features))\n if not isinstance(right_node_contribs, (list, tuple)):\n raise TypeError(\n \"Expected list for 'right_node_contribs' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % right_node_contribs)\n if len(right_node_contribs) != _attr_num_features:\n raise ValueError(\n \"List argument 'right_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d \"\n \"must match length %d of argument 'node_ids'.\" %\n (len(right_node_contribs), _attr_num_features))\n pruning_mode = _execute.make_int(pruning_mode, \"pruning_mode\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"BoostedTreesUpdateEnsemble\",\n tree_ensemble_handle=tree_ensemble_handle, feature_ids=feature_ids,\n node_ids=node_ids, gains=gains, thresholds=thresholds,\n left_node_contribs=left_node_contribs,\n right_node_contribs=right_node_contribs, max_depth=max_depth,\n learning_rate=learning_rate, pruning_mode=pruning_mode, name=name)\n return _op\n _result = None\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"BoostedTreesUpdateEnsemble\", name, _ctx._post_execution_callbacks,\n tree_ensemble_handle, feature_ids, node_ids, gains, thresholds,\n left_node_contribs, right_node_contribs, max_depth, learning_rate,\n \"pruning_mode\", pruning_mode)\n return _result\n except _core._FallbackException:\n return boosted_trees_update_ensemble_eager_fallback(\n tree_ensemble_handle, feature_ids, node_ids, gains, thresholds,\n left_node_contribs, right_node_contribs, max_depth, learning_rate,\n pruning_mode=pruning_mode, name=name, ctx=_ctx)\n except 
_core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef boosted_trees_update_ensemble_eager_fallback(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, pruning_mode, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function boosted_trees_update_ensemble\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(node_ids, (list, tuple)):\n raise TypeError(\n \"Expected list for 'node_ids' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % node_ids)\n _attr_num_features = len(node_ids)\n if not isinstance(gains, (list, tuple)):\n raise TypeError(\n \"Expected list for 'gains' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % gains)\n if len(gains) != _attr_num_features:\n raise ValueError(\n \"List argument 'gains' to 'boosted_trees_update_ensemble' Op with length %d \"\n \"must match length %d of argument 'node_ids'.\" %\n (len(gains), _attr_num_features))\n if not isinstance(thresholds, (list, tuple)):\n raise TypeError(\n \"Expected list for 'thresholds' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % thresholds)\n if len(thresholds) != _attr_num_features:\n raise ValueError(\n \"List argument 'thresholds' to 'boosted_trees_update_ensemble' Op with length %d \"\n \"must match length %d of argument 'node_ids'.\" %\n (len(thresholds), _attr_num_features))\n if not isinstance(left_node_contribs, (list, tuple)):\n raise TypeError(\n \"Expected list for 'left_node_contribs' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % left_node_contribs)\n if len(left_node_contribs) != _attr_num_features:\n raise ValueError(\n \"List argument 'left_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d \"\n \"must match length %d of argument 'node_ids'.\" %\n (len(left_node_contribs), _attr_num_features))\n if not isinstance(right_node_contribs, (list, tuple)):\n raise TypeError(\n \"Expected list for 'right_node_contribs' argument to \"\n \"'boosted_trees_update_ensemble' Op, not %r.\" % right_node_contribs)\n if len(right_node_contribs) != _attr_num_features:\n raise ValueError(\n \"List argument 'right_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d \"\n \"must match length %d of argument 'node_ids'.\" %\n (len(right_node_contribs), _attr_num_features))\n pruning_mode = _execute.make_int(pruning_mode, \"pruning_mode\")\n tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)\n feature_ids = _ops.convert_to_tensor(feature_ids, _dtypes.int32)\n node_ids = _ops.convert_n_to_tensor(node_ids, _dtypes.int32)\n gains = _ops.convert_n_to_tensor(gains, _dtypes.float32)\n thresholds = _ops.convert_n_to_tensor(thresholds, _dtypes.int32)\n left_node_contribs = _ops.convert_n_to_tensor(left_node_contribs, _dtypes.float32)\n right_node_contribs = _ops.convert_n_to_tensor(right_node_contribs, _dtypes.float32)\n max_depth = _ops.convert_to_tensor(max_depth, _dtypes.int32)\n learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32)\n _inputs_flat = [tree_ensemble_handle, feature_ids] + list(node_ids) + list(gains) + list(thresholds) + list(left_node_contribs) + list(right_node_contribs) + [max_depth, learning_rate]\n _attrs = (\"pruning_mode\", pruning_mode, \"num_features\", 
_attr_num_features)\n _result = _execute.execute(b\"BoostedTreesUpdateEnsemble\", 0,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _result = None\n return _result\n\n\ndef is_boosted_trees_ensemble_initialized(tree_ensemble_handle, name=None):\n r\"\"\"Checks whether a tree ensemble has been initialized.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble resouce.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `bool`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n _, _, _op = _op_def_lib._apply_op_helper(\n \"IsBoostedTreesEnsembleInitialized\",\n tree_ensemble_handle=tree_ensemble_handle, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = None\n _execute.record_gradient(\n \"IsBoostedTreesEnsembleInitialized\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"IsBoostedTreesEnsembleInitialized\", name,\n _ctx._post_execution_callbacks, tree_ensemble_handle)\n return _result\n except _core._FallbackException:\n return is_boosted_trees_ensemble_initialized_eager_fallback(\n tree_ensemble_handle, name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef is_boosted_trees_ensemble_initialized_eager_fallback(tree_ensemble_handle, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function is_boosted_trees_ensemble_initialized\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)\n _inputs_flat = [tree_ensemble_handle]\n _attrs = None\n _result = _execute.execute(b\"IsBoostedTreesEnsembleInitialized\", 1,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"IsBoostedTreesEnsembleInitialized\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"BoostedTreesCalculateBestGainsPerFeature\"\n# input_arg {\n# name: \"node_id_range\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"stats_summary_list\"\n# type: DT_FLOAT\n# number_attr: \"num_features\"\n# }\n# input_arg {\n# name: \"l1\"\n# type: DT_FLOAT\n# }\n# input_arg {\n# name: \"l2\"\n# type: DT_FLOAT\n# }\n# input_arg {\n# name: \"tree_complexity\"\n# type: DT_FLOAT\n# }\n# input_arg {\n# name: \"min_node_weight\"\n# type: DT_FLOAT\n# }\n# output_arg {\n# name: \"node_ids_list\"\n# type: DT_INT32\n# number_attr: \"num_features\"\n# }\n# output_arg {\n# name: \"gains_list\"\n# type: DT_FLOAT\n# number_attr: \"num_features\"\n# }\n# output_arg {\n# name: \"thresholds_list\"\n# type: DT_INT32\n# number_attr: \"num_features\"\n# }\n# output_arg {\n# name: \"left_node_contribs_list\"\n# type: DT_FLOAT\n# number_attr: \"num_features\"\n# }\n# output_arg {\n# name: \"right_node_contribs_list\"\n# type: DT_FLOAT\n# number_attr: \"num_features\"\n# }\n# attr {\n# name: \"max_splits\"\n# 
type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"num_features\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# }\n# op {\n# name: \"BoostedTreesCreateEnsemble\"\n# input_arg {\n# name: \"tree_ensemble_handle\"\n# type: DT_RESOURCE\n# }\n# input_arg {\n# name: \"stamp_token\"\n# type: DT_INT64\n# }\n# input_arg {\n# name: \"tree_ensemble_serialized\"\n# type: DT_STRING\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"BoostedTreesDeserializeEnsemble\"\n# input_arg {\n# name: \"tree_ensemble_handle\"\n# type: DT_RESOURCE\n# }\n# input_arg {\n# name: \"stamp_token\"\n# type: DT_INT64\n# }\n# input_arg {\n# name: \"tree_ensemble_serialized\"\n# type: DT_STRING\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"BoostedTreesEnsembleResourceHandleOp\"\n# output_arg {\n# name: \"resource\"\n# type: DT_RESOURCE\n# }\n# attr {\n# name: \"container\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# attr {\n# name: \"shared_name\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"BoostedTreesGetEnsembleStates\"\n# input_arg {\n# name: \"tree_ensemble_handle\"\n# type: DT_RESOURCE\n# }\n# output_arg {\n# name: \"stamp_token\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"num_trees\"\n# type: DT_INT32\n# }\n# output_arg {\n# name: \"num_finalized_trees\"\n# type: DT_INT32\n# }\n# output_arg {\n# name: \"num_attempted_layers\"\n# type: DT_INT32\n# }\n# output_arg {\n# name: \"last_layer_nodes_range\"\n# type: DT_INT32\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"BoostedTreesMakeStatsSummary\"\n# input_arg {\n# name: \"node_ids\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"gradients\"\n# type: DT_FLOAT\n# }\n# input_arg {\n# name: \"hessians\"\n# type: DT_FLOAT\n# }\n# input_arg {\n# name: \"bucketized_features_list\"\n# type: DT_INT32\n# number_attr: \"num_features\"\n# }\n# output_arg {\n# name: \"stats_summary\"\n# type: DT_FLOAT\n# }\n# attr {\n# name: \"max_splits\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"num_buckets\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"num_features\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# }\n# op {\n# name: \"BoostedTreesPredict\"\n# input_arg {\n# name: \"tree_ensemble_handle\"\n# type: DT_RESOURCE\n# }\n# input_arg {\n# name: \"bucketized_features\"\n# type: DT_INT32\n# number_attr: \"num_bucketized_features\"\n# }\n# output_arg {\n# name: \"logits\"\n# type: DT_FLOAT\n# }\n# attr {\n# name: \"num_bucketized_features\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"logits_dimension\"\n# type: \"int\"\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"BoostedTreesSerializeEnsemble\"\n# input_arg {\n# name: \"tree_ensemble_handle\"\n# type: DT_RESOURCE\n# }\n# output_arg {\n# name: \"stamp_token\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"tree_ensemble_serialized\"\n# type: DT_STRING\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"BoostedTreesTrainingPredict\"\n# input_arg {\n# name: \"tree_ensemble_handle\"\n# type: DT_RESOURCE\n# }\n# input_arg {\n# name: \"cached_tree_ids\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"cached_node_ids\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"bucketized_features\"\n# type: DT_INT32\n# number_attr: \"num_bucketized_features\"\n# }\n# output_arg {\n# name: \"partial_logits\"\n# type: DT_FLOAT\n# }\n# output_arg {\n# name: \"tree_ids\"\n# type: DT_INT32\n# }\n# output_arg {\n# 
name: \"node_ids\"\n# type: DT_INT32\n# }\n# attr {\n# name: \"num_bucketized_features\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"logits_dimension\"\n# type: \"int\"\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"BoostedTreesUpdateEnsemble\"\n# input_arg {\n# name: \"tree_ensemble_handle\"\n# type: DT_RESOURCE\n# }\n# input_arg {\n# name: \"feature_ids\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"node_ids\"\n# type: DT_INT32\n# number_attr: \"num_features\"\n# }\n# input_arg {\n# name: \"gains\"\n# type: DT_FLOAT\n# number_attr: \"num_features\"\n# }\n# input_arg {\n# name: \"thresholds\"\n# type: DT_INT32\n# number_attr: \"num_features\"\n# }\n# input_arg {\n# name: \"left_node_contribs\"\n# type: DT_FLOAT\n# number_attr: \"num_features\"\n# }\n# input_arg {\n# name: \"right_node_contribs\"\n# type: DT_FLOAT\n# number_attr: \"num_features\"\n# }\n# input_arg {\n# name: \"max_depth\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"learning_rate\"\n# type: DT_FLOAT\n# }\n# attr {\n# name: \"pruning_mode\"\n# type: \"int\"\n# has_minimum: true\n# }\n# attr {\n# name: \"num_features\"\n# type: \"int\"\n# has_minimum: true\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"IsBoostedTreesEnsembleInitialized\"\n# input_arg {\n# name: \"tree_ensemble_handle\"\n# type: DT_RESOURCE\n# }\n# output_arg {\n# name: \"is_initialized\"\n# type: DT_BOOL\n# }\n# is_stateful: true\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\206\\003\\n(BoostedTreesCalculateBestGainsPerFeature\\022\\021\\n\\rnode_id_range\\030\\003\\022$\\n\\022stats_summary_list\\030\\001*\\014num_features\\022\\006\\n\\002l1\\030\\001\\022\\006\\n\\002l2\\030\\001\\022\\023\\n\\017tree_complexity\\030\\001\\022\\023\\n\\017min_node_weight\\030\\001\\032\\037\\n\\rnode_ids_list\\030\\003*\\014num_features\\032\\034\\n\\ngains_list\\030\\001*\\014num_features\\032!\\n\\017thresholds_list\\030\\003*\\014num_features\\032)\\n\\027left_node_contribs_list\\030\\001*\\014num_features\\032*\\n\\030right_node_contribs_list\\030\\001*\\014num_features\\\"\\025\\n\\nmax_splits\\022\\003int(\\0010\\001\\\"\\027\\n\\014num_features\\022\\003int(\\0010\\001\\nh\\n\\032BoostedTreesCreateEnsemble\\022\\030\\n\\024tree_ensemble_handle\\030\\024\\022\\017\\n\\013stamp_token\\030\\t\\022\\034\\n\\030tree_ensemble_serialized\\030\\007\\210\\001\\001\\nm\\n\\037BoostedTreesDeserializeEnsemble\\022\\030\\n\\024tree_ensemble_handle\\030\\024\\022\\017\\n\\013stamp_token\\030\\t\\022\\034\\n\\030tree_ensemble_serialized\\030\\007\\210\\001\\001\\nk\\n$BoostedTreesEnsembleResourceHandleOp\\032\\014\\n\\010resource\\030\\024\\\"\\027\\n\\tcontainer\\022\\006string\\032\\002\\022\\000\\\"\\031\\n\\013shared_name\\022\\006string\\032\\002\\022\\000\\210\\001\\001\\n\\253\\001\\n\\035BoostedTreesGetEnsembleStates\\022\\030\\n\\024tree_ensemble_handle\\030\\024\\032\\017\\n\\013stamp_token\\030\\t\\032\\r\\n\\tnum_trees\\030\\003\\032\\027\\n\\023num_finalized_trees\\030\\003\\032\\030\\n\\024num_attempted_layers\\030\\003\\032\\032\\n\\026last_layer_nodes_range\\030\\003\\210\\001\\001\\n\\320\\001\\n\\034BoostedTreesMakeStatsSummary\\022\\014\\n\\010node_ids\\030\\003\\022\\r\\n\\tgradients\\030\\001\\022\\014\\n\\010hessians\\030\\001\\022*\\n\\030bucketized_features_list\\030\\003*\\014num_features\\032\\021\\n\\rstats_summary\\030\\001\\\"\\025\\n\\nmax_splits\\022\\003int(\\0010\\001\\\"\\026\\n\\013num_buckets\\022\\003int(\\0010\\001\\\"\\027\\n\\014num_features\\022\\003int(\\0010\\001\\n\\255\\001\\n\\023BoostedT
reesPredict\\022\\030\\n\\024tree_ensemble_handle\\030\\024\\0220\\n\\023bucketized_features\\030\\003*\\027num_bucketized_features\\032\\n\\n\\006logits\\030\\001\\\"\\\"\\n\\027num_bucketized_features\\022\\003int(\\0010\\001\\\"\\027\\n\\020logits_dimension\\022\\003int\\210\\001\\001\\nk\\n\\035BoostedTreesSerializeEnsemble\\022\\030\\n\\024tree_ensemble_handle\\030\\024\\032\\017\\n\\013stamp_token\\030\\t\\032\\034\\n\\030tree_ensemble_serialized\\030\\007\\210\\001\\001\\n\\203\\002\\n\\033BoostedTreesTrainingPredict\\022\\030\\n\\024tree_ensemble_handle\\030\\024\\022\\023\\n\\017cached_tree_ids\\030\\003\\022\\023\\n\\017cached_node_ids\\030\\003\\0220\\n\\023bucketized_features\\030\\003*\\027num_bucketized_features\\032\\022\\n\\016partial_logits\\030\\001\\032\\014\\n\\010tree_ids\\030\\003\\032\\014\\n\\010node_ids\\030\\003\\\"\\\"\\n\\027num_bucketized_features\\022\\003int(\\0010\\001\\\"\\027\\n\\020logits_dimension\\022\\003int\\210\\001\\001\\n\\272\\002\\n\\032BoostedTreesUpdateEnsemble\\022\\030\\n\\024tree_ensemble_handle\\030\\024\\022\\017\\n\\013feature_ids\\030\\003\\022\\032\\n\\010node_ids\\030\\003*\\014num_features\\022\\027\\n\\005gains\\030\\001*\\014num_features\\022\\034\\n\\nthresholds\\030\\003*\\014num_features\\022$\\n\\022left_node_contribs\\030\\001*\\014num_features\\022%\\n\\023right_node_contribs\\030\\001*\\014num_features\\022\\r\\n\\tmax_depth\\030\\003\\022\\021\\n\\rlearning_rate\\030\\001\\\"\\025\\n\\014pruning_mode\\022\\003int(\\001\\\"\\025\\n\\014num_features\\022\\003int(\\001\\210\\001\\001\\nT\\n!IsBoostedTreesEnsembleInitialized\\022\\030\\n\\024tree_ensemble_handle\\030\\024\\032\\022\\n\\016is_initialized\\030\\n\\210\\001\\001\")\n",
"\"\"\"Implementations of various common operations.\n\nIncluding `show()` for displaying an array or with matplotlib.\nMost can handle a numpy array or `rasterio.Band()`.\nPrimarily supports `$ rio insp`.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport warnings\n\nimport numpy as np\n\nimport rasterio\nfrom rasterio._io import RasterReader\n\nfrom rasterio.compat import zip_longest\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_plt():\n \"\"\"import matplotlib.pyplot\n raise import error if matplotlib is not installed\n \"\"\"\n try:\n import matplotlib.pyplot as plt\n return plt\n except (ImportError, RuntimeError): # pragma: no cover\n msg = \"Could not import matplotlib\\n\"\n msg += \"matplotlib required for plotting functions\"\n raise ImportError(msg)\n\n\ndef show(source, with_bounds=True, contour=False, contour_label_kws=None,\n ax=None, title=None, **kwargs):\n \"\"\"Display a raster or raster band using matplotlib.\n\n Parameters\n ----------\n source : array-like in raster axis order,\n or (raster dataset, bidx) tuple,\n or raster dataset,\n If the tuple (raster dataset, bidx),\n selects band `bidx` from raster. If raster dataset display the rgb image\n as defined in the colorinterp metadata, or default to first band.\n with_bounds : bool (opt)\n Whether to change the image extent to the spatial bounds of the image,\n rather than pixel coordinates. Only works when source is\n (raster dataset, bidx) or raster dataset.\n contour : bool (opt)\n Whether to plot the raster data as contours\n contour_label_kws : dictionary (opt)\n Keyword arguments for labeling the contours,\n empty dictionary for no labels.\n ax : matplotlib axis (opt)\n Axis to plot on, otherwise uses current axis.\n title : str, optional\n Title for the figure.\n **kwargs : key, value pairings optional\n These will be passed to the matplotlib imshow or contour method\n depending on contour argument.\n See full lists at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.imshow\n or\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.contour\n\n Returns\n -------\n ax : matplotlib Axes\n Axes with plot.\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, tuple):\n arr = source[0].read(source[1])\n if with_bounds:\n kwargs['extent'] = plotting_extent(source[0])\n elif isinstance(source, RasterReader):\n if source.count == 1:\n arr = source.read(1, masked=True)\n else:\n try:\n source_colorinterp = {source.colorinterp(n): n for n in source.indexes}\n colorinterp = rasterio.enums.ColorInterp\n rgb_indexes = [source_colorinterp[ci] for ci in\n (colorinterp.red, colorinterp.green, colorinterp.blue)]\n arr = source.read(rgb_indexes, masked=True)\n arr = reshape_as_image(arr)\n\n if with_bounds:\n kwargs['extent'] = plotting_extent(source)\n except KeyError:\n arr = source.read(1, masked=True)\n else:\n # The source is a numpy array reshape it to image if it has 3+ bands\n source = np.ma.squeeze(source)\n if len(source.shape) >= 3:\n arr = reshape_as_image(source)\n else:\n arr = source\n\n show = False\n if not ax:\n show = True\n ax = plt.gca()\n\n if contour:\n if 'cmap' not in kwargs:\n kwargs['colors'] = kwargs.get('colors', 'red')\n kwargs['linewidths'] = kwargs.get('linewidths', 1.5)\n kwargs['alpha'] = kwargs.get('alpha', 0.8)\n\n C = ax.contour(arr, origin='upper', **kwargs)\n if contour_label_kws is None:\n # no explicit label kws passed use defaults\n contour_label_kws = dict(fontsize=8,\n inline=True)\n if 
contour_label_kws:\n ax.clabel(C, **contour_label_kws)\n else:\n ax.imshow(arr, **kwargs)\n if title:\n ax.set_title(title, fontweight='bold')\n\n if show:\n plt.show()\n\n return ax\n\n\ndef plotting_extent(source):\n \"\"\"Returns an extent in the format needed\n for matplotlib's imshow (left, right, bottom, top)\n instead of rasterio's bounds (left, bottom, top, right)\n\n Parameters\n ----------\n source : raster dataset\n \"\"\"\n extent = (source.bounds.left, source.bounds.right,\n source.bounds.bottom, source.bounds.top)\n return extent\n\n\ndef reshape_as_image(arr):\n \"\"\"Returns the source array reshaped into the order\n expected by image processing and visualization software\n (matplotlib, scikit-image, etc)\n by swapping the axes order from (bands, rows, columns)\n to (rows, columns, bands)\n\n Parameters\n ----------\n source : array-like in a of format (bands, rows, columns)\n \"\"\"\n # swap the axes order from (bands, rows, columns) to (rows, columns, bands)\n im = np.ma.transpose(arr, [1,2,0])\n return im\n\n\n\ndef reshape_as_raster(arr):\n \"\"\"Returns the array in a raster order\n by swapping the axes order from (rows, columns, bands)\n to (bands, rows, columns)\n\n Parameters\n ----------\n arr : array-like in the image form of (rows, columns, bands)\n \"\"\"\n # swap the axes order from (rows, columns, bands) to (bands, rows, columns)\n im = np.transpose(arr, [2,0,1])\n return im\n\n\ndef show_hist(source, bins=10, masked=True, title='Histogram', ax=None, **kwargs):\n \"\"\"Easily display a histogram with matplotlib.\n\n Parameters\n ----------\n source : np.array or RasterReader, rasterio.Band or tuple(dataset, bidx)\n Input data to display. The first three arrays in multi-dimensional\n arrays are plotted as red, green, and blue.\n bins : int, optional\n Compute histogram across N bins.\n masked : bool, optional\n When working with a `rasterio.Band()` object, specifies if the data\n should be masked on read.\n title : str, optional\n Title for the figure.\n ax : matplotlib axes (opt)\n The raster will be added to this axes if passed.\n **kwargs : optional keyword arguments\n These will be passed to the matplotlib hist method. 
See full list at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.hist\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, RasterReader):\n arr = source.read(masked=masked)\n elif isinstance(source, (tuple, rasterio.Band)):\n arr = source[0].read(source[1], masked=masked)\n else:\n arr = source\n\n # The histogram is computed individually for each 'band' in the array\n # so we need the overall min/max to constrain the plot\n rng = arr.min(), arr.max()\n\n if len(arr.shape) is 2:\n arr = np.expand_dims(arr.flatten(), 0).T\n colors = ['gold']\n else:\n arr = arr.reshape(arr.shape[0], -1).T\n colors = ['red', 'green', 'blue', 'violet', 'gold', 'saddlebrown']\n\n #The goal is to provide a curated set of colors for working with\n # smaller datasets and let matplotlib define additional colors when\n # working with larger datasets.\n if arr.shape[-1] > len(colors):\n n = arr.shape[-1] - len(colors)\n colors.extend(np.ndarray.tolist(plt.get_cmap('Accent')(np.linspace(0, 1, n))))\n else:\n colors = colors[:arr.shape[-1]]\n\n # If a rasterio.Band() is given make sure the proper index is displayed\n # in the legend.\n if isinstance(source, (tuple, rasterio.Band)):\n labels = [str(source[1])]\n else:\n labels = (str(i + 1) for i in range(len(arr)))\n\n if ax:\n show = False\n else:\n show = True\n ax = plt.gca()\n\n fig = ax.get_figure()\n\n ax.hist(arr,\n bins=bins,\n color=colors,\n label=labels,\n range=rng,\n **kwargs)\n\n ax.legend(loc=\"upper right\")\n ax.set_title(title, fontweight='bold')\n ax.grid(True)\n ax.set_xlabel('DN')\n ax.set_ylabel('Frequency')\n if show:\n plt.show()\n",
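The entry above is rasterio's plotting helper module (importable as rasterio.plot in releases of this vintage). A minimal usage sketch follows, assuming matplotlib is installed; 'example.tif' is a hypothetical raster path, not a file shipped with the library.

import numpy as np
import rasterio
from rasterio.plot import show, show_hist, reshape_as_image

# A bare numpy array is displayed in pixel coordinates.
show(np.random.random((100, 100)), title='synthetic band', cmap='gray')

with rasterio.open('example.tif') as src:       # hypothetical file
    # (dataset, band index) tuple: the image extent comes from the dataset bounds.
    show((src, 1), title='band 1')
    show_hist(src, bins=50, title='per-band histogram')
    img = reshape_as_image(src.read())          # (bands, rows, cols) -> (rows, cols, bands)

One small wart worth noting in the source: show_hist tests len(arr.shape) is 2, which only works because CPython caches small integers; the robust spelling is len(arr.shape) == 2.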
"\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\nOriginal C++ source file: sparse_feature_cross_op.cc\n\"\"\"\n\nimport collections as _collections\nimport six as _six\n\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import errors as _errors\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n_sparse_feature_cross_outputs = [\"output_indices\", \"output_values\",\n \"output_shape\"]\n_SparseFeatureCrossOutput = _collections.namedtuple(\n \"SparseFeatureCross\", _sparse_feature_cross_outputs)\n\n\n@tf_export('sparse_feature_cross')\ndef sparse_feature_cross(indices, values, shapes, dense, hashed_output, num_buckets, out_type, internal_type, name=None):\n r\"\"\"Generates sparse cross form a list of sparse tensors.\n\n The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each\n representing features of one feature column. It outputs a 2D `SparseTensor` with\n the batchwise crosses of these features.\n\n For example, if the inputs are\n\n inputs[0]: SparseTensor with shape = [2, 2]\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n inputs[1]: SparseTensor with shape = [2, 1]\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n\n inputs[2]: Tensor [[\"f\"], [\"g\"]]\n\n then the output will be\n\n shape = [2, 2]\n [0, 0]: \"a_X_d_X_f\"\n [1, 0]: \"b_X_e_X_g\"\n [1, 1]: \"c_X_e_X_g\"\n\n if hashed_output=true then the output will be\n\n shape = [2, 2]\n [0, 0]: HashCombine(\n Fingerprint64(\"f\"), HashCombine(\n Fingerprint64(\"d\"), Fingerprint64(\"a\")))\n [1, 0]: HashCombine(\n Fingerprint64(\"g\"), HashCombine(\n Fingerprint64(\"e\"), Fingerprint64(\"b\")))\n [1, 1]: HashCombine(\n Fingerprint64(\"g\"), HashCombine(\n Fingerprint64(\"e\"), Fingerprint64(\"c\")))\n\n Args:\n indices: A list of `Tensor` objects with type `int64`.\n 2-D. Indices of each input `SparseTensor`.\n values: A list of `Tensor` objects with types from: `int64`, `string`.\n 1-D. values of each `SparseTensor`.\n shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.\n 1-D. Shapes of each `SparseTensor`.\n dense: A list of `Tensor` objects with types from: `int64`, `string`.\n 2-D. Columns represented by dense `Tensor`.\n hashed_output: A `bool`.\n num_buckets: An `int` that is `>= 0`.\n out_type: A `tf.DType` from: `tf.int64, tf.string`.\n internal_type: A `tf.DType` from: `tf.int64, tf.string`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (output_indices, output_values, output_shape).\n\n output_indices: A `Tensor` of type `int64`. 2-D. Indices of the concatenated `SparseTensor`.\n output_values: A `Tensor` of type `out_type`. 1-D. Non-empty values of the concatenated or hashed\n `SparseTensor`.\n output_shape: A `Tensor` of type `int64`. 1-D. 
Shape of the concatenated `SparseTensor`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n if not isinstance(indices, (list, tuple)):\n raise TypeError(\n \"Expected list for 'indices' argument to \"\n \"'sparse_feature_cross' Op, not %r.\" % indices)\n _attr_N = len(indices)\n if not isinstance(shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'shapes' argument to \"\n \"'sparse_feature_cross' Op, not %r.\" % shapes)\n if len(shapes) != _attr_N:\n raise ValueError(\n \"List argument 'shapes' to 'sparse_feature_cross' Op with length %d \"\n \"must match length %d of argument 'indices'.\" %\n (len(shapes), _attr_N))\n hashed_output = _execute.make_bool(hashed_output, \"hashed_output\")\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\n out_type = _execute.make_type(out_type, \"out_type\")\n internal_type = _execute.make_type(internal_type, \"internal_type\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"SparseFeatureCross\", indices=indices, values=values, shapes=shapes,\n dense=dense, hashed_output=hashed_output, num_buckets=num_buckets,\n out_type=out_type, internal_type=internal_type, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"N\", _op.get_attr(\"N\"), \"hashed_output\",\n _op.get_attr(\"hashed_output\"), \"num_buckets\",\n _op.get_attr(\"num_buckets\"), \"sparse_types\",\n _op.get_attr(\"sparse_types\"), \"dense_types\",\n _op.get_attr(\"dense_types\"), \"out_type\",\n _op.get_attr(\"out_type\"), \"internal_type\",\n _op.get_attr(\"internal_type\"))\n _execute.record_gradient(\n \"SparseFeatureCross\", _inputs_flat, _attrs, _result, name)\n _result = _SparseFeatureCrossOutput._make(_result)\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"SparseFeatureCross\", name, _ctx._post_execution_callbacks, indices,\n values, shapes, dense, \"hashed_output\", hashed_output, \"num_buckets\",\n num_buckets, \"out_type\", out_type, \"internal_type\", internal_type)\n _result = _SparseFeatureCrossOutput._make(_result)\n return _result\n except _core._FallbackException:\n return sparse_feature_cross_eager_fallback(\n indices, values, shapes, dense, hashed_output=hashed_output,\n num_buckets=num_buckets, out_type=out_type,\n internal_type=internal_type, name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef sparse_feature_cross_eager_fallback(indices, values, shapes, dense, hashed_output, num_buckets, out_type, internal_type, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function sparse_feature_cross\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(indices, (list, tuple)):\n raise TypeError(\n \"Expected list for 'indices' argument to \"\n \"'sparse_feature_cross' Op, not %r.\" % indices)\n _attr_N = len(indices)\n if not isinstance(shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'shapes' argument to \"\n \"'sparse_feature_cross' Op, not %r.\" % shapes)\n if len(shapes) != _attr_N:\n raise ValueError(\n \"List argument 'shapes' to 'sparse_feature_cross' Op with length %d \"\n \"must match length %d of argument 'indices'.\" %\n (len(shapes), _attr_N))\n hashed_output = _execute.make_bool(hashed_output, \"hashed_output\")\n 
num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\n out_type = _execute.make_type(out_type, \"out_type\")\n internal_type = _execute.make_type(internal_type, \"internal_type\")\n _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)\n _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(dense, _ctx)\n indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)\n shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)\n _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense)\n _attrs = (\"N\", _attr_N, \"hashed_output\", hashed_output, \"num_buckets\",\n num_buckets, \"sparse_types\", _attr_sparse_types, \"dense_types\",\n _attr_dense_types, \"out_type\", out_type, \"internal_type\", internal_type)\n _result = _execute.execute(b\"SparseFeatureCross\", 3, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"SparseFeatureCross\", _inputs_flat, _attrs, _result, name)\n _result = _SparseFeatureCrossOutput._make(_result)\n return _result\n\n_ops.RegisterShape(\"SparseFeatureCross\")(None)\n\n\n_sparse_feature_cross_v2_outputs = [\"output_indices\", \"output_values\",\n \"output_shape\"]\n_SparseFeatureCrossV2Output = _collections.namedtuple(\n \"SparseFeatureCrossV2\", _sparse_feature_cross_v2_outputs)\n\n\n@tf_export('sparse_feature_cross_v2')\ndef sparse_feature_cross_v2(indices, values, shapes, dense, hashed_output, num_buckets, hash_key, out_type, internal_type, name=None):\n r\"\"\"Generates sparse cross form a list of sparse tensors.\n\n The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each\n representing features of one feature column. It outputs a 2D `SparseTensor` with\n the batchwise crosses of these features.\n\n For example, if the inputs are\n\n inputs[0]: SparseTensor with shape = [2, 2]\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n inputs[1]: SparseTensor with shape = [2, 1]\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n\n inputs[2]: Tensor [[\"f\"], [\"g\"]]\n\n then the output will be\n\n shape = [2, 2]\n [0, 0]: \"a_X_d_X_f\"\n [1, 0]: \"b_X_e_X_g\"\n [1, 1]: \"c_X_e_X_g\"\n\n if hashed_output=true then the output will be\n\n shape = [2, 2]\n [0, 0]: FingerprintCat64(\n Fingerprint64(\"f\"), FingerprintCat64(\n Fingerprint64(\"d\"), Fingerprint64(\"a\")))\n [1, 0]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"b\")))\n [1, 1]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"c\")))\n\n Args:\n indices: A list of `Tensor` objects with type `int64`.\n 2-D. Indices of each input `SparseTensor`.\n values: A list of `Tensor` objects with types from: `int64`, `string`.\n 1-D. values of each `SparseTensor`.\n shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.\n 1-D. Shapes of each `SparseTensor`.\n dense: A list of `Tensor` objects with types from: `int64`, `string`.\n 2-D. Columns represented by dense `Tensor`.\n hashed_output: A `bool`.\n num_buckets: An `int` that is `>= 0`.\n hash_key: An `int`.\n out_type: A `tf.DType` from: `tf.int64, tf.string`.\n internal_type: A `tf.DType` from: `tf.int64, tf.string`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (output_indices, output_values, output_shape).\n\n output_indices: A `Tensor` of type `int64`. 2-D. Indices of the concatenated `SparseTensor`.\n output_values: A `Tensor` of type `out_type`. 1-D. 
Non-empty values of the concatenated or hashed\n `SparseTensor`.\n output_shape: A `Tensor` of type `int64`. 1-D. Shape of the concatenated `SparseTensor`.\n \"\"\"\n _ctx = _context._context\n if _ctx is None or not _ctx._eager_context.is_eager:\n if not isinstance(indices, (list, tuple)):\n raise TypeError(\n \"Expected list for 'indices' argument to \"\n \"'sparse_feature_cross_v2' Op, not %r.\" % indices)\n _attr_N = len(indices)\n if not isinstance(shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'shapes' argument to \"\n \"'sparse_feature_cross_v2' Op, not %r.\" % shapes)\n if len(shapes) != _attr_N:\n raise ValueError(\n \"List argument 'shapes' to 'sparse_feature_cross_v2' Op with length %d \"\n \"must match length %d of argument 'indices'.\" %\n (len(shapes), _attr_N))\n hashed_output = _execute.make_bool(hashed_output, \"hashed_output\")\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\n hash_key = _execute.make_int(hash_key, \"hash_key\")\n out_type = _execute.make_type(out_type, \"out_type\")\n internal_type = _execute.make_type(internal_type, \"internal_type\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"SparseFeatureCrossV2\", indices=indices, values=values, shapes=shapes,\n dense=dense, hashed_output=hashed_output, num_buckets=num_buckets,\n hash_key=hash_key, out_type=out_type, internal_type=internal_type,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"N\", _op.get_attr(\"N\"), \"hashed_output\",\n _op.get_attr(\"hashed_output\"), \"num_buckets\",\n _op.get_attr(\"num_buckets\"), \"hash_key\",\n _op.get_attr(\"hash_key\"), \"sparse_types\",\n _op.get_attr(\"sparse_types\"), \"dense_types\",\n _op.get_attr(\"dense_types\"), \"out_type\",\n _op.get_attr(\"out_type\"), \"internal_type\",\n _op.get_attr(\"internal_type\"))\n _execute.record_gradient(\n \"SparseFeatureCrossV2\", _inputs_flat, _attrs, _result, name)\n _result = _SparseFeatureCrossV2Output._make(_result)\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"SparseFeatureCrossV2\", name, _ctx._post_execution_callbacks, indices,\n values, shapes, dense, \"hashed_output\", hashed_output, \"num_buckets\",\n num_buckets, \"hash_key\", hash_key, \"out_type\", out_type,\n \"internal_type\", internal_type)\n _result = _SparseFeatureCrossV2Output._make(_result)\n return _result\n except _core._FallbackException:\n return sparse_feature_cross_v2_eager_fallback(\n indices, values, shapes, dense, hashed_output=hashed_output,\n num_buckets=num_buckets, hash_key=hash_key, out_type=out_type,\n internal_type=internal_type, name=name, ctx=_ctx)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef sparse_feature_cross_v2_eager_fallback(indices, values, shapes, dense, hashed_output, num_buckets, hash_key, out_type, internal_type, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function sparse_feature_cross_v2\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(indices, (list, tuple)):\n raise TypeError(\n \"Expected list for 'indices' argument to \"\n \"'sparse_feature_cross_v2' Op, not %r.\" % indices)\n _attr_N = len(indices)\n if not isinstance(shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'shapes' argument to \"\n 
\"'sparse_feature_cross_v2' Op, not %r.\" % shapes)\n if len(shapes) != _attr_N:\n raise ValueError(\n \"List argument 'shapes' to 'sparse_feature_cross_v2' Op with length %d \"\n \"must match length %d of argument 'indices'.\" %\n (len(shapes), _attr_N))\n hashed_output = _execute.make_bool(hashed_output, \"hashed_output\")\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\n hash_key = _execute.make_int(hash_key, \"hash_key\")\n out_type = _execute.make_type(out_type, \"out_type\")\n internal_type = _execute.make_type(internal_type, \"internal_type\")\n _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)\n _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(dense, _ctx)\n indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)\n shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)\n _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense)\n _attrs = (\"N\", _attr_N, \"hashed_output\", hashed_output, \"num_buckets\",\n num_buckets, \"hash_key\", hash_key, \"sparse_types\", _attr_sparse_types,\n \"dense_types\", _attr_dense_types, \"out_type\", out_type, \"internal_type\",\n internal_type)\n _result = _execute.execute(b\"SparseFeatureCrossV2\", 3, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"SparseFeatureCrossV2\", _inputs_flat, _attrs, _result, name)\n _result = _SparseFeatureCrossV2Output._make(_result)\n return _result\n\n_ops.RegisterShape(\"SparseFeatureCrossV2\")(None)\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"SparseFeatureCross\"\n# input_arg {\n# name: \"indices\"\n# type: DT_INT64\n# number_attr: \"N\"\n# }\n# input_arg {\n# name: \"values\"\n# type_list_attr: \"sparse_types\"\n# }\n# input_arg {\n# name: \"shapes\"\n# type: DT_INT64\n# number_attr: \"N\"\n# }\n# input_arg {\n# name: \"dense\"\n# type_list_attr: \"dense_types\"\n# }\n# output_arg {\n# name: \"output_indices\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"output_values\"\n# type_attr: \"out_type\"\n# }\n# output_arg {\n# name: \"output_shape\"\n# type: DT_INT64\n# }\n# attr {\n# name: \"N\"\n# type: \"int\"\n# has_minimum: true\n# }\n# attr {\n# name: \"hashed_output\"\n# type: \"bool\"\n# }\n# attr {\n# name: \"num_buckets\"\n# type: \"int\"\n# has_minimum: true\n# }\n# attr {\n# name: \"sparse_types\"\n# type: \"list(type)\"\n# has_minimum: true\n# allowed_values {\n# list {\n# type: DT_INT64\n# type: DT_STRING\n# }\n# }\n# }\n# attr {\n# name: \"dense_types\"\n# type: \"list(type)\"\n# has_minimum: true\n# allowed_values {\n# list {\n# type: DT_INT64\n# type: DT_STRING\n# }\n# }\n# }\n# attr {\n# name: \"out_type\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT64\n# type: DT_STRING\n# }\n# }\n# }\n# attr {\n# name: \"internal_type\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT64\n# type: DT_STRING\n# }\n# }\n# }\n# }\n# op {\n# name: \"SparseFeatureCrossV2\"\n# input_arg {\n# name: \"indices\"\n# type: DT_INT64\n# number_attr: \"N\"\n# }\n# input_arg {\n# name: \"values\"\n# type_list_attr: \"sparse_types\"\n# }\n# input_arg {\n# name: \"shapes\"\n# type: DT_INT64\n# number_attr: \"N\"\n# }\n# input_arg {\n# name: \"dense\"\n# type_list_attr: \"dense_types\"\n# }\n# output_arg {\n# name: 
\"output_indices\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"output_values\"\n# type_attr: \"out_type\"\n# }\n# output_arg {\n# name: \"output_shape\"\n# type: DT_INT64\n# }\n# attr {\n# name: \"N\"\n# type: \"int\"\n# has_minimum: true\n# }\n# attr {\n# name: \"hashed_output\"\n# type: \"bool\"\n# }\n# attr {\n# name: \"num_buckets\"\n# type: \"int\"\n# has_minimum: true\n# }\n# attr {\n# name: \"hash_key\"\n# type: \"int\"\n# }\n# attr {\n# name: \"sparse_types\"\n# type: \"list(type)\"\n# has_minimum: true\n# allowed_values {\n# list {\n# type: DT_INT64\n# type: DT_STRING\n# }\n# }\n# }\n# attr {\n# name: \"dense_types\"\n# type: \"list(type)\"\n# has_minimum: true\n# allowed_values {\n# list {\n# type: DT_INT64\n# type: DT_STRING\n# }\n# }\n# }\n# attr {\n# name: \"out_type\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT64\n# type: DT_STRING\n# }\n# }\n# }\n# attr {\n# name: \"internal_type\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT64\n# type: DT_STRING\n# }\n# }\n# }\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\337\\002\\n\\022SparseFeatureCross\\022\\016\\n\\007indices\\030\\t*\\001N\\022\\026\\n\\006values2\\014sparse_types\\022\\r\\n\\006shapes\\030\\t*\\001N\\022\\024\\n\\005dense2\\013dense_types\\032\\022\\n\\016output_indices\\030\\t\\032\\031\\n\\routput_values\\\"\\010out_type\\032\\020\\n\\014output_shape\\030\\t\\\"\\n\\n\\001N\\022\\003int(\\001\\\"\\025\\n\\rhashed_output\\022\\004bool\\\"\\024\\n\\013num_buckets\\022\\003int(\\001\\\"$\\n\\014sparse_types\\022\\nlist(type)(\\001:\\006\\n\\0042\\002\\t\\007\\\"#\\n\\013dense_types\\022\\nlist(type)(\\001:\\006\\n\\0042\\002\\t\\007\\\"\\030\\n\\010out_type\\022\\004type:\\006\\n\\0042\\002\\t\\007\\\"\\035\\n\\rinternal_type\\022\\004type:\\006\\n\\0042\\002\\t\\007\\n\\362\\002\\n\\024SparseFeatureCrossV2\\022\\016\\n\\007indices\\030\\t*\\001N\\022\\026\\n\\006values2\\014sparse_types\\022\\r\\n\\006shapes\\030\\t*\\001N\\022\\024\\n\\005dense2\\013dense_types\\032\\022\\n\\016output_indices\\030\\t\\032\\031\\n\\routput_values\\\"\\010out_type\\032\\020\\n\\014output_shape\\030\\t\\\"\\n\\n\\001N\\022\\003int(\\001\\\"\\025\\n\\rhashed_output\\022\\004bool\\\"\\024\\n\\013num_buckets\\022\\003int(\\001\\\"\\017\\n\\010hash_key\\022\\003int\\\"$\\n\\014sparse_types\\022\\nlist(type)(\\001:\\006\\n\\0042\\002\\t\\007\\\"#\\n\\013dense_types\\022\\nlist(type)(\\001:\\006\\n\\0042\\002\\t\\007\\\"\\030\\n\\010out_type\\022\\004type:\\006\\n\\0042\\002\\t\\007\\\"\\035\\n\\rinternal_type\\022\\004type:\\006\\n\\0042\\002\\t\\007\")\n",
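The generated wrappers above implement the batchwise string cross described in their docstrings. As a plain-Python illustration of those crossing semantics only (not TensorFlow's actual kernel, and for the hashed_output=False case), the sketch below reproduces the docstring's example; the helper name sparse_cross_rows and the list-of-lists input layout are illustrative assumptions.

from itertools import product

def sparse_cross_rows(columns):
    # columns: one list per feature column; each entry holds that batch
    # row's values (a sparse column may have several, a dense column one).
    crossed = []
    for row_values in zip(*columns):
        # join one value from every column with the "_X_" separator,
        # for every combination of values within the row
        crossed.append(["_X_".join(combo) for combo in product(*row_values)])
    return crossed

# The example from the docstring: two sparse columns and one dense column.
col0 = [["a"], ["b", "c"]]
col1 = [["d"], ["e"]]
col2 = [["f"], ["g"]]
print(sparse_cross_rows([col0, col1, col2]))
# [['a_X_d_X_f'], ['b_X_e_X_g', 'c_X_e_X_g']]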
"\"\"\"\ndifferential_evolution: The differential evolution global optimization algorithm\nAdded by Andrew Nelson 2014\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\nimport numpy as np\nfrom scipy.optimize import OptimizeResult, minimize\nfrom scipy.optimize.optimize import _status_message\nimport numbers\n\n__all__ = ['differential_evolution']\n\n_MACHEPS = np.finfo(np.float64).eps\n\n\ndef differential_evolution(func, bounds, args=(), strategy='best1bin',\n maxiter=1000, popsize=15, tol=0.01,\n mutation=(0.5, 1), recombination=0.7, seed=None,\n callback=None, disp=False, polish=True,\n init='latinhypercube'):\n \"\"\"Finds the global minimum of a multivariate function.\n Differential Evolution is stochastic in nature (does not use gradient\n methods) to find the minimium, and can search large areas of candidate\n space, but often requires larger numbers of function evaluations than\n conventional gradient based techniques.\n\n The algorithm is due to Storn and Price [1]_.\n\n Parameters\n ----------\n func : callable\n The objective function to be minimized. Must be in the form\n ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array\n and ``args`` is a tuple of any additional fixed parameters needed to\n completely specify the function.\n bounds : sequence\n Bounds for variables. ``(min, max)`` pairs for each element in ``x``,\n defining the lower and upper bounds for the optimizing argument of\n `func`. It is required to have ``len(bounds) == len(x)``.\n ``len(bounds)`` is used to determine the number of parameters in ``x``.\n args : tuple, optional\n Any additional fixed parameters needed to\n completely specify the objective function.\n strategy : str, optional\n The differential evolution strategy to use. Should be one of:\n\n - 'best1bin'\n - 'best1exp'\n - 'rand1exp'\n - 'randtobest1exp'\n - 'best2exp'\n - 'rand2exp'\n - 'randtobest1bin'\n - 'best2bin'\n - 'rand2bin'\n - 'rand1bin'\n\n The default is 'best1bin'.\n maxiter : int, optional\n The maximum number of generations over which the entire population is\n evolved. The maximum number of function evaluations (with no polishing)\n is: ``(maxiter + 1) * popsize * len(x)``\n popsize : int, optional\n A multiplier for setting the total population size. The population has\n ``popsize * len(x)`` individuals.\n tol : float, optional\n When the mean of the population energies, multiplied by tol,\n divided by the standard deviation of the population energies\n is greater than 1 the solving process terminates:\n ``convergence = mean(pop) * tol / stdev(pop) > 1``\n mutation : float or tuple(float, float), optional\n The mutation constant. In the literature this is also known as\n differential weight, being denoted by F.\n If specified as a float it should be in the range [0, 2].\n If specified as a tuple ``(min, max)`` dithering is employed. Dithering\n randomly changes the mutation constant on a generation by generation\n basis. The mutation constant for that generation is taken from\n ``U[min, max)``. Dithering can help speed convergence significantly.\n Increasing the mutation constant increases the search radius, but will\n slow down convergence.\n recombination : float, optional\n The recombination constant, should be in the range [0, 1]. In the\n literature this is also known as the crossover probability, being\n denoted by CR. 
Increasing this value allows a larger number of mutants\n to progress into the next generation, but at the risk of population\n stability.\n seed : int or `np.random.RandomState`, optional\n If `seed` is not specified the `np.RandomState` singleton is used.\n If `seed` is an int, a new `np.random.RandomState` instance is used,\n seeded with seed.\n If `seed` is already a `np.random.RandomState instance`, then that\n `np.random.RandomState` instance is used.\n Specify `seed` for repeatable minimizations.\n disp : bool, optional\n Display status messages\n callback : callable, `callback(xk, convergence=val)`, optional\n A function to follow the progress of the minimization. ``xk`` is\n the current value of ``x0``. ``val`` represents the fractional\n value of the population convergence. When ``val`` is greater than one\n the function halts. If callback returns `True`, then the minimization\n is halted (any polishing is still carried out).\n polish : bool, optional\n If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`\n method is used to polish the best population member at the end, which\n can improve the minimization slightly.\n init : string, optional\n Specify how the population initialization is performed. Should be\n one of:\n\n - 'latinhypercube'\n - 'random'\n\n The default is 'latinhypercube'. Latin Hypercube sampling tries to\n maximize coverage of the available parameter space. 'random' initializes\n the population randomly - this has the drawback that clustering can\n occur, preventing the whole of parameter space being covered.\n\n Returns\n -------\n res : OptimizeResult\n The optimization result represented as a `OptimizeResult` object.\n Important attributes are: ``x`` the solution array, ``success`` a\n Boolean flag indicating if the optimizer exited successfully and\n ``message`` which describes the cause of the termination. See\n `OptimizeResult` for a description of other attributes. If `polish`\n was employed, and a lower minimum was obtained by the polishing, then\n OptimizeResult also contains the ``jac`` attribute.\n\n Notes\n -----\n Differential evolution is a stochastic population based method that is\n useful for global optimization problems. At each pass through the population\n the algorithm mutates each candidate solution by mixing with other candidate\n solutions to create a trial candidate. There are several strategies [2]_ for\n creating trial candidates, which suit some problems more than others. The\n 'best1bin' strategy is a good starting point for many systems. In this\n strategy two members of the population are randomly chosen. Their difference\n is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`,\n so far:\n\n .. math::\n\n b' = b_0 + mutation * (population[rand0] - population[rand1])\n\n A trial vector is then constructed. Starting with a randomly chosen 'i'th\n parameter the trial is sequentially filled (in modulo) with parameters from\n `b'` or the original candidate. The choice of whether to use `b'` or the\n original candidate is made with a binomial distribution (the 'bin' in\n 'best1bin') - a random number in [0, 1) is generated. If this number is\n less than the `recombination` constant then the parameter is loaded from\n `b'`, otherwise it is loaded from the original candidate. The final\n parameter is always loaded from `b'`. Once the trial candidate is built\n its fitness is assessed. If the trial is better than the original candidate\n then it takes its place. 
If it is also better than the best overall\n candidate it also replaces that.\n To improve your chances of finding a global minimum use higher `popsize`\n values, with higher `mutation` and (dithering), but lower `recombination`\n values. This has the effect of widening the search radius, but slowing\n convergence.\n\n .. versionadded:: 0.15.0\n\n Examples\n --------\n Let us consider the problem of minimizing the Rosenbrock function. This\n function is implemented in `rosen` in `scipy.optimize`.\n\n >>> from scipy.optimize import rosen, differential_evolution\n >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]\n >>> result = differential_evolution(rosen, bounds)\n >>> result.x, result.fun\n (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)\n\n Next find the minimum of the Ackley function\n (http://en.wikipedia.org/wiki/Test_functions_for_optimization).\n\n >>> from scipy.optimize import differential_evolution\n >>> import numpy as np\n >>> def ackley(x):\n ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))\n ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))\n ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e\n >>> bounds = [(-5, 5), (-5, 5)]\n >>> result = differential_evolution(ackley, bounds)\n >>> result.x, result.fun\n (array([ 0., 0.]), 4.4408920985006262e-16)\n\n References\n ----------\n .. [1] Storn, R and Price, K, Differential Evolution - a Simple and\n Efficient Heuristic for Global Optimization over Continuous Spaces,\n Journal of Global Optimization, 1997, 11, 341 - 359.\n .. [2] http://www1.icsi.berkeley.edu/~storn/code.html\n .. [3] http://en.wikipedia.org/wiki/Differential_evolution\n \"\"\"\n\n solver = DifferentialEvolutionSolver(func, bounds, args=args,\n strategy=strategy, maxiter=maxiter,\n popsize=popsize, tol=tol,\n mutation=mutation,\n recombination=recombination,\n seed=seed, polish=polish,\n callback=callback,\n disp=disp,\n init=init)\n return solver.solve()\n\n\nclass DifferentialEvolutionSolver(object):\n\n \"\"\"This class implements the differential evolution solver\n\n Parameters\n ----------\n func : callable\n The objective function to be minimized. Must be in the form\n ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array\n and ``args`` is a tuple of any additional fixed parameters needed to\n completely specify the function.\n bounds : sequence\n Bounds for variables. ``(min, max)`` pairs for each element in ``x``,\n defining the lower and upper bounds for the optimizing argument of\n `func`. It is required to have ``len(bounds) == len(x)``.\n ``len(bounds)`` is used to determine the number of parameters in ``x``.\n args : tuple, optional\n Any additional fixed parameters needed to\n completely specify the objective function.\n strategy : str, optional\n The differential evolution strategy to use. Should be one of:\n\n - 'best1bin'\n - 'best1exp'\n - 'rand1exp'\n - 'randtobest1exp'\n - 'best2exp'\n - 'rand2exp'\n - 'randtobest1bin'\n - 'best2bin'\n - 'rand2bin'\n - 'rand1bin'\n\n The default is 'best1bin'\n\n maxiter : int, optional\n The maximum number of generations over which the entire population is\n evolved. The maximum number of function evaluations (with no polishing)\n is: ``(maxiter + 1) * popsize * len(x)``\n popsize : int, optional\n A multiplier for setting the total population size. 
The population has\n ``popsize * len(x)`` individuals.\n tol : float, optional\n When the mean of the population energies, multiplied by tol,\n divided by the standard deviation of the population energies\n is greater than 1 the solving process terminates:\n ``convergence = mean(pop) * tol / stdev(pop) > 1``\n mutation : float or tuple(float, float), optional\n The mutation constant. In the literature this is also known as\n differential weight, being denoted by F.\n If specified as a float it should be in the range [0, 2].\n If specified as a tuple ``(min, max)`` dithering is employed. Dithering\n randomly changes the mutation constant on a generation by generation\n basis. The mutation constant for that generation is taken from\n U[min, max). Dithering can help speed convergence significantly.\n Increasing the mutation constant increases the search radius, but will\n slow down convergence.\n recombination : float, optional\n The recombination constant, should be in the range [0, 1]. In the\n literature this is also known as the crossover probability, being\n denoted by CR. Increasing this value allows a larger number of mutants\n to progress into the next generation, but at the risk of population\n stability.\n seed : int or `np.random.RandomState`, optional\n If `seed` is not specified the `np.random.RandomState` singleton is\n used.\n If `seed` is an int, a new `np.random.RandomState` instance is used,\n seeded with `seed`.\n If `seed` is already a `np.random.RandomState` instance, then that\n `np.random.RandomState` instance is used.\n Specify `seed` for repeatable minimizations.\n disp : bool, optional\n Display status messages\n callback : callable, `callback(xk, convergence=val)`, optional\n A function to follow the progress of the minimization. ``xk`` is\n the current value of ``x0``. ``val`` represents the fractional\n value of the population convergence. When ``val`` is greater than one\n the function halts. If callback returns `True`, then the minimization\n is halted (any polishing is still carried out).\n polish : bool, optional\n If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method\n is used to polish the best population member at the end. This requires\n a few more function evaluations.\n maxfun : int, optional\n Set the maximum number of function evaluations. However, it probably\n makes more sense to set `maxiter` instead.\n init : string, optional\n Specify which type of population initialization is performed. 
Should be\n        one of:\n\n            - 'latinhypercube'\n            - 'random'\n    \"\"\"\n\n    # Dispatch of mutation strategy method (binomial or exponential).\n    _binomial = {'best1bin': '_best1',\n                 'randtobest1bin': '_randtobest1',\n                 'best2bin': '_best2',\n                 'rand2bin': '_rand2',\n                 'rand1bin': '_rand1'}\n    _exponential = {'best1exp': '_best1',\n                    'rand1exp': '_rand1',\n                    'randtobest1exp': '_randtobest1',\n                    'best2exp': '_best2',\n                    'rand2exp': '_rand2'}\n\n    def __init__(self, func, bounds, args=(),\n                 strategy='best1bin', maxiter=1000, popsize=15,\n                 tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,\n                 maxfun=np.inf, callback=None, disp=False, polish=True,\n                 init='latinhypercube'):\n\n        if strategy in self._binomial:\n            self.mutation_func = getattr(self, self._binomial[strategy])\n        elif strategy in self._exponential:\n            self.mutation_func = getattr(self, self._exponential[strategy])\n        else:\n            raise ValueError(\"Please select a valid mutation strategy\")\n        self.strategy = strategy\n\n        self.callback = callback\n        self.polish = polish\n        self.tol = tol\n\n        # Mutation constant should be in [0, 2). If specified as a sequence\n        # then dithering is performed.\n        self.scale = mutation\n        if (not np.all(np.isfinite(mutation)) or\n                np.any(np.array(mutation) >= 2) or\n                np.any(np.array(mutation) < 0)):\n            raise ValueError('The mutation constant must be a float in '\n                             'U[0, 2), or specified as a tuple(min, max)'\n                             ' where min < max and min, max are in U[0, 2).')\n\n        self.dither = None\n        if hasattr(mutation, '__iter__') and len(mutation) > 1:\n            self.dither = [mutation[0], mutation[1]]\n            self.dither.sort()\n\n        self.cross_over_probability = recombination\n\n        self.func = func\n        self.args = args\n\n        # convert tuple of lower and upper bounds to limits\n        # [(low_0, high_0), ..., (low_n, high_n)]\n        # -> [[low_0, ..., low_n], [high_0, ..., high_n]]\n        self.limits = np.array(bounds, dtype='float').T\n        if (np.size(self.limits, 0) != 2 or not\n                np.all(np.isfinite(self.limits))):\n            raise ValueError('bounds should be a sequence containing '\n                             'real valued (min, max) pairs for each value'\n                             ' in x')\n\n        if maxiter is None:  # the default used to be None\n            maxiter = 1000\n        self.maxiter = maxiter\n        if maxfun is None:  # the default used to be None\n            maxfun = np.inf\n        self.maxfun = maxfun\n\n        # population is scaled to between [0, 1].\n        # We have to scale between parameter <-> population\n        # save these arguments for _scale_parameter and\n        # _unscale_parameter. This is an optimization\n        self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])\n        self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])\n\n        self.parameter_count = np.size(self.limits, 1)\n\n        self.random_number_generator = _make_random_gen(seed)\n\n        # default population initialization is a latin hypercube design, but\n        # there are other population initializations possible.\n        self.num_population_members = popsize * self.parameter_count\n\n        self.population_shape = (self.num_population_members,\n                                 self.parameter_count)\n\n        self._nfev = 0\n        if init == 'latinhypercube':\n            self.init_population_lhs()\n        elif init == 'random':\n            self.init_population_random()\n        else:\n            raise ValueError(\"The population initialization method must be one\"\n                             \" of 'latinhypercube' or 'random'\")\n\n        self.disp = disp\n\n    def init_population_lhs(self):\n        \"\"\"\n        Initializes the population with Latin Hypercube Sampling.\n        Latin Hypercube Sampling ensures that each parameter is uniformly\n        sampled over its range.\n        \"\"\"\n        rng = self.random_number_generator\n\n        # Each parameter range needs to be sampled uniformly. 
The scaled\n # parameter range ([0, 1)) needs to be split into\n # `self.num_population_members` segments, each of which has the following\n # size:\n segsize = 1.0 / self.num_population_members\n\n # Within each segment we sample from a uniform random distribution.\n # We need to do this sampling for each parameter.\n samples = (segsize * rng.random_sample(self.population_shape)\n\n # Offset each segment to cover the entire parameter range [0, 1)\n + np.linspace(0., 1., self.num_population_members,\n endpoint=False)[:, np.newaxis])\n\n # Create an array for population of candidate solutions.\n self.population = np.zeros_like(samples)\n\n # Initialize population of candidate solutions by permutation of the\n # random samples.\n for j in range(self.parameter_count):\n order = rng.permutation(range(self.num_population_members))\n self.population[:, j] = samples[order, j]\n\n # reset population energies\n self.population_energies = (np.ones(self.num_population_members) *\n np.inf)\n\n # reset number of function evaluations counter\n self._nfev = 0\n\n def init_population_random(self):\n \"\"\"\n Initialises the population at random. This type of initialization\n can possess clustering, Latin Hypercube sampling is generally better.\n \"\"\"\n rng = self.random_number_generator\n self.population = rng.random_sample(self.population_shape)\n\n # reset population energies\n self.population_energies = (np.ones(self.num_population_members) *\n np.inf)\n\n # reset number of function evaluations counter\n self._nfev = 0\n\n @property\n def x(self):\n \"\"\"\n The best solution from the solver\n\n Returns\n -------\n x : ndarray\n The best solution from the solver.\n \"\"\"\n return self._scale_parameters(self.population[0])\n\n @property\n def convergence(self):\n \"\"\"\n The standard deviation of the population energies divided by their\n mean.\n \"\"\"\n return (np.std(self.population_energies) /\n np.abs(np.mean(self.population_energies) + _MACHEPS))\n\n def solve(self):\n \"\"\"\n Runs the DifferentialEvolutionSolver.\n\n Returns\n -------\n res : OptimizeResult\n The optimization result represented as a ``OptimizeResult`` object.\n Important attributes are: ``x`` the solution array, ``success`` a\n Boolean flag indicating if the optimizer exited successfully and\n ``message`` which describes the cause of the termination. See\n `OptimizeResult` for a description of other attributes. If `polish`\n was employed, and a lower minimum was obtained by the polishing,\n then OptimizeResult also contains the ``jac`` attribute.\n \"\"\"\n nit, warning_flag = 0, False\n status_message = _status_message['success']\n\n # The population may have just been initialized (all entries are\n # np.inf). If it has you have to calculate the initial energies.\n # Although this is also done in the evolve generator it's possible\n # that someone can set maxiter=0, at which point we still want the\n # initial energies to be calculated (the following loop isn't run).\n if np.all(np.isinf(self.population_energies)):\n self._calculate_population_energies()\n\n # do the optimisation.\n for nit in range(1, self.maxiter + 1):\n # evolve the population by a generation\n try:\n next(self)\n except StopIteration:\n warning_flag = True\n status_message = _status_message['maxfev']\n break\n\n if self.disp:\n print(\"differential_evolution step %d: f(x)= %g\"\n % (nit,\n self.population_energies[0]))\n\n # stop when the fractional s.d. 
of the population is less than tol\n # of the mean energy\n convergence = self.convergence\n\n if (self.callback and\n self.callback(self._scale_parameters(self.population[0]),\n convergence=self.tol / convergence) is True):\n\n warning_flag = True\n status_message = ('callback function requested stop early '\n 'by returning True')\n break\n\n if convergence < self.tol or warning_flag:\n break\n\n else:\n status_message = _status_message['maxiter']\n warning_flag = True\n\n DE_result = OptimizeResult(\n x=self.x,\n fun=self.population_energies[0],\n nfev=self._nfev,\n nit=nit,\n message=status_message,\n success=(warning_flag is not True))\n\n if self.polish:\n result = minimize(self.func,\n np.copy(DE_result.x),\n method='L-BFGS-B',\n bounds=self.limits.T,\n args=self.args)\n\n self._nfev += result.nfev\n DE_result.nfev = self._nfev\n\n if result.fun < DE_result.fun:\n DE_result.fun = result.fun\n DE_result.x = result.x\n DE_result.jac = result.jac\n # to keep internal state consistent\n self.population_energies[0] = result.fun\n self.population[0] = self._unscale_parameters(result.x)\n\n return DE_result\n\n def _calculate_population_energies(self):\n \"\"\"\n Calculate the energies of all the population members at the same time.\n Puts the best member in first place. Useful if the population has just\n been initialised.\n \"\"\"\n for index, candidate in enumerate(self.population):\n if self._nfev > self.maxfun:\n break\n\n parameters = self._scale_parameters(candidate)\n self.population_energies[index] = self.func(parameters,\n *self.args)\n self._nfev += 1\n\n minval = np.argmin(self.population_energies)\n\n # put the lowest energy into the best solution position.\n lowest_energy = self.population_energies[minval]\n self.population_energies[minval] = self.population_energies[0]\n self.population_energies[0] = lowest_energy\n\n self.population[[0, minval], :] = self.population[[minval, 0], :]\n\n def __iter__(self):\n return self\n\n def __next__(self):\n \"\"\"\n Evolve the population by a single generation\n\n Returns\n -------\n x : ndarray\n The best solution from the solver.\n fun : float\n Value of objective function obtained from the best solution.\n \"\"\"\n # the population may have just been initialized (all entries are\n # np.inf). 
If it has you have to calculate the initial energies\n if np.all(np.isinf(self.population_energies)):\n self._calculate_population_energies()\n\n if self.dither is not None:\n self.scale = (self.random_number_generator.rand()\n * (self.dither[1] - self.dither[0]) + self.dither[0])\n\n for candidate in range(self.num_population_members):\n if self._nfev > self.maxfun:\n raise StopIteration\n\n # create a trial solution\n trial = self._mutate(candidate)\n\n # ensuring that it's in the range [0, 1)\n self._ensure_constraint(trial)\n\n # scale from [0, 1) to the actual parameter value\n parameters = self._scale_parameters(trial)\n\n # determine the energy of the objective function\n energy = self.func(parameters, *self.args)\n self._nfev += 1\n\n # if the energy of the trial candidate is lower than the\n # original population member then replace it\n if energy < self.population_energies[candidate]:\n self.population[candidate] = trial\n self.population_energies[candidate] = energy\n\n # if the trial candidate also has a lower energy than the\n # best solution then replace that as well\n if energy < self.population_energies[0]:\n self.population_energies[0] = energy\n self.population[0] = trial\n\n return self.x, self.population_energies[0]\n\n def next(self):\n \"\"\"\n Evolve the population by a single generation\n\n Returns\n -------\n x : ndarray\n The best solution from the solver.\n fun : float\n Value of objective function obtained from the best solution.\n \"\"\"\n # next() is required for compatibility with Python2.7.\n return self.__next__()\n\n def _scale_parameters(self, trial):\n \"\"\"\n scale from a number between 0 and 1 to parameters.\n \"\"\"\n return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2\n\n def _unscale_parameters(self, parameters):\n \"\"\"\n scale from parameters to a number between 0 and 1.\n \"\"\"\n return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5\n\n def _ensure_constraint(self, trial):\n \"\"\"\n make sure the parameters lie between the limits\n \"\"\"\n for index, param in enumerate(trial):\n if param > 1 or param < 0:\n trial[index] = self.random_number_generator.rand()\n\n def _mutate(self, candidate):\n \"\"\"\n create a trial vector based on a mutation strategy\n \"\"\"\n trial = np.copy(self.population[candidate])\n\n rng = self.random_number_generator\n\n fill_point = rng.randint(0, self.parameter_count)\n\n if (self.strategy == 'randtobest1exp' or\n self.strategy == 'randtobest1bin'):\n bprime = self.mutation_func(candidate,\n self._select_samples(candidate, 5))\n else:\n bprime = self.mutation_func(self._select_samples(candidate, 5))\n\n if self.strategy in self._binomial:\n crossovers = rng.rand(self.parameter_count)\n crossovers = crossovers < self.cross_over_probability\n # the last one is always from the bprime vector for binomial\n # If you fill in modulo with a loop you have to set the last one to\n # true. 
If you don't use a loop then you can have any random entry\n # be True.\n crossovers[fill_point] = True\n trial = np.where(crossovers, bprime, trial)\n return trial\n\n elif self.strategy in self._exponential:\n i = 0\n while (i < self.parameter_count and\n rng.rand() < self.cross_over_probability):\n\n trial[fill_point] = bprime[fill_point]\n fill_point = (fill_point + 1) % self.parameter_count\n i += 1\n\n return trial\n\n def _best1(self, samples):\n \"\"\"\n best1bin, best1exp\n \"\"\"\n r0, r1 = samples[:2]\n return (self.population[0] + self.scale *\n (self.population[r0] - self.population[r1]))\n\n def _rand1(self, samples):\n \"\"\"\n rand1bin, rand1exp\n \"\"\"\n r0, r1, r2 = samples[:3]\n return (self.population[r0] + self.scale *\n (self.population[r1] - self.population[r2]))\n\n def _randtobest1(self, candidate, samples):\n \"\"\"\n randtobest1bin, randtobest1exp\n \"\"\"\n r0, r1 = samples[:2]\n bprime = np.copy(self.population[candidate])\n bprime += self.scale * (self.population[0] - bprime)\n bprime += self.scale * (self.population[r0] -\n self.population[r1])\n return bprime\n\n def _best2(self, samples):\n \"\"\"\n best2bin, best2exp\n \"\"\"\n r0, r1, r2, r3 = samples[:4]\n bprime = (self.population[0] + self.scale *\n (self.population[r0] + self.population[r1] -\n self.population[r2] - self.population[r3]))\n\n return bprime\n\n def _rand2(self, samples):\n \"\"\"\n rand2bin, rand2exp\n \"\"\"\n r0, r1, r2, r3, r4 = samples\n bprime = (self.population[r0] + self.scale *\n (self.population[r1] + self.population[r2] -\n self.population[r3] - self.population[r4]))\n\n return bprime\n\n def _select_samples(self, candidate, number_samples):\n \"\"\"\n obtain random integers from range(self.num_population_members),\n without replacement. You can't have the original candidate either.\n \"\"\"\n idxs = list(range(self.num_population_members))\n idxs.remove(candidate)\n self.random_number_generator.shuffle(idxs)\n idxs = idxs[:number_samples]\n return idxs\n\n\ndef _make_random_gen(seed):\n \"\"\"Turn seed into a np.random.RandomState instance\n\n If seed is None, return the RandomState singleton used by np.random.\n If seed is an int, return a new RandomState instance seeded with seed.\n If seed is already a RandomState instance, return it.\n Otherwise raise ValueError.\n \"\"\"\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)\n",
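To make the 'best1bin' recipe from the Notes section above concrete, here is a minimal NumPy sketch of a single trial construction: mutate the best member with a scaled difference of two random members, then apply binomial crossover against the original candidate. The population size, dimensionality, and variable names are illustrative, and the real solver additionally re-draws any out-of-range trial parameters (see _ensure_constraint).

import numpy as np

rng = np.random.RandomState(0)
population = rng.random_sample((10, 4))   # 10 members, 4 parameters, scaled to [0, 1)
mutation, recombination = 0.8, 0.7
candidate = 3

# b' = b0 + mutation * (population[r0] - population[r1]), with r0, r1
# drawn without replacement and distinct from the candidate
r0, r1 = rng.choice([i for i in range(10) if i != candidate], 2, replace=False)
bprime = population[0] + mutation * (population[r0] - population[r1])

# binomial crossover: take each parameter from bprime with probability
# `recombination`; one randomly chosen position always comes from bprime
crossovers = rng.rand(4) < recombination
crossovers[rng.randint(4)] = True
trial = np.where(crossovers, bprime, population[candidate])
print(trial)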
"\"\"\"\nThis module defines export functions for decision trees.\n\"\"\"\n\n# Authors: Gilles Louppe <[email protected]>\n# Peter Prettenhofer <[email protected]>\n# Brian Holt <[email protected]>\n# Noel Dawe <[email protected]>\n# Satrajit Gosh <[email protected]>\n# Trevor Stephens <[email protected]>\n# Licence: BSD 3 clause\n\nimport numpy as np\n\nfrom ..externals import six\n\nfrom . import _criterion\nfrom . import _tree\n\n\ndef _color_brew(n):\n \"\"\"Generate n colors with equally spaced hues.\n\n Parameters\n ----------\n n : int\n The number of colors required.\n\n Returns\n -------\n color_list : list, length n\n List of n tuples of form (R, G, B) being the components of each color.\n \"\"\"\n color_list = []\n\n # Initialize saturation & value; calculate chroma & value shift\n s, v = 0.75, 0.9\n c = s * v\n m = v - c\n\n for h in np.arange(25, 385, 360. / n).astype(int):\n # Calculate some intermediate values\n h_bar = h / 60.\n x = c * (1 - abs((h_bar % 2) - 1))\n # Initialize RGB with same hue & chroma as our color\n rgb = [(c, x, 0),\n (x, c, 0),\n (0, c, x),\n (0, x, c),\n (x, 0, c),\n (c, 0, x),\n (c, x, 0)]\n r, g, b = rgb[int(h_bar)]\n # Shift the initial RGB values to match value and store\n rgb = [(int(255 * (r + m))),\n (int(255 * (g + m))),\n (int(255 * (b + m)))]\n color_list.append(rgb)\n\n return color_list\n\n\ndef export_graphviz(decision_tree, out_file=\"tree.dot\", max_depth=None,\n feature_names=None, class_names=None, label='all',\n filled=False, leaves_parallel=False, impurity=True,\n node_ids=False, proportion=False, rotate=False,\n rounded=False, special_characters=False):\n \"\"\"Export a decision tree in DOT format.\n\n This function generates a GraphViz representation of the decision tree,\n which is then written into `out_file`. Once exported, graphical renderings\n can be generated using, for example::\n\n $ dot -Tps tree.dot -o tree.ps (PostScript format)\n $ dot -Tpng tree.dot -o tree.png (PNG format)\n\n The sample counts that are shown are weighted with any sample_weights that\n might be present.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n decision_tree : decision tree classifier\n The decision tree to be exported to GraphViz.\n\n out_file : file object or string, optional (default=\"tree.dot\")\n Handle or name of the output file.\n\n max_depth : int, optional (default=None)\n The maximum depth of the representation. 
If None, the tree is fully\n generated.\n\n feature_names : list of strings, optional (default=None)\n Names of each of the features.\n\n class_names : list of strings, bool or None, optional (default=None)\n Names of each of the target classes in ascending numerical order.\n Only relevant for classification and not supported for multi-output.\n If ``True``, shows a symbolic representation of the class name.\n\n label : {'all', 'root', 'none'}, optional (default='all')\n Whether to show informative labels for impurity, etc.\n Options include 'all' to show at every node, 'root' to show only at\n the top root node, or 'none' to not show at any node.\n\n filled : bool, optional (default=False)\n When set to ``True``, paint nodes to indicate majority class for\n classification, extremity of values for regression, or purity of node\n for multi-output.\n\n leaves_parallel : bool, optional (default=False)\n When set to ``True``, draw all leaf nodes at the bottom of the tree.\n\n impurity : bool, optional (default=True)\n When set to ``True``, show the impurity at each node.\n\n node_ids : bool, optional (default=False)\n When set to ``True``, show the ID number on each node.\n\n proportion : bool, optional (default=False)\n When set to ``True``, change the display of 'values' and/or 'samples'\n to be proportions and percentages respectively.\n\n rotate : bool, optional (default=False)\n When set to ``True``, orient tree left to right rather than top-down.\n\n rounded : bool, optional (default=False)\n When set to ``True``, draw node boxes with rounded corners and use\n Helvetica fonts instead of Times-Roman.\n\n special_characters : bool, optional (default=False)\n When set to ``False``, ignore special characters for PostScript\n compatibility.\n\n Examples\n --------\n >>> from sklearn.datasets import load_iris\n >>> from sklearn import tree\n\n >>> clf = tree.DecisionTreeClassifier()\n >>> iris = load_iris()\n\n >>> clf = clf.fit(iris.data, iris.target)\n >>> tree.export_graphviz(clf,\n ... 
out_file='tree.dot') # doctest: +SKIP\n \"\"\"\n\n def get_color(value):\n # Find the appropriate color & intensity for a node\n if colors['bounds'] is None:\n # Classification tree\n color = list(colors['rgb'][np.argmax(value)])\n sorted_values = sorted(value, reverse=True)\n alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /\n (1 - sorted_values[1]), 0))\n else:\n # Regression tree or multi-output\n color = list(colors['rgb'][0])\n alpha = int(np.round(255 * ((value - colors['bounds'][0]) /\n (colors['bounds'][1] -\n colors['bounds'][0])), 0))\n\n # Return html color code in #RRGGBBAA format\n color.append(alpha)\n hex_codes = [str(i) for i in range(10)]\n hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])\n color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]\n\n return '#' + ''.join(color)\n\n def node_to_str(tree, node_id, criterion):\n # Generate the node content string\n if tree.n_outputs == 1:\n value = tree.value[node_id][0, :]\n else:\n value = tree.value[node_id]\n\n # Should labels be shown?\n labels = (label == 'root' and node_id == 0) or label == 'all'\n\n # PostScript compatibility for special characters\n if special_characters:\n characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']\n node_string = '<'\n else:\n characters = ['#', '[', ']', '<=', '\\\\n', '\"']\n node_string = '\"'\n\n # Write node ID\n if node_ids:\n if labels:\n node_string += 'node '\n node_string += characters[0] + str(node_id) + characters[4]\n\n # Write decision criteria\n if tree.children_left[node_id] != _tree.TREE_LEAF:\n # Always write node decision criteria, except for leaves\n if feature_names is not None:\n feature = feature_names[tree.feature[node_id]]\n else:\n feature = \"X%s%s%s\" % (characters[1],\n tree.feature[node_id],\n characters[2])\n node_string += '%s %s %s%s' % (feature,\n characters[3],\n round(tree.threshold[node_id], 4),\n characters[4])\n\n # Write impurity\n if impurity:\n if isinstance(criterion, _criterion.FriedmanMSE):\n criterion = \"friedman_mse\"\n elif not isinstance(criterion, six.string_types):\n criterion = \"impurity\"\n if labels:\n node_string += '%s = ' % criterion\n node_string += (str(round(tree.impurity[node_id], 4)) +\n characters[4])\n\n # Write node sample count\n if labels:\n node_string += 'samples = '\n if proportion:\n percent = (100. 
* tree.n_node_samples[node_id] /\n float(tree.n_node_samples[0]))\n node_string += (str(round(percent, 1)) + '%' +\n characters[4])\n else:\n node_string += (str(tree.n_node_samples[node_id]) +\n characters[4])\n\n # Write node class distribution / regression value\n if proportion and tree.n_classes[0] != 1:\n # For classification this will show the proportion of samples\n value = value / tree.weighted_n_node_samples[node_id]\n if labels:\n node_string += 'value = '\n if tree.n_classes[0] == 1:\n # Regression\n value_text = np.around(value, 4)\n elif proportion:\n # Classification\n value_text = np.around(value, 2)\n elif np.all(np.equal(np.mod(value, 1), 0)):\n # Classification without floating-point weights\n value_text = value.astype(int)\n else:\n # Classification with floating-point weights\n value_text = np.around(value, 4)\n # Strip whitespace\n value_text = str(value_text.astype('S32')).replace(\"b'\", \"'\")\n value_text = value_text.replace(\"' '\", \", \").replace(\"'\", \"\")\n if tree.n_classes[0] == 1 and tree.n_outputs == 1:\n value_text = value_text.replace(\"[\", \"\").replace(\"]\", \"\")\n value_text = value_text.replace(\"\\n \", characters[4])\n node_string += value_text + characters[4]\n\n # Write node majority class\n if (class_names is not None and\n tree.n_classes[0] != 1 and\n tree.n_outputs == 1):\n # Only done for single-output classification trees\n if labels:\n node_string += 'class = '\n if class_names is not True:\n class_name = class_names[np.argmax(value)]\n else:\n class_name = \"y%s%s%s\" % (characters[1],\n np.argmax(value),\n characters[2])\n node_string += class_name\n\n # Clean up any trailing newlines\n if node_string[-2:] == '\\\\n':\n node_string = node_string[:-2]\n if node_string[-5:] == '<br/>':\n node_string = node_string[:-5]\n\n return node_string + characters[5]\n\n def recurse(tree, node_id, criterion, parent=None, depth=0):\n if node_id == _tree.TREE_LEAF:\n raise ValueError(\"Invalid node_id %s\" % _tree.TREE_LEAF)\n\n left_child = tree.children_left[node_id]\n right_child = tree.children_right[node_id]\n\n # Add node with description\n if max_depth is None or depth <= max_depth:\n\n # Collect ranks for 'leaf' option in plot_options\n if left_child == _tree.TREE_LEAF:\n ranks['leaves'].append(str(node_id))\n elif str(depth) not in ranks:\n ranks[str(depth)] = [str(node_id)]\n else:\n ranks[str(depth)].append(str(node_id))\n\n out_file.write('%d [label=%s'\n % (node_id,\n node_to_str(tree, node_id, criterion)))\n\n if filled:\n # Fetch appropriate color for node\n if 'rgb' not in colors:\n # Initialize colors and bounds if required\n colors['rgb'] = _color_brew(tree.n_classes[0])\n if tree.n_outputs != 1:\n # Find max and min impurities for multi-output\n colors['bounds'] = (np.min(-tree.impurity),\n np.max(-tree.impurity))\n elif tree.n_classes[0] == 1:\n # Find max and min values in leaf nodes for regression\n colors['bounds'] = (np.min(tree.value),\n np.max(tree.value))\n if tree.n_outputs == 1:\n node_val = (tree.value[node_id][0, :] /\n tree.weighted_n_node_samples[node_id])\n if tree.n_classes[0] == 1:\n # Regression\n node_val = tree.value[node_id][0, :]\n else:\n # If multi-output color node by impurity\n node_val = -tree.impurity[node_id]\n out_file.write(', fillcolor=\"%s\"' % get_color(node_val))\n out_file.write('] ;\\n')\n\n if parent is not None:\n # Add edge to parent\n out_file.write('%d -> %d' % (parent, node_id))\n if parent == 0:\n # Draw True/False labels if parent is root node\n angles = np.array([45, -45]) * ((rotate 
- .5) * -2)\n out_file.write(' [labeldistance=2.5, labelangle=')\n if node_id == 1:\n out_file.write('%d, headlabel=\"True\"]' % angles[0])\n else:\n out_file.write('%d, headlabel=\"False\"]' % angles[1])\n out_file.write(' ;\\n')\n\n if left_child != _tree.TREE_LEAF:\n recurse(tree, left_child, criterion=criterion, parent=node_id,\n depth=depth + 1)\n recurse(tree, right_child, criterion=criterion, parent=node_id,\n depth=depth + 1)\n\n else:\n ranks['leaves'].append(str(node_id))\n\n out_file.write('%d [label=\"(...)\"' % node_id)\n if filled:\n # color cropped nodes grey\n out_file.write(', fillcolor=\"#C0C0C0\"')\n out_file.write('] ;\\n' % node_id)\n\n if parent is not None:\n # Add edge to parent\n out_file.write('%d -> %d ;\\n' % (parent, node_id))\n\n own_file = False\n try:\n if isinstance(out_file, six.string_types):\n if six.PY3:\n out_file = open(out_file, \"w\", encoding=\"utf-8\")\n else:\n out_file = open(out_file, \"wb\")\n own_file = True\n\n # The depth of each node for plotting with 'leaf' option\n ranks = {'leaves': []}\n # The colors to render each node with\n colors = {'bounds': None}\n\n out_file.write('digraph Tree {\\n')\n\n # Specify node aesthetics\n out_file.write('node [shape=box')\n rounded_filled = []\n if filled:\n rounded_filled.append('filled')\n if rounded:\n rounded_filled.append('rounded')\n if len(rounded_filled) > 0:\n out_file.write(', style=\"%s\", color=\"black\"'\n % \", \".join(rounded_filled))\n if rounded:\n out_file.write(', fontname=helvetica')\n out_file.write('] ;\\n')\n\n # Specify graph & edge aesthetics\n if leaves_parallel:\n out_file.write('graph [ranksep=equally, splines=polyline] ;\\n')\n if rounded:\n out_file.write('edge [fontname=helvetica] ;\\n')\n if rotate:\n out_file.write('rankdir=LR ;\\n')\n\n # Now recurse the tree and add node & edge attributes\n if isinstance(decision_tree, _tree.Tree):\n recurse(decision_tree, 0, criterion=\"impurity\")\n else:\n recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)\n\n # If required, draw leaf nodes at same depth as each other\n if leaves_parallel:\n for rank in sorted(ranks):\n out_file.write(\"{rank=same ; \" +\n \"; \".join(r for r in ranks[rank]) + \"} ;\\n\")\n out_file.write(\"}\")\n\n finally:\n if own_file:\n out_file.close()\n",
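Building on the doctest above, a typical end-to-end export with the documented filled and rounded options might look like the sketch below; the max_depth value and output filename are arbitrary choices, and rendering the resulting .dot file still requires GraphViz (e.g. dot -Tpng iris_tree.dot -o iris_tree.png).

from sklearn.datasets import load_iris
from sklearn import tree

iris = load_iris()
clf = tree.DecisionTreeClassifier(max_depth=3).fit(iris.data, iris.target)

# colour nodes by majority class and draw rounded node boxes
tree.export_graphviz(clf, out_file="iris_tree.dot",
                     feature_names=iris.feature_names,
                     class_names=iris.target_names,
                     filled=True, rounded=True)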
"\"\"\"Gradient Boosted Regression Trees\n\nThis module contains methods for fitting gradient boosted regression trees for\nboth classification and regression.\n\nThe module structure is the following:\n\n- The ``BaseGradientBoosting`` base class implements a common ``fit`` method\n for all the estimators in the module. Regression and classification\n only differ in the concrete ``LossFunction`` used.\n\n- ``GradientBoostingClassifier`` implements gradient boosting for\n classification problems.\n\n- ``GradientBoostingRegressor`` implements gradient boosting for\n regression problems.\n\"\"\"\n\n# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,\n# Arnaud Joly, Jacob Schreiber\n# License: BSD 3 clause\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nfrom .base import BaseEnsemble\nfrom ..base import BaseEstimator\nfrom ..base import ClassifierMixin\nfrom ..base import RegressorMixin\nfrom ..externals import six\nfrom ..feature_selection.from_model import _LearntSelectorMixin\n\nfrom ._gradient_boosting import predict_stages\nfrom ._gradient_boosting import predict_stage\nfrom ._gradient_boosting import _random_sample_mask\n\nimport numbers\nimport numpy as np\n\nfrom scipy import stats\nfrom scipy.sparse import csc_matrix\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import issparse\n\nfrom time import time\nfrom ..tree.tree import DecisionTreeRegressor\nfrom ..tree._tree import DTYPE\nfrom ..tree._tree import TREE_LEAF\n\nfrom ..utils import check_random_state \nfrom ..utils import check_array\nfrom ..utils import check_X_y\nfrom ..utils import column_or_1d\nfrom ..utils import check_consistent_length\nfrom ..utils import deprecated\nfrom ..utils.extmath import logsumexp\nfrom ..utils.fixes import expit\nfrom ..utils.fixes import bincount\nfrom ..utils.stats import _weighted_percentile\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import NotFittedError\nfrom ..utils.multiclass import check_classification_targets\n\n\nclass QuantileEstimator(BaseEstimator):\n \"\"\"An estimator predicting the alpha-quantile of the training targets.\"\"\"\n def __init__(self, alpha=0.9):\n if not 0 < alpha < 1.0:\n raise ValueError(\"`alpha` must be in (0, 1.0) but was %r\" % alpha)\n self.alpha = alpha\n\n def fit(self, X, y, sample_weight=None):\n if sample_weight is None:\n self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)\n else:\n self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)\n\n def predict(self, X):\n check_is_fitted(self, 'quantile')\n\n y = np.empty((X.shape[0], 1), dtype=np.float64)\n y.fill(self.quantile)\n return y\n\n\nclass MeanEstimator(BaseEstimator):\n \"\"\"An estimator predicting the mean of the training targets.\"\"\"\n def fit(self, X, y, sample_weight=None):\n if sample_weight is None:\n self.mean = np.mean(y)\n else:\n self.mean = np.average(y, weights=sample_weight)\n\n def predict(self, X):\n check_is_fitted(self, 'mean')\n\n y = np.empty((X.shape[0], 1), dtype=np.float64)\n y.fill(self.mean)\n return y\n\n\nclass LogOddsEstimator(BaseEstimator):\n \"\"\"An estimator predicting the log odds ratio.\"\"\"\n scale = 1.0\n\n def fit(self, X, y, sample_weight=None):\n # pre-cond: pos, neg are encoded as 1, 0\n if sample_weight is None:\n pos = np.sum(y)\n neg = y.shape[0] - pos\n else:\n pos = np.sum(sample_weight * y)\n neg = np.sum(sample_weight * (1 - y))\n\n if neg == 0 or pos == 0:\n raise 
ValueError('y contains non binary labels.')\n self.prior = self.scale * np.log(pos / neg)\n\n def predict(self, X):\n check_is_fitted(self, 'prior')\n\n y = np.empty((X.shape[0], 1), dtype=np.float64)\n y.fill(self.prior)\n return y\n\n\nclass ScaledLogOddsEstimator(LogOddsEstimator):\n \"\"\"Log odds ratio scaled by 0.5 -- for exponential loss. \"\"\"\n scale = 0.5\n\n\nclass PriorProbabilityEstimator(BaseEstimator):\n \"\"\"An estimator predicting the probability of each\n class in the training data.\n \"\"\"\n def fit(self, X, y, sample_weight=None):\n if sample_weight is None:\n sample_weight = np.ones_like(y, dtype=np.float64)\n class_counts = bincount(y, weights=sample_weight)\n self.priors = class_counts / class_counts.sum()\n\n def predict(self, X):\n check_is_fitted(self, 'priors')\n\n y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)\n y[:] = self.priors\n return y\n\n\nclass ZeroEstimator(BaseEstimator):\n \"\"\"An estimator that simply predicts zero. \"\"\"\n\n def fit(self, X, y, sample_weight=None):\n if np.issubdtype(y.dtype, int):\n # classification\n self.n_classes = np.unique(y).shape[0]\n if self.n_classes == 2:\n self.n_classes = 1\n else:\n # regression\n self.n_classes = 1\n\n def predict(self, X):\n check_is_fitted(self, 'n_classes')\n\n y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)\n y.fill(0.0)\n return y\n\n\nclass LossFunction(six.with_metaclass(ABCMeta, object)):\n \"\"\"Abstract base class for various loss functions.\n\n Attributes\n ----------\n K : int\n The number of regression trees to be induced;\n 1 for regression and binary classification;\n ``n_classes`` for multi-class classification.\n \"\"\"\n\n is_multi_class = False\n\n def __init__(self, n_classes):\n self.K = n_classes\n\n def init_estimator(self):\n \"\"\"Default ``init`` estimator for loss function. \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def __call__(self, y, pred, sample_weight=None):\n \"\"\"Compute the loss of prediction ``pred`` and ``y``. \"\"\"\n\n @abstractmethod\n def negative_gradient(self, y, y_pred, **kargs):\n \"\"\"Compute the negative gradient.\n\n Parameters\n ---------\n y : np.ndarray, shape=(n,)\n The target labels.\n y_pred : np.ndarray, shape=(n,):\n The predictions.\n \"\"\"\n\n def update_terminal_regions(self, tree, X, y, residual, y_pred,\n sample_weight, sample_mask,\n learning_rate=1.0, k=0):\n \"\"\"Update the terminal regions (=leaves) of the given tree and\n updates the current predictions of the model. 
Traverses tree\n and invokes template method `_update_terminal_region`.\n\n Parameters\n ----------\n tree : tree.Tree\n The tree object.\n X : ndarray, shape=(n, m)\n The data array.\n y : ndarray, shape=(n,)\n The target labels.\n residual : ndarray, shape=(n,)\n The residuals (usually the negative gradient).\n y_pred : ndarray, shape=(n,)\n The predictions.\n sample_weight : ndarray, shape=(n,)\n The weight of each sample.\n sample_mask : ndarray, shape=(n,)\n The sample mask to be used.\n learning_rate : float, default=0.1\n learning rate shrinks the contribution of each tree by\n ``learning_rate``.\n k : int, default 0\n The index of the estimator being updated.\n\n \"\"\"\n # compute leaf for each sample in ``X``.\n terminal_regions = tree.apply(X)\n\n # mask all which are not in sample mask.\n masked_terminal_regions = terminal_regions.copy()\n masked_terminal_regions[~sample_mask] = -1\n\n # update each leaf (= perform line search)\n for leaf in np.where(tree.children_left == TREE_LEAF)[0]:\n self._update_terminal_region(tree, masked_terminal_regions,\n leaf, X, y, residual,\n y_pred[:, k], sample_weight)\n\n # update predictions (both in-bag and out-of-bag)\n y_pred[:, k] += (learning_rate\n * tree.value[:, 0, 0].take(terminal_regions, axis=0))\n\n @abstractmethod\n def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,\n residual, pred, sample_weight):\n \"\"\"Template method for updating terminal regions (=leaves). \"\"\"\n\n\nclass RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):\n \"\"\"Base class for regression loss functions. \"\"\"\n\n def __init__(self, n_classes):\n if n_classes != 1:\n raise ValueError(\"``n_classes`` must be 1 for regression but \"\n \"was %r\" % n_classes)\n super(RegressionLossFunction, self).__init__(n_classes)\n\n\nclass LeastSquaresError(RegressionLossFunction):\n \"\"\"Loss function for least squares (LS) estimation.\n Terminal regions need not to be updated for least squares. \"\"\"\n def init_estimator(self):\n return MeanEstimator()\n\n def __call__(self, y, pred, sample_weight=None):\n if sample_weight is None:\n return np.mean((y - pred.ravel()) ** 2.0)\n else:\n return (1.0 / sample_weight.sum() *\n np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))\n\n def negative_gradient(self, y, pred, **kargs):\n return y - pred.ravel()\n\n def update_terminal_regions(self, tree, X, y, residual, y_pred,\n sample_weight, sample_mask,\n learning_rate=1.0, k=0):\n \"\"\"Least squares does not need to update terminal regions.\n\n But it has to update the predictions.\n \"\"\"\n # update predictions\n y_pred[:, k] += learning_rate * tree.predict(X).ravel()\n\n def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,\n residual, pred, sample_weight):\n pass\n\n\nclass LeastAbsoluteError(RegressionLossFunction):\n \"\"\"Loss function for least absolute deviation (LAD) regression. \"\"\"\n def init_estimator(self):\n return QuantileEstimator(alpha=0.5)\n\n def __call__(self, y, pred, sample_weight=None):\n if sample_weight is None:\n return np.abs(y - pred.ravel()).mean()\n else:\n return (1.0 / sample_weight.sum() *\n np.sum(sample_weight * np.abs(y - pred.ravel())))\n\n def negative_gradient(self, y, pred, **kargs):\n \"\"\"1.0 if y - pred > 0.0 else -1.0\"\"\"\n pred = pred.ravel()\n return 2.0 * (y - pred > 0.0) - 1.0\n\n def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,\n residual, pred, sample_weight):\n \"\"\"LAD updates terminal regions to median estimates. 
\"\"\"\n terminal_region = np.where(terminal_regions == leaf)[0]\n sample_weight = sample_weight.take(terminal_region, axis=0)\n diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)\n tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)\n\n\nclass HuberLossFunction(RegressionLossFunction):\n \"\"\"Huber loss function for robust regression.\n\n M-Regression proposed in Friedman 2001.\n\n References\n ----------\n J. Friedman, Greedy Function Approximation: A Gradient Boosting\n Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.\n \"\"\"\n\n def __init__(self, n_classes, alpha=0.9):\n super(HuberLossFunction, self).__init__(n_classes)\n self.alpha = alpha\n self.gamma = None\n\n def init_estimator(self):\n return QuantileEstimator(alpha=0.5)\n\n def __call__(self, y, pred, sample_weight=None):\n pred = pred.ravel()\n diff = y - pred\n gamma = self.gamma\n if gamma is None:\n if sample_weight is None:\n gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)\n else:\n gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)\n\n gamma_mask = np.abs(diff) <= gamma\n if sample_weight is None:\n sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)\n lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))\n loss = (sq_loss + lin_loss) / y.shape[0]\n else:\n sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)\n lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *\n (np.abs(diff[~gamma_mask]) - gamma / 2.0))\n loss = (sq_loss + lin_loss) / sample_weight.sum()\n return loss\n\n def negative_gradient(self, y, pred, sample_weight=None, **kargs):\n pred = pred.ravel()\n diff = y - pred\n if sample_weight is None:\n gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)\n else:\n gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)\n gamma_mask = np.abs(diff) <= gamma\n residual = np.zeros((y.shape[0],), dtype=np.float64)\n residual[gamma_mask] = diff[gamma_mask]\n residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])\n self.gamma = gamma\n return residual\n\n def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,\n residual, pred, sample_weight):\n terminal_region = np.where(terminal_regions == leaf)[0]\n sample_weight = sample_weight.take(terminal_region, axis=0)\n gamma = self.gamma\n diff = (y.take(terminal_region, axis=0)\n - pred.take(terminal_region, axis=0))\n median = _weighted_percentile(diff, sample_weight, percentile=50)\n diff_minus_median = diff - median\n tree.value[leaf, 0] = median + np.mean(\n np.sign(diff_minus_median) *\n np.minimum(np.abs(diff_minus_median), gamma))\n\n\nclass QuantileLossFunction(RegressionLossFunction):\n \"\"\"Loss function for quantile regression.\n\n Quantile regression allows to estimate the percentiles\n of the conditional distribution of the target.\n \"\"\"\n\n def __init__(self, n_classes, alpha=0.9):\n super(QuantileLossFunction, self).__init__(n_classes)\n assert 0 < alpha < 1.0\n self.alpha = alpha\n self.percentile = alpha * 100.0\n\n def init_estimator(self):\n return QuantileEstimator(self.alpha)\n\n def __call__(self, y, pred, sample_weight=None):\n pred = pred.ravel()\n diff = y - pred\n alpha = self.alpha\n\n mask = y > pred\n if sample_weight is None:\n loss = (alpha * diff[mask].sum() +\n (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]\n else:\n loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +\n (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /\n 
sample_weight.sum())\n return loss\n\n def negative_gradient(self, y, pred, **kargs):\n alpha = self.alpha\n pred = pred.ravel()\n mask = y > pred\n return (alpha * mask) - ((1.0 - alpha) * ~mask)\n\n def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,\n residual, pred, sample_weight):\n terminal_region = np.where(terminal_regions == leaf)[0]\n diff = (y.take(terminal_region, axis=0)\n - pred.take(terminal_region, axis=0))\n sample_weight = sample_weight.take(terminal_region, axis=0)\n\n val = _weighted_percentile(diff, sample_weight, self.percentile)\n tree.value[leaf, 0] = val\n\n\nclass ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):\n \"\"\"Base class for classification loss functions. \"\"\"\n\n def _score_to_proba(self, score):\n \"\"\"Template method to convert scores to probabilities.\n\n If the loss does not support probabilities, a TypeError is raised.\n \"\"\"\n raise TypeError('%s does not support predict_proba' % type(self).__name__)\n\n @abstractmethod\n def _score_to_decision(self, score):\n \"\"\"Template method to convert scores to decisions.\n\n Returns int arrays.\n \"\"\"\n\n\nclass BinomialDeviance(ClassificationLossFunction):\n \"\"\"Binomial deviance loss function for binary classification.\n\n Binary classification is a special case; here, we only need to\n fit one tree instead of ``n_classes`` trees.\n \"\"\"\n def __init__(self, n_classes):\n if n_classes != 2:\n raise ValueError(\"{0:s} requires 2 classes.\".format(\n self.__class__.__name__))\n # we only need to fit one tree for binary clf.\n super(BinomialDeviance, self).__init__(1)\n\n def init_estimator(self):\n return LogOddsEstimator()\n\n def __call__(self, y, pred, sample_weight=None):\n \"\"\"Compute the deviance (= 2 * negative log-likelihood). \"\"\"\n # logaddexp(0, v) == log(1.0 + exp(v))\n pred = pred.ravel()\n if sample_weight is None:\n return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))\n else:\n return (-2.0 / sample_weight.sum() *\n np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))\n\n def negative_gradient(self, y, pred, **kargs):\n \"\"\"Compute the residual (= negative gradient). 
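For binomial deviance this equals ``y - expit(pred)``: the true\n label minus the predicted probability of the positive class. 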
\"\"\"\n return y - expit(pred.ravel())\n\n def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,\n residual, pred, sample_weight):\n \"\"\"Make a single Newton-Raphson step.\n\n our node estimate is given by:\n\n sum(w * (y - prob)) / sum(w * prob * (1 - prob))\n\n we take advantage that: y - prob = residual\n \"\"\"\n terminal_region = np.where(terminal_regions == leaf)[0]\n residual = residual.take(terminal_region, axis=0)\n y = y.take(terminal_region, axis=0)\n sample_weight = sample_weight.take(terminal_region, axis=0)\n\n numerator = np.sum(sample_weight * residual)\n denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))\n\n if denominator == 0.0:\n tree.value[leaf, 0, 0] = 0.0\n else:\n tree.value[leaf, 0, 0] = numerator / denominator\n\n def _score_to_proba(self, score):\n proba = np.ones((score.shape[0], 2), dtype=np.float64)\n proba[:, 1] = expit(score.ravel())\n proba[:, 0] -= proba[:, 1]\n return proba\n\n def _score_to_decision(self, score):\n proba = self._score_to_proba(score)\n return np.argmax(proba, axis=1)\n\n\nclass MultinomialDeviance(ClassificationLossFunction):\n \"\"\"Multinomial deviance loss function for multi-class classification.\n\n For multi-class classification we need to fit ``n_classes`` trees at\n each stage.\n \"\"\"\n\n is_multi_class = True\n\n def __init__(self, n_classes):\n if n_classes < 3:\n raise ValueError(\"{0:s} requires more than 2 classes.\".format(\n self.__class__.__name__))\n super(MultinomialDeviance, self).__init__(n_classes)\n\n def init_estimator(self):\n return PriorProbabilityEstimator()\n\n def __call__(self, y, pred, sample_weight=None):\n # create one-hot label encoding\n Y = np.zeros((y.shape[0], self.K), dtype=np.float64)\n for k in range(self.K):\n Y[:, k] = y == k\n\n if sample_weight is None:\n return np.sum(-1 * (Y * pred).sum(axis=1) +\n logsumexp(pred, axis=1))\n else:\n return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +\n logsumexp(pred, axis=1))\n\n def negative_gradient(self, y, pred, k=0, **kwargs):\n \"\"\"Compute negative gradient for the ``k``-th class. \"\"\"\n return y - np.nan_to_num(np.exp(pred[:, k] -\n logsumexp(pred, axis=1)))\n\n def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,\n residual, pred, sample_weight):\n \"\"\"Make a single Newton-Raphson step. 
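The leaf value is the weighted sum of residuals divided by the weighted\n sum of ``(y - residual) * (1 - y + residual)``, scaled by\n ``(K - 1) / K`` for the multinomial case. 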
\"\"\"\n terminal_region = np.where(terminal_regions == leaf)[0]\n residual = residual.take(terminal_region, axis=0)\n y = y.take(terminal_region, axis=0)\n sample_weight = sample_weight.take(terminal_region, axis=0)\n\n numerator = np.sum(sample_weight * residual)\n numerator *= (self.K - 1) / self.K\n\n denominator = np.sum(sample_weight * (y - residual) *\n (1.0 - y + residual))\n\n if denominator == 0.0:\n tree.value[leaf, 0, 0] = 0.0\n else:\n tree.value[leaf, 0, 0] = numerator / denominator\n\n def _score_to_proba(self, score):\n return np.nan_to_num(\n np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))\n\n def _score_to_decision(self, score):\n proba = self._score_to_proba(score)\n return np.argmax(proba, axis=1)\n\n\nclass ExponentialLoss(ClassificationLossFunction):\n \"\"\"Exponential loss function for binary classification.\n\n Same loss as AdaBoost.\n\n References\n ----------\n Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007\n \"\"\"\n def __init__(self, n_classes):\n if n_classes != 2:\n raise ValueError(\"{0:s} requires 2 classes.\".format(\n self.__class__.__name__))\n # we only need to fit one tree for binary clf.\n super(ExponentialLoss, self).__init__(1)\n\n def init_estimator(self):\n return ScaledLogOddsEstimator()\n\n def __call__(self, y, pred, sample_weight=None):\n pred = pred.ravel()\n if sample_weight is None:\n return np.mean(np.exp(-(2. * y - 1.) * pred))\n else:\n return (1.0 / sample_weight.sum() *\n np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))\n\n def negative_gradient(self, y, pred, **kargs):\n y_ = -(2. * y - 1.)\n return y_ * np.exp(y_ * pred.ravel())\n\n def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,\n residual, pred, sample_weight):\n terminal_region = np.where(terminal_regions == leaf)[0]\n pred = pred.take(terminal_region, axis=0)\n y = y.take(terminal_region, axis=0)\n sample_weight = sample_weight.take(terminal_region, axis=0)\n\n y_ = 2. 
* y - 1.\n\n numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))\n denominator = np.sum(sample_weight * np.exp(-y_ * pred))\n\n if denominator == 0.0:\n tree.value[leaf, 0, 0] = 0.0\n else:\n tree.value[leaf, 0, 0] = numerator / denominator\n\n def _score_to_proba(self, score):\n proba = np.ones((score.shape[0], 2), dtype=np.float64)\n proba[:, 1] = expit(2.0 * score.ravel())\n proba[:, 0] -= proba[:, 1]\n return proba\n\n def _score_to_decision(self, score):\n return (score.ravel() >= 0.0).astype(np.int)\n\n\nLOSS_FUNCTIONS = {'ls': LeastSquaresError,\n 'lad': LeastAbsoluteError,\n 'huber': HuberLossFunction,\n 'quantile': QuantileLossFunction,\n 'deviance': None, # for both, multinomial and binomial\n 'exponential': ExponentialLoss,\n }\n\n\nINIT_ESTIMATORS = {'zero': ZeroEstimator}\n\n\nclass VerboseReporter(object):\n \"\"\"Reports verbose output to stdout.\n\n If ``verbose==1`` output is printed once in a while (when iteration mod\n verbose_mod is zero).; if larger than 1 then output is printed for\n each update.\n \"\"\"\n\n def __init__(self, verbose):\n self.verbose = verbose\n\n def init(self, est, begin_at_stage=0):\n # header fields and line format str\n header_fields = ['Iter', 'Train Loss']\n verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']\n # do oob?\n if est.subsample < 1:\n header_fields.append('OOB Improve')\n verbose_fmt.append('{oob_impr:>16.4f}')\n header_fields.append('Remaining Time')\n verbose_fmt.append('{remaining_time:>16s}')\n\n # print the header line\n print(('%10s ' + '%16s ' *\n (len(header_fields) - 1)) % tuple(header_fields))\n\n self.verbose_fmt = ' '.join(verbose_fmt)\n # plot verbose info each time i % verbose_mod == 0\n self.verbose_mod = 1\n self.start_time = time()\n self.begin_at_stage = begin_at_stage\n\n def update(self, j, est):\n \"\"\"Update reporter with new iteration. \"\"\"\n do_oob = est.subsample < 1\n # we need to take into account if we fit additional estimators.\n i = j - self.begin_at_stage # iteration relative to the start iter\n if (i + 1) % self.verbose_mod == 0:\n oob_impr = est.oob_improvement_[j] if do_oob else 0\n remaining_time = ((est.n_estimators - (j + 1)) *\n (time() - self.start_time) / float(i + 1))\n if remaining_time > 60:\n remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)\n else:\n remaining_time = '{0:.2f}s'.format(remaining_time)\n print(self.verbose_fmt.format(iter=j + 1,\n train_score=est.train_score_[j],\n oob_impr=oob_impr,\n remaining_time=remaining_time))\n if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):\n # adjust verbose frequency (powers of 10)\n self.verbose_mod *= 10\n\n\nclass BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,\n _LearntSelectorMixin)):\n \"\"\"Abstract base class for Gradient Boosting. 
\"\"\"\n\n @abstractmethod\n def __init__(self, loss, learning_rate, n_estimators, min_samples_split,\n min_samples_leaf, min_weight_fraction_leaf,\n max_depth, init, subsample, max_features,\n random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,\n warm_start=False, presort='auto'):\n\n self.n_estimators = n_estimators\n self.learning_rate = learning_rate\n self.loss = loss\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.subsample = subsample\n self.max_features = max_features\n self.max_depth = max_depth\n self.init = init\n self.random_state = random_state\n self.alpha = alpha\n self.verbose = verbose\n self.max_leaf_nodes = max_leaf_nodes\n self.warm_start = warm_start\n self.presort = presort\n\n self.estimators_ = np.empty((0, 0), dtype=np.object)\n\n def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,\n random_state, X_idx_sorted, X_csc=None, X_csr=None):\n \"\"\"Fit another stage of ``n_classes_`` trees to the boosting model. \"\"\"\n\n assert sample_mask.dtype == np.bool\n loss = self.loss_\n original_y = y\n\n for k in range(loss.K):\n if loss.is_multi_class:\n y = np.array(original_y == k, dtype=np.float64)\n\n residual = loss.negative_gradient(y, y_pred, k=k,\n sample_weight=sample_weight)\n\n # induce regression tree on residuals\n tree = DecisionTreeRegressor(\n criterion='friedman_mse',\n splitter='best',\n max_depth=self.max_depth,\n min_samples_split=self.min_samples_split,\n min_samples_leaf=self.min_samples_leaf,\n min_weight_fraction_leaf=self.min_weight_fraction_leaf,\n max_features=self.max_features,\n max_leaf_nodes=self.max_leaf_nodes,\n random_state=random_state,\n presort=self.presort)\n\n if self.subsample < 1.0:\n # no inplace multiplication!\n sample_weight = sample_weight * sample_mask.astype(np.float64)\n\n if X_csc is not None:\n tree.fit(X_csc, residual, sample_weight=sample_weight,\n check_input=False, X_idx_sorted=X_idx_sorted)\n else:\n tree.fit(X, residual, sample_weight=sample_weight,\n check_input=False, X_idx_sorted=X_idx_sorted)\n\n # update tree leaves\n if X_csr is not None:\n loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,\n sample_weight, sample_mask,\n self.learning_rate, k=k)\n else:\n loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,\n sample_weight, sample_mask,\n self.learning_rate, k=k)\n\n # add tree to ensemble\n self.estimators_[i, k] = tree\n\n return y_pred\n\n def _check_params(self):\n \"\"\"Check validity of parameters and raise ValueError if not valid. \"\"\"\n if self.n_estimators <= 0:\n raise ValueError(\"n_estimators must be greater than 0 but \"\n \"was %r\" % self.n_estimators)\n\n if self.learning_rate <= 0.0:\n raise ValueError(\"learning_rate must be greater than 0 but \"\n \"was %r\" % self.learning_rate)\n\n if (self.loss not in self._SUPPORTED_LOSS\n or self.loss not in LOSS_FUNCTIONS):\n raise ValueError(\"Loss '{0:s}' not supported. 
\".format(self.loss))\n\n if self.loss == 'deviance':\n loss_class = (MultinomialDeviance\n if len(self.classes_) > 2\n else BinomialDeviance)\n else:\n loss_class = LOSS_FUNCTIONS[self.loss]\n\n if self.loss in ('huber', 'quantile'):\n self.loss_ = loss_class(self.n_classes_, self.alpha)\n else:\n self.loss_ = loss_class(self.n_classes_)\n\n if not (0.0 < self.subsample <= 1.0):\n raise ValueError(\"subsample must be in (0,1] but \"\n \"was %r\" % self.subsample)\n\n if self.init is not None:\n if isinstance(self.init, six.string_types):\n if self.init not in INIT_ESTIMATORS:\n raise ValueError('init=\"%s\" is not supported' % self.init)\n else:\n if (not hasattr(self.init, 'fit')\n or not hasattr(self.init, 'predict')):\n raise ValueError(\"init=%r must be valid BaseEstimator \"\n \"and support both fit and \"\n \"predict\" % self.init)\n\n if not (0.0 < self.alpha < 1.0):\n raise ValueError(\"alpha must be in (0.0, 1.0) but \"\n \"was %r\" % self.alpha)\n\n if isinstance(self.max_features, six.string_types):\n if self.max_features == \"auto\":\n # if is_classification\n if self.n_classes_ > 1:\n max_features = max(1, int(np.sqrt(self.n_features)))\n else:\n # is regression\n max_features = self.n_features\n elif self.max_features == \"sqrt\":\n max_features = max(1, int(np.sqrt(self.n_features)))\n elif self.max_features == \"log2\":\n max_features = max(1, int(np.log2(self.n_features)))\n else:\n raise ValueError(\"Invalid value for max_features: %r. \"\n \"Allowed string values are 'auto', 'sqrt' \"\n \"or 'log2'.\" % self.max_features)\n elif self.max_features is None:\n max_features = self.n_features\n elif isinstance(self.max_features, (numbers.Integral, np.integer)):\n max_features = self.max_features\n else: # float\n if 0. < self.max_features <= 1.:\n max_features = max(int(self.max_features * self.n_features), 1)\n else:\n raise ValueError(\"max_features must be in (0, n_features]\")\n\n self.max_features_ = max_features\n\n def _init_state(self):\n \"\"\"Initialize model state and allocate model state data structures. \"\"\"\n\n if self.init is None:\n self.init_ = self.loss_.init_estimator()\n elif isinstance(self.init, six.string_types):\n self.init_ = INIT_ESTIMATORS[self.init]()\n else:\n self.init_ = self.init\n\n self.estimators_ = np.empty((self.n_estimators, self.loss_.K),\n dtype=np.object)\n self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)\n # do oob?\n if self.subsample < 1.0:\n self.oob_improvement_ = np.zeros((self.n_estimators),\n dtype=np.float64)\n\n def _clear_state(self):\n \"\"\"Clear the state of the gradient boosting model. \"\"\"\n if hasattr(self, 'estimators_'):\n self.estimators_ = np.empty((0, 0), dtype=np.object)\n if hasattr(self, 'train_score_'):\n del self.train_score_\n if hasattr(self, 'oob_improvement_'):\n del self.oob_improvement_\n if hasattr(self, 'init_'):\n del self.init_\n\n def _resize_state(self):\n \"\"\"Add additional ``n_estimators`` entries to all attributes. 
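Used on warm starts to grow ``estimators_``, ``train_score_`` and,\n when subsampling is enabled, ``oob_improvement_`` in place. 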
\"\"\"\n # self.n_estimators is the number of additional estimators to fit\n total_n_estimators = self.n_estimators\n if total_n_estimators < self.estimators_.shape[0]:\n raise ValueError('resize with smaller n_estimators %d < %d' %\n (total_n_estimators, self.estimators_.shape[0]))\n\n self.estimators_.resize((total_n_estimators, self.loss_.K))\n self.train_score_.resize(total_n_estimators)\n if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):\n # if do oob resize arrays or create new if not available\n if hasattr(self, 'oob_improvement_'):\n self.oob_improvement_.resize(total_n_estimators)\n else:\n self.oob_improvement_ = np.zeros((total_n_estimators,),\n dtype=np.float64)\n\n def _is_initialized(self):\n return len(getattr(self, 'estimators_', [])) > 0\n\n def _check_initialized(self):\n \"\"\"Check that the estimator is initialized, raising an error if not.\"\"\"\n if self.estimators_ is None or len(self.estimators_) == 0:\n raise NotFittedError(\"Estimator not fitted, call `fit`\"\n \" before making predictions.\")\n\n def fit(self, X, y, sample_weight=None, monitor=None):\n \"\"\"Fit the gradient boosting model.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : array-like, shape = [n_samples]\n Target values (integers in classification, real numbers in\n regression)\n For classification, labels must correspond to classes.\n\n sample_weight : array-like, shape = [n_samples] or None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. In the case of\n classification, splits are also ignored if they would result in any\n single class carrying a negative weight in either child node.\n\n monitor : callable, optional\n The monitor is called after each iteration with the current\n iteration, a reference to the estimator and the local variables of\n ``_fit_stages`` as keyword arguments ``callable(i, self,\n locals())``. If the callable returns ``True`` the fitting procedure\n is stopped. 
The monitor can be used for various things such as\n computing held-out estimates, early stopping, model introspection, and\n snapshotting.\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n # if not warmstart - clear the estimator state\n if not self.warm_start:\n self._clear_state()\n\n # Check input\n X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)\n n_samples, self.n_features = X.shape\n if sample_weight is None:\n sample_weight = np.ones(n_samples, dtype=np.float32)\n else:\n sample_weight = column_or_1d(sample_weight, warn=True)\n\n check_consistent_length(X, y, sample_weight)\n\n y = self._validate_y(y)\n\n random_state = check_random_state(self.random_state)\n self._check_params()\n\n if not self._is_initialized():\n # init state\n self._init_state()\n\n # fit initial model - FIXME make sample_weight optional\n self.init_.fit(X, y, sample_weight)\n\n # init predictions\n y_pred = self.init_.predict(X)\n begin_at_stage = 0\n else:\n # add more estimators to fitted model\n # invariant: warm_start = True\n if self.n_estimators < self.estimators_.shape[0]:\n raise ValueError('n_estimators=%d must be larger or equal to '\n 'estimators_.shape[0]=%d when '\n 'warm_start==True'\n % (self.n_estimators,\n self.estimators_.shape[0]))\n begin_at_stage = self.estimators_.shape[0]\n y_pred = self._decision_function(X)\n self._resize_state()\n\n X_idx_sorted = None\n presort = self.presort\n # Allow presort to be 'auto', which means True if the dataset is dense,\n # otherwise it will be False.\n if presort == 'auto' and issparse(X):\n presort = False\n elif presort == 'auto':\n presort = True\n\n if presort:\n if issparse(X):\n raise ValueError(\"Presorting is not supported for sparse matrices.\")\n else:\n X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),\n dtype=np.int32)\n\n # fit the boosting stages\n n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,\n begin_at_stage, monitor, X_idx_sorted)\n # change shape of arrays after fit (early-stopping or additional ests)\n if n_stages != self.estimators_.shape[0]:\n self.estimators_ = self.estimators_[:n_stages]\n self.train_score_ = self.train_score_[:n_stages]\n if hasattr(self, 'oob_improvement_'):\n self.oob_improvement_ = self.oob_improvement_[:n_stages]\n\n return self\n\n def _fit_stages(self, X, y, y_pred, sample_weight, random_state,\n begin_at_stage=0, monitor=None, X_idx_sorted=None):\n \"\"\"Iteratively fits the stages.\n\n For each stage it computes the progress (OOB, train score)\n and delegates to ``_fit_stage``.\n Returns the number of stages fit; might differ from ``n_estimators``\n due to early stopping.\n \"\"\"\n n_samples = X.shape[0]\n do_oob = self.subsample < 1.0\n sample_mask = np.ones((n_samples, ), dtype=np.bool)\n n_inbag = max(1, int(self.subsample * n_samples))\n loss_ = self.loss_\n\n # Set min_weight_leaf from min_weight_fraction_leaf\n if self.min_weight_fraction_leaf != 0. 
and sample_weight is not None:\n min_weight_leaf = (self.min_weight_fraction_leaf *\n np.sum(sample_weight))\n else:\n min_weight_leaf = 0.\n\n if self.verbose:\n verbose_reporter = VerboseReporter(self.verbose)\n verbose_reporter.init(self, begin_at_stage)\n\n X_csc = csc_matrix(X) if issparse(X) else None\n X_csr = csr_matrix(X) if issparse(X) else None\n\n # perform boosting iterations\n i = begin_at_stage\n for i in range(begin_at_stage, self.n_estimators):\n\n # subsampling\n if do_oob:\n sample_mask = _random_sample_mask(n_samples, n_inbag,\n random_state)\n # OOB score before adding this stage\n old_oob_score = loss_(y[~sample_mask],\n y_pred[~sample_mask],\n sample_weight[~sample_mask])\n\n # fit next stage of trees\n y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,\n sample_mask, random_state, X_idx_sorted,\n X_csc, X_csr)\n\n # track deviance (= loss)\n if do_oob:\n self.train_score_[i] = loss_(y[sample_mask],\n y_pred[sample_mask],\n sample_weight[sample_mask])\n self.oob_improvement_[i] = (\n old_oob_score - loss_(y[~sample_mask],\n y_pred[~sample_mask],\n sample_weight[~sample_mask]))\n else:\n # no need to fancy index w/ no subsampling\n self.train_score_[i] = loss_(y, y_pred, sample_weight)\n\n if self.verbose > 0:\n verbose_reporter.update(i, self)\n\n if monitor is not None:\n early_stopping = monitor(i, self, locals())\n if early_stopping:\n break\n return i + 1\n\n def _make_estimator(self, append=True):\n # we don't need _make_estimator\n raise NotImplementedError()\n\n def _init_decision_function(self, X):\n \"\"\"Check input and compute prediction of ``init``. \"\"\"\n self._check_initialized()\n X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)\n if X.shape[1] != self.n_features:\n raise ValueError(\"X.shape[1] should be {0:d}, not {1:d}.\".format(\n self.n_features, X.shape[1]))\n score = self.init_.predict(X).astype(np.float64)\n return score\n\n def _decision_function(self, X):\n # for use in inner loop, not raveling the output in single-class case,\n # not doing input validation.\n score = self._init_decision_function(X)\n predict_stages(self.estimators_, X, self.learning_rate, score)\n return score\n\n @deprecated(\" and will be removed in 0.19\")\n def decision_function(self, X):\n \"\"\"Compute the decision function of ``X``.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n score : array, shape = [n_samples, n_classes] or [n_samples]\n The decision function of the input samples. The order of the\n classes corresponds to that in the attribute `classes_`.\n Regression and binary classification produce an array of shape\n [n_samples].\n \"\"\"\n\n self._check_initialized()\n X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)\n score = self._decision_function(X)\n if score.shape[1] == 1:\n return score.ravel()\n return score\n\n def _staged_decision_function(self, X):\n \"\"\"Compute decision function of ``X`` for each iteration.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n score : generator of array, shape = [n_samples, k]\n The decision function of the input samples. 
The order of the\n classes corresponds to that in the attribute `classes_`.\n Regression and binary classification are special cases with\n ``k == 1``, otherwise ``k==n_classes``.\n \"\"\"\n X = check_array(X, dtype=DTYPE, order=\"C\")\n score = self._init_decision_function(X)\n for i in range(self.estimators_.shape[0]):\n predict_stage(self.estimators_, i, X, self.learning_rate, score)\n yield score.copy()\n\n @deprecated(\" and will be removed in 0.19\")\n def staged_decision_function(self, X):\n \"\"\"Compute decision function of ``X`` for each iteration.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n score : generator of array, shape = [n_samples, k]\n The decision function of the input samples. The order of the\n classes corresponds to that in the attribute `classes_`.\n Regression and binary classification are special cases with\n ``k == 1``, otherwise ``k==n_classes``.\n \"\"\"\n for dec in self._staged_decision_function(X):\n # no yield from in Python2.X\n yield dec\n\n @property\n def feature_importances_(self):\n \"\"\"Return the feature importances (the higher, the more important the\n feature).\n\n Returns\n -------\n feature_importances_ : array, shape = [n_features]\n \"\"\"\n self._check_initialized()\n\n total_sum = np.zeros((self.n_features, ), dtype=np.float64)\n for stage in self.estimators_:\n stage_sum = sum(tree.feature_importances_\n for tree in stage) / len(stage)\n total_sum += stage_sum\n\n importances = total_sum / len(self.estimators_)\n return importances\n\n def _validate_y(self, y):\n self.n_classes_ = 1\n if y.dtype.kind == 'O':\n y = y.astype(np.float64)\n # Default implementation\n return y\n\n def apply(self, X):\n \"\"\"Apply trees in the ensemble to X, return leaf indices.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape = [n_samples, n_features]\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]\n For each datapoint x in X and for each tree in the ensemble,\n return the index of the leaf x ends up in in each estimator.\n In the case of binary classification n_classes is 1.\n \"\"\"\n\n self._check_initialized()\n X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)\n\n # n_classes will be equal to 1 in the binary classification or the\n # regression case.\n n_estimators, n_classes = self.estimators_.shape\n leaves = np.zeros((X.shape[0], n_estimators, n_classes))\n\n for i in range(n_estimators):\n for j in range(n_classes):\n estimator = self.estimators_[i, j]\n leaves[:, i, j] = estimator.apply(X, check_input=False)\n\n return leaves\n\n\nclass GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):\n \"\"\"Gradient Boosting for classification.\n\n GB builds an additive model in a\n forward stage-wise fashion; it allows for the optimization of\n arbitrary differentiable loss functions. In each stage ``n_classes_``\n regression trees are fit on the negative gradient of the\n binomial or multinomial deviance loss function. 
Binary classification\n is a special case where only a single regression tree is induced.\n\n Read more in the :ref:`User Guide <gradient_boosting>`.\n\n Parameters\n ----------\n loss : {'deviance', 'exponential'}, optional (default='deviance')\n loss function to be optimized. 'deviance' refers to\n deviance (= logistic regression) for classification\n with probabilistic outputs. For loss 'exponential' gradient\n boosting recovers the AdaBoost algorithm.\n\n learning_rate : float, optional (default=0.1)\n learning rate shrinks the contribution of each tree by `learning_rate`.\n There is a trade-off between learning_rate and n_estimators.\n\n n_estimators : int (default=100)\n The number of boosting stages to perform. Gradient boosting\n is fairly robust to over-fitting so a large number usually\n results in better performance.\n\n max_depth : integer, optional (default=3)\n maximum depth of the individual regression estimators. The maximum\n depth limits the number of nodes in the tree. Tune this parameter\n for best performance; the best value depends on the interaction\n of the input variables.\n Ignored if ``max_leaf_nodes`` is not None.\n\n min_samples_split : integer, optional (default=2)\n The minimum number of samples required to split an internal node.\n\n min_samples_leaf : integer, optional (default=1)\n The minimum number of samples required to be at a leaf node.\n\n min_weight_fraction_leaf : float, optional (default=0.)\n The minimum weighted fraction of the input samples required to be at a\n leaf node.\n\n subsample : float, optional (default=1.0)\n The fraction of samples to be used for fitting the individual base\n learners. If smaller than 1.0 this results in Stochastic Gradient\n Boosting. `subsample` interacts with the parameter `n_estimators`.\n Choosing `subsample < 1.0` leads to a reduction of variance\n and an increase in bias.\n\n max_features : int, float, string or None, optional (default=None)\n The number of features to consider when looking for the best split:\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a percentage and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=sqrt(n_features)`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Choosing `max_features < n_features` leads to a reduction of variance\n and an increase in bias.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n max_leaf_nodes : int or None, optional (default=None)\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n If not None then ``max_depth`` will be ignored.\n\n init : BaseEstimator, None, optional (default=None)\n An estimator object that is used to compute the initial\n predictions. ``init`` has to provide ``fit`` and ``predict``.\n If None it uses ``loss.init_estimator``.\n\n verbose : int, default: 0\n Enable verbose output. If 1 then it prints progress and performance\n once in a while (the more trees the lower the frequency). 
If greater\n than 1 then it prints progress and performance for every tree.\n\n warm_start : bool, default: False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just erase the\n previous solution.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n presort : bool or 'auto', optional (default='auto')\n Whether to presort the data to speed up the finding of best splits in\n fitting. Auto mode by default will use presorting on dense data and\n default to normal sorting on sparse data. Setting presort to true on\n sparse data will raise an error.\n\n .. versionadded:: 0.17\n *presort* parameter.\n\n Attributes\n ----------\n feature_importances_ : array, shape = [n_features]\n The feature importances (the higher, the more important the feature).\n\n oob_improvement_ : array, shape = [n_estimators]\n The improvement in loss (= deviance) on the out-of-bag samples\n relative to the previous iteration.\n ``oob_improvement_[0]`` is the improvement in\n loss of the first stage over the ``init`` estimator.\n\n train_score_ : array, shape = [n_estimators]\n The i-th score ``train_score_[i]`` is the deviance (= loss) of the\n model at iteration ``i`` on the in-bag sample.\n If ``subsample == 1`` this is the deviance on the training data.\n\n loss_ : LossFunction\n The concrete ``LossFunction`` object.\n\n init : BaseEstimator\n The estimator that provides the initial predictions.\n Set via the ``init`` argument or ``loss.init_estimator``.\n\n estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]\n The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary\n classification, otherwise n_classes.\n\n\n See also\n --------\n sklearn.tree.DecisionTreeClassifier, RandomForestClassifier\n AdaBoostClassifier\n\n References\n ----------\n J. Friedman, Greedy Function Approximation: A Gradient Boosting\n Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.\n\n J. Friedman, Stochastic Gradient Boosting, 1999\n\n T. Hastie, R. Tibshirani and J. Friedman.\n Elements of Statistical Learning Ed. 
2, Springer, 2009.\n \"\"\"\n\n _SUPPORTED_LOSS = ('deviance', 'exponential')\n\n def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,\n subsample=1.0, min_samples_split=2,\n min_samples_leaf=1, min_weight_fraction_leaf=0.,\n max_depth=3, init=None, random_state=None,\n max_features=None, verbose=0,\n max_leaf_nodes=None, warm_start=False,\n presort='auto'):\n\n super(GradientBoostingClassifier, self).__init__(\n loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_depth=max_depth, init=init, subsample=subsample,\n max_features=max_features,\n random_state=random_state, verbose=verbose,\n max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,\n presort=presort)\n\n def _validate_y(self, y):\n check_classification_targets(y)\n self.classes_, y = np.unique(y, return_inverse=True)\n self.n_classes_ = len(self.classes_)\n return y\n\n def decision_function(self, X):\n \"\"\"Compute the decision function of ``X``.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n score : array, shape = [n_samples, n_classes] or [n_samples]\n The decision function of the input samples. The order of the\n classes corresponds to that in the attribute `classes_`.\n Regression and binary classification produce an array of shape\n [n_samples].\n \"\"\"\n X = check_array(X, dtype=DTYPE, order=\"C\")\n score = self._decision_function(X)\n if score.shape[1] == 1:\n return score.ravel()\n return score\n\n def staged_decision_function(self, X):\n \"\"\"Compute decision function of ``X`` for each iteration.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n score : generator of array, shape = [n_samples, k]\n The decision function of the input samples. The order of the\n classes corresponds to that in the attribute `classes_`.\n Regression and binary classification are special cases with\n ``k == 1``, otherwise ``k==n_classes``.\n \"\"\"\n for dec in self._staged_decision_function(X):\n # no yield from in Python2.X\n yield dec\n\n def predict(self, X):\n \"\"\"Predict class for X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : array of shape = [n_samples]\n The predicted values.\n \"\"\"\n score = self.decision_function(X)\n decisions = self.loss_._score_to_decision(score)\n return self.classes_.take(decisions, axis=0)\n\n def staged_predict(self, X):\n \"\"\"Predict class at each stage for X.\n\n This method allows monitoring (i.e. 
determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : generator of array of shape = [n_samples]\n The predicted value of the input samples.\n \"\"\"\n for score in self._staged_decision_function(X):\n decisions = self.loss_._score_to_decision(score)\n yield self.classes_.take(decisions, axis=0)\n\n def predict_proba(self, X):\n \"\"\"Predict class probabilities for X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Raises\n ------\n AttributeError\n If the ``loss`` does not support probabilities.\n\n Returns\n -------\n p : array of shape = [n_samples]\n The class probabilities of the input samples. The order of the\n classes corresponds to that in the attribute `classes_`.\n \"\"\"\n score = self.decision_function(X)\n try:\n return self.loss_._score_to_proba(score)\n except NotFittedError:\n raise\n except AttributeError:\n raise AttributeError('loss=%r does not support predict_proba' %\n self.loss)\n\n def predict_log_proba(self, X):\n \"\"\"Predict class log-probabilities for X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Raises\n ------\n AttributeError\n If the ``loss`` does not support probabilities.\n\n Returns\n -------\n p : array of shape = [n_samples]\n The class log-probabilities of the input samples. The order of the\n classes corresponds to that in the attribute `classes_`.\n \"\"\"\n proba = self.predict_proba(X)\n return np.log(proba)\n\n def staged_predict_proba(self, X):\n \"\"\"Predict class probabilities at each stage for X.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : generator of array of shape = [n_samples]\n The predicted value of the input samples.\n \"\"\"\n try:\n for score in self._staged_decision_function(X):\n yield self.loss_._score_to_proba(score)\n except NotFittedError:\n raise\n except AttributeError:\n raise AttributeError('loss=%r does not support predict_proba' %\n self.loss)\n\n\nclass GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):\n \"\"\"Gradient Boosting for regression.\n\n GB builds an additive model in a forward stage-wise fashion;\n it allows for the optimization of arbitrary differentiable loss functions.\n In each stage a regression tree is fit on the negative gradient of the\n given loss function.\n\n Read more in the :ref:`User Guide <gradient_boosting>`.\n\n Parameters\n ----------\n loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')\n loss function to be optimized. 'ls' refers to least squares\n regression. 'lad' (least absolute deviation) is a highly robust\n loss function solely based on order information of the input\n variables. 'huber' is a combination of the two. 'quantile'\n allows quantile regression (use `alpha` to specify the quantile).\n\n learning_rate : float, optional (default=0.1)\n learning rate shrinks the contribution of each tree by `learning_rate`.\n There is a trade-off between learning_rate and n_estimators.\n\n n_estimators : int (default=100)\n The number of boosting stages to perform. 
Gradient boosting\n is fairly robust to over-fitting so a large number usually\n results in better performance.\n\n max_depth : integer, optional (default=3)\n maximum depth of the individual regression estimators. The maximum\n depth limits the number of nodes in the tree. Tune this parameter\n for best performance; the best value depends on the interaction\n of the input variables.\n Ignored if ``max_leaf_nodes`` is not None.\n\n min_samples_split : integer, optional (default=2)\n The minimum number of samples required to split an internal node.\n\n min_samples_leaf : integer, optional (default=1)\n The minimum number of samples required to be at a leaf node.\n\n min_weight_fraction_leaf : float, optional (default=0.)\n The minimum weighted fraction of the input samples required to be at a\n leaf node.\n\n subsample : float, optional (default=1.0)\n The fraction of samples to be used for fitting the individual base\n learners. If smaller than 1.0 this results in Stochastic Gradient\n Boosting. `subsample` interacts with the parameter `n_estimators`.\n Choosing `subsample < 1.0` leads to a reduction of variance\n and an increase in bias.\n\n max_features : int, float, string or None, optional (default=None)\n The number of features to consider when looking for the best split:\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a percentage and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Choosing `max_features < n_features` leads to a reduction of variance\n and an increase in bias.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n max_leaf_nodes : int or None, optional (default=None)\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n alpha : float (default=0.9)\n The alpha-quantile of the huber loss function and the quantile\n loss function. Only if ``loss='huber'`` or ``loss='quantile'``.\n\n init : BaseEstimator, None, optional (default=None)\n An estimator object that is used to compute the initial\n predictions. ``init`` has to provide ``fit`` and ``predict``.\n If None it uses ``loss.init_estimator``.\n\n verbose : int, default: 0\n Enable verbose output. If 1 then it prints progress and performance\n once in a while (the more trees the lower the frequency). If greater\n than 1 then it prints progress and performance for every tree.\n\n warm_start : bool, default: False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just erase the\n previous solution.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n presort : bool or 'auto', optional (default='auto')\n Whether to presort the data to speed up the finding of best splits in\n fitting. 
Auto mode by default will use presorting on dense data and\n default to normal sorting on sparse data. Setting presort to true on\n sparse data will raise an error.\n\n .. versionadded:: 0.17\n optional parameter *presort*.\n\n Attributes\n ----------\n feature_importances_ : array, shape = [n_features]\n The feature importances (the higher, the more important the feature).\n\n oob_improvement_ : array, shape = [n_estimators]\n The improvement in loss (= deviance) on the out-of-bag samples\n relative to the previous iteration.\n ``oob_improvement_[0]`` is the improvement in\n loss of the first stage over the ``init`` estimator.\n\n train_score_ : array, shape = [n_estimators]\n The i-th score ``train_score_[i]`` is the deviance (= loss) of the\n model at iteration ``i`` on the in-bag sample.\n If ``subsample == 1`` this is the deviance on the training data.\n\n loss_ : LossFunction\n The concrete ``LossFunction`` object.\n\n `init` : BaseEstimator\n The estimator that provides the initial predictions.\n Set via the ``init`` argument or ``loss.init_estimator``.\n\n estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]\n The collection of fitted sub-estimators.\n\n See also\n --------\n DecisionTreeRegressor, RandomForestRegressor\n\n References\n ----------\n J. Friedman, Greedy Function Approximation: A Gradient Boosting\n Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.\n\n J. Friedman, Stochastic Gradient Boosting, 1999\n\n T. Hastie, R. Tibshirani and J. Friedman.\n Elements of Statistical Learning Ed. 2, Springer, 2009.\n \"\"\"\n\n _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')\n\n def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,\n subsample=1.0, min_samples_split=2,\n min_samples_leaf=1, min_weight_fraction_leaf=0.,\n max_depth=3, init=None, random_state=None,\n max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,\n warm_start=False, presort='auto'):\n\n super(GradientBoostingRegressor, self).__init__(\n loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_depth=max_depth, init=init, subsample=subsample,\n max_features=max_features,\n random_state=random_state, alpha=alpha, verbose=verbose,\n max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,\n presort=presort)\n\n def predict(self, X):\n \"\"\"Predict regression target for X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : array of shape = [n_samples]\n The predicted values.\n \"\"\"\n X = check_array(X, dtype=DTYPE, order=\"C\")\n return self._decision_function(X).ravel()\n\n def staged_predict(self, X):\n \"\"\"Predict regression target at each stage for X.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : generator of array of shape = [n_samples]\n The predicted value of the input samples.\n \"\"\"\n for y in self._staged_decision_function(X):\n yield y.ravel()\n\n def apply(self, X):\n \"\"\"Apply trees in the ensemble to X, return leaf indices.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape = [n_samples, n_features]\n The input samples. 
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n X_leaves : array_like, shape = [n_samples, n_estimators]\n For each datapoint x in X and for each tree in the ensemble,\n return the index of the leaf x ends up in in each estimator.\n \"\"\"\n\n leaves = super(GradientBoostingRegressor, self).apply(X)\n leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])\n return leaves\n",
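The following is a minimal usage sketch, not part of the library source above: it exercises the ``monitor`` callback documented in ``BaseGradientBoosting.fit``. The callback receives the stage index, the estimator, and the locals of ``_fit_stages``, and returning ``True`` stops boosting early; the helper name ``stop_when_flat`` and the synthetic data are illustrative assumptions.

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.RandomState(0)
X = rng.rand(200, 4)
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)

def stop_when_flat(i, est, locals_):
    # train_score_[i] is already filled in when the monitor runs, so two
    # consecutive in-bag losses can be compared; True ends fitting early.
    return i > 0 and abs(est.train_score_[i - 1] - est.train_score_[i]) < 1e-4

est = GradientBoostingRegressor(n_estimators=500, random_state=0)
est.fit(X, y, monitor=stop_when_flat)
# fit() trims estimators_ down to the number of stages actually fit.
print(est.estimators_.shape[0])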
"\"\"\"California housing dataset.\n\nThe original database is available from StatLib\n\n http://lib.stat.cmu.edu/\n\nThe data contains 20,640 observations on 9 variables.\n\nThis dataset contains the average house value as target variable\nand the following input variables (features): average income,\nhousing average age, average rooms, average bedrooms, population,\naverage occupation, latitude, and longitude in that order.\n\nReferences\n----------\n\nPace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,\nStatistics and Probability Letters, 33 (1997) 291-297.\n\n\"\"\"\n# Authors: Peter Prettenhofer\n# License: BSD 3 clause\n\nfrom io import BytesIO\nimport os\nfrom os.path import exists\nfrom os import makedirs\nimport tarfile\n\ntry:\n # Python 2\n from urllib2 import urlopen\nexcept ImportError:\n # Python 3+\n from urllib.request import urlopen\n\nimport numpy as np\n\nfrom .base import get_data_home, Bunch\nfrom .base import _pkl_filepath\nfrom ..externals import joblib\n\n\nDATA_URL = \"http://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.tgz\"\nTARGET_FILENAME = \"cal_housing.pkz\"\n\n# Grab the module-level docstring to use as a description of the\n# dataset\nMODULE_DOCS = __doc__\n\n\ndef fetch_california_housing(data_home=None, download_if_missing=True):\n \"\"\"Loader for the California housing dataset from StatLib.\n\n Read more in the :ref:`User Guide <datasets>`.\n\n Parameters\n ----------\n data_home : optional, default: None\n Specify another download and cache folder for the datasets. By default\n all scikit learn data is stored in '~/scikit_learn_data' subfolders.\n\n download_if_missing: optional, True by default\n If False, raise a IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n Returns\n -------\n dataset : dict-like object with the following attributes:\n\n dataset.data : ndarray, shape [20640, 8]\n Each row corresponding to the 8 feature values in order.\n\n dataset.target : numpy array of shape (20640,)\n Each value corresponds to the average house value in units of 100,000.\n\n dataset.feature_names : array of length 8\n Array of ordered feature names used in the dataset.\n\n dataset.DESCR : string\n Description of the California housing dataset.\n\n Notes\n ------\n\n This dataset consists of 20,640 samples and 9 features.\n \"\"\"\n data_home = get_data_home(data_home=data_home)\n if not exists(data_home):\n makedirs(data_home)\n filepath = _pkl_filepath(data_home, TARGET_FILENAME)\n if not exists(filepath):\n print('downloading Cal. 
housing from %s to %s' % (DATA_URL, data_home))\n archive_fileobj = BytesIO(urlopen(DATA_URL).read())\n fileobj = tarfile.open(\n mode=\"r:gz\",\n fileobj=archive_fileobj).extractfile(\n 'CaliforniaHousing/cal_housing.data')\n\n cal_housing = np.loadtxt(fileobj, delimiter=',')\n # Columns are not in the same order compared to the previous\n # URL resource on lib.stat.cmu.edu\n columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0]\n cal_housing = cal_housing[:, columns_index]\n joblib.dump(cal_housing, filepath, compress=6)\n else:\n cal_housing = joblib.load(filepath)\n\n feature_names = [\"MedInc\", \"HouseAge\", \"AveRooms\", \"AveBedrms\",\n \"Population\", \"AveOccup\", \"Latitude\", \"Longitude\"]\n\n target, data = cal_housing[:, 0], cal_housing[:, 1:]\n\n # avg rooms = total rooms / households\n data[:, 2] /= data[:, 5]\n\n # avg bed rooms = total bed rooms / households\n data[:, 3] /= data[:, 5]\n\n # avg occupancy = population / households\n data[:, 5] = data[:, 4] / data[:, 5]\n\n # target in units of 100,000\n target = target / 100000.0\n\n return Bunch(data=data,\n target=target,\n feature_names=feature_names,\n DESCR=MODULE_DOCS)\n",
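A short, hedged usage sketch for the loader above; it assumes scikit-learn is installed and that the archive has been downloaded or is already cached under ``data_home``.

from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
print(housing.data.shape)      # (20640, 8), columns ordered as feature_names
print(housing.feature_names)   # ['MedInc', 'HouseAge', 'AveRooms', ...]
print(housing.target[:3])      # average house values in units of 100,000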
"import os\nimport numpy\nfrom numpy.distutils.misc_util import Configuration\n\n\ndef configuration(parent_package=\"\", top_path=None):\n config = Configuration(\"decomposition\", parent_package, top_path)\n\n libraries = []\n if os.name == 'posix':\n libraries.append('m')\n\n config.add_extension(\"_online_lda\",\n sources=[\"_online_lda.c\"],\n include_dirs=[numpy.get_include()],\n libraries=libraries)\n\n config.add_extension('cdnmf_fast',\n sources=['cdnmf_fast.c'],\n include_dirs=[numpy.get_include()],\n libraries=libraries)\n\n config.add_subpackage(\"tests\")\n\n return config\n\nif __name__ == \"__main__\":\n from numpy.distutils.core import setup\n setup(**configuration().todict())\n",
"\"\"\"Loader for the Labeled Faces in the Wild (LFW) dataset\n\nThis dataset is a collection of JPEG pictures of famous people collected\nover the internet, all details are available on the official website:\n\n http://vis-www.cs.umass.edu/lfw/\n\nEach picture is centered on a single face. The typical task is called\nFace Verification: given a pair of two pictures, a binary classifier\nmust predict whether the two images are from the same person.\n\nAn alternative task, Face Recognition or Face Identification is:\ngiven the picture of the face of an unknown person, identify the name\nof the person by referring to a gallery of previously seen pictures of\nidentified persons.\n\nBoth Face Verification and Face Recognition are tasks that are typically\nperformed on the output of a model trained to perform Face Detection. The\nmost popular model for Face Detection is called Viola-Johns and is\nimplemented in the OpenCV library. The LFW faces were extracted by this face\ndetector from various online websites.\n\"\"\"\n# Copyright (c) 2011 Olivier Grisel <[email protected]>\n# License: BSD 3 clause\n\nfrom os import listdir, makedirs, remove\nfrom os.path import join, exists, isdir\n\nfrom sklearn.utils import deprecated\n\nimport logging\nimport numpy as np\n\ntry:\n import urllib.request as urllib # for backwards compatibility\nexcept ImportError:\n import urllib\n\nfrom .base import get_data_home, Bunch\nfrom ..externals.joblib import Memory\n\nfrom ..externals.six import b\n\nlogger = logging.getLogger(__name__)\n\n\nBASE_URL = \"http://vis-www.cs.umass.edu/lfw/\"\nARCHIVE_NAME = \"lfw.tgz\"\nFUNNELED_ARCHIVE_NAME = \"lfw-funneled.tgz\"\nTARGET_FILENAMES = [\n 'pairsDevTrain.txt',\n 'pairsDevTest.txt',\n 'pairs.txt',\n]\n\n\ndef scale_face(face):\n \"\"\"Scale back to 0-1 range in case of normalization for plotting\"\"\"\n scaled = face - face.min()\n scaled /= scaled.max()\n return scaled\n\n\n#\n# Common private utilities for data fetching from the original LFW website\n# local disk caching, and image decoding.\n#\n\n\ndef check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):\n \"\"\"Helper function to download any missing LFW data\"\"\"\n data_home = get_data_home(data_home=data_home)\n lfw_home = join(data_home, \"lfw_home\")\n\n if funneled:\n archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)\n data_folder_path = join(lfw_home, \"lfw_funneled\")\n archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME\n else:\n archive_path = join(lfw_home, ARCHIVE_NAME)\n data_folder_path = join(lfw_home, \"lfw\")\n archive_url = BASE_URL + ARCHIVE_NAME\n\n if not exists(lfw_home):\n makedirs(lfw_home)\n\n for target_filename in TARGET_FILENAMES:\n target_filepath = join(lfw_home, target_filename)\n if not exists(target_filepath):\n if download_if_missing:\n url = BASE_URL + target_filename\n logger.warning(\"Downloading LFW metadata: %s\", url)\n urllib.urlretrieve(url, target_filepath)\n else:\n raise IOError(\"%s is missing\" % target_filepath)\n\n if not exists(data_folder_path):\n\n if not exists(archive_path):\n if download_if_missing:\n logger.warning(\"Downloading LFW data (~200MB): %s\", archive_url)\n urllib.urlretrieve(archive_url, archive_path)\n else:\n raise IOError(\"%s is missing\" % target_filepath)\n\n import tarfile\n logger.info(\"Decompressing the data archive to %s\", data_folder_path)\n tarfile.open(archive_path, \"r:gz\").extractall(path=lfw_home)\n remove(archive_path)\n\n return lfw_home, data_folder_path\n\n\ndef _load_imgs(file_paths, slice_, color, 
resize):\n \"\"\"Internally used to load images\"\"\"\n\n # Try to import imread and imresize from PIL. We do this here to prevent\n # the whole sklearn.datasets module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n from scipy.misc import imresize\n except ImportError:\n raise ImportError(\"The Python Imaging Library (PIL)\"\n \" is required to load data from jpeg files\")\n\n # compute the portion of the images to load to respect the slice_ parameter\n # given by the caller\n default_slice = (slice(0, 250), slice(0, 250))\n if slice_ is None:\n slice_ = default_slice\n else:\n slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))\n\n h_slice, w_slice = slice_\n h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)\n w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)\n\n if resize is not None:\n resize = float(resize)\n h = int(resize * h)\n w = int(resize * w)\n\n # allocate some contiguous memory to host the decoded image slices\n n_faces = len(file_paths)\n if not color:\n faces = np.zeros((n_faces, h, w), dtype=np.float32)\n else:\n faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)\n\n # iterate over the collected file paths to load the jpeg files as numpy\n # arrays\n for i, file_path in enumerate(file_paths):\n if i % 1000 == 0:\n logger.info(\"Loading face #%05d / %05d\", i + 1, n_faces)\n\n # Checks if jpeg reading worked. Refer to issue #3594 for more\n # details.\n img = imread(file_path)\n if img.ndim == 0:\n raise RuntimeError(\"Failed to read the image file %s, \"\n \"Please make sure that libjpeg is installed\"\n % file_path)\n\n face = np.asarray(img[slice_], dtype=np.float32)\n face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats\n if resize is not None:\n face = imresize(face, resize)\n if not color:\n # average the color channels to compute a gray levels\n # representation\n face = face.mean(axis=2)\n\n faces[i, ...] 
= face\n\n return faces\n\n\n#\n# Task #1: Face Identification on pictures with names\n#\n\ndef _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,\n min_faces_per_person=0):\n \"\"\"Perform the actual data loading for the lfw people dataset\n\n This operation is meant to be cached by a joblib wrapper.\n \"\"\"\n # scan the data folder content to retain people with more than\n # `min_faces_per_person` face pictures\n person_names, file_paths = [], []\n for person_name in sorted(listdir(data_folder_path)):\n folder_path = join(data_folder_path, person_name)\n if not isdir(folder_path):\n continue\n paths = [join(folder_path, f) for f in listdir(folder_path)]\n n_pictures = len(paths)\n if n_pictures >= min_faces_per_person:\n person_name = person_name.replace('_', ' ')\n person_names.extend([person_name] * n_pictures)\n file_paths.extend(paths)\n\n n_faces = len(file_paths)\n if n_faces == 0:\n raise ValueError(\"min_faces_per_person=%d is too restrictive\" %\n min_faces_per_person)\n\n target_names = np.unique(person_names)\n target = np.searchsorted(target_names, person_names)\n\n faces = _load_imgs(file_paths, slice_, color, resize)\n\n # shuffle the faces with a deterministic RNG scheme to avoid having\n # all faces of the same person in a row, as it would break some\n # cross validation and learning algorithms such as SGD and online\n # k-means that make an IID assumption\n\n indices = np.arange(n_faces)\n np.random.RandomState(42).shuffle(indices)\n faces, target = faces[indices], target[indices]\n return faces, target, target_names\n\n\ndef fetch_lfw_people(data_home=None, funneled=True, resize=0.5,\n min_faces_per_person=0, color=False,\n slice_=(slice(70, 195), slice(78, 172)),\n download_if_missing=True):\n \"\"\"Loader for the Labeled Faces in the Wild (LFW) people dataset\n\n This dataset is a collection of JPEG pictures of famous people\n collected on the internet; all details are available on the\n official website:\n\n http://vis-www.cs.umass.edu/lfw/\n\n Each picture is centered on a single face. Each pixel of each channel\n (color in RGB) is encoded by a float in range 0.0 - 1.0.\n\n The task is called Face Recognition (or Identification): given the\n picture of a face, find the name of the person given a training set\n (gallery).\n\n The original images are 250 x 250 pixels, but the default slice and resize\n arguments reduce them to 62 x 47.\n\n Parameters\n ----------\n data_home : optional, default: None\n Specify another download and cache folder for the datasets. By default\n all scikit learn data is stored in '~/scikit_learn_data' subfolders.\n\n funneled : boolean, optional, default: True\n Download and use the funneled variant of the dataset.\n\n resize : float, optional, default 0.5\n Ratio used to resize each face picture.\n\n min_faces_per_person : int, optional, default 0\n The extracted dataset will only retain pictures of people that have at\n least `min_faces_per_person` different pictures.\n\n color : boolean, optional, default False\n Keep the 3 RGB channels instead of averaging them to a single\n gray level channel. 
 If color is True the shape of the data has\n        one more dimension than the shape with color = False.\n\n    slice_ : optional\n        Provide a custom 2D slice (height, width) to extract the\n        'interesting' part of the jpeg files and avoid using statistical\n        correlation from the background\n\n    download_if_missing : optional, True by default\n        If False, raise an IOError if the data is not locally available\n        instead of trying to download the data from the source site.\n\n    Returns\n    -------\n    dataset : dict-like object with the following attributes:\n\n    dataset.data : numpy array of shape (13233, 2914)\n        Each row corresponds to a ravelled face image of original size 62 x 47\n        pixels. Changing the ``slice_`` or resize parameters will change the shape\n        of the output.\n\n    dataset.images : numpy array of shape (13233, 62, 47)\n        Each row is a face image corresponding to one of the 5749 people in\n        the dataset. Changing the ``slice_`` or resize parameters will change the shape\n        of the output.\n\n    dataset.target : numpy array of shape (13233,)\n        Labels associated to each face image. Those labels range from 0-5748\n        and correspond to the person IDs.\n\n    dataset.DESCR : string\n        Description of the Labeled Faces in the Wild (LFW) dataset.\n    \"\"\"\n    lfw_home, data_folder_path = check_fetch_lfw(\n        data_home=data_home, funneled=funneled,\n        download_if_missing=download_if_missing)\n    logger.info('Loading LFW people faces from %s', lfw_home)\n\n    # wrap the loader in a memoizing function that will return memmaped data\n    # arrays for optimal memory usage\n    m = Memory(cachedir=lfw_home, compress=6, verbose=0)\n    load_func = m.cache(_fetch_lfw_people)\n\n    # load and memoize the pairs as np arrays\n    faces, target, target_names = load_func(\n        data_folder_path, resize=resize,\n        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)\n\n    # pack the results as a Bunch instance\n    return Bunch(data=faces.reshape(len(faces), -1), images=faces,\n                 target=target, target_names=target_names,\n                 DESCR=\"LFW faces dataset\")\n\n\n#\n# Task #2: Face Verification on pairs of face pictures\n#\n\n\ndef _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,\n                     color=False, resize=None):\n    \"\"\"Perform the actual data loading for the LFW pairs dataset\n\n    This operation is meant to be cached by a joblib wrapper.\n    \"\"\"\n    # parse the index file to find the number of pairs to be able to allocate\n    # the right amount of memory before starting to decode the jpeg files\n    with open(index_file_path, 'rb') as index_file:\n        split_lines = [ln.strip().split(b('\t')) for ln in index_file]\n        pair_specs = [sl for sl in split_lines if len(sl) > 2]\n        n_pairs = len(pair_specs)\n\n    # iterating over the metadata lines for each pair to find the filename to\n    # decode and load in memory\n    target = np.zeros(n_pairs, dtype=np.int)\n    file_paths = list()\n    for i, components in enumerate(pair_specs):\n        if len(components) == 3:\n            target[i] = 1\n            pair = (\n                (components[0], int(components[1]) - 1),\n                (components[0], int(components[2]) - 1),\n            )\n        elif len(components) == 4:\n            target[i] = 0\n            pair = (\n                (components[0], int(components[1]) - 1),\n                (components[2], int(components[3]) - 1),\n            )\n        else:\n            raise ValueError(\"invalid line %d: %r\" % (i + 1, components))\n        for j, (name, idx) in enumerate(pair):\n            try:\n                person_folder = join(data_folder_path, name)\n            except TypeError:\n                person_folder = join(data_folder_path, str(name, 'UTF-8'))\n            filenames = list(sorted(listdir(person_folder)))\n            file_path = join(person_folder, filenames[idx])\n
            file_paths.append(file_path)\n\n    pairs = _load_imgs(file_paths, slice_, color, resize)\n    shape = list(pairs.shape)\n    n_faces = shape.pop(0)\n    shape.insert(0, 2)\n    shape.insert(0, n_faces // 2)\n    pairs.shape = shape\n\n    return pairs, target, np.array(['Different persons', 'Same person'])\n\n\n@deprecated(\"Function 'load_lfw_people' has been deprecated in 0.17 and will be \"\n            \"removed in 0.19.\"\n            \"Use fetch_lfw_people(download_if_missing=False) instead.\")\ndef load_lfw_people(download_if_missing=False, **kwargs):\n    \"\"\"Alias for fetch_lfw_people(download_if_missing=False)\n\n    Check fetch_lfw_people.__doc__ for the documentation and parameter list.\n    \"\"\"\n    return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)\n\n\ndef fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,\n                    color=False, slice_=(slice(70, 195), slice(78, 172)),\n                    download_if_missing=True):\n    \"\"\"Loader for the Labeled Faces in the Wild (LFW) pairs dataset\n\n    This dataset is a collection of JPEG pictures of famous people\n    collected on the internet, all details are available on the\n    official website:\n\n        http://vis-www.cs.umass.edu/lfw/\n\n    Each picture is centered on a single face. Each pixel of each channel\n    (color in RGB) is encoded by a float in range 0.0 - 1.0.\n\n    The task is called Face Verification: given a pair of two pictures,\n    a binary classifier must predict whether the two images are from\n    the same person.\n\n    In the official `README.txt`_ this task is described as the\n    \"Restricted\" task. As I am not sure how to implement the\n    \"Unrestricted\" variant correctly, I left it as unsupported for now.\n\n    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt\n\n    The original images are 250 x 250 pixels, but the default slice and resize\n    arguments reduce them to 62 x 47.\n\n    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.\n\n    Parameters\n    ----------\n    subset : optional, default: 'train'\n        Select the dataset to load: 'train' for the development training\n        set, 'test' for the development test set, and '10_folds' for the\n        official evaluation set that is meant to be used with a 10-folds\n        cross validation.\n\n    data_home : optional, default: None\n        Specify another download and cache folder for the datasets. By\n        default all scikit learn data is stored in '~/scikit_learn_data'\n        subfolders.\n\n    funneled : boolean, optional, default: True\n        Download and use the funneled variant of the dataset.\n\n    resize : float, optional, default 0.5\n        Ratio used to resize each face picture.\n\n    color : boolean, optional, default False\n        Keep the 3 RGB channels instead of averaging them to a single\n        gray level channel. If color is True the shape of the data has\n        one more dimension than the shape with color = False.\n\n    slice_ : optional\n        Provide a custom 2D slice (height, width) to extract the\n        'interesting' part of the jpeg files and avoid using statistical\n        correlation from the background\n\n    download_if_missing : optional, True by default\n        If False, raise an IOError if the data is not locally available\n        instead of trying to download the data from the source site.\n\n    Returns\n    -------\n    The data is returned as a Bunch object with the following attributes:\n\n    data : numpy array of shape (2200, 5828)\n        Each row corresponds to 2 ravel'd face images of original size 62 x 47\n        pixels.
 Changing the ``slice_`` or resize parameters will change the shape\n        of the output.\n\n    pairs : numpy array of shape (2200, 2, 62, 47)\n        Each row has 2 face images corresponding to same or different person\n        from the dataset containing 5749 people. Changing the ``slice_`` or resize\n        parameters will change the shape of the output.\n\n    target : numpy array of shape (2200,)\n        Labels associated to each pair of images. The two label values\n        correspond to different persons or the same person.\n\n    DESCR : string\n        Description of the Labeled Faces in the Wild (LFW) dataset.\n\n    \"\"\"\n    lfw_home, data_folder_path = check_fetch_lfw(\n        data_home=data_home, funneled=funneled,\n        download_if_missing=download_if_missing)\n    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)\n\n    # wrap the loader in a memoizing function that will return memmaped data\n    # arrays for optimal memory usage\n    m = Memory(cachedir=lfw_home, compress=6, verbose=0)\n    load_func = m.cache(_fetch_lfw_pairs)\n\n    # select the right metadata file according to the requested subset\n    label_filenames = {\n        'train': 'pairsDevTrain.txt',\n        'test': 'pairsDevTest.txt',\n        '10_folds': 'pairs.txt',\n    }\n    if subset not in label_filenames:\n        raise ValueError(\"subset='%s' is invalid: should be one of %r\" % (\n            subset, list(sorted(label_filenames.keys()))))\n    index_file_path = join(lfw_home, label_filenames[subset])\n\n    # load and memoize the pairs as np arrays\n    pairs, target, target_names = load_func(\n        index_file_path, data_folder_path, resize=resize, color=color,\n        slice_=slice_)\n\n    # pack the results as a Bunch instance\n    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,\n                 target=target, target_names=target_names,\n                 DESCR=\"'%s' segment of the LFW pairs dataset\" % subset)\n\n\n@deprecated(\"Function 'load_lfw_pairs' has been deprecated in 0.17 and will be \"\n            \"removed in 0.19.\"\n            \"Use fetch_lfw_pairs(download_if_missing=False) instead.\")\ndef load_lfw_pairs(download_if_missing=False, **kwargs):\n    \"\"\"Alias for fetch_lfw_pairs(download_if_missing=False)\n\n    Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.\n    \"\"\"\n    return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)\n",
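A minimal usage sketch for the two LFW loaders above; it assumes network access (or an already cached copy under the default data_home) and the default slice_ and resize values, so the documented 62 x 47 face shape applies. fetch_lfw_people and fetch_lfw_pairs are the public entry points of this module; everything else here is just printed for inspection.

# Usage sketch (assumes the dataset can be downloaded or is cached
# under ~/scikit_learn_data, as described in the docstrings above).
from sklearn.datasets import fetch_lfw_people, fetch_lfw_pairs

people = fetch_lfw_people(min_faces_per_person=70, resize=0.5)
print(people.images.shape)    # (n_faces, 62, 47): gray-level face slices
print(people.data.shape)      # (n_faces, 2914): the same images, ravelled
print(people.target_names)    # unique person names, indexed by people.target

pairs = fetch_lfw_pairs(subset='train')
print(pairs.pairs.shape)      # (2200, 2, 62, 47): two face images per pair
print(pairs.target_names)     # ['Different persons' 'Same person']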
"\"\"\"Utilities to evaluate models with respect to a variable\n\"\"\"\n# Author: Alexander Fabisch <[email protected]>\n#\n# License: BSD 3 clause\n\nimport warnings\n\nimport numpy as np\n\nfrom .base import is_classifier, clone\nfrom .cross_validation import check_cv\nfrom .externals.joblib import Parallel, delayed\nfrom .cross_validation import _safe_split, _score, _fit_and_score\nfrom .metrics.scorer import check_scoring\nfrom .utils import indexable\nfrom .utils.fixes import astype\n\n\n__all__ = ['learning_curve', 'validation_curve']\n\n\ndef learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),\n cv=None, scoring=None, exploit_incremental_learning=False,\n n_jobs=1, pre_dispatch=\"all\", verbose=0):\n \"\"\"Learning curve.\n\n Determines cross-validated training and test scores for different training\n set sizes.\n\n A cross-validation generator splits the whole dataset k times in training\n and test data. Subsets of the training set with varying sizes will be used\n to train the estimator and a score for each training subset size and the\n test set will be computed. Afterwards, the scores will be averaged over\n all k runs for each training subset size.\n\n Read more in the :ref:`User Guide <learning_curves>`.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. 
If the estimator is a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n exploit_incremental_learning : boolean, optional, default: False\n If the estimator supports incremental learning, this will be\n used to speed up fitting for different training set sizes.\n\n n_jobs : integer, optional\n Number of jobs to run in parallel (default 1).\n\n pre_dispatch : integer or string, optional\n Number of predispatched jobs for parallel execution (default is\n all). The option can reduce the allocated memory. The string can\n be an expression like '2*n_jobs'.\n\n verbose : integer, optional\n Controls the verbosity: the higher, the more messages.\n\n Returns\n -------\n train_sizes_abs : array, shape = (n_unique_ticks,), dtype int\n Numbers of training examples that has been used to generate the\n learning curve. Note that the number of ticks might be less\n than n_ticks because duplicate entries will be removed.\n\n train_scores : array, shape (n_ticks, n_cv_folds)\n Scores on training sets.\n\n test_scores : array, shape (n_ticks, n_cv_folds)\n Scores on test set.\n\n Notes\n -----\n See :ref:`examples/model_selection/plot_learning_curve.py\n <example_model_selection_plot_learning_curve.py>`\n \"\"\"\n if exploit_incremental_learning and not hasattr(estimator, \"partial_fit\"):\n raise ValueError(\"An estimator must support the partial_fit interface \"\n \"to exploit incremental learning\")\n\n X, y = indexable(X, y)\n # Make a list since we will be iterating multiple times over the folds\n cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))\n scorer = check_scoring(estimator, scoring=scoring)\n\n # HACK as long as boolean indices are allowed in cv generators\n if cv[0][0].dtype == bool:\n new_cv = []\n for i in range(len(cv)):\n new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))\n cv = new_cv\n\n n_max_training_samples = len(cv[0][0])\n # Because the lengths of folds can be significantly different, it is\n # not guaranteed that we use all of the available training data when we\n # use the first 'n_max_training_samples' samples.\n train_sizes_abs = _translate_train_sizes(train_sizes,\n n_max_training_samples)\n n_unique_ticks = train_sizes_abs.shape[0]\n if verbose > 0:\n print(\"[learning_curve] Training set sizes: \" + str(train_sizes_abs))\n\n parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,\n verbose=verbose)\n if exploit_incremental_learning:\n classes = np.unique(y) if is_classifier(estimator) else None\n out = parallel(delayed(_incremental_fit_estimator)(\n clone(estimator), X, y, classes, train, test, train_sizes_abs,\n scorer, verbose) for train, test in cv)\n else:\n out = parallel(delayed(_fit_and_score)(\n clone(estimator), X, y, scorer, train[:n_train_samples], test,\n verbose, parameters=None, fit_params=None, return_train_score=True)\n for train, test in cv for n_train_samples in train_sizes_abs)\n out = np.array(out)[:, :2]\n n_cv_folds = out.shape[0] // n_unique_ticks\n out = out.reshape(n_cv_folds, n_unique_ticks, 2)\n\n out = np.asarray(out).transpose((2, 1, 0))\n\n return train_sizes_abs, out[0], out[1]\n\n\ndef _translate_train_sizes(train_sizes, n_max_training_samples):\n \"\"\"Determine 
absolute sizes of training subsets and validate 'train_sizes'.\n\n    Examples:\n        _translate_train_sizes([0.5, 1.0], 10) -> [5, 10]\n        _translate_train_sizes([5, 10], 10) -> [5, 10]\n\n    Parameters\n    ----------\n    train_sizes : array-like, shape (n_ticks,), dtype float or int\n        Numbers of training examples that will be used to generate the\n        learning curve. If the dtype is float, it is regarded as a\n        fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].\n\n    n_max_training_samples : int\n        Maximum number of training samples (upper bound of 'train_sizes').\n\n    Returns\n    -------\n    train_sizes_abs : array, shape (n_unique_ticks,), dtype int\n        Numbers of training examples that will be used to generate the\n        learning curve. Note that the number of ticks might be less\n        than n_ticks because duplicate entries will be removed.\n    \"\"\"\n    train_sizes_abs = np.asarray(train_sizes)\n    n_ticks = train_sizes_abs.shape[0]\n    n_min_required_samples = np.min(train_sizes_abs)\n    n_max_required_samples = np.max(train_sizes_abs)\n    if np.issubdtype(train_sizes_abs.dtype, np.float):\n        if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:\n            raise ValueError(\"train_sizes has been interpreted as fractions \"\n                             \"of the maximum number of training samples and \"\n                             \"must be within (0, 1], but is within [%f, %f].\"\n                             % (n_min_required_samples,\n                                n_max_required_samples))\n        train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,\n                                 dtype=np.int, copy=False)\n        train_sizes_abs = np.clip(train_sizes_abs, 1,\n                                  n_max_training_samples)\n    else:\n        if (n_min_required_samples <= 0 or\n                n_max_required_samples > n_max_training_samples):\n            raise ValueError(\"train_sizes has been interpreted as absolute \"\n                             \"numbers of training samples and must be within \"\n                             \"(0, %d], but is within [%d, %d].\"\n                             % (n_max_training_samples,\n                                n_min_required_samples,\n                                n_max_required_samples))\n\n    train_sizes_abs = np.unique(train_sizes_abs)\n    if n_ticks > train_sizes_abs.shape[0]:\n        warnings.warn(\"Removed duplicate entries from 'train_sizes'. Number \"\n                      \"of ticks will be less than the size of \"\n                      \"'train_sizes' (%d instead of %d).\"\n                      % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)\n\n    return train_sizes_abs\n\n\ndef _incremental_fit_estimator(estimator, X, y, classes, train, test,\n                               train_sizes, scorer, verbose):\n    \"\"\"Train estimator on training subsets incrementally and compute scores.\"\"\"\n    train_scores, test_scores = [], []\n    partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])\n    for n_train_samples, partial_train in partitions:\n        train_subset = train[:n_train_samples]\n        X_train, y_train = _safe_split(estimator, X, y, train_subset)\n        X_partial_train, y_partial_train = _safe_split(estimator, X, y,\n                                                       partial_train)\n        X_test, y_test = _safe_split(estimator, X, y, test, train_subset)\n        if y_partial_train is None:\n            estimator.partial_fit(X_partial_train, classes=classes)\n        else:\n            estimator.partial_fit(X_partial_train, y_partial_train,\n                                  classes=classes)\n        train_scores.append(_score(estimator, X_train, y_train, scorer))\n        test_scores.append(_score(estimator, X_test, y_test, scorer))\n    return np.array((train_scores, test_scores)).T\n\n\ndef validation_curve(estimator, X, y, param_name, param_range, cv=None,\n                     scoring=None, n_jobs=1, pre_dispatch=\"all\", verbose=0):\n    \"\"\"Validation curve.\n\n    Determine training and test scores for varying parameter values.\n\n    Compute scores for an estimator with different values of a specified\n    parameter. 
This is similar to grid search with one parameter. However, this\n will also compute training scores and is merely a utility for plotting the\n results.\n\n Read more in the :ref:`User Guide <validation_curve>`.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n param_name : string\n Name of the parameter that will be varied.\n\n param_range : array-like, shape (n_values,)\n The values of the parameter that will be evaluated.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n n_jobs : integer, optional\n Number of jobs to run in parallel (default 1).\n\n pre_dispatch : integer or string, optional\n Number of predispatched jobs for parallel execution (default is\n all). The option can reduce the allocated memory. The string can\n be an expression like '2*n_jobs'.\n\n verbose : integer, optional\n Controls the verbosity: the higher, the more messages.\n\n Returns\n -------\n train_scores : array, shape (n_ticks, n_cv_folds)\n Scores on training sets.\n\n test_scores : array, shape (n_ticks, n_cv_folds)\n Scores on test set.\n\n Notes\n -----\n See\n :ref:`examples/model_selection/plot_validation_curve.py\n <example_model_selection_plot_validation_curve.py>`\n \"\"\"\n X, y = indexable(X, y)\n cv = check_cv(cv, X, y, classifier=is_classifier(estimator))\n scorer = check_scoring(estimator, scoring=scoring)\n\n parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,\n verbose=verbose)\n out = parallel(delayed(_fit_and_score)(\n estimator, X, y, scorer, train, test, verbose,\n parameters={param_name: v}, fit_params=None, return_train_score=True)\n for train, test in cv for v in param_range)\n\n out = np.asarray(out)[:, :2]\n n_params = len(param_range)\n n_cv_folds = out.shape[0] // n_params\n out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))\n\n return out[0], out[1]\n",
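A brief usage sketch for the two utilities above, assuming this module is importable as sklearn.learning_curve (as in scikit-learn of this era) and using the standard bundled digits dataset and SVC estimator. The returned score arrays have shape (n_ticks, n_cv_folds), so averaging over axis 1 gives one mean score per tick.

# Usage sketch: summarize learning_curve / validation_curve output per tick.
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import learning_curve, validation_curve

digits = load_digits()
X, y = digits.data, digits.target

# Vary the training-set size; scores come back as (n_ticks, n_cv_folds).
sizes, train_scores, test_scores = learning_curve(
    SVC(gamma=0.001), X, y, train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
print(sizes)                      # absolute training-set sizes actually used
print(test_scores.mean(axis=1))   # mean CV score for each training size

# Vary a single hyper-parameter instead of the training-set size.
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name='gamma', param_range=param_range, cv=5)
print(test_scores.mean(axis=1))   # mean CV score for each gamma value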
"from __future__ import division, print_function, absolute_import\n\nimport itertools\n\nimport numpy as np\nfrom numpy.testing import (assert_, assert_allclose, assert_raises,\n assert_equal, run_module_suite)\nfrom scipy import linalg\nimport scipy.linalg._decomp_update as _decomp_update\nfrom scipy.linalg._decomp_update import *\n\ndef assert_unitary(a, rtol=None, atol=None, assert_sqr=True):\n if rtol is None:\n rtol = 10.0 ** -(np.finfo(a.dtype).precision-2)\n if atol is None:\n atol = 2*np.finfo(a.dtype).eps\n\n if assert_sqr:\n assert_(a.shape[0] == a.shape[1], 'unitary matrices must be square')\n aTa = np.dot(a.T.conj(), a)\n assert_allclose(aTa, np.eye(a.shape[1]), rtol=rtol, atol=atol)\n\ndef assert_upper_tri(a, rtol=None, atol=None):\n if rtol is None:\n rtol = 10.0 ** -(np.finfo(a.dtype).precision-2)\n if atol is None:\n atol = 2*np.finfo(a.dtype).eps\n mask = np.tri(a.shape[0], a.shape[1], -1, np.bool_)\n assert_allclose(a[mask], 0.0, rtol=rtol, atol=atol)\n\ndef check_qr(q, r, a, rtol, atol, assert_sqr=True):\n assert_unitary(q, rtol, atol, assert_sqr)\n assert_upper_tri(r, rtol, atol)\n assert_allclose(q.dot(r), a, rtol=rtol, atol=atol)\n\ndef make_strided(arrs):\n strides = [(3, 7), (2, 2), (3, 4), (4, 2), (5, 4), (2, 3), (2, 1), (4, 5)]\n kmax = len(strides)\n k = 0\n ret = []\n for a in arrs:\n if a.ndim == 1:\n s = strides[k % kmax]\n k += 1\n base = np.zeros(s[0]*a.shape[0]+s[1], a.dtype)\n view = base[s[1]::s[0]]\n view[...] = a\n elif a.ndim == 2:\n s = strides[k % kmax]\n t = strides[(k+1) % kmax]\n k += 2\n base = np.zeros((s[0]*a.shape[0]+s[1], t[0]*a.shape[1]+t[1]), a.dtype)\n view = base[s[1]::s[0], t[1]::t[0]]\n view[...] = a\n else:\n raise ValueError('make_strided only works for ndim = 1 or 2 arrays')\n ret.append(view)\n return ret\n\ndef negate_strides(arrs):\n ret = []\n for a in arrs:\n b = np.zeros_like(a)\n if b.ndim == 2:\n b = b[::-1, ::-1]\n elif b.ndim == 1:\n b = b[::-1]\n else:\n raise ValueError('negate_strides only works for ndim = 1 or 2 arrays')\n b[...] = a\n ret.append(b)\n return ret\n\ndef nonitemsize_strides(arrs):\n out = []\n for a in arrs:\n a_dtype = a.dtype\n b = np.zeros(a.shape, [('a', a_dtype), ('junk', 'S1')])\n c = b.getfield(a_dtype)\n c[...] 
= a\n out.append(c)\n return out\n\ndef make_nonnative(arrs):\n out = []\n for a in arrs:\n out.append(a.astype(a.dtype.newbyteorder()))\n return out\n\nclass BaseQRdeltas(object):\n def __init__(self):\n self.rtol = 10.0 ** -(np.finfo(self.dtype).precision-2)\n self.atol = 10 * np.finfo(self.dtype).eps\n\n def generate(self, type, mode='full'):\n np.random.seed(29382)\n shape = {'sqr': (8, 8), 'tall': (12, 7), 'fat': (7, 12),\n 'Mx1': (8, 1), '1xN': (1, 8), '1x1': (1, 1)}[type]\n a = np.random.random(shape)\n if np.iscomplexobj(self.dtype.type(1)):\n b = np.random.random(shape)\n a = a + 1j * b\n a = a.astype(self.dtype)\n q, r = linalg.qr(a, mode=mode)\n return a, q, r\n\nclass BaseQRdelete(BaseQRdeltas):\n def test_sqr_1_row(self):\n a, q, r = self.generate('sqr')\n for row in range(r.shape[0]):\n q1, r1 = qr_delete(q, r, row, overwrite_qr=False)\n a1 = np.delete(a, row, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_sqr_p_row(self):\n a, q, r = self.generate('sqr')\n for ndel in range(2, 6):\n for row in range(a.shape[0]-ndel):\n q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)\n a1 = np.delete(a, slice(row, row+ndel), 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_sqr_1_col(self):\n a, q, r = self.generate('sqr')\n for col in range(r.shape[1]):\n q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)\n a1 = np.delete(a, col, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_sqr_p_col(self):\n a, q, r = self.generate('sqr')\n for ndel in range(2, 6):\n for col in range(r.shape[1]-ndel):\n q1, r1 = qr_delete(q, r, col, ndel, which='col',\n overwrite_qr=False)\n a1 = np.delete(a, slice(col, col+ndel), 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_1_row(self):\n a, q, r = self.generate('tall')\n for row in range(r.shape[0]):\n q1, r1 = qr_delete(q, r, row, overwrite_qr=False)\n a1 = np.delete(a, row, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_p_row(self):\n a, q, r = self.generate('tall')\n for ndel in range(2, 6):\n for row in range(a.shape[0]-ndel):\n q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)\n a1 = np.delete(a, slice(row, row+ndel), 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_1_col(self):\n a, q, r = self.generate('tall')\n for col in range(r.shape[1]):\n q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)\n a1 = np.delete(a, col, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_p_col(self):\n a, q, r = self.generate('tall')\n for ndel in range(2, 6):\n for col in range(r.shape[1]-ndel):\n q1, r1 = qr_delete(q, r, col, ndel, which='col',\n overwrite_qr=False)\n a1 = np.delete(a, slice(col, col+ndel), 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_fat_1_row(self):\n a, q, r = self.generate('fat')\n for row in range(r.shape[0]):\n q1, r1 = qr_delete(q, r, row, overwrite_qr=False)\n a1 = np.delete(a, row, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_fat_p_row(self):\n a, q, r = self.generate('fat')\n for ndel in range(2, 6):\n for row in range(a.shape[0]-ndel):\n q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)\n a1 = np.delete(a, slice(row, row+ndel), 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_fat_1_col(self):\n a, q, r = self.generate('fat')\n for col in range(r.shape[1]):\n q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)\n a1 = np.delete(a, col, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_fat_p_col(self):\n a, q, r = self.generate('fat')\n 
for ndel in range(2, 6):\n for col in range(r.shape[1]-ndel):\n q1, r1 = qr_delete(q, r, col, ndel, which='col',\n overwrite_qr=False)\n a1 = np.delete(a, slice(col, col+ndel), 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_economic_1_row(self):\n # this test always starts and ends with an economic decomp.\n a, q, r = self.generate('tall', 'economic')\n for row in range(r.shape[0]):\n q1, r1 = qr_delete(q, r, row, overwrite_qr=False)\n a1 = np.delete(a, row, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n # for economic row deletes \n # eco - prow = eco\n # eco - prow = sqr\n # eco - prow = fat\n def base_economic_p_row_xxx(self, ndel):\n a, q, r = self.generate('tall', 'economic')\n for row in range(a.shape[0]-ndel):\n q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)\n a1 = np.delete(a, slice(row, row+ndel), 0)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_economic_p_row_economic(self):\n # (12, 7) - (3, 7) = (9,7) --> stays economic\n self.base_economic_p_row_xxx(3)\n\n def test_economic_p_row_sqr(self):\n # (12, 7) - (5, 7) = (7, 7) --> becomes square\n self.base_economic_p_row_xxx(5)\n\n def test_economic_p_row_fat(self):\n # (12, 7) - (7,7) = (5, 7) --> becomes fat\n self.base_economic_p_row_xxx(7)\n\n def test_economic_1_col(self):\n a, q, r = self.generate('tall', 'economic')\n for col in range(r.shape[1]):\n q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)\n a1 = np.delete(a, col, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_economic_p_col(self):\n a, q, r = self.generate('tall', 'economic')\n for ndel in range(2, 6):\n for col in range(r.shape[1]-ndel):\n q1, r1 = qr_delete(q, r, col, ndel, which='col',\n overwrite_qr=False)\n a1 = np.delete(a, slice(col, col+ndel), 1)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_Mx1_1_row(self):\n a, q, r = self.generate('Mx1')\n for row in range(r.shape[0]):\n q1, r1 = qr_delete(q, r, row, overwrite_qr=False)\n a1 = np.delete(a, row, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_Mx1_p_row(self):\n a, q, r = self.generate('Mx1')\n for ndel in range(2, 6):\n for row in range(a.shape[0]-ndel):\n q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)\n a1 = np.delete(a, slice(row, row+ndel), 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1xN_1_col(self):\n a, q, r = self.generate('1xN')\n for col in range(r.shape[1]):\n q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)\n a1 = np.delete(a, col, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1xN_p_col(self):\n a, q, r = self.generate('1xN')\n for ndel in range(2, 6):\n for col in range(r.shape[1]-ndel):\n q1, r1 = qr_delete(q, r, col, ndel, which='col',\n overwrite_qr=False)\n a1 = np.delete(a, slice(col, col+ndel), 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_Mx1_economic_1_row(self):\n a, q, r = self.generate('Mx1', 'economic')\n for row in range(r.shape[0]):\n q1, r1 = qr_delete(q, r, row, overwrite_qr=False)\n a1 = np.delete(a, row, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_Mx1_economic_p_row(self):\n a, q, r = self.generate('Mx1', 'economic')\n for ndel in range(2, 6):\n for row in range(a.shape[0]-ndel):\n q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)\n a1 = np.delete(a, slice(row, row+ndel), 0)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_delete_last_1_row(self):\n # full and eco are the same for 1xN \n a, q, r = self.generate('1xN')\n q1, r1 = qr_delete(q, r, 
0, 1, 'row')\n        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))\n        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))\n\n    def test_delete_last_p_row(self):\n        a, q, r = self.generate('tall', 'full')\n        q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')\n        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))\n        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))\n\n        a, q, r = self.generate('tall', 'economic')\n        q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')\n        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))\n        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))\n\n    def test_delete_last_1_col(self):\n        a, q, r = self.generate('Mx1', 'economic')\n        q1, r1 = qr_delete(q, r, 0, 1, 'col')\n        assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))\n        assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))\n\n        a, q, r = self.generate('Mx1', 'full')\n        q1, r1 = qr_delete(q, r, 0, 1, 'col')\n        assert_unitary(q1)\n        assert_(q1.dtype == q.dtype)\n        assert_(q1.shape == q.shape)\n        assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))\n\n    def test_delete_last_p_col(self):\n        a, q, r = self.generate('tall', 'full')\n        q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')\n        assert_unitary(q1)\n        assert_(q1.dtype == q.dtype)\n        assert_(q1.shape == q.shape)\n        assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))\n\n        a, q, r = self.generate('tall', 'economic')\n        q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')\n        assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))\n        assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))\n\n    def test_delete_1x1_row_col(self):\n        a, q, r = self.generate('1x1')\n        q1, r1 = qr_delete(q, r, 0, 1, 'row')\n        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))\n        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))\n\n        a, q, r = self.generate('1x1')\n        q1, r1 = qr_delete(q, r, 0, 1, 'col')\n        assert_unitary(q1)\n        assert_(q1.dtype == q.dtype)\n        assert_(q1.shape == q.shape)\n        assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))\n\n    # all full qr, row deletes and single column deletes should be able to\n    # handle any non-negative strides. (only row and column vector\n    # operations are used.) p column deletes require Fortran ordered\n    # Q and R and will make a copy as necessary. Economic qr row deletes\n    # require a contiguous q.\n\n    def base_non_simple_strides(self, adjust_strides, ks, p, which, overwriteable):\n        if which == 'row':\n            qind = (slice(p,None), slice(p,None))\n            rind = (slice(p,None), slice(None))\n        else:\n            qind = (slice(None), slice(None))\n            rind = (slice(None), slice(None,-p))\n\n        for type, k in itertools.product(['sqr', 'tall', 'fat'], ks):\n            a, q0, r0, = self.generate(type)\n            qs, rs = adjust_strides((q0, r0))\n            if p == 1:\n                a1 = np.delete(a, k, 0 if which == 'row' else 1)\n            else:\n                s = slice(k,k+p)\n                if k < 0:\n                    s = slice(k, k + p + (a.shape[0] if which == 'row' else a.shape[1]))\n                a1 = np.delete(a, s, 0 if which == 'row' else 1)\n\n            # for each variable, q, r we try with it strided and\n            # overwrite=False. 
Then we try with overwrite=True, and make\n # sure that q and r are still overwritten.\n\n q = q0.copy('F')\n r = r0.copy('F') \n q1, r1 = qr_delete(qs, r, k, p, which, False)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n q1o, r1o = qr_delete(qs, r, k, p, which, True)\n check_qr(q1o, r1o, a1, self.rtol, self.atol)\n if overwriteable:\n assert_allclose(q1o, qs[qind], rtol=self.rtol, atol=self.atol)\n assert_allclose(r1o, r[rind], rtol=self.rtol, atol=self.atol)\n\n q = q0.copy('F')\n r = r0.copy('F')\n q2, r2 = qr_delete(q, rs, k, p, which, False)\n check_qr(q2, r2, a1, self.rtol, self.atol)\n q2o, r2o = qr_delete(q, rs, k, p, which, True)\n check_qr(q2o, r2o, a1, self.rtol, self.atol)\n if overwriteable:\n assert_allclose(q2o, q[qind], rtol=self.rtol, atol=self.atol)\n assert_allclose(r2o, rs[rind], rtol=self.rtol, atol=self.atol)\n\n q = q0.copy('F')\n r = r0.copy('F')\n # since some of these were consumed above\n qs, rs = adjust_strides((q, r))\n q3, r3 = qr_delete(qs, rs, k, p, which, False)\n check_qr(q3, r3, a1, self.rtol, self.atol)\n q3o, r3o = qr_delete(qs, rs, k, p, which, True)\n check_qr(q3o, r3o, a1, self.rtol, self.atol)\n if overwriteable:\n assert_allclose(q2o, qs[qind], rtol=self.rtol, atol=self.atol)\n assert_allclose(r3o, rs[rind], rtol=self.rtol, atol=self.atol)\n\n def test_non_unit_strides_1_row(self):\n self.base_non_simple_strides(make_strided, [0], 1, 'row', True)\n\n def test_non_unit_strides_p_row(self):\n self.base_non_simple_strides(make_strided, [0], 3, 'row', True)\n\n def test_non_unit_strides_1_col(self):\n self.base_non_simple_strides(make_strided, [0], 1, 'col', True)\n\n def test_non_unit_strides_p_col(self):\n self.base_non_simple_strides(make_strided, [0], 3, 'col', False)\n\n def test_neg_strides_1_row(self):\n self.base_non_simple_strides(negate_strides, [0], 1, 'row', False)\n\n def test_neg_strides_p_row(self):\n self.base_non_simple_strides(negate_strides, [0], 3, 'row', False)\n\n def test_neg_strides_1_col(self):\n self.base_non_simple_strides(negate_strides, [0], 1, 'col', False)\n\n def test_neg_strides_p_col(self):\n self.base_non_simple_strides(negate_strides, [0], 3, 'col', False)\n\n def test_non_itemize_strides_1_row(self):\n self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'row', False)\n\n def test_non_itemize_strides_p_row(self):\n self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'row', False)\n\n def test_non_itemize_strides_1_col(self):\n self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'col', False)\n\n def test_non_itemize_strides_p_col(self):\n self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'col', False)\n\n def test_non_native_byte_order_1_row(self):\n self.base_non_simple_strides(make_nonnative, [0], 1, 'row', False)\n\n def test_non_native_byte_order_p_row(self):\n self.base_non_simple_strides(make_nonnative, [0], 3, 'row', False)\n\n def test_non_native_byte_order_1_col(self):\n self.base_non_simple_strides(make_nonnative, [0], 1, 'col', False)\n\n def test_non_native_byte_order_p_col(self):\n self.base_non_simple_strides(make_nonnative, [0], 3, 'col', False)\n\n def test_neg_k(self):\n a, q, r = self.generate('sqr')\n for k, p, w in itertools.product([-3, -7], [1, 3], ['row', 'col']):\n q1, r1 = qr_delete(q, r, k, p, w, overwrite_qr=False)\n if w == 'row':\n a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[0]), 0)\n else:\n a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[1]), 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def base_overwrite_qr(self, which, p, test_C, test_F, 
mode='full'):\n assert_sqr = True if mode == 'full' else False\n if which == 'row':\n qind = (slice(p,None), slice(p,None))\n rind = (slice(p,None), slice(None))\n else:\n qind = (slice(None), slice(None))\n rind = (slice(None), slice(None,-p))\n a, q0, r0 = self.generate('sqr', mode)\n if p == 1:\n a1 = np.delete(a, 3, 0 if which == 'row' else 1)\n else:\n a1 = np.delete(a, slice(3, 3+p), 0 if which == 'row' else 1)\n\n # don't overwrite\n q = q0.copy('F')\n r = r0.copy('F')\n q1, r1 = qr_delete(q, r, 3, p, which, False)\n check_qr(q1, r1, a1, self.rtol, self.atol, assert_sqr)\n check_qr(q, r, a, self.rtol, self.atol, assert_sqr)\n\n if test_F:\n q = q0.copy('F')\n r = r0.copy('F')\n q2, r2 = qr_delete(q, r, 3, p, which, True)\n check_qr(q2, r2, a1, self.rtol, self.atol, assert_sqr)\n # verify the overwriting\n assert_allclose(q2, q[qind], rtol=self.rtol, atol=self.atol)\n assert_allclose(r2, r[rind], rtol=self.rtol, atol=self.atol)\n\n if test_C:\n q = q0.copy('C')\n r = r0.copy('C')\n q3, r3 = qr_delete(q, r, 3, p, which, True)\n check_qr(q3, r3, a1, self.rtol, self.atol, assert_sqr)\n assert_allclose(q3, q[qind], rtol=self.rtol, atol=self.atol)\n assert_allclose(r3, r[rind], rtol=self.rtol, atol=self.atol)\n\n def test_overwrite_qr_1_row(self):\n # any positively strided q and r.\n self.base_overwrite_qr('row', 1, True, True)\n\n def test_overwrite_economic_qr_1_row(self):\n # Any contiguous q and positively strided r.\n self.base_overwrite_qr('row', 1, True, True, 'economic')\n\n def test_overwrite_qr_1_col(self):\n # any positively strided q and r.\n # full and eco share code paths\n self.base_overwrite_qr('col', 1, True, True)\n\n def test_overwrite_qr_p_row(self):\n # any positively strided q and r.\n self.base_overwrite_qr('row', 3, True, True)\n\n def test_overwrite_economic_qr_p_row(self):\n # any contiguous q and positively strided r\n self.base_overwrite_qr('row', 3, True, True, 'economic')\n\n def test_overwrite_qr_p_col(self):\n # only F orderd q and r can be overwritten for cols\n # full and eco share code paths \n self.base_overwrite_qr('col', 3, False, True)\n\n def test_bad_which(self):\n a, q, r = self.generate('sqr')\n assert_raises(ValueError, qr_delete, q, r, 0, which='foo')\n\n def test_bad_k(self):\n a, q, r = self.generate('tall')\n assert_raises(ValueError, qr_delete, q, r, q.shape[0], 1)\n assert_raises(ValueError, qr_delete, q, r, -q.shape[0]-1, 1)\n assert_raises(ValueError, qr_delete, q, r, r.shape[0], 1, 'col')\n assert_raises(ValueError, qr_delete, q, r, -r.shape[0]-1, 1, 'col')\n\n def test_bad_p(self):\n a, q, r = self.generate('tall')\n # p must be positive\n assert_raises(ValueError, qr_delete, q, r, 0, -1)\n assert_raises(ValueError, qr_delete, q, r, 0, -1, 'col')\n\n # and nonzero\n assert_raises(ValueError, qr_delete, q, r, 0, 0)\n assert_raises(ValueError, qr_delete, q, r, 0, 0, 'col')\n\n # must have at least k+p rows or cols, depending.\n assert_raises(ValueError, qr_delete, q, r, 3, q.shape[0]-2)\n assert_raises(ValueError, qr_delete, q, r, 3, r.shape[1]-2, 'col')\n\n def test_empty_q(self):\n a, q, r = self.generate('tall')\n # same code path for 'row' and 'col'\n assert_raises(ValueError, qr_delete, np.array([]), r, 0, 1)\n\n def test_empty_r(self):\n a, q, r = self.generate('tall')\n # same code path for 'row' and 'col'\n assert_raises(ValueError, qr_delete, q, np.array([]), 0, 1)\n\n def test_mismatched_q_and_r(self):\n a, q, r = self.generate('tall')\n r = r[1:]\n assert_raises(ValueError, qr_delete, q, r, 0, 1)\n\n def 
test_unsupported_dtypes(self):\n dts = ['int8', 'int16', 'int32', 'int64', \n 'uint8', 'uint16', 'uint32', 'uint64',\n 'float16', 'longdouble', 'longcomplex',\n 'bool']\n a, q0, r0 = self.generate('tall')\n for dtype in dts:\n q = q0.real.astype(dtype)\n r = r0.real.astype(dtype)\n assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')\n assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'row')\n assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')\n assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'col')\n\n assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')\n assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'row')\n assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')\n assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'col')\n\n def test_check_finite(self):\n a0, q0, r0 = self.generate('tall')\n\n q = q0.copy('F')\n q[1,1] = np.nan\n assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')\n assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'row')\n assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')\n assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'col')\n\n r = r0.copy('F')\n r[1,1] = np.nan\n assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')\n assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'row')\n assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')\n assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'col')\n\n def test_qr_scalar(self):\n a, q, r = self.generate('1x1')\n assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'row')\n assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'row')\n assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'col')\n assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'col')\n\nclass TestQRdelete_f(BaseQRdelete):\n dtype = np.dtype('f')\n\nclass TestQRdelete_F(BaseQRdelete):\n dtype = np.dtype('F')\n\nclass TestQRdelete_d(BaseQRdelete):\n dtype = np.dtype('d')\n\nclass TestQRdelete_D(BaseQRdelete):\n dtype = np.dtype('D')\n\nclass BaseQRinsert(BaseQRdeltas):\n def generate(self, type, mode='full', which='row', p=1):\n a, q, r = super(BaseQRinsert, self).generate(type, mode)\n\n assert_(p > 0)\n\n # super call set the seed...\n if which == 'row':\n if p == 1:\n u = np.random.random(a.shape[1])\n else:\n u = np.random.random((p, a.shape[1]))\n elif which == 'col':\n if p == 1:\n u = np.random.random(a.shape[0])\n else:\n u = np.random.random((a.shape[0], p))\n else:\n ValueError('which should be either \"row\" or \"col\"')\n\n if np.iscomplexobj(self.dtype.type(1)):\n b = np.random.random(u.shape)\n u = u + 1j * b\n\n u = u.astype(self.dtype)\n return a, q, r, u\n\n def test_sqr_1_row(self):\n a, q, r, u = self.generate('sqr', which='row')\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row, u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n \n def test_sqr_p_row(self):\n # sqr + rows --> fat always\n a, q, r, u = self.generate('sqr', which='row', p=3)\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row*np.ones(3, np.intp), u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_sqr_1_col(self):\n a, q, r, u = self.generate('sqr', which='col')\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col, u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_sqr_p_col(self):\n # sqr + cols --> fat always\n a, q, r, u = self.generate('sqr', which='col', p=3)\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', 
overwrite_qru=False)\n a1 = np.insert(a, col*np.ones(3, np.intp), u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_1_row(self):\n a, q, r, u = self.generate('tall', which='row')\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row, u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_p_row(self):\n # tall + rows --> tall always\n a, q, r, u = self.generate('tall', which='row', p=3)\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row*np.ones(3, np.intp), u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_1_col(self):\n a, q, r, u = self.generate('tall', which='col')\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col, u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n # for column adds to tall matrices there are three cases to test\n # tall + pcol --> tall\n # tall + pcol --> sqr\n # tall + pcol --> fat\n def base_tall_p_col_xxx(self, p):\n a, q, r, u = self.generate('tall', which='col', p=p)\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col*np.ones(p, np.intp), u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_p_col_tall(self):\n # 12x7 + 12x3 = 12x10 --> stays tall\n self.base_tall_p_col_xxx(3)\n\n def test_tall_p_col_sqr(self):\n # 12x7 + 12x5 = 12x12 --> becomes sqr\n self.base_tall_p_col_xxx(5)\n\n def test_tall_p_col_fat(self):\n # 12x7 + 12x7 = 12x14 --> becomes fat\n self.base_tall_p_col_xxx(7)\n\n def test_fat_1_row(self):\n a, q, r, u = self.generate('fat', which='row')\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row, u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n # for row adds to fat matrices there are three cases to test\n # fat + prow --> fat\n # fat + prow --> sqr\n # fat + prow --> tall\n def base_fat_p_row_xxx(self, p):\n a, q, r, u = self.generate('fat', which='row', p=p)\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row*np.ones(p, np.intp), u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n \n def test_fat_p_row_fat(self):\n # 7x12 + 3x12 = 10x12 --> stays fat\n self.base_fat_p_row_xxx(3)\n\n def test_fat_p_row_sqr(self):\n # 7x12 + 5x12 = 12x12 --> becomes sqr\n self.base_fat_p_row_xxx(5)\n\n def test_fat_p_row_tall(self):\n # 7x12 + 7x12 = 14x12 --> becomes tall\n self.base_fat_p_row_xxx(7)\n\n def test_fat_1_col(self):\n a, q, r, u = self.generate('fat', which='col')\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col, u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_fat_p_col(self):\n # fat + cols --> fat always\n a, q, r, u = self.generate('fat', which='col', p=3)\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col*np.ones(3, np.intp), u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_economic_1_row(self):\n a, q, r, u = self.generate('tall', 'economic', 'row')\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False)\n a1 = np.insert(a, row, u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_economic_p_row(self):\n # tall + rows --> tall always\n a, q, r, u = self.generate('tall', 'economic', 'row', 3)\n for row in range(r.shape[0] + 1):\n q1, 
r1 = qr_insert(q, r, u, row, overwrite_qru=False)\n a1 = np.insert(a, row*np.ones(3, np.intp), u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_economic_1_col(self):\n a, q, r, u = self.generate('tall', 'economic', which='col')\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u.copy(), col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col, u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_economic_1_col_bad_update(self):\n # When the column to be added lies in the span of Q, the update is\n # not meaningful. This is detected, and a LinAlgError is issued.\n q = np.eye(5, 3, dtype=self.dtype)\n r = np.eye(3, dtype=self.dtype)\n u = np.array([1, 0, 0, 0, 0], self.dtype)\n assert_raises(linalg.LinAlgError, qr_insert, q, r, u, 0, 'col')\n\n # for column adds to economic matrices there are three cases to test\n # eco + pcol --> eco\n # eco + pcol --> sqr\n # eco + pcol --> fat\n def base_economic_p_col_xxx(self, p):\n a, q, r, u = self.generate('tall', 'economic', which='col', p=p)\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col*np.ones(p, np.intp), u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_economic_p_col_eco(self):\n # 12x7 + 12x3 = 12x10 --> stays eco\n self.base_economic_p_col_xxx(3)\n\n def test_economic_p_col_sqr(self):\n # 12x7 + 12x5 = 12x12 --> becomes sqr\n self.base_economic_p_col_xxx(5)\n\n def test_economic_p_col_fat(self):\n # 12x7 + 12x7 = 12x14 --> becomes fat\n self.base_economic_p_col_xxx(7)\n\n def test_Mx1_1_row(self):\n a, q, r, u = self.generate('Mx1', which='row')\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row, u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n \n def test_Mx1_p_row(self):\n a, q, r, u = self.generate('Mx1', which='row', p=3)\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row*np.ones(3, np.intp), u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_Mx1_1_col(self):\n a, q, r, u = self.generate('Mx1', which='col')\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col, u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_Mx1_p_col(self):\n a, q, r, u = self.generate('Mx1', which='col', p=3)\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col*np.ones(3, np.intp), u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_Mx1_economic_1_row(self):\n a, q, r, u = self.generate('Mx1', 'economic', 'row')\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row, u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n \n def test_Mx1_economic_p_row(self):\n a, q, r, u = self.generate('Mx1', 'economic', 'row', 3)\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row*np.ones(3, np.intp), u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_Mx1_economic_1_col(self):\n a, q, r, u = self.generate('Mx1', 'economic', 'col')\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col, u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_Mx1_economic_p_col(self):\n a, q, r, u = self.generate('Mx1', 'full', 'col', 3)\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, 
u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col*np.ones(3, np.intp), u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_1xN_1_row(self):\n a, q, r, u = self.generate('1xN', which='row')\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row, u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n \n def test_1xN_p_row(self):\n a, q, r, u = self.generate('1xN', which='row', p=3)\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row*np.ones(3, np.intp), u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1xN_1_col(self):\n a, q, r, u = self.generate('1xN', which='col')\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col, u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1xN_p_col(self):\n a, q, r, u = self.generate('1xN', which='col', p=3)\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col*np.ones(3, np.intp), u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1x1_1_row(self):\n a, q, r, u = self.generate('1x1', which='row')\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row, u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n \n def test_1x1_p_row(self):\n a, q, r, u = self.generate('1x1', which='row', p=3)\n for row in range(r.shape[0] + 1):\n q1, r1 = qr_insert(q, r, u, row)\n a1 = np.insert(a, row*np.ones(3, np.intp), u, 0)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1x1_1_col(self):\n a, q, r, u = self.generate('1x1', which='col')\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col, u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1x1_p_col(self):\n a, q, r, u = self.generate('1x1', which='col', p=3)\n for col in range(r.shape[1] + 1):\n q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)\n a1 = np.insert(a, col*np.ones(3, np.intp), u, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n \n def test_1x1_1_scalar(self):\n a, q, r, u = self.generate('1x1', which='row')\n assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'row')\n assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'row')\n assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'row')\n\n assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'col')\n assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'col')\n assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'col')\n\n def base_non_simple_strides(self, adjust_strides, k, p, which):\n for type in ['sqr', 'tall', 'fat']:\n a, q0, r0, u0 = self.generate(type, which=which, p=p)\n qs, rs, us = adjust_strides((q0, r0, u0))\n if p == 1:\n ai = np.insert(a, k, u0, 0 if which == 'row' else 1)\n else:\n ai = np.insert(a, k*np.ones(p, np.intp),\n u0 if which == 'row' else u0, \n 0 if which == 'row' else 1)\n\n # for each variable, q, r, u we try with it strided and\n # overwrite=False. Then we try with overwrite=True. 
Nothing\n # is checked to see if it can be overwritten, since only\n # F ordered Q can be overwritten when adding columns.\n\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n q1, r1 = qr_insert(qs, r, u, k, which, overwrite_qru=False)\n check_qr(q1, r1, ai, self.rtol, self.atol)\n q1o, r1o = qr_insert(qs, r, u, k, which, overwrite_qru=True)\n check_qr(q1o, r1o, ai, self.rtol, self.atol)\n\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n q2, r2 = qr_insert(q, rs, u, k, which, overwrite_qru=False)\n check_qr(q2, r2, ai, self.rtol, self.atol)\n q2o, r2o = qr_insert(q, rs, u, k, which, overwrite_qru=True)\n check_qr(q2o, r2o, ai, self.rtol, self.atol)\n\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n q3, r3 = qr_insert(q, r, us, k, which, overwrite_qru=False)\n check_qr(q3, r3, ai, self.rtol, self.atol)\n q3o, r3o = qr_insert(q, r, us, k, which, overwrite_qru=True)\n check_qr(q3o, r3o, ai, self.rtol, self.atol)\n\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n # since some of these were consumed above\n qs, rs, us = adjust_strides((q, r, u))\n q5, r5 = qr_insert(qs, rs, us, k, which, overwrite_qru=False)\n check_qr(q5, r5, ai, self.rtol, self.atol)\n q5o, r5o = qr_insert(qs, rs, us, k, which, overwrite_qru=True)\n check_qr(q5o, r5o, ai, self.rtol, self.atol)\n \n def test_non_unit_strides_1_row(self):\n self.base_non_simple_strides(make_strided, 0, 1, 'row')\n\n def test_non_unit_strides_p_row(self):\n self.base_non_simple_strides(make_strided, 0, 3, 'row')\n\n def test_non_unit_strides_1_col(self):\n self.base_non_simple_strides(make_strided, 0, 1, 'col')\n\n def test_non_unit_strides_p_col(self):\n self.base_non_simple_strides(make_strided, 0, 3, 'col')\n\n def test_neg_strides_1_row(self):\n self.base_non_simple_strides(negate_strides, 0, 1, 'row')\n\n def test_neg_strides_p_row(self):\n self.base_non_simple_strides(negate_strides, 0, 3, 'row')\n\n def test_neg_strides_1_col(self):\n self.base_non_simple_strides(negate_strides, 0, 1, 'col')\n\n def test_neg_strides_p_col(self):\n self.base_non_simple_strides(negate_strides, 0, 3, 'col')\n\n def test_non_itemsize_strides_1_row(self):\n self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'row')\n\n def test_non_itemsize_strides_p_row(self):\n self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'row')\n\n def test_non_itemsize_strides_1_col(self):\n self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'col')\n\n def test_non_itemsize_strides_p_col(self):\n self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'col')\n\n def test_non_native_byte_order_1_row(self):\n self.base_non_simple_strides(make_nonnative, 0, 1, 'row')\n\n def test_non_native_byte_order_p_row(self):\n self.base_non_simple_strides(make_nonnative, 0, 3, 'row')\n\n def test_non_native_byte_order_1_col(self):\n self.base_non_simple_strides(make_nonnative, 0, 1, 'col')\n\n def test_non_native_byte_order_p_col(self):\n self.base_non_simple_strides(make_nonnative, 0, 3, 'col')\n\n def test_overwrite_qu_rank_1(self):\n # when inserting rows, the size of both Q and R change, so only\n # column inserts can overwrite q. Only complex column inserts \n # with C ordered Q overwrite u. 
Any contiguous Q is overwritten\n # when inserting 1 column\n a, q0, r, u, = self.generate('sqr', which='col', p=1)\n q = q0.copy('C')\n u0 = u.copy()\n # don't overwrite\n q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)\n a1 = np.insert(a, 0, u0, 1)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n check_qr(q, r, a, self.rtol, self.atol)\n\n # try overwriting\n q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)\n check_qr(q2, r2, a1, self.rtol, self.atol)\n # verify the overwriting\n assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)\n assert_allclose(u, u0.conj(), self.rtol, self.atol)\n\n # now try with a fortran ordered Q\n qF = q0.copy('F')\n u1 = u0.copy()\n q3, r3 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=False)\n check_qr(q3, r3, a1, self.rtol, self.atol)\n check_qr(qF, r, a, self.rtol, self.atol)\n\n # try overwriting\n q4, r4 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=True)\n check_qr(q4, r4, a1, self.rtol, self.atol)\n assert_allclose(q4, qF, rtol=self.rtol, atol=self.atol)\n\n def test_overwrite_qu_rank_p(self):\n # when inserting rows, the size of both Q and R change, so only\n # column inserts can potentially overwrite Q. In practice, only\n # F ordered Q are overwritten with a rank p update.\n a, q0, r, u, = self.generate('sqr', which='col', p=3)\n q = q0.copy('F')\n a1 = np.insert(a, np.zeros(3, np.intp), u, 1)\n\n # don't overwrite\n q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n check_qr(q, r, a, self.rtol, self.atol)\n\n # try overwriting\n q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)\n check_qr(q2, r2, a1, self.rtol, self.atol)\n assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)\n\n def test_empty_inputs(self):\n a, q, r, u = self.generate('sqr', which='row')\n assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'row')\n assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'row')\n assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'row')\n assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'col')\n assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'col')\n assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'col')\n\n def test_mismatched_shapes(self):\n a, q, r, u = self.generate('tall', which='row')\n assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'row')\n assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'row')\n assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'row')\n assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'col')\n assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'col')\n assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'col')\n\n def test_unsupported_dtypes(self):\n dts = ['int8', 'int16', 'int32', 'int64', \n 'uint8', 'uint16', 'uint32', 'uint64',\n 'float16', 'longdouble', 'longcomplex',\n 'bool']\n a, q0, r0, u0 = self.generate('sqr', which='row')\n for dtype in dts:\n q = q0.real.astype(dtype)\n r = r0.real.astype(dtype)\n u = u0.real.astype(dtype)\n assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')\n assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')\n assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')\n assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')\n assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')\n assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')\n\n def test_check_finite(self):\n a0, q0, r0, u0 = self.generate('sqr', which='row', p=3)\n\n q = q0.copy('F')\n q[1,1] = np.nan\n assert_raises(ValueError, qr_insert, q, 
r0, u0[:,0], 0, 'row')\n assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')\n assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'col')\n assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')\n\n r = r0.copy('F')\n r[1,1] = np.nan\n assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'row')\n assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')\n assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'col')\n assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')\n\n u = u0.copy('F')\n u[0,0] = np.nan\n assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'row')\n assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')\n assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'col')\n assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')\n\nclass TestQRinsert_f(BaseQRinsert):\n dtype = np.dtype('f')\n\nclass TestQRinsert_F(BaseQRinsert):\n dtype = np.dtype('F')\n\nclass TestQRinsert_d(BaseQRinsert):\n dtype = np.dtype('d')\n\nclass TestQRinsert_D(BaseQRinsert):\n dtype = np.dtype('D')\n\nclass BaseQRupdate(BaseQRdeltas):\n def generate(self, type, mode='full', p=1):\n a, q, r = super(BaseQRupdate, self).generate(type, mode)\n\n # super call set the seed...\n if p == 1:\n u = np.random.random(q.shape[0])\n v = np.random.random(r.shape[1])\n else:\n u = np.random.random((q.shape[0], p))\n v = np.random.random((r.shape[1], p))\n\n if np.iscomplexobj(self.dtype.type(1)):\n b = np.random.random(u.shape)\n u = u + 1j * b\n\n c = np.random.random(v.shape)\n v = v + 1j * c\n\n u = u.astype(self.dtype)\n v = v.astype(self.dtype)\n return a, q, r, u, v\n\n def test_sqr_rank_1(self):\n a, q, r, u, v = self.generate('sqr')\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.outer(u, v.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_sqr_rank_p(self):\n # test ndim = 2, rank 1 updates here too\n for p in [1, 2, 3, 5]:\n a, q, r, u, v = self.generate('sqr', p=p)\n if p == 1:\n u = u.reshape(u.size, 1)\n v = v.reshape(v.size, 1)\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.dot(u, v.T.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_rank_1(self):\n a, q, r, u, v = self.generate('tall')\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.outer(u, v.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_tall_rank_p(self):\n for p in [1, 2, 3, 5]:\n a, q, r, u, v = self.generate('tall', p=p)\n if p == 1:\n u = u.reshape(u.size, 1)\n v = v.reshape(v.size, 1)\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.dot(u, v.T.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_fat_rank_1(self):\n a, q, r, u, v = self.generate('fat')\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.outer(u, v.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_fat_rank_p(self):\n for p in [1, 2, 3, 5]:\n a, q, r, u, v = self.generate('fat', p=p)\n if p == 1:\n u = u.reshape(u.size, 1)\n v = v.reshape(v.size, 1)\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.dot(u, v.T.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_economic_rank_1(self):\n a, q, r, u, v = self.generate('tall', 'economic')\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.outer(u, v.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_economic_rank_p(self):\n for p in [1, 2, 3, 5]:\n a, q, r, u, v = self.generate('tall', 'economic', p)\n if p == 1:\n u = u.reshape(u.size, 1)\n v = v.reshape(v.size, 1)\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.dot(u, v.T.conj())\n 
check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_Mx1_rank_1(self):\n a, q, r, u, v = self.generate('Mx1')\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.outer(u, v.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_Mx1_rank_p(self):\n # when M or N == 1, only a rank 1 update is allowed. This isn't \n # fundamental limitation, but the code does not support it.\n a, q, r, u, v = self.generate('Mx1', p=1)\n u = u.reshape(u.size, 1)\n v = v.reshape(v.size, 1)\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.dot(u, v.T.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_Mx1_economic_rank_1(self):\n a, q, r, u, v = self.generate('Mx1', 'economic')\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.outer(u, v.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_Mx1_economic_rank_p(self):\n # when M or N == 1, only a rank 1 update is allowed. This isn't \n # fundamental limitation, but the code does not support it.\n a, q, r, u, v = self.generate('Mx1', 'economic', p=1)\n u = u.reshape(u.size, 1)\n v = v.reshape(v.size, 1)\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.dot(u, v.T.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n\n def test_1xN_rank_1(self):\n a, q, r, u, v = self.generate('1xN')\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.outer(u, v.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1xN_rank_p(self):\n # when M or N == 1, only a rank 1 update is allowed. This isn't \n # fundamental limitation, but the code does not support it.\n a, q, r, u, v = self.generate('1xN', p=1)\n u = u.reshape(u.size, 1)\n v = v.reshape(v.size, 1)\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.dot(u, v.T.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1x1_rank_1(self):\n a, q, r, u, v = self.generate('1x1')\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.outer(u, v.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1x1_rank_p(self):\n # when M or N == 1, only a rank 1 update is allowed. This isn't \n # fundamental limitation, but the code does not support it.\n a, q, r, u, v = self.generate('1x1', p=1)\n u = u.reshape(u.size, 1)\n v = v.reshape(v.size, 1)\n q1, r1 = qr_update(q, r, u, v, False)\n a1 = a + np.dot(u, v.T.conj())\n check_qr(q1, r1, a1, self.rtol, self.atol)\n\n def test_1x1_rank_1_scalar(self):\n a, q, r, u, v = self.generate('1x1')\n assert_raises(ValueError, qr_update, q[0, 0], r, u, v)\n assert_raises(ValueError, qr_update, q, r[0, 0], u, v)\n assert_raises(ValueError, qr_update, q, r, u[0], v)\n assert_raises(ValueError, qr_update, q, r, u, v[0])\n\n def base_non_simple_strides(self, adjust_strides, mode, p, overwriteable):\n assert_sqr = False if mode == 'economic' else True\n for type in ['sqr', 'tall', 'fat']:\n a, q0, r0, u0, v0 = self.generate(type, mode, p)\n qs, rs, us, vs = adjust_strides((q0, r0, u0, v0))\n if p == 1:\n aup = a + np.outer(u0, v0.conj())\n else:\n aup = a + np.dot(u0, v0.T.conj())\n\n # for each variable, q, r, u, v we try with it strided and\n # overwrite=False. 
Then we try with overwrite=True, and make\n # sure that if p == 1, r and v are still overwritten.\n # a strided q and u must always be copied.\n\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n v = v0.copy('C')\n q1, r1 = qr_update(qs, r, u, v, False)\n check_qr(q1, r1, aup, self.rtol, self.atol, assert_sqr)\n q1o, r1o = qr_update(qs, r, u, v, True)\n check_qr(q1o, r1o, aup, self.rtol, self.atol, assert_sqr)\n if overwriteable:\n assert_allclose(r1o, r, rtol=self.rtol, atol=self.atol)\n assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)\n\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n v = v0.copy('C')\n q2, r2 = qr_update(q, rs, u, v, False)\n check_qr(q2, r2, aup, self.rtol, self.atol, assert_sqr)\n q2o, r2o = qr_update(q, rs, u, v, True)\n check_qr(q2o, r2o, aup, self.rtol, self.atol, assert_sqr)\n if overwriteable:\n assert_allclose(r2o, rs, rtol=self.rtol, atol=self.atol)\n assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)\n\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n v = v0.copy('C')\n q3, r3 = qr_update(q, r, us, v, False)\n check_qr(q3, r3, aup, self.rtol, self.atol, assert_sqr)\n q3o, r3o = qr_update(q, r, us, v, True)\n check_qr(q3o, r3o, aup, self.rtol, self.atol, assert_sqr)\n if overwriteable:\n assert_allclose(r3o, r, rtol=self.rtol, atol=self.atol)\n assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)\n\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n v = v0.copy('C')\n q4, r4 = qr_update(q, r, u, vs, False)\n check_qr(q4, r4, aup, self.rtol, self.atol, assert_sqr)\n q4o, r4o = qr_update(q, r, u, vs, True)\n check_qr(q4o, r4o, aup, self.rtol, self.atol, assert_sqr)\n if overwriteable:\n assert_allclose(r4o, r, rtol=self.rtol, atol=self.atol)\n assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)\n\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n v = v0.copy('C')\n # since some of these were consumed above\n qs, rs, us, vs = adjust_strides((q, r, u, v))\n q5, r5 = qr_update(qs, rs, us, vs, False)\n check_qr(q5, r5, aup, self.rtol, self.atol, assert_sqr)\n q5o, r5o = qr_update(qs, rs, us, vs, True)\n check_qr(q5o, r5o, aup, self.rtol, self.atol, assert_sqr)\n if overwriteable:\n assert_allclose(r5o, rs, rtol=self.rtol, atol=self.atol)\n assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)\n\n def test_non_unit_strides_rank_1(self):\n self.base_non_simple_strides(make_strided, 'full', 1, True)\n\n def test_non_unit_strides_economic_rank_1(self):\n self.base_non_simple_strides(make_strided, 'economic', 1, True)\n\n def test_non_unit_strides_rank_p(self):\n self.base_non_simple_strides(make_strided, 'full', 3, False)\n\n def test_non_unit_strides_economic_rank_p(self):\n self.base_non_simple_strides(make_strided, 'economic', 3, False)\n\n def test_neg_strides_rank_1(self):\n self.base_non_simple_strides(negate_strides, 'full', 1, False)\n\n def test_neg_strides_economic_rank_1(self):\n self.base_non_simple_strides(negate_strides, 'economic', 1, False)\n\n def test_neg_strides_rank_p(self):\n self.base_non_simple_strides(negate_strides, 'full', 3, False)\n\n def test_neg_strides_economic_rank_p(self):\n self.base_non_simple_strides(negate_strides, 'economic', 3, False)\n \n def test_non_itemsize_strides_rank_1(self):\n self.base_non_simple_strides(nonitemsize_strides, 'full', 1, False)\n\n def test_non_itemsize_strides_economic_rank_1(self):\n self.base_non_simple_strides(nonitemsize_strides, 'economic', 1, False)\n\n def test_non_itemsize_strides_rank_p(self):\n 
self.base_non_simple_strides(nonitemsize_strides, 'full', 3, False)\n\n def test_non_itemsize_strides_economic_rank_p(self):\n self.base_non_simple_strides(nonitemsize_strides, 'economic', 3, False)\n\n def test_non_native_byte_order_rank_1(self):\n self.base_non_simple_strides(make_nonnative, 'full', 1, False)\n\n def test_non_native_byte_order_economic_rank_1(self):\n self.base_non_simple_strides(make_nonnative, 'economic', 1, False)\n\n def test_non_native_byte_order_rank_p(self):\n self.base_non_simple_strides(make_nonnative, 'full', 3, False)\n\n def test_non_native_byte_order_economic_rank_p(self):\n self.base_non_simple_strides(make_nonnative, 'economic', 3, False)\n\n def test_overwrite_qruv_rank_1(self):\n # Any positive strided q, r, u, and v can be overwritten for a rank 1 \n # update, only checking C and F contiguous.\n a, q0, r0, u0, v0 = self.generate('sqr')\n a1 = a + np.outer(u0, v0.conj())\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n v = v0.copy('F')\n\n # don't overwrite\n q1, r1 = qr_update(q, r, u, v, False)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n check_qr(q, r, a, self.rtol, self.atol)\n\n q2, r2 = qr_update(q, r, u, v, True)\n check_qr(q2, r2, a1, self.rtol, self.atol)\n # verify the overwriting, no good way to check u and v.\n assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)\n assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)\n\n q = q0.copy('C')\n r = r0.copy('C')\n u = u0.copy('C')\n v = v0.copy('C')\n q3, r3 = qr_update(q, r, u, v, True)\n check_qr(q3, r3, a1, self.rtol, self.atol)\n assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)\n assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)\n\n def test_overwrite_qruv_rank_1_economic(self):\n # updating economic decompositions can overwrite any contigous r,\n # and positively strided r and u. 
V is only ever read.\n # only checking C and F contiguous.\n a, q0, r0, u0, v0 = self.generate('tall', 'economic')\n a1 = a + np.outer(u0, v0.conj())\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n v = v0.copy('F')\n\n # don't overwrite\n q1, r1 = qr_update(q, r, u, v, False)\n check_qr(q1, r1, a1, self.rtol, self.atol, False)\n check_qr(q, r, a, self.rtol, self.atol, False)\n\n q2, r2 = qr_update(q, r, u, v, True)\n check_qr(q2, r2, a1, self.rtol, self.atol, False)\n # verify the overwriting, no good way to check u and v.\n assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)\n assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)\n\n q = q0.copy('C')\n r = r0.copy('C')\n u = u0.copy('C')\n v = v0.copy('C')\n q3, r3 = qr_update(q, r, u, v, True)\n check_qr(q3, r3, a1, self.rtol, self.atol, False)\n assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)\n assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)\n\n def test_overwrite_qruv_rank_p(self):\n # for rank p updates, q r must be F contiguous, v must be C (v.T --> F)\n # and u can be C or F, but is only overwritten if Q is C and complex\n a, q0, r0, u0, v0 = self.generate('sqr', p=3)\n a1 = a + np.dot(u0, v0.T.conj())\n q = q0.copy('F')\n r = r0.copy('F')\n u = u0.copy('F')\n v = v0.copy('C')\n\n # don't overwrite\n q1, r1 = qr_update(q, r, u, v, False)\n check_qr(q1, r1, a1, self.rtol, self.atol)\n check_qr(q, r, a, self.rtol, self.atol)\n\n q2, r2 = qr_update(q, r, u, v, True)\n check_qr(q2, r2, a1, self.rtol, self.atol)\n # verify the overwriting, no good way to check u and v.\n assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)\n assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)\n\n def test_empty_inputs(self):\n a, q, r, u, v = self.generate('tall')\n assert_raises(ValueError, qr_update, np.array([]), r, u, v)\n assert_raises(ValueError, qr_update, q, np.array([]), u, v)\n assert_raises(ValueError, qr_update, q, r, np.array([]), v)\n assert_raises(ValueError, qr_update, q, r, u, np.array([]))\n\n def test_mismatched_shapes(self):\n a, q, r, u, v = self.generate('tall')\n assert_raises(ValueError, qr_update, q, r[1:], u, v)\n assert_raises(ValueError, qr_update, q[:-2], r, u, v)\n assert_raises(ValueError, qr_update, q, r, u[1:], v)\n assert_raises(ValueError, qr_update, q, r, u, v[1:])\n\n def test_unsupported_dtypes(self):\n dts = ['int8', 'int16', 'int32', 'int64', \n 'uint8', 'uint16', 'uint32', 'uint64',\n 'float16', 'longdouble', 'longcomplex',\n 'bool']\n a, q0, r0, u0, v0 = self.generate('tall')\n for dtype in dts:\n q = q0.real.astype(dtype)\n r = r0.real.astype(dtype)\n u = u0.real.astype(dtype)\n v = v0.real.astype(dtype)\n assert_raises(ValueError, qr_update, q, r0, u0, v0)\n assert_raises(ValueError, qr_update, q0, r, u0, v0)\n assert_raises(ValueError, qr_update, q0, r0, u, v0)\n assert_raises(ValueError, qr_update, q0, r0, u0, v)\n\n def test_integer_input(self):\n q = np.arange(16).reshape(4, 4)\n r = q.copy() # doesn't matter\n u = q[:, 0].copy()\n v = r[0, :].copy()\n assert_raises(ValueError, qr_update, q, r, u, v)\n\n def test_check_finite(self):\n a0, q0, r0, u0, v0 = self.generate('tall', p=3)\n\n q = q0.copy('F')\n q[1,1] = np.nan\n assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])\n assert_raises(ValueError, qr_update, q, r0, u0, v0)\n\n r = r0.copy('F')\n r[1,1] = np.nan\n assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])\n assert_raises(ValueError, qr_update, q0, r, u0, v0)\n\n u = u0.copy('F')\n u[0,0] = np.nan\n assert_raises(ValueError, qr_update, q0, r0, 
u[:,0], v0[:,0])\n assert_raises(ValueError, qr_update, q0, r0, u, v0)\n\n v = v0.copy('F')\n v[0,0] = np.nan\n assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])\n assert_raises(ValueError, qr_update, q0, r0, u, v)\n\n def test_economic_check_finite(self):\n a0, q0, r0, u0, v0 = self.generate('tall', mode='economic', p=3)\n\n q = q0.copy('F')\n q[1,1] = np.nan\n assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])\n assert_raises(ValueError, qr_update, q, r0, u0, v0)\n\n r = r0.copy('F')\n r[1,1] = np.nan\n assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])\n assert_raises(ValueError, qr_update, q0, r, u0, v0)\n\n u = u0.copy('F')\n u[0,0] = np.nan\n assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])\n assert_raises(ValueError, qr_update, q0, r0, u, v0)\n\n v = v0.copy('F')\n v[0,0] = np.nan\n assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])\n assert_raises(ValueError, qr_update, q0, r0, u, v)\n\nclass TestQRupdate_f(BaseQRupdate):\n dtype = np.dtype('f')\n\nclass TestQRupdate_F(BaseQRupdate):\n dtype = np.dtype('F')\n\nclass TestQRupdate_d(BaseQRupdate):\n dtype = np.dtype('d')\n\nclass TestQRupdate_D(BaseQRupdate):\n dtype = np.dtype('D')\n\ndef test_form_qTu():\n # We want to ensure that all of the code paths through this function are\n # tested. Most of them should be hit with the rest of test suite, but\n # explicit tests make clear precisely what is being tested.\n #\n # This function expects that Q is either C or F contiguous and square. \n # Economic mode decompositions (Q is (M, N), M != N) do not go through this\n # function. U may have any positive strides.\n #\n # Some of these test are duplicates, since contiguous 1d arrays are both C\n # and F.\n\n q_order = ['F', 'C']\n q_shape = [(8, 8), ]\n u_order = ['F', 'C', 'A'] # here A means is not F not C\n u_shape = [1, 3]\n dtype = ['f', 'd', 'F', 'D']\n\n for qo, qs, uo, us, d in \\\n itertools.product(q_order, q_shape, u_order, u_shape, dtype):\n if us == 1:\n yield check_form_qTu, qo, qs, uo, us, 1, d\n yield check_form_qTu, qo, qs, uo, us, 2, d\n else:\n yield check_form_qTu, qo, qs, uo, us, 2, d\n \ndef check_form_qTu(q_order, q_shape, u_order, u_shape, u_ndim, dtype):\n np.random.seed(47)\n if u_shape == 1 and u_ndim == 1:\n u_shape = (q_shape[0],)\n else:\n u_shape = (q_shape[0], u_shape)\n dtype = np.dtype(dtype)\n\n if dtype.char in 'fd':\n q = np.random.random(q_shape)\n u = np.random.random(u_shape)\n elif dtype.char in 'FD':\n q = np.random.random(q_shape) + 1j*np.random.random(q_shape)\n u = np.random.random(u_shape) + 1j*np.random.random(u_shape)\n else:\n ValueError(\"form_qTu doesn't support this dtype\")\n\n q = np.require(q, dtype, q_order)\n if u_order != 'A':\n u = np.require(u, dtype, u_order)\n else:\n u, = make_strided((u.astype(dtype),))\n \n rtol = 10.0 ** -(np.finfo(dtype).precision-2)\n atol = 2*np.finfo(dtype).eps\n\n expected = np.dot(q.T.conj(), u)\n res = _decomp_update._form_qTu(q, u)\n assert_allclose(res, expected, rtol=rtol, atol=atol)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n \n",
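The suite above hammers scipy.linalg.qr_insert / qr_update from many directions (strides, dtypes, byte order, overwriting), but the invariants it checks are compact: qr_insert must yield a QR factorization of np.insert(a, k, u, axis), and qr_update must yield one of a + outer(u, v.conj()). A minimal, self-contained sketch of both, assuming scipy >= 0.16 (the release that added these routines):

import numpy as np
from scipy.linalg import qr, qr_insert, qr_update

rng = np.random.RandomState(0)
a = rng.random_sample((6, 4))
q, r = qr(a)  # full mode: q is (6, 6), r is (6, 4)

# Invariant 1: inserting a row at index k updates the factorization of
# np.insert(a, k, u, 0) without re-factorizing from scratch.
u = rng.random_sample(4)
q1, r1 = qr_insert(q, r, u, 2, 'row')
assert np.allclose(q1 @ r1, np.insert(a, 2, u, 0))

# Invariant 2: a rank-1 modification a + outer(u, v) is absorbed the same way.
u = rng.random_sample(6)
v = rng.random_sample(4)
q2, r2 = qr_update(q, r, u, v)
assert np.allclose(q2 @ r2, a + np.outer(u, v))

# Both results remain genuine QR factorizations: orthonormal Q, triangular R.
assert np.allclose(q2.T @ q2, np.eye(6))
assert np.allclose(r2, np.triu(r2))

The practical point is cost: absorbing a new row or a rank-1 term into an existing factorization takes roughly quadratic work, whereas re-running qr from scratch is cubic, which is why the tests also care so much about when the inputs may be overwritten in place.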
"\"\"\"Python wrappers around Brain.\n\nThis file is MACHINE GENERATED! Do not edit.\n\"\"\"\n\nimport collections as _collections\n\nfrom google.protobuf import text_format as _text_format\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\n\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\n_graph_def_version_outputs = [\"version\"]\n\n\ndef graph_def_version(name=None):\n r\"\"\"TODO: add doc.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int32`.\n \"\"\"\n result = _op_def_lib.apply_op(\"GraphDefVersion\", name=name)\n return result\n\n\n_ops.RegisterShape(\"GraphDefVersion\")(None)\n_kernel_label_outputs = [\"result\"]\n\n\ndef kernel_label(name=None):\n r\"\"\"TODO: add doc.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `string`.\n \"\"\"\n result = _op_def_lib.apply_op(\"KernelLabel\", name=name)\n return result\n\n\n_ops.RegisterShape(\"KernelLabel\")(None)\n_old_outputs = [\"\"]\n\n\ndef old(name=None):\n r\"\"\"TODO: add doc.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n \"\"\"\n result = _op_def_lib.apply_op(\"Old\", name=name)\n return result\n\n\n_ops.RegisterShape(\"Old\")(None)\n_resource_create_op_outputs = [\"\"]\n\n\ndef resource_create_op(resource, name=None):\n r\"\"\"TODO: add doc.\n\n Args:\n resource: A `Tensor` of type `resource`.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n \"\"\"\n result = _op_def_lib.apply_op(\"ResourceCreateOp\", resource=resource,\n name=name)\n return result\n\n\n_ops.RegisterShape(\"ResourceCreateOp\")(None)\n_resource_initialized_op_outputs = [\"initialized\"]\n\n\ndef resource_initialized_op(resource, name=None):\n r\"\"\"TODO: add doc.\n\n Args:\n resource: A `Tensor` of type `resource`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `bool`.\n \"\"\"\n result = _op_def_lib.apply_op(\"ResourceInitializedOp\", resource=resource,\n name=name)\n return result\n\n\n_ops.RegisterShape(\"ResourceInitializedOp\")(None)\n_resource_using_op_outputs = [\"\"]\n\n\ndef resource_using_op(resource, name=None):\n r\"\"\"TODO: add doc.\n\n Args:\n resource: A `Tensor` of type `resource`.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n \"\"\"\n result = _op_def_lib.apply_op(\"ResourceUsingOp\", resource=resource,\n name=name)\n return result\n\n\n_ops.RegisterShape(\"ResourceUsingOp\")(None)\n_stub_resource_handle_op_outputs = [\"resource\"]\n\n\ndef stub_resource_handle_op(container=None, shared_name=None, name=None):\n r\"\"\"Creates a handle to a StubResource\n\n Args:\n container: An optional `string`. Defaults to `\"\"`.\n shared_name: An optional `string`. 
Defaults to `\"\"`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `resource`.\n \"\"\"\n result = _op_def_lib.apply_op(\"StubResourceHandleOp\", container=container,\n shared_name=shared_name, name=name)\n return result\n\n\n_ops.RegisterShape(\"StubResourceHandleOp\")(None)\n_test_string_output_outputs = [\"output1\", \"output2\"]\n\n\n_TestStringOutputOutput = _collections.namedtuple(\"TestStringOutput\",\n _test_string_output_outputs)\n\n\ndef test_string_output(input, name=None):\n r\"\"\"TODO: add doc.\n\n Args:\n input: A `Tensor` of type `float32`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (output1, output2).\n output1: A `Tensor` of type `float32`.\n output2: A `Tensor` of type `string`.\n \"\"\"\n result = _op_def_lib.apply_op(\"TestStringOutput\", input=input, name=name)\n return _TestStringOutputOutput._make(result)\n\n\n_ops.RegisterShape(\"TestStringOutput\")(None)\ndef _InitOpDefLibrary():\n op_list = _op_def_pb2.OpList()\n _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n\n\n_InitOpDefLibrary.op_list_ascii = \"\"\"op {\n name: \"GraphDefVersion\"\n output_arg {\n name: \"version\"\n type: DT_INT32\n }\n is_stateful: true\n}\nop {\n name: \"KernelLabel\"\n output_arg {\n name: \"result\"\n type: DT_STRING\n }\n}\nop {\n name: \"Old\"\n deprecation {\n version: 8\n explanation: \"For reasons\"\n }\n}\nop {\n name: \"ResourceCreateOp\"\n input_arg {\n name: \"resource\"\n type: DT_RESOURCE\n }\n}\nop {\n name: \"ResourceInitializedOp\"\n input_arg {\n name: \"resource\"\n type: DT_RESOURCE\n }\n output_arg {\n name: \"initialized\"\n type: DT_BOOL\n }\n}\nop {\n name: \"ResourceUsingOp\"\n input_arg {\n name: \"resource\"\n type: DT_RESOURCE\n }\n}\nop {\n name: \"StubResourceHandleOp\"\n output_arg {\n name: \"resource\"\n type: DT_RESOURCE\n }\n attr {\n name: \"container\"\n type: \"string\"\n default_value {\n s: \"\"\n }\n }\n attr {\n name: \"shared_name\"\n type: \"string\"\n default_value {\n s: \"\"\n }\n }\n is_stateful: true\n}\nop {\n name: \"TestStringOutput\"\n input_arg {\n name: \"input\"\n type: DT_FLOAT\n }\n output_arg {\n name: \"output1\"\n type: DT_FLOAT\n }\n output_arg {\n name: \"output2\"\n type: DT_STRING\n }\n}\n\"\"\"\n\n\n_op_def_lib = _InitOpDefLibrary()\n",
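The generated file above follows a fixed pattern: an ASCII-serialized OpList is parsed at import time, registered with the framework (so GraphDef validation and shape inference can see the op signatures), and wrapped in an OpDefLibrary whose apply_op calls build the actual graph nodes. The op names (GraphDefVersion, KernelLabel, TestStringOutput, ...) match what TensorFlow historically generated as tensorflow/python/framework/test_ops.py, so that import path is assumed in the sketch below; the backing C++ kernels only exist in TensorFlow's own test binaries, not in release wheels, so this is hypothetical usage, not something a pip-installed build will run:

# Hypothetical usage sketch, TF 1.x graph/session style. Assumes a source
# build of TensorFlow that links the test kernels for these ops.
import tensorflow as tf
from tensorflow.python.framework import test_ops  # assumed: the module above

with tf.Graph().as_default():
    version = test_ops.graph_def_version()  # scalar int32 tensor
    label = test_ops.kernel_label()         # scalar string tensor
    with tf.Session() as sess:
        # Each wrapper call above added one node via apply_op(); running
        # the tensors dispatches to the registered C++ kernels.
        print(sess.run([version, label]))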
"import numpy as np\nfrom scipy.linalg import block_diag\nfrom scipy.sparse import csr_matrix\nfrom scipy.special import psi\n\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,\n _dirichlet_expectation_2d)\n\nfrom sklearn.utils.testing import assert_allclose\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_greater_equal\nfrom sklearn.utils.testing import assert_raises_regexp\nfrom sklearn.utils.testing import if_safe_multiprocessing_with_blas\n\nfrom sklearn.utils.validation import NotFittedError\nfrom sklearn.externals.six.moves import xrange\n\n\ndef _build_sparse_mtx():\n # Create 3 topics and each topic has 3 disticnt words.\n # (Each word only belongs to a single topic.)\n n_topics = 3\n block = n_topics * np.ones((3, 3))\n blocks = [block] * n_topics\n X = block_diag(*blocks)\n X = csr_matrix(X)\n return (n_topics, X)\n\n\ndef test_lda_default_prior_params():\n # default prior parameter should be `1 / topics`\n # and verbose params should not affect result\n n_topics, X = _build_sparse_mtx()\n prior = 1. / n_topics\n lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,\n topic_word_prior=prior, random_state=0)\n lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)\n\n topic_distr_1 = lda_1.fit_transform(X)\n topic_distr_2 = lda_2.fit_transform(X)\n assert_almost_equal(topic_distr_1, topic_distr_2)\n\n\ndef test_lda_fit_batch():\n # Test LDA batch learning_offset (`fit` method with 'batch' learning)\n rng = np.random.RandomState(0)\n n_topics, X = _build_sparse_mtx()\n lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,\n learning_method='batch', random_state=rng)\n lda.fit(X)\n\n correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]\n for component in lda.components_:\n # Find top 3 words in each LDA component\n top_idx = set(component.argsort()[-3:][::-1])\n assert_true(tuple(sorted(top_idx)) in correct_idx_grps)\n\n\ndef test_lda_fit_online():\n # Test LDA online learning (`fit` method with 'online' learning)\n rng = np.random.RandomState(0)\n n_topics, X = _build_sparse_mtx()\n lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,\n evaluate_every=1, learning_method='online',\n random_state=rng)\n lda.fit(X)\n\n correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]\n for component in lda.components_:\n # Find top 3 words in each LDA component\n top_idx = set(component.argsort()[-3:][::-1])\n assert_true(tuple(sorted(top_idx)) in correct_idx_grps)\n\n\ndef test_lda_partial_fit():\n # Test LDA online learning (`partial_fit` method)\n # (same as test_lda_batch)\n rng = np.random.RandomState(0)\n n_topics, X = _build_sparse_mtx()\n lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,\n total_samples=100, random_state=rng)\n for i in xrange(3):\n lda.partial_fit(X)\n\n correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]\n for c in lda.components_:\n top_idx = set(c.argsort()[-3:][::-1])\n assert_true(tuple(sorted(top_idx)) in correct_idx_grps)\n\n\ndef test_lda_dense_input():\n # Test LDA with dense input.\n rng = np.random.RandomState(0)\n n_topics, X = _build_sparse_mtx()\n lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',\n random_state=rng)\n lda.fit(X.toarray())\n\n correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]\n for 
component in lda.components_:\n # Find top 3 words in each LDA component\n top_idx = set(component.argsort()[-3:][::-1])\n assert_true(tuple(sorted(top_idx)) in correct_idx_grps)\n\n\ndef test_lda_transform():\n # Test LDA transform.\n # Transform result cannot be negative\n rng = np.random.RandomState(0)\n X = rng.randint(5, size=(20, 10))\n n_topics = 3\n lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)\n X_trans = lda.fit_transform(X)\n assert_true((X_trans > 0.0).any())\n\n\ndef test_lda_fit_transform():\n # Test LDA fit_transform & transform\n # fit_transform and transform result should be the same\n for method in ('online', 'batch'):\n rng = np.random.RandomState(0)\n X = rng.randint(10, size=(50, 20))\n lda = LatentDirichletAllocation(n_topics=5, learning_method=method,\n random_state=rng)\n X_fit = lda.fit_transform(X)\n X_trans = lda.transform(X)\n assert_array_almost_equal(X_fit, X_trans, 4)\n\n\ndef test_lda_partial_fit_dim_mismatch():\n # test `n_features` mismatch in `partial_fit`\n rng = np.random.RandomState(0)\n n_topics = rng.randint(3, 6)\n n_col = rng.randint(6, 10)\n X_1 = np.random.randint(4, size=(10, n_col))\n X_2 = np.random.randint(4, size=(10, n_col + 1))\n lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,\n total_samples=20, random_state=rng)\n lda.partial_fit(X_1)\n assert_raises_regexp(ValueError, r\"^The provided data has\",\n lda.partial_fit, X_2)\n\n\ndef test_invalid_params():\n # test `_check_params` method\n X = np.ones((5, 10))\n\n invalid_models = (\n ('n_topics', LatentDirichletAllocation(n_topics=0)),\n ('learning_method',\n LatentDirichletAllocation(learning_method='unknown')),\n ('total_samples', LatentDirichletAllocation(total_samples=0)),\n ('learning_offset', LatentDirichletAllocation(learning_offset=-1)),\n )\n for param, model in invalid_models:\n regex = r\"^Invalid %r parameter\" % param\n assert_raises_regexp(ValueError, regex, model.fit, X)\n\n\ndef test_lda_negative_input():\n # test pass dense matrix with sparse negative input.\n X = -np.ones((5, 10))\n lda = LatentDirichletAllocation()\n regex = r\"^Negative values in data passed\"\n assert_raises_regexp(ValueError, regex, lda.fit, X)\n\n\ndef test_lda_no_component_error():\n # test `transform` and `perplexity` before `fit`\n rng = np.random.RandomState(0)\n X = rng.randint(4, size=(20, 10))\n lda = LatentDirichletAllocation()\n regex = r\"^no 'components_' attribute\"\n assert_raises_regexp(NotFittedError, regex, lda.transform, X)\n assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)\n\n\ndef test_lda_transform_mismatch():\n # test `n_features` mismatch in partial_fit and transform\n rng = np.random.RandomState(0)\n X = rng.randint(4, size=(20, 10))\n X_2 = rng.randint(4, size=(10, 8))\n\n n_topics = rng.randint(3, 6)\n lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)\n lda.partial_fit(X)\n assert_raises_regexp(ValueError, r\"^The provided data has\",\n lda.partial_fit, X_2)\n\n\n@if_safe_multiprocessing_with_blas\ndef test_lda_multi_jobs():\n n_topics, X = _build_sparse_mtx()\n # Test LDA batch training with multi CPU\n for method in ('online', 'batch'):\n rng = np.random.RandomState(0)\n lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,\n learning_method=method,\n evaluate_every=1,\n random_state=rng)\n lda.fit(X)\n\n correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]\n for c in lda.components_:\n top_idx = set(c.argsort()[-3:][::-1])\n assert_true(tuple(sorted(top_idx)) in 
correct_idx_grps)\n\n\n@if_safe_multiprocessing_with_blas\ndef test_lda_partial_fit_multi_jobs():\n # Test LDA online training with multi CPU\n rng = np.random.RandomState(0)\n n_topics, X = _build_sparse_mtx()\n lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,\n learning_offset=5., total_samples=30,\n random_state=rng)\n for i in range(2):\n lda.partial_fit(X)\n\n correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]\n for c in lda.components_:\n top_idx = set(c.argsort()[-3:][::-1])\n assert_true(tuple(sorted(top_idx)) in correct_idx_grps)\n\n\ndef test_lda_preplexity_mismatch():\n # test dimension mismatch in `perplexity` method\n rng = np.random.RandomState(0)\n n_topics = rng.randint(3, 6)\n n_samples = rng.randint(6, 10)\n X = np.random.randint(4, size=(n_samples, 10))\n lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,\n total_samples=20, random_state=rng)\n lda.fit(X)\n # invalid samples\n invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))\n assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,\n invalid_n_samples)\n # invalid topic number\n invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))\n assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,\n invalid_n_topics)\n\n\ndef test_lda_perplexity():\n # Test LDA perplexity for batch training\n # perplexity should be lower after each iteration\n n_topics, X = _build_sparse_mtx()\n for method in ('online', 'batch'):\n lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,\n learning_method=method,\n total_samples=100, random_state=0)\n lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,\n learning_method=method,\n total_samples=100, random_state=0)\n distr_1 = lda_1.fit_transform(X)\n perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)\n\n distr_2 = lda_2.fit_transform(X)\n perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)\n assert_greater_equal(perp_1, perp_2)\n\n perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)\n perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)\n assert_greater_equal(perp_1_subsampling, perp_2_subsampling)\n\n\ndef test_lda_score():\n # Test LDA score for batch training\n # score should be higher after each iteration\n n_topics, X = _build_sparse_mtx()\n for method in ('online', 'batch'):\n lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,\n learning_method=method,\n total_samples=100, random_state=0)\n lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,\n learning_method=method,\n total_samples=100, random_state=0)\n lda_1.fit_transform(X)\n score_1 = lda_1.score(X)\n\n lda_2.fit_transform(X)\n score_2 = lda_2.score(X)\n assert_greater_equal(score_2, score_1)\n\n\ndef test_perplexity_input_format():\n # Test LDA perplexity for sparse and dense input\n # score should be the same for both dense and sparse input\n n_topics, X = _build_sparse_mtx()\n lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,\n learning_method='batch',\n total_samples=100, random_state=0)\n distr = lda.fit_transform(X)\n perp_1 = lda.perplexity(X)\n perp_2 = lda.perplexity(X, distr)\n perp_3 = lda.perplexity(X.toarray(), distr)\n assert_almost_equal(perp_1, perp_2)\n assert_almost_equal(perp_1, perp_3)\n\n\ndef test_lda_score_perplexity():\n # Test the relationship between LDA score and perplexity\n n_topics, X = _build_sparse_mtx()\n lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,\n random_state=0)\n distr = 
lda.fit_transform(X)\n perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)\n\n score = lda.score(X)\n perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))\n assert_almost_equal(perplexity_1, perplexity_2)\n\n\ndef test_lda_empty_docs():\n \"\"\"Test LDA on empty document (all-zero rows).\"\"\"\n Z = np.zeros((5, 4))\n for X in [Z, csr_matrix(Z)]:\n lda = LatentDirichletAllocation(max_iter=750).fit(X)\n assert_almost_equal(lda.components_.sum(axis=0),\n np.ones(lda.components_.shape[1]))\n\n\ndef test_dirichlet_expectation():\n \"\"\"Test Cython version of Dirichlet expectation calculation.\"\"\"\n x = np.logspace(-100, 10, 10000)\n expectation = np.empty_like(x)\n _dirichlet_expectation_1d(x, 0, expectation)\n assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),\n atol=1e-19)\n\n x = x.reshape(100, 100)\n assert_allclose(_dirichlet_expectation_2d(x),\n psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),\n rtol=1e-11, atol=3e-9)\n",
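The final test above (test_dirichlet_expectation) pins the Cython kernels to the standard variational identity for a Dirichlet-distributed topic vector: if theta ~ Dirichlet(gamma), then E[log theta_k] = psi(gamma_k) - psi(sum_j gamma_j), where psi is the digamma function. A pure-NumPy/SciPy sketch of the same quantity, cross-checked by Monte Carlo sampling (the helper name here is illustrative, not sklearn's private API):

import numpy as np
from scipy.special import psi

def dirichlet_expectation(gamma):
    """E[log theta] for theta ~ Dirichlet(gamma), one distribution per row."""
    return psi(gamma) - psi(gamma.sum(axis=-1, keepdims=True))

gamma = np.random.RandomState(0).gamma(2.0, size=(100, 100))
expected = dirichlet_expectation(gamma)

# Monte Carlo cross-check on one row; tolerance is loose to absorb
# sampling noise from the finite number of draws.
samples = np.random.RandomState(1).dirichlet(gamma[0], size=100000)
assert np.allclose(np.log(samples).mean(axis=0), expected[0], atol=2e-2)

This expectation is the workhorse of the E-step in variational LDA, which is why the test checks it down to very tight absolute tolerances against scipy.special.psi.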
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy as np\nimport json\nimport yaml\nimport warnings\nimport copy\nimport os\nimport re\nfrom six.moves import zip\n\nfrom .. import backend as K\nfrom .. import initializers\nfrom ..utils.io_utils import ask_to_proceed_with_overwrite\nfrom ..utils.layer_utils import print_summary as print_layer_summary\nfrom ..utils.generic_utils import has_arg\nfrom ..utils import conv_utils\nfrom ..legacy import interfaces\n\ntry:\n import h5py\nexcept ImportError:\n h5py = None\n\n\nclass InputSpec(object):\n \"\"\"Specifies the ndim, dtype and shape of every input to a layer.\n\n Every layer should expose (if appropriate) an `input_spec` attribute:\n a list of instances of InputSpec (one per input tensor).\n\n A None entry in a shape is compatible with any dimension,\n a None shape is compatible with any shape.\n\n # Arguments\n dtype: Expected datatype of the input.\n shape: Shape tuple, expected shape of the input\n (may include None for unchecked axes).\n ndim: Integer, expected rank of the input.\n max_ndim: Integer, maximum rank of the input.\n min_ndim: Integer, minimum rank of the input.\n axes: Dictionary mapping integer axes to\n a specific dimension value.\n \"\"\"\n\n def __init__(self, dtype=None,\n shape=None,\n ndim=None,\n max_ndim=None,\n min_ndim=None,\n axes=None):\n self.dtype = dtype\n self.shape = shape\n if shape is not None:\n self.ndim = len(shape)\n else:\n self.ndim = ndim\n self.max_ndim = max_ndim\n self.min_ndim = min_ndim\n self.axes = axes or {}\n\n\nclass Node(object):\n \"\"\"A `Node` describes the connectivity between two layers.\n\n Each time a layer is connected to some new input,\n a node is added to `layer.inbound_nodes`.\n Each time the output of a layer is used by another layer,\n a node is added to `layer.outbound_nodes`.\n\n # Arguments\n outbound_layer: the layer that takes\n `input_tensors` and turns them into `output_tensors`\n (the node gets created when the `call`\n method of the layer was called).\n inbound_layers: a list of layers, the same length as `input_tensors`,\n the layers from where `input_tensors` originate.\n node_indices: a list of integers, the same length as `inbound_layers`.\n `node_indices[i]` is the origin node of `input_tensors[i]`\n (necessary since each inbound layer might have several nodes,\n e.g. 
if the layer is being shared with a different data stream).\n tensor_indices: a list of integers,\n the same length as `inbound_layers`.\n `tensor_indices[i]` is the index of `input_tensors[i]` within the\n output of the inbound layer\n (necessary since each inbound layer might\n have multiple tensor outputs, with each one being\n independently manipulable).\n input_tensors: list of input tensors.\n output_tensors: list of output tensors.\n input_masks: list of input masks (a mask can be a tensor, or None).\n output_masks: list of output masks (a mask can be a tensor, or None).\n input_shapes: list of input shape tuples.\n output_shapes: list of output shape tuples.\n arguments: dictionary of keyword arguments that were passed to the\n `call` method of the layer at the call that created the node.\n\n `node_indices` and `tensor_indices` are basically fine-grained coordinates\n describing the origin of the `input_tensors`, verifying the following:\n\n `input_tensors[i] == inbound_layers[i].inbound_nodes[node_indices[i]].output_tensors[tensor_indices[i]]`\n\n A node from layer A to layer B is added to:\n A.outbound_nodes\n B.inbound_nodes\n \"\"\"\n\n def __init__(self, outbound_layer,\n inbound_layers, node_indices, tensor_indices,\n input_tensors, output_tensors,\n input_masks, output_masks,\n input_shapes, output_shapes,\n arguments=None):\n # Layer instance (NOT a list).\n # this is the layer that takes a list of input tensors\n # and turns them into a list of output tensors.\n # the current node will be added to\n # the inbound_nodes of outbound_layer.\n self.outbound_layer = outbound_layer\n\n # The following 3 properties describe where\n # the input tensors come from: which layers,\n # and for each layer, which node and which\n # tensor output of each node.\n\n # List of layer instances.\n self.inbound_layers = inbound_layers\n # List of integers, 1:1 mapping with inbound_layers.\n self.node_indices = node_indices\n # List of integers, 1:1 mapping with inbound_layers.\n self.tensor_indices = tensor_indices\n\n # Following 2 properties:\n # tensor inputs and outputs of outbound_layer.\n\n # List of tensors. 
1:1 mapping with inbound_layers.\n self.input_tensors = input_tensors\n # List of tensors, created by outbound_layer.call().\n self.output_tensors = output_tensors\n\n # Following 2 properties: input and output masks.\n # List of tensors, 1:1 mapping with input_tensor.\n self.input_masks = input_masks\n # List of tensors, created by outbound_layer.compute_mask().\n self.output_masks = output_masks\n\n # Following 2 properties: input and output shapes.\n\n # List of shape tuples, shapes of input_tensors.\n self.input_shapes = input_shapes\n # List of shape tuples, shapes of output_tensors.\n self.output_shapes = output_shapes\n\n # Optional keyword arguments to layer's `call`.\n self.arguments = arguments\n\n # Add nodes to all layers involved.\n for layer in inbound_layers:\n if layer is not None:\n layer.outbound_nodes.append(self)\n outbound_layer.inbound_nodes.append(self)\n\n def get_config(self):\n inbound_names = []\n for layer in self.inbound_layers:\n if layer:\n inbound_names.append(layer.name)\n else:\n inbound_names.append(None)\n return {'outbound_layer': self.outbound_layer.name if self.outbound_layer else None,\n 'inbound_layers': inbound_names,\n 'node_indices': self.node_indices,\n 'tensor_indices': self.tensor_indices}\n\n\nclass Layer(object):\n \"\"\"Abstract base layer class.\n\n # Properties\n name: String, must be unique within a model.\n input_spec: List of InputSpec class instances\n each entry describes one required input:\n - ndim\n - dtype\n A layer with `n` input tensors must have\n an `input_spec` of length `n`.\n trainable: Boolean, whether the layer weights\n will be updated during training.\n uses_learning_phase: Whether any operation\n of the layer uses `K.in_training_phase()`\n or `K.in_test_phase()`.\n input_shape: Shape tuple. Provided for convenience,\n but note that there may be cases in which this\n attribute is ill-defined (e.g. a shared layer\n with multiple input shapes), in which case\n requesting `input_shape` will raise an Exception.\n Prefer using `layer.get_input_shape_for(input_shape)`,\n or `layer.get_input_shape_at(node_index)`.\n output_shape: Shape tuple. See above.\n inbound_nodes: List of nodes.\n outbound_nodes: List of nodes.\n input, output: Input/output tensor(s). Note that if the layer is used\n more than once (shared layer), this is ill-defined\n and will raise an exception. 
In such cases, use\n `layer.get_input_at(node_index)`.\n input_mask, output_mask: Same as above, for masks.\n trainable_weights: List of variables.\n non_trainable_weights: List of variables.\n weights: The concatenation of the lists trainable_weights and\n non_trainable_weights (in this order).\n constraints: Dict mapping weights to constraints.\n\n # Methods\n call(x, mask=None): Where the layer's logic lives.\n __call__(x, mask=None): Wrapper around the layer logic (`call`).\n If x is a Keras tensor:\n - Connect current layer with last layer from tensor:\n `self._add_inbound_node(last_layer)`\n - Add layer to tensor history\n If layer is not built:\n - Build from x._keras_shape\n get_weights()\n set_weights(weights)\n get_config()\n count_params()\n compute_output_shape(input_shape)\n compute_mask(x, mask)\n get_input_at(node_index)\n get_output_at(node_index)\n get_input_shape_at(node_index)\n get_output_shape_at(node_index)\n get_input_mask_at(node_index)\n get_output_mask_at(node_index)\n\n # Class Methods\n from_config(config)\n\n # Internal methods:\n build(input_shape)\n _add_inbound_node(layer, index=0)\n assert_input_compatibility()\n \"\"\"\n\n def __init__(self, **kwargs):\n self.input_spec = None\n self.supports_masking = False\n\n # These properties will be set upon call of self.build()\n self._trainable_weights = []\n self._non_trainable_weights = []\n self._constraints = {} # dict {tensor: constraint instance}\n self._losses = []\n self._updates = []\n self._per_input_losses = {}\n self._per_input_updates = {}\n self._built = False\n\n # These lists will be filled via successive calls\n # to self._add_inbound_node().\n self.inbound_nodes = []\n self.outbound_nodes = []\n\n # These properties should be set by the user via keyword arguments.\n # note that 'dtype', 'input_shape' and 'batch_input_shape'\n # are only applicable to input layers: do not pass these keywords\n # to non-input layers.\n allowed_kwargs = {'input_shape',\n 'batch_input_shape',\n 'batch_size',\n 'dtype',\n 'name',\n 'trainable',\n 'weights',\n 'input_dtype', # legacy\n }\n for kwarg in kwargs:\n if kwarg not in allowed_kwargs:\n raise TypeError('Keyword argument not understood:', kwarg)\n name = kwargs.get('name')\n if not name:\n prefix = self.__class__.__name__\n name = _to_snake_case(prefix) + '_' + str(K.get_uid(prefix))\n self.name = name\n\n self.trainable = kwargs.get('trainable', True)\n if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:\n # In this case we will later create an input layer\n # to insert before the current layer\n if 'batch_input_shape' in kwargs:\n batch_input_shape = tuple(kwargs['batch_input_shape'])\n elif 'input_shape' in kwargs:\n if 'batch_size' in kwargs:\n batch_size = kwargs['batch_size']\n else:\n batch_size = None\n batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])\n self.batch_input_shape = batch_input_shape\n\n # Set dtype.\n dtype = kwargs.get('dtype')\n if dtype is None:\n dtype = kwargs.get('input_dtype')\n if dtype is None:\n dtype = K.floatx()\n self.dtype = dtype\n\n if 'weights' in kwargs:\n self._initial_weights = kwargs['weights']\n else:\n self._initial_weights = None\n\n @property\n def losses(self):\n return self._losses\n\n @property\n def updates(self):\n return self._updates\n\n @property\n def built(self):\n return self._built\n\n @built.setter\n def built(self, value):\n self._built = value\n\n @property\n def constraints(self):\n return self._constraints\n\n @constraints.setter\n def constraints(self, constraints):\n 
self._constraints = constraints\n\n @property\n def trainable_weights(self):\n trainable = getattr(self, 'trainable', True)\n if trainable:\n return self._trainable_weights\n else:\n return []\n\n @trainable_weights.setter\n def trainable_weights(self, weights):\n self._trainable_weights = weights\n\n @property\n def non_trainable_weights(self):\n trainable = getattr(self, 'trainable', True)\n if not trainable:\n return self._trainable_weights + self._non_trainable_weights\n else:\n return self._non_trainable_weights\n\n @non_trainable_weights.setter\n def non_trainable_weights(self, weights):\n self._non_trainable_weights = weights\n\n @interfaces.legacy_add_weight_support\n def add_weight(self,\n name,\n shape,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=True,\n constraint=None):\n \"\"\"Adds a weight variable to the layer.\n\n # Arguments\n name: String, the name for the weight variable.\n shape: The shape tuple of the weight.\n dtype: The dtype of the weight.\n initializer: An Initializer instance (callable).\n regularizer: An optional Regularizer instance.\n trainable: A boolean, whether the weight should\n be trained via backprop or not (assuming\n that the layer itself is also trainable).\n constraint: An optional Constraint instance.\n\n # Returns\n The created weight variable.\n \"\"\"\n initializer = initializers.get(initializer)\n if dtype is None:\n dtype = K.floatx()\n weight = K.variable(initializer(shape), dtype=dtype, name=name)\n if regularizer is not None:\n self.add_loss(regularizer(weight))\n if constraint is not None:\n self.constraints[weight] = constraint\n if trainable:\n self._trainable_weights.append(weight)\n else:\n self._non_trainable_weights.append(weight)\n return weight\n\n def assert_input_compatibility(self, inputs):\n \"\"\"Checks compatibility between the layer and provided inputs.\n\n This checks that the tensor(s) `input`\n verify the input assumptions of the layer\n (if any). If not, exceptions are raised.\n\n # Arguments\n inputs: input tensor or list of input tensors.\n\n # Raises\n ValueError: in case of mismatch between\n the provided inputs and the expectations of the layer.\n \"\"\"\n inputs = _to_list(inputs)\n for x in inputs:\n try:\n K.is_keras_tensor(x)\n except ValueError:\n raise ValueError('Layer ' + self.name + ' was called with '\n 'an input that isn\\'t a symbolic tensor. '\n 'Received type: ' +\n str(type(x)) + '. Full input: ' +\n str(inputs) + '. All inputs to the layer '\n 'should be tensors.')\n\n if not self.input_spec:\n return\n if not isinstance(self.input_spec, (list, tuple)):\n input_spec = _to_list(self.input_spec)\n else:\n input_spec = self.input_spec\n if len(inputs) != len(input_spec):\n raise ValueError('Layer ' + self.name + ' expects ' +\n str(len(input_spec)) + ' inputs, '\n 'but it received ' + str(len(inputs)) +\n ' input tensors. 
Input received: ' +\n str(inputs))\n for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):\n if spec is None:\n continue\n\n # Check ndim.\n if spec.ndim is not None:\n if K.ndim(x) != spec.ndim:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible with layer ' +\n self.name + ': expected ndim=' +\n str(spec.ndim) + ', found ndim=' +\n str(K.ndim(x)))\n if spec.max_ndim is not None:\n ndim = K.ndim(x)\n if ndim is not None and ndim > spec.max_ndim:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible with layer ' +\n self.name + ': expected max_ndim=' +\n str(spec.max_ndim) + ', found ndim=' +\n str(K.ndim(x)))\n if spec.min_ndim is not None:\n ndim = K.ndim(x)\n if ndim is not None and ndim < spec.min_ndim:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible with layer ' +\n self.name + ': expected min_ndim=' +\n str(spec.min_ndim) + ', found ndim=' +\n str(K.ndim(x)))\n # Check dtype.\n if spec.dtype is not None:\n if K.dtype(x) != spec.dtype:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible with layer ' +\n self.name + ': expected dtype=' +\n str(spec.dtype) + ', found dtype=' +\n str(K.dtype(x)))\n # Check specific shape axes.\n if spec.axes:\n try:\n x_shape = K.int_shape(x)\n except TypeError:\n x_shape = None\n if x_shape is not None:\n for axis, value in spec.axes.items():\n if value is not None and x_shape[int(axis)] not in {value, None}:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible with layer ' +\n self.name + ': expected axis ' +\n str(axis) + ' of input shape to have '\n 'value ' + str(value) +\n ' but got shape ' + str(x_shape))\n # Check shape.\n if spec.shape is not None:\n try:\n x_shape = K.int_shape(x)\n except TypeError:\n x_shape = None\n if x_shape is not None:\n for spec_dim, dim in zip(spec.shape, x_shape):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n raise ValueError(\n 'Input ' + str(input_index) +\n ' is incompatible with layer ' +\n self.name + ': expected shape=' +\n str(spec.shape) + ', found shape=' +\n str(x_shape))\n\n def call(self, inputs, **kwargs):\n \"\"\"This is where the layer's logic lives.\n\n # Arguments\n inputs: Input tensor, or list/tuple of input tensors.\n **kwargs: Additional keyword arguments.\n\n # Returns\n A tensor or list/tuple of tensors.\n \"\"\"\n return inputs\n\n def __call__(self, inputs, **kwargs):\n \"\"\"Wrapper around self.call(), for handling internal references.\n\n If a Keras tensor is passed:\n - We call self._add_inbound_node().\n - If necessary, we `build` the layer to match\n the _keras_shape of the input(s).\n - We update the _keras_shape of every input tensor with\n its new shape (obtained via self.compute_output_shape).\n This is done as part of _add_inbound_node().\n - We update the _keras_history of the output tensor(s)\n with the current layer.\n This is done as part of _add_inbound_node().\n\n # Arguments\n inputs: Can be a tensor or list/tuple of tensors.\n **kwargs: Additional keyword arguments to be passed to `call()`.\n\n # Returns\n Output of the layer's `call` method.\n\n # Raises\n ValueError: in case the layer is missing shape information\n for its `build` call.\n \"\"\"\n if isinstance(inputs, list):\n inputs = inputs[:]\n with K.name_scope(self.name):\n # Handle laying building (weight creating, input spec locking).\n if not self.built:\n # Raise exceptions in case the input is not compatible\n # with the input_spec specified in the layer constructor.\n 
self.assert_input_compatibility(inputs)\n\n # Collect input shapes to build layer.\n input_shapes = []\n for x_elem in _to_list(inputs):\n if hasattr(x_elem, '_keras_shape'):\n input_shapes.append(x_elem._keras_shape)\n elif hasattr(K, 'int_shape'):\n input_shapes.append(K.int_shape(x_elem))\n else:\n raise ValueError('You tried to call layer \"' + self.name +\n '\". This layer has no information'\n ' about its expected input shape, '\n 'and thus cannot be built. '\n 'You can build it manually via: '\n '`layer.build(batch_input_shape)`')\n if len(input_shapes) == 1:\n self.build(input_shapes[0])\n else:\n self.build(input_shapes)\n self.built = True\n\n # Load weights that were specified at layer instantiation.\n if self._initial_weights is not None:\n self.set_weights(self._initial_weights)\n\n # Raise exceptions in case the input is not compatible\n # with the input_spec set at build time.\n self.assert_input_compatibility(inputs)\n\n # Handle mask propagation.\n previous_mask = _collect_previous_mask(inputs)\n user_kwargs = copy.copy(kwargs)\n if not _is_all_none(previous_mask):\n # The previous layer generated a mask.\n if has_arg(self.call, 'mask'):\n if 'mask' not in kwargs:\n # If mask is explicitly passed to __call__,\n # we should override the default mask.\n kwargs['mask'] = previous_mask\n # Handle automatic shape inference (only useful for Theano).\n input_shape = _collect_input_shape(inputs)\n\n # Actually call the layer, collecting output(s), mask(s), and shape(s).\n output = self.call(inputs, **kwargs)\n output_mask = self.compute_mask(inputs, previous_mask)\n\n # If the layer returns tensors from its inputs, unmodified,\n # we copy them to avoid loss of tensor metadata.\n output_ls = _to_list(output)\n inputs_ls = _to_list(inputs)\n output_ls_copy = []\n for x in output_ls:\n if x in inputs_ls:\n x = K.identity(x)\n output_ls_copy.append(x)\n if len(output_ls_copy) == 1:\n output = output_ls_copy[0]\n else:\n output = output_ls_copy\n\n # Infering the output shape is only relevant for Theano.\n if all([s is not None for s in _to_list(input_shape)]):\n output_shape = self.compute_output_shape(input_shape)\n else:\n if isinstance(input_shape, list):\n output_shape = [None for _ in input_shape]\n else:\n output_shape = None\n\n # Add an inbound node to the layer, so that it keeps track\n # of the call and of all new variables created during the call.\n # This also updates the layer history of the output tensor(s).\n # If the input tensor(s) had not previous Keras history,\n # this does nothing.\n self._add_inbound_node(input_tensors=inputs, output_tensors=output,\n input_masks=previous_mask, output_masks=output_mask,\n input_shapes=input_shape, output_shapes=output_shape,\n arguments=user_kwargs)\n\n # Apply activity regularizer if any:\n if hasattr(self, 'activity_regularizer') and self.activity_regularizer is not None:\n regularization_losses = [self.activity_regularizer(x) for x in _to_list(output)]\n self.add_loss(regularization_losses, _to_list(inputs))\n return output\n\n def _add_inbound_node(self, input_tensors, output_tensors,\n input_masks, output_masks,\n input_shapes, output_shapes, arguments=None):\n \"\"\"Internal method to create an inbound node for the layer.\n\n # Arguments\n input_tensors: list of input tensors.\n output_tensors: list of output tensors.\n input_masks: list of input masks (a mask can be a tensor, or None).\n output_masks: list of output masks (a mask can be a tensor, or None).\n input_shapes: list of input shape tuples.\n output_shapes: list 
of output shape tuples.\n arguments: dictionary of keyword arguments that were passed to the\n `call` method of the layer at the call that created the node.\n \"\"\"\n input_tensors = _to_list(input_tensors)\n output_tensors = _to_list(output_tensors)\n input_masks = _to_list(input_masks)\n output_masks = _to_list(output_masks)\n input_shapes = _to_list(input_shapes)\n output_shapes = _to_list(output_shapes)\n\n # Collect input tensor(s) coordinates.\n inbound_layers = []\n node_indices = []\n tensor_indices = []\n for x in input_tensors:\n if hasattr(x, '_keras_history'):\n inbound_layer, node_index, tensor_index = x._keras_history\n inbound_layers.append(inbound_layer)\n node_indices.append(node_index)\n tensor_indices.append(tensor_index)\n else:\n inbound_layers.append(None)\n node_indices.append(None)\n tensor_indices.append(None)\n\n # Create node, add it to inbound nodes.\n Node(\n self,\n inbound_layers=inbound_layers,\n node_indices=node_indices,\n tensor_indices=tensor_indices,\n input_tensors=input_tensors,\n output_tensors=output_tensors,\n input_masks=input_masks,\n output_masks=output_masks,\n input_shapes=input_shapes,\n output_shapes=output_shapes,\n arguments=arguments\n )\n\n # Update tensor history, _keras_shape and _uses_learning_phase.\n for i in range(len(output_tensors)):\n output_tensors[i]._keras_shape = output_shapes[i]\n uses_lp = any([getattr(x, '_uses_learning_phase', False) for x in input_tensors])\n uses_lp = getattr(self, 'uses_learning_phase', False) or uses_lp\n output_tensors[i]._uses_learning_phase = getattr(output_tensors[i], '_uses_learning_phase', False) or uses_lp\n output_tensors[i]._keras_history = (self,\n len(self.inbound_nodes) - 1,\n i)\n\n def compute_output_shape(self, input_shape):\n \"\"\"Computes the output shape of the layer.\n\n Assumes that the layer will be built\n to match that input shape provided.\n\n # Arguments\n input_shape: Shape tuple (tuple of integers)\n or list of shape tuples (one per output tensor of the layer).\n Shape tuples can include None for free dimensions,\n instead of an integer.\n\n # Returns\n An input shape tuple.\n \"\"\"\n if hasattr(self, 'get_output_shape_for'):\n msg = \"Class `{}.{}` defines `get_output_shape_for` but does not override `compute_output_shape`. 
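# Illustrative sketch, assuming the Keras 2.0.x API above: every __call__
# appends one Node via _add_inbound_node(), so a shared layer accumulates
# one inbound node per call site, and each output tensor's _keras_history
# records (layer, node_index, tensor_index) for graph reconstruction.
from keras.layers import Dense, Input

shared = Dense(4)
a = shared(Input(shape=(8,)))
b = shared(Input(shape=(8,)))
assert len(shared.inbound_nodes) == 2
layer, node_index, tensor_index = b._keras_history
assert layer is shared and node_index == 1 and tensor_index == 0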
\" + \\\n \"If this is a Keras 1 layer, please implement `compute_output_shape` to support Keras 2.\"\n warnings.warn(msg.format(type(self).__module__, type(self).__name__), stacklevel=2)\n return input_shape\n\n def compute_mask(self, inputs, mask=None):\n \"\"\"Computes an output mask tensor.\n\n # Arguments\n inputs: Tensor or list of tensors.\n mask: Tensor or list of tensors.\n\n # Returns\n None or a tensor (or list of tensors,\n one per output tensor of the layer).\n \"\"\"\n if not self.supports_masking:\n if mask is not None:\n if isinstance(mask, list):\n if any(m is not None for m in mask):\n raise TypeError('Layer ' + self.name +\n ' does not support masking, '\n 'but was passed an input_mask: ' +\n str(mask))\n else:\n raise TypeError('Layer ' + self.name +\n ' does not support masking, '\n 'but was passed an input_mask: ' +\n str(mask))\n # masking not explicitly supported: return None as mask\n return None\n # if masking is explicitly supported, by default\n # carry over the input mask\n return mask\n\n def build(self, input_shape):\n \"\"\"Creates the layer weights.\n\n Must be implemented on all layers that have weights.\n\n # Arguments\n input_shape: Keras tensor (future input to layer)\n or list/tuple of Keras tensors to reference\n for weight shape computations.\n \"\"\"\n self.built = True\n\n def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n \"\"\"Retrieves an attribute (e.g. input_tensors) from a node.\n\n This is used to implement the methods:\n - get_input_shape_at\n - get_output_shape_at\n - get_input_at\n etc...\n\n # Arguments\n node_index: Integer index of the node from which\n to retrieve the attribute.\n attr: Exact node attribute name.\n attr_name: Human-readable attribute name, for error messages.\n\n # Returns\n The layer's attribute `attr` at the node of index `node_index`.\n\n # Raises\n RuntimeError: If the layer has no inbound nodes.\n ValueError: If the index is does not match any node.\n \"\"\"\n if not self.inbound_nodes:\n raise RuntimeError('The layer has never been called '\n 'and thus has no defined ' + attr_name + '.')\n if not len(self.inbound_nodes) > node_index:\n raise ValueError('Asked to get ' + attr_name +\n ' at node ' + str(node_index) +\n ', but the layer has only ' +\n str(len(self.inbound_nodes)) + ' inbound nodes.')\n values = getattr(self.inbound_nodes[node_index], attr)\n if len(values) == 1:\n return values[0]\n else:\n return values\n\n def get_input_shape_at(self, node_index):\n \"\"\"Retrieves the input shape(s) of a layer at a given node.\n\n # Arguments\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n # Returns\n A shape tuple\n (or list of shape tuples if the layer has multiple inputs).\n \"\"\"\n return self._get_node_attribute_at_index(node_index,\n 'input_shapes',\n 'input shape')\n\n def get_output_shape_at(self, node_index):\n \"\"\"Retrieves the output shape(s) of a layer at a given node.\n\n # Arguments\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. 
`node_index=0` will correspond to the\n first time the layer was called.\n\n # Returns\n A shape tuple\n (or list of shape tuples if the layer has multiple outputs).\n \"\"\"\n return self._get_node_attribute_at_index(node_index,\n 'output_shapes',\n 'output shape')\n\n def get_input_at(self, node_index):\n \"\"\"Retrieves the input tensor(s) of a layer at a given node.\n\n # Arguments\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n # Returns\n A tensor (or list of tensors if the layer has multiple inputs).\n \"\"\"\n return self._get_node_attribute_at_index(node_index,\n 'input_tensors',\n 'input')\n\n def get_output_at(self, node_index):\n \"\"\"Retrieves the output tensor(s) of a layer at a given node.\n\n # Arguments\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n # Returns\n A tensor (or list of tensors if the layer has multiple outputs).\n \"\"\"\n return self._get_node_attribute_at_index(node_index,\n 'output_tensors',\n 'output')\n\n def get_input_mask_at(self, node_index):\n \"\"\"Retrieves the input mask tensor(s) of a layer at a given node.\n\n # Arguments\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n # Returns\n A mask tensor\n (or list of tensors if the layer has multiple inputs).\n \"\"\"\n return self._get_node_attribute_at_index(node_index,\n 'input_masks',\n 'input mask')\n\n def get_output_mask_at(self, node_index):\n \"\"\"Retrieves the output mask tensor(s) of a layer at a given node.\n\n # Arguments\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n # Returns\n A mask tensor\n (or list of tensors if the layer has multiple outputs).\n \"\"\"\n return self._get_node_attribute_at_index(node_index,\n 'output_masks',\n 'output mask')\n\n @property\n def input(self):\n \"\"\"Retrieves the input tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n # Returns\n Input tensor or list of input tensors.\n\n # Raises\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n if len(self.inbound_nodes) > 1:\n raise AttributeError('Layer ' + self.name +\n ' has multiple inbound nodes, '\n 'hence the notion of \"layer input\" '\n 'is ill-defined. '\n 'Use `get_input_at(node_index)` instead.')\n elif not self.inbound_nodes:\n raise AttributeError('Layer ' + self.name +\n ' is not connected, no input to return.')\n return self._get_node_attribute_at_index(0, 'input_tensors',\n 'input')\n\n @property\n def output(self):\n \"\"\"Retrieves the output tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n # Returns\n Output tensor or list of output tensors.\n\n # Raises\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n if not self.inbound_nodes:\n raise AttributeError('Layer ' + self.name +\n ' has no inbound nodes.')\n if len(self.inbound_nodes) > 1:\n raise AttributeError('Layer ' + self.name +\n ' has multiple inbound nodes, '\n 'hence the notion of \"layer output\" '\n 'is ill-defined. 
'\n 'Use `get_output_at(node_index)` instead.')\n return self._get_node_attribute_at_index(0, 'output_tensors',\n 'output')\n\n @property\n def input_mask(self):\n \"\"\"Retrieves the input mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n # Returns\n Input mask tensor (potentially None) or list of input\n mask tensors.\n\n # Raises\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n if len(self.inbound_nodes) != 1:\n raise AttributeError('Layer ' + self.name +\n ' has multiple inbound nodes, ' +\n 'hence the notion of \"layer input mask\" '\n 'is ill-defined. '\n 'Use `get_input_mask_at(node_index)` '\n 'instead.')\n return self._get_node_attribute_at_index(0, 'input_masks',\n 'input mask')\n\n @property\n def output_mask(self):\n \"\"\"Retrieves the output mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n # Returns\n Output mask tensor (potentially None) or list of output\n mask tensors.\n\n # Raises\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n if len(self.inbound_nodes) != 1:\n raise AttributeError('Layer ' + self.name +\n ' has multiple inbound nodes, '\n 'hence the notion of \"layer output mask\" '\n 'is ill-defined. '\n 'Use `get_output_mask_at(node_index)` '\n 'instead.')\n return self._get_node_attribute_at_index(0, 'output_masks',\n 'output mask')\n\n @property\n def input_shape(self):\n \"\"\"Retrieves the input shape tuple(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n # Returns\n Input shape tuple\n (or list of input shape tuples, one tuple per input tensor).\n\n # Raises\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n if not self.inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined input shape.')\n all_input_shapes = set([str(node.input_shapes) for node in self.inbound_nodes])\n if len(all_input_shapes) == 1:\n input_shapes = self.inbound_nodes[0].input_shapes\n if len(input_shapes) == 1:\n return input_shapes[0]\n else:\n return input_shapes\n else:\n raise AttributeError('The layer \"' + str(self.name) +\n ' has multiple inbound nodes, '\n 'with different input shapes. Hence '\n 'the notion of \"input shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_input_shape_at(node_index)` '\n 'instead.')\n\n @property\n def output_shape(self):\n \"\"\"Retrieves the output shape tuple(s) of a layer.\n\n Only applicable if the layer has one inbound node,\n or if all inbound nodes have the same output shape.\n\n # Returns\n Output shape tuple\n (or list of input shape tuples, one tuple per output tensor).\n\n # Raises\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n if not self.inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined output shape.')\n all_output_shapes = set([str(node.output_shapes) for node in self.inbound_nodes])\n if len(all_output_shapes) == 1:\n output_shapes = self.inbound_nodes[0].output_shapes\n if len(output_shapes) == 1:\n return output_shapes[0]\n else:\n return output_shapes\n else:\n raise AttributeError('The layer \"' + str(self.name) +\n ' has multiple inbound nodes, '\n 'with different output shapes. 
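# Illustrative sketch, assuming the Keras 2.0.x API above: the plain
# .input/.output properties are only defined for a layer with a single
# inbound node; a shared layer must use the per-node accessors instead.
from keras.layers import Dense, Input

shared = Dense(4)
y0 = shared(Input(shape=(8,)))
y1 = shared(Input(shape=(8,)))
assert shared.get_output_at(0) is y0
assert shared.get_output_at(1) is y1
assert shared.input_shape == (None, 8)   # OK: both call sites agree
# shared.output would raise AttributeError: multiple inbound nodes.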
Hence '\n 'the notion of \"output shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_output_shape_at(node_index)` '\n 'instead.')\n\n def add_loss(self, losses, inputs=None):\n \"\"\"Add losses to the layer.\n\n The loss may potentially be conditional on some inputs tensors,\n for instance activity losses are conditional on the layer's inputs.\n\n # Arguments\n losses: loss tensor or list of loss tensors\n to add to the layer.\n inputs: input tensor or list of inputs tensors to mark\n the losses as conditional on these inputs.\n If None is passed, the loss is assumed unconditional\n (e.g. L2 weight regularization, which only depends\n on the layer's weights variables, not on any inputs tensors).\n \"\"\"\n if losses is None or losses == []:\n return\n # Update self.losses\n losses = _to_list(losses)\n if hasattr(self, '_losses'):\n self._losses += losses\n # Update self._per_input_updates\n if isinstance(input, list) and inputs == []:\n inputs = None\n if inputs is not None:\n inputs_hash = _object_list_uid(inputs)\n else:\n # Updates indexed by None are unconditional\n # rather than input-dependent\n inputs_hash = None\n if inputs_hash not in self._per_input_losses:\n self._per_input_losses[inputs_hash] = []\n self._per_input_losses[inputs_hash] += losses\n\n def add_update(self, updates, inputs=None):\n \"\"\"Add updates to the layer.\n\n The updates may potentially be conditional on some inputs tensors,\n for instance batch norm updates are conditional on the layer's inputs.\n\n # Arguments\n updates: update op or list of update ops\n to add to the layer.\n inputs: input tensor or list of inputs tensors to mark\n the updates as conditional on these inputs.\n If None is passed, the updates are assumed unconditional.\n \"\"\"\n if updates is None or updates == []:\n return\n # Update self.updates\n updates = _to_list(updates)\n if hasattr(self, '_updates'):\n self._updates += updates\n # Update self._per_input_updates\n if isinstance(inputs, list) and inputs == []:\n inputs = None\n if inputs is not None:\n inputs_hash = _object_list_uid(inputs)\n else:\n # Updates indexed by None are unconditional\n # rather than input-dependent\n inputs_hash = None\n if inputs_hash not in self._per_input_updates:\n self._per_input_updates[inputs_hash] = []\n self._per_input_updates[inputs_hash] += updates\n\n def get_updates_for(self, inputs):\n if inputs is not None:\n inputs_hash = _object_list_uid(inputs)\n else:\n inputs_hash = None\n if inputs_hash in self._per_input_updates:\n return self._per_input_updates[inputs_hash]\n return []\n\n def get_losses_for(self, inputs):\n if inputs is not None:\n inputs_hash = _object_list_uid(inputs)\n else:\n inputs_hash = None\n if inputs_hash in self._per_input_losses:\n return self._per_input_losses[inputs_hash]\n return []\n\n @property\n def weights(self):\n return self.trainable_weights + self.non_trainable_weights\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the layer, from Numpy arrays.\n\n # Arguments\n weights: a list of Numpy arrays. The number\n of arrays and their shape must match\n number of the dimensions of the weights\n of the layer (i.e. 
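# Illustrative sketch, assuming the Keras 2.0.x API above: weight
# regularizers are registered as unconditional losses (keyed by None),
# while activity regularizers are conditional on the layer's inputs.
# Note that the `isinstance(input, list)` guard in add_loss() above tests
# the *builtin* `input` rather than the `inputs` argument (add_update()
# uses `inputs`); this appears to be a typo, corrected in later releases.
from keras.layers import Dense, Input
from keras import regularizers

x = Input(shape=(8,))
layer = Dense(4, kernel_regularizer=regularizers.l2(1e-4),
              activity_regularizer=regularizers.l1(1e-4))
y = layer(x)
assert len(layer.get_losses_for(None)) == 1   # weight penalty: unconditional
assert len(layer.get_losses_for([x])) == 1    # activity penalty: input-keyed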
it should match the\n output of `get_weights`).\n\n # Raises\n ValueError: If the provided weights list does not match the\n layer's specifications.\n \"\"\"\n params = self.weights\n if len(params) != len(weights):\n raise ValueError('You called `set_weights(weights)` on layer \"' +\n self.name +\n '\" with a weight list of length ' +\n str(len(weights)) +\n ', but the layer was expecting ' +\n str(len(params)) +\n ' weights. Provided weights: ' +\n str(weights)[:50] + '...')\n if not params:\n return\n weight_value_tuples = []\n param_values = K.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError('Layer weight shape ' +\n str(pv.shape) +\n ' not compatible with '\n 'provided weight shape ' + str(w.shape))\n weight_value_tuples.append((p, w))\n K.batch_set_value(weight_value_tuples)\n\n def get_weights(self):\n \"\"\"Returns the current weights of the layer.\n\n # Returns\n Weights values as a list of numpy arrays.\n \"\"\"\n params = self.weights\n return K.batch_get_value(params)\n\n def get_config(self):\n \"\"\"Returns the config of the layer.\n\n A layer config is a Python dictionary (serializable)\n containing the configuration of a layer.\n The same layer can be reinstantiated later\n (without its trained weights) from this configuration.\n\n The config of a layer does not include connectivity\n information, nor the layer class name. These are handled\n by `Container` (one layer of abstraction above).\n\n # Returns\n Python dictionary.\n \"\"\"\n config = {'name': self.name,\n 'trainable': self.trainable}\n if hasattr(self, 'batch_input_shape'):\n config['batch_input_shape'] = self.batch_input_shape\n if hasattr(self, 'dtype'):\n config['dtype'] = self.dtype\n return config\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates a layer from its config.\n\n This method is the reverse of `get_config`,\n capable of instantiating the same layer from the config\n dictionary. It does not handle layer connectivity\n (handled by Container), nor weights (handled by `set_weights`).\n\n # Arguments\n config: A Python dictionary, typically the\n output of get_config.\n\n # Returns\n A layer instance.\n \"\"\"\n return cls(**config)\n\n def count_params(self):\n \"\"\"Count the total number of scalars composing the weights.\n\n # Returns\n An integer count.\n\n # Raises\n RuntimeError: if the layer isn't yet built\n (in which case its weights aren't yet defined).\n \"\"\"\n if not self.built:\n if self.__class__.__name__ == 'Sequential':\n self.build()\n else:\n raise RuntimeError('You tried to call `count_params` on ' +\n self.name + ', but the layer isn\\'t built. 
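# Illustrative sketch, assuming the Keras 2.0.x API above: the config
# round-trip reinstantiates a layer's hyperparameters but not its weights
# or connectivity; weights travel separately through get/set_weights().
from keras.layers import Dense

src = Dense(4, activation='relu')
clone = Dense.from_config(src.get_config())   # same units/activation/name
# clone is unbuilt: clone.count_params() would raise RuntimeError here,
# and clone.get_weights() stays empty until the layer is first called.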
'\n 'You can build it manually via: `' +\n self.name + '.build(batch_input_shape)`.')\n return sum([K.count_params(p) for p in self.weights])\n\n\nclass InputLayer(Layer):\n \"\"\"Layer to be used as an entry point into a graph.\n\n It can either wrap an existing tensor (pass an `input_tensor` argument)\n or create its a placeholder tensor (pass arguments `input_shape`\n or `batch_input_shape` as well as `dtype`).\n\n # Arguments\n input_shape: Shape tuple, not including the batch axis.\n batch_size: Optional input batch size (integer or None).\n batch_input_shape: Shape tuple, including the batch axis.\n dtype: Datatype of the input.\n input_tensor: Optional tensor to use as layer input\n instead of creating a placeholder.\n sparse: Boolean, whether the placeholder created\n is meant to be sparse.\n name: Name of the layer (string).\n \"\"\"\n\n @interfaces.legacy_input_support\n def __init__(self, input_shape=None, batch_size=None,\n batch_input_shape=None,\n dtype=None, input_tensor=None, sparse=False, name=None):\n if not name:\n prefix = 'input'\n name = prefix + '_' + str(K.get_uid(prefix))\n super(InputLayer, self).__init__(dtype=dtype, name=name)\n\n self.trainable = False\n self.built = True\n self.sparse = sparse\n\n if input_shape and batch_input_shape:\n raise ValueError('Only provide the input_shape OR '\n 'batch_input_shape argument to '\n 'InputLayer, not both at the same time.')\n if input_tensor is not None and batch_input_shape is None:\n # If input_tensor is set, and batch_input_shape is not set:\n # Attempt automatic input shape inference.\n try:\n batch_input_shape = K.int_shape(input_tensor)\n except TypeError:\n if not input_shape and not batch_input_shape:\n raise ValueError('InputLayer was provided '\n 'an input_tensor argument, '\n 'but its input shape cannot be '\n 'automatically inferred. 
'\n 'You should pass an input_shape or '\n 'batch_input_shape argument.')\n if not batch_input_shape:\n if not input_shape:\n raise ValueError('An Input layer should be passed either '\n 'a `batch_input_shape` or an `input_shape`.')\n else:\n batch_input_shape = (batch_size,) + tuple(input_shape)\n else:\n batch_input_shape = tuple(batch_input_shape)\n\n if not dtype:\n if input_tensor is None:\n dtype = K.floatx()\n else:\n dtype = K.dtype(input_tensor)\n\n self.batch_input_shape = batch_input_shape\n self.dtype = dtype\n\n if input_tensor is None:\n self.is_placeholder = True\n input_tensor = K.placeholder(shape=batch_input_shape,\n dtype=dtype,\n sparse=self.sparse,\n name=self.name)\n else:\n self.is_placeholder = False\n input_tensor._keras_shape = batch_input_shape\n # Create an input node to add to self.outbound_node\n # and set output_tensors' _keras_history.\n input_tensor._uses_learning_phase = False\n input_tensor._keras_history = (self, 0, 0)\n Node(self,\n inbound_layers=[],\n node_indices=[],\n tensor_indices=[],\n input_tensors=[input_tensor],\n output_tensors=[input_tensor],\n input_masks=[None],\n output_masks=[None],\n input_shapes=[batch_input_shape],\n output_shapes=[batch_input_shape])\n\n def get_config(self):\n config = {'batch_input_shape': self.batch_input_shape,\n 'dtype': self.dtype,\n 'sparse': self.sparse,\n 'name': self.name}\n return config\n\n\ndef Input(shape=None, batch_shape=None,\n name=None, dtype=K.floatx(), sparse=False,\n tensor=None):\n \"\"\"`Input()` is used to instantiate a Keras tensor.\n\n A Keras tensor is a tensor object from the underlying backend\n (Theano or TensorFlow), which we augment with certain\n attributes that allow us to build a Keras model\n just by knowing the inputs and outputs of the model.\n\n For instance, if a, b and c are Keras tensors,\n it becomes possible to do:\n `model = Model(input=[a, b], output=c)`\n\n The added Keras attributes are:\n ._keras_shape: Integer shape tuple propagated\n via Keras-side shape inference.\n ._keras_history: Last layer applied to the tensor.\n the entire layer graph is retrievable from that layer,\n recursively.\n\n # Arguments\n shape: A shape tuple (integer), not including the batch size.\n For instance, `shape=(32,)` indicates that the expected input\n will be batches of 32-dimensional vectors.\n batch_shape: A shape tuple (integer), including the batch size.\n For instance, `batch_shape=(10, 32)` indicates that\n the expected input will be batches of 10 32-dimensional vectors.\n `batch_shape=(None, 32)` indicates batches of an arbitrary number\n of 32-dimensional vectors.\n name: An optional name string for the layer.\n Should be unique in a model (do not reuse the same name twice).\n It will be autogenerated if it isn't provided.\n dtype: The data type expected by the input, as a string\n (`float32`, `float64`, `int32`...)\n sparse: A boolean specifying whether the placeholder\n to be created is sparse.\n tensor: Optional existing tensor to wrap into the `Input` layer.\n If set, the layer will not create a placeholder tensor.\n\n # Returns\n A tensor.\n\n # Example\n\n ```python\n # this is a logistic regression in Keras\n x = Input(shape=(32,))\n y = Dense(16, activation='softmax')(x)\n model = Model(x, y)\n ```\n \"\"\"\n if not batch_shape and tensor is None:\n assert shape, ('Please provide to Input either a `shape`'\n ' or a `batch_shape` argument. 
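# Illustrative sketch, assuming the Keras 2.0.x API above: Input() builds
# an InputLayer and hands back its annotated tensor; a pre-existing
# backend tensor can be wrapped instead of creating a new placeholder.
from keras.layers import Input
from keras import backend as K

x = Input(shape=(32,))                 # placeholder, shape (None, 32)
assert x._keras_shape == (None, 32)
layer, node_index, tensor_index = x._keras_history
assert node_index == 0 and tensor_index == 0

t = K.placeholder(shape=(None, 32))    # existing tensor: no new placeholder,
x2 = Input(tensor=t)                   # shape inferred via K.int_shape(t)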
Note that '\n '`shape` does not include the batch '\n 'dimension.')\n if shape and not batch_shape:\n batch_shape = (None,) + tuple(shape)\n input_layer = InputLayer(batch_input_shape=batch_shape,\n name=name, dtype=dtype,\n sparse=sparse,\n input_tensor=tensor)\n # Return tensor including _keras_shape and _keras_history.\n # Note that in this case train_output and test_output are the same pointer.\n outputs = input_layer.inbound_nodes[0].output_tensors\n if len(outputs) == 1:\n return outputs[0]\n else:\n return outputs\n\n\nclass Container(Layer):\n \"\"\"A Container is a directed acyclic graph of layers.\n\n It is the topological form of a \"model\". A Model\n is simply a Container with added training routines.\n\n # Properties\n name\n inputs\n outputs\n input_layers\n output_layers\n input_spec (list of class instances)\n each entry describes one required input:\n - ndim\n - dtype\n trainable (boolean)\n input_shape\n output_shape\n inbound_nodes: list of nodes\n outbound_nodes: list of nodes\n trainable_weights (list of variables)\n non_trainable_weights (list of variables)\n constraints (list of tuples (weight, constraint))\n\n # Methods\n summary\n get_layer\n get_weights\n set_weights\n get_config\n compute_output_shape\n\n # Class Methods\n from_config\n\n # Raises\n TypeError: if input tensors are not Keras tensors from InputLayer objects\n \"\"\"\n\n @interfaces.legacy_model_constructor_support\n def __init__(self, inputs, outputs, name=None):\n # Handle `name` argument.\n if not name:\n prefix = self.__class__.__name__.lower()\n name = prefix + '_' + str(K.get_uid(prefix))\n self.name = name\n\n self.supports_masking = False\n self.trainable = True\n self._per_input_losses = {}\n self._per_input_updates = {}\n\n # Container-specific properties.\n if isinstance(inputs, (list, tuple)):\n self.inputs = list(inputs) # Tensor or list of tensors.\n else:\n self.inputs = [inputs]\n if isinstance(outputs, (list, tuple)):\n self.outputs = list(outputs)\n else:\n self.outputs = [outputs]\n\n # Check for redundancy in inputs.\n if len(set(self.inputs)) != len(self.inputs):\n raise ValueError('The list of inputs passed to the model '\n 'is redundant. '\n 'All inputs should only appear once.'\n ' Found: ' + str(self.inputs))\n\n # Check for redundancy in outputs.\n if len(set(self.outputs)) != len(self.outputs):\n warnings.warn('The list of outputs passed to the model '\n 'is redundant. '\n 'All outputs should only appear once.'\n ' Found: ' + str(self.outputs))\n\n # List of initial layers (1 to 1 mapping with self.inputs,\n # hence the same layer might appear twice)\n self.input_layers = []\n self.input_layers_node_indices = []\n self.input_layers_tensor_indices = []\n # list of layers (1 to 1 mapping with self.inputs,\n # hence the same layer might appear twice)\n self.output_layers = []\n self.output_layers_node_indices = []\n self.output_layers_tensor_indices = []\n # all layers in order of horizontal graph traversal.\n # Entries are unique. Includes input and output layers.\n self.layers = []\n\n # This is for performance optimization\n # when calling the Container on new inputs.\n # every time the Container is called on a set on input tensors,\n # we compute the output tensors,\n # output masks and output shapes in one pass,\n # then cache them here. 
When one of these output is queried later,\n # we retrieve it from there instead of recomputing it.\n self._output_mask_cache = {}\n self._output_tensor_cache = {}\n self._output_shape_cache = {}\n\n # User-provided arguments validation.\n for x in self.inputs:\n # Check that x is a Keras tensor.\n if not hasattr(x, '_keras_history'):\n cls_name = self.__class__.__name__\n raise TypeError('Input tensors to a ' + cls_name + ' ' +\n 'must be Keras tensors. Found: ' + str(x) +\n ' (missing Keras metadata).')\n # Check that x is an input tensor.\n layer, node_index, tensor_index = x._keras_history\n if len(layer.inbound_nodes) > 1 or (layer.inbound_nodes and layer.inbound_nodes[0].inbound_layers):\n cls_name = self.__class__.__name__\n warnings.warn(cls_name + ' inputs must come from '\n 'a Keras Input layer, '\n 'they cannot be the output of '\n 'a previous non-Input layer. '\n 'Here, a tensor specified as '\n 'input to \"' + self.name +\n '\" was not an Input tensor, '\n 'it was generated by layer ' +\n layer.name + '.\\n'\n 'Note that input tensors are '\n 'instantiated via `tensor = Input(shape)`.\\n'\n 'The tensor that caused the issue was: ' +\n str(x.name))\n for x in self.outputs:\n if not hasattr(x, '_keras_history'):\n cls_name = self.__class__.__name__\n raise TypeError('Output tensors to a ' + cls_name + ' must be '\n 'Keras tensors. Found: ' + str(x))\n # Build self.output_layers:\n for x in self.outputs:\n layer, node_index, tensor_index = x._keras_history\n self.output_layers.append(layer)\n self.output_layers_node_indices.append(node_index)\n self.output_layers_tensor_indices.append(tensor_index)\n\n # Fill in the output mask cache.\n masks = []\n for x in self.inputs:\n layer, node_index, tensor_index = x._keras_history\n node = layer.inbound_nodes[node_index]\n mask = node.output_masks[tensor_index]\n masks.append(mask)\n mask_cache_key = ','.join([str(id(x)) for x in self.inputs])\n mask_cache_key += '_' + ','.join([str(id(x)) for x in masks])\n masks = []\n for x in self.outputs:\n layer, node_index, tensor_index = x._keras_history\n node = layer.inbound_nodes[node_index]\n mask = node.output_masks[tensor_index]\n masks.append(mask)\n if len(masks) == 1:\n mask = masks[0]\n else:\n mask = masks\n self._output_mask_cache[mask_cache_key] = mask\n\n # Build self.input_layers:\n for x in self.inputs:\n layer, node_index, tensor_index = x._keras_history\n # It's supposed to be an input layer, so only one node\n # and one tensor output.\n assert node_index == 0\n assert tensor_index == 0\n self.input_layers.append(layer)\n self.input_layers_node_indices.append(node_index)\n self.input_layers_tensor_indices.append(tensor_index)\n\n # Build self.input_names and self.output_names.\n self.input_names = []\n self.output_names = []\n self._feed_input_names = []\n self._feed_inputs = []\n self._feed_input_shapes = []\n for i, layer in enumerate(self.input_layers):\n # Check that layer is an InputLayer.\n if not isinstance(layer, InputLayer):\n raise TypeError(\n 'Input layers to a `Model` must be `InputLayer` objects. '\n 'Received inputs: {}. 
'\n 'Input {} (0-based) originates '\n 'from layer type `{}`.'.format(inputs,\n i,\n layer.__class__.__name__))\n self.input_names.append(layer.name)\n if layer.is_placeholder:\n self._feed_input_names.append(layer.name)\n self._feed_inputs.append(layer.input)\n self._feed_input_shapes.append(self.inputs[i]._keras_shape)\n for layer in self.output_layers:\n self.output_names.append(layer.name)\n\n self.internal_input_shapes = [x._keras_shape for x in self.inputs]\n self.internal_output_shapes = [x._keras_shape for x in self.outputs]\n\n # Container_nodes: set of nodes included in the graph\n # (not all nodes included in the layers\n # are relevant to the current graph).\n container_nodes = set() # ids of all nodes relevant to the Container\n nodes_depths = {} # dict {node: depth value}\n layers_depths = {} # dict {layer: depth value}\n layer_indices = {} # dict {layer: index in traversal}\n nodes_in_decreasing_depth = []\n\n def build_map_of_graph(tensor, finished_nodes, nodes_in_progress,\n layer=None, node_index=None, tensor_index=None):\n \"\"\"Builds a map of the graph of layers.\n\n This recursively updates the map `layer_indices`,\n the list `nodes_in_decreasing_depth` and the set `container_nodes`.\n\n # Arguments\n tensor: Some tensor in a graph.\n finished_nodes: Set of nodes whose subgraphs have been traversed\n completely. Useful to prevent duplicated work.\n nodes_in_progress: Set of nodes that are currently active on the\n recursion stack. Useful to detect cycles.\n layer: Layer from which `tensor` comes from. If not provided,\n will be obtained from `tensor._keras_history`.\n node_index: Node index from which `tensor` comes from.\n tensor_index: Tensor_index from which `tensor` comes from.\n\n # Raises\n RuntimeError: if a cycle is detected.\n \"\"\"\n if not layer or node_index is None or tensor_index is None:\n layer, node_index, tensor_index = tensor._keras_history\n node = layer.inbound_nodes[node_index]\n\n # Prevent cycles.\n if node in nodes_in_progress:\n raise RuntimeError(\n 'The tensor ' + str(tensor) + ' at layer \"' +\n layer.name + '\" is part of a cycle.')\n\n # Don't repeat work for shared subgraphs\n if node in finished_nodes:\n return\n\n node_key = layer.name + '_ib-' + str(node_index)\n # Update container_nodes.\n container_nodes.add(node_key)\n\n # Store the traversal order for layer sorting.\n if layer not in layer_indices:\n layer_indices[layer] = len(layer_indices)\n\n nodes_in_progress.add(node)\n\n # Propagate to all previous tensors connected to this node.\n for i in range(len(node.inbound_layers)):\n x = node.input_tensors[i]\n layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n tensor_index = node.tensor_indices[i]\n build_map_of_graph(x, finished_nodes, nodes_in_progress,\n layer, node_index, tensor_index)\n\n finished_nodes.add(node)\n nodes_in_progress.remove(node)\n\n nodes_in_decreasing_depth.append(node)\n\n finished_nodes = set()\n nodes_in_progress = set()\n for x in self.outputs:\n build_map_of_graph(x, finished_nodes, nodes_in_progress)\n\n for node in reversed(nodes_in_decreasing_depth):\n # If the depth is not set, the node has no outbound nodes (depth 0).\n depth = nodes_depths.setdefault(node, 0)\n\n # Update the depth of the corresponding layer\n previous_depth = layers_depths.get(node.outbound_layer, 0)\n # If we've seen this layer before at a higher depth, we should use that depth instead\n # of the node depth. 
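# Illustrative sketch, assuming the Keras 2.0.x API above: the traversal
# below assigns each node a depth (outputs at 0, inputs deepest), and
# Container.layers is later sorted by decreasing depth. A diamond-shaped
# graph still lists every layer exactly once:
from keras.layers import Input, Dense, add
from keras.models import Model

x = Input(shape=(4,))
h = Dense(4)(x)
y = add([h, x])                        # x feeds both Dense and Add
m = Model(x, y)
assert [l.__class__.__name__ for l in m.layers] == \
    ['InputLayer', 'Dense', 'Add']     # inputs first, outputs last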
This is necessary for shared layers that have inputs at different\n # depth levels in the graph.\n depth = max(depth, previous_depth)\n layers_depths[node.outbound_layer] = depth\n nodes_depths[node] = depth\n\n # Update the depth of inbound nodes.\n for i in range(len(node.inbound_layers)):\n inbound_layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n inbound_node = inbound_layer.inbound_nodes[node_index]\n previous_depth = nodes_depths.get(inbound_node, 0)\n nodes_depths[inbound_node] = max(depth + 1, previous_depth)\n\n # Build a dict {depth: list of nodes with this depth}\n nodes_by_depth = {}\n for node, depth in nodes_depths.items():\n if depth not in nodes_by_depth:\n nodes_by_depth[depth] = []\n nodes_by_depth[depth].append(node)\n\n # Build a dict {depth: list of layers with this depth}\n layers_by_depth = {}\n for layer, depth in layers_depths.items():\n if depth not in layers_by_depth:\n layers_by_depth[depth] = []\n layers_by_depth[depth].append(layer)\n\n # Get sorted list of layer depths.\n depth_keys = list(layers_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Set self.layers and self.layers_by_depth.\n layers = []\n for depth in depth_keys:\n layers_for_depth = layers_by_depth[depth]\n # Container.layers needs to have a deterministic order:\n # here we order them by traversal order.\n layers_for_depth.sort(key=lambda x: layer_indices[x])\n for layer in layers_for_depth:\n layers.append(layer)\n self.layers = layers\n self.layers_by_depth = layers_by_depth\n\n # Get sorted list of node depths.\n depth_keys = list(nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Check that all tensors required are computable.\n # computable_tensors: all tensors in the graph\n # that can be computed from the inputs provided.\n computable_tensors = []\n for x in self.inputs:\n computable_tensors.append(x)\n\n layers_with_complete_input = [] # To provide a better error msg.\n for depth in depth_keys:\n for node in nodes_by_depth[depth]:\n layer = node.outbound_layer\n if layer:\n for x in node.input_tensors:\n if x not in computable_tensors:\n raise RuntimeError(\n 'Graph disconnected: '\n 'cannot obtain value for tensor ' +\n str(x) + ' at layer \"' + layer.name + '\". '\n 'The following previous layers '\n 'were accessed without issue: ' +\n str(layers_with_complete_input))\n for x in node.output_tensors:\n computable_tensors.append(x)\n layers_with_complete_input.append(layer.name)\n\n # Set self.nodes and self.nodes_by_depth.\n self.container_nodes = container_nodes\n self.nodes_by_depth = nodes_by_depth\n\n # Ensure name unicity, which will be crucial for serialization\n # (since serialized nodes refer to layers by their name).\n all_names = [layer.name for layer in self.layers]\n for name in all_names:\n if all_names.count(name) != 1:\n raise RuntimeError('The name \"' + name + '\" is used ' +\n str(all_names.count(name)) +\n ' times in the model. 
'\n 'All layer names should be unique.')\n\n # Layer parameters.\n # The new container starts with a single inbound node\n # for its inputs, and no outbound nodes.\n self.outbound_nodes = [] # Will be appended to by future calls to __call__\n self.inbound_nodes = [] # Will be appended to below, and by future calls to __call__\n # Create the node linking internal inputs to internal outputs.\n Node(outbound_layer=self,\n inbound_layers=[],\n node_indices=[],\n tensor_indices=[],\n input_tensors=self.inputs,\n output_tensors=self.outputs,\n # No container-level masking for now.\n input_masks=[None for _ in self.inputs],\n output_masks=[None for _ in self.outputs],\n input_shapes=[x._keras_shape for x in self.inputs],\n output_shapes=[x._keras_shape for x in self.outputs])\n self.built = True\n\n # The following are implemented as property functions:\n # self.constraints\n # self.trainable_weights\n # self.non_trainable_weights\n # self.input_spec\n\n def get_layer(self, name=None, index=None):\n \"\"\"Retrieves a layer based on either its name (unique) or index.\n\n Indices are based on order of horizontal graph traversal (bottom-up).\n\n # Arguments\n name: String, name of layer.\n index: Integer, index of layer.\n\n # Returns\n A layer instance.\n\n # Raises\n ValueError: In case of invalid layer name or index.\n \"\"\"\n # It would be unreliable to build a dictionary\n # based on layer names, because names can potentially\n # be changed at any point by the user\n # without the container being notified of it.\n if index is not None:\n if len(self.layers) <= index:\n raise ValueError('Was asked to retrieve layer at index ' +\n str(index) + ' but model only has ' +\n str(len(self.layers)) + ' layers.')\n else:\n return self.layers[index]\n else:\n if not name:\n raise ValueError('Provide either a layer name or layer index.')\n layer = None\n for layer in self.layers:\n if layer.name == name:\n return layer\n if not layer:\n raise ValueError('No such layer: ' + name)\n\n @property\n def updates(self):\n \"\"\"Retrieve the model's updates.\n\n Will only include updates that are either\n inconditional, or conditional on inputs to this model\n (e.g. will not include updates that depend on tensors\n that aren't inputs to this model).\n\n # Returns\n A list of update ops.\n \"\"\"\n updates = []\n for layer in self.layers:\n if hasattr(layer, 'updates'):\n # Collect updates that are dependent on inputs\n # that are part of the model.\n for node_index, node in enumerate(layer.inbound_nodes):\n node_key = layer.name + '_ib-' + str(node_index)\n if node_key in self.container_nodes:\n # The model owns this layer node.\n inputs = node.input_tensors\n updates += layer.get_updates_for(inputs)\n # Collect unconditional updates.\n updates += layer.get_updates_for(None)\n return updates\n\n @property\n def losses(self):\n \"\"\"Retrieve the model's losses.\n\n Will only include losses that are either\n inconditional, or conditional on inputs to this model\n (e.g. 
will not include losses that depend on tensors\n that aren't inputs to this model).\n\n # Returns\n A list of loss tensors.\n \"\"\"\n losses = []\n # Retrieve losses for all internal layers.\n for layer in self.layers:\n if hasattr(layer, 'losses'):\n # Collect losses that are dependent on inputs\n # that are part of the model.\n for node_index, node in enumerate(layer.inbound_nodes):\n node_key = layer.name + '_ib-' + str(node_index)\n if node_key in self.container_nodes:\n # The model owns this layer node.\n inputs = node.input_tensors\n losses += layer.get_losses_for(inputs)\n # Collect unconditional losses.\n losses += layer.get_losses_for(None)\n # Add any potential unconditional model-level loss.\n losses += self.get_losses_for(None)\n return losses\n\n @property\n def uses_learning_phase(self):\n return any([x._uses_learning_phase for x in self.outputs])\n\n @property\n def stateful(self):\n return any([(hasattr(layer, 'stateful') and layer.stateful) for layer in self.layers])\n\n def reset_states(self):\n for layer in self.layers:\n if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):\n layer.reset_states()\n\n @property\n def state_updates(self):\n \"\"\"Returns the `updates` from all layers that are stateful.\n\n This is useful for separating training updates and\n state updates, e.g. when we need to update a layer's internal state\n during prediction.\n\n # Returns\n A list of update ops.\n \"\"\"\n state_updates = []\n for layer in self.layers:\n if getattr(layer, 'stateful', False):\n if hasattr(layer, 'updates'):\n state_updates += layer.updates\n return state_updates\n\n @property\n def constraints(self):\n cons = {}\n for layer in self.layers:\n for key, value in layer.constraints.items():\n if key in cons and cons[key] != value:\n raise ValueError('Received multiple constraints '\n 'for one weight tensor: ' + str(key))\n cons[key] = value\n return cons\n\n @property\n def trainable_weights(self):\n if not self.trainable:\n return []\n weights = []\n for layer in self.layers:\n weights += layer.trainable_weights\n return weights\n\n @property\n def non_trainable_weights(self):\n weights = []\n for layer in self.layers:\n weights += layer.non_trainable_weights\n if not self.trainable:\n trainable_weights = []\n for layer in self.layers:\n trainable_weights += layer.trainable_weights\n return trainable_weights + weights\n return weights\n\n def get_weights(self):\n \"\"\"Retrieves the weights of the model.\n\n # Returns\n A flat list of Numpy arrays.\n \"\"\"\n weights = []\n for layer in self.layers:\n weights += layer.weights\n return K.batch_get_value(weights)\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the model.\n\n # Arguments\n weights: A list of Numpy arrays with shapes and types matching\n the output of `model.get_weights()`.\n \"\"\"\n tuples = []\n for layer in self.layers:\n num_param = len(layer.weights)\n layer_weights = weights[:num_param]\n for sw, w in zip(layer.weights, layer_weights):\n tuples.append((sw, w))\n weights = weights[num_param:]\n K.batch_set_value(tuples)\n\n @property\n def input_spec(self):\n \"\"\"Gets the model's input specs.\n\n # Returns\n A list of `InputSpec` instances (one per input to the model)\n or a single instance if the model has only one input.\n \"\"\"\n specs = []\n for layer in getattr(self, 'input_layers', []):\n if layer.input_spec is None:\n specs.append(None)\n else:\n if not isinstance(layer.input_spec, list):\n raise TypeError('Layer ' + layer.name +\n ' has an input_spec 
attribute that '\n 'is not a list. We expect a list. '\n 'Found input_spec = ' +\n str(layer.input_spec))\n specs += layer.input_spec\n if len(specs) == 1:\n return specs[0]\n return specs\n\n def call(self, inputs, mask=None):\n \"\"\"Call the model on new inputs.\n\n In this case `call` just reapplies\n all ops in the graph to the new inputs\n (e.g. build a new computational graph from the provided inputs).\n\n A model is callable on non-Keras tensors.\n\n # Arguments\n inputs: A tensor or list of tensors.\n mask: A mask or list of masks. A mask can be\n either a tensor or None (no mask).\n\n # Returns\n A tensor if there is a single output, or\n a list of tensors if there are more than one outputs.\n \"\"\"\n inputs = _to_list(inputs)\n if mask is None:\n masks = [None for _ in range(len(inputs))]\n else:\n masks = _to_list(mask)\n cache_key = ','.join([str(id(x)) for x in inputs])\n cache_key += '_' + ','.join([str(id(x)) for x in masks])\n if cache_key in self._output_tensor_cache:\n return self._output_tensor_cache[cache_key]\n else:\n output_tensors, _, _ = self.run_internal_graph(inputs, masks)\n return output_tensors\n\n def compute_mask(self, inputs, mask):\n inputs = _to_list(inputs)\n if mask is None:\n masks = [None for _ in range(len(inputs))]\n else:\n masks = _to_list(mask)\n cache_key = ','.join([str(id(x)) for x in inputs])\n cache_key += '_' + ','.join([str(id(x)) for x in masks])\n if cache_key in self._output_mask_cache:\n return self._output_mask_cache[cache_key]\n else:\n _, output_masks, _ = self.run_internal_graph(inputs, masks)\n return output_masks\n\n def compute_output_shape(self, input_shape):\n input_shapes = _to_list(input_shape)\n if len(input_shapes) != len(self.input_layers):\n raise ValueError('Invalid input_shape argument ' +\n str(input_shape) + ': model has ' +\n str(len(self.input_layers)) + ' tensor inputs.')\n\n cache_key = ','.join([str(x) for x in input_shapes])\n if cache_key in self._output_shape_cache:\n output_shapes = self._output_shape_cache[cache_key]\n if isinstance(output_shapes, list) and len(output_shapes) == 1:\n return output_shapes[0]\n return output_shapes\n else:\n # Bad luck, we have to run the graph manually.\n layers_to_output_shapes = {}\n for i in range(len(input_shapes)):\n layer = self.input_layers[i]\n input_shape = input_shapes[i]\n # It's an input layer: compute_output_shape is identity,\n # and there is only one node and one tensor output.\n shape_key = layer.name + '_0_0'\n layers_to_output_shapes[shape_key] = input_shape\n\n depth_keys = list(self.nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n # Iterate over nodes, by depth level.\n if len(depth_keys) > 1:\n for depth in depth_keys:\n nodes = self.nodes_by_depth[depth]\n for node in nodes:\n # This is always a single layer, never a list.\n layer = node.outbound_layer\n if layer in self.input_layers:\n # We've already covered the input layers\n # a few lines above.\n continue\n # Potentially redundant list,\n # same size of node.input_tensors.\n input_shapes = []\n for j in range(len(node.inbound_layers)):\n inbound_layer = node.inbound_layers[j]\n node_index = node.node_indices[j]\n tensor_index = node.tensor_indices[j]\n shape_key = inbound_layer.name + '_%s_%s' % (node_index, tensor_index)\n input_shape = layers_to_output_shapes[shape_key]\n input_shapes.append(input_shape)\n\n if len(input_shapes) == 1:\n output_shape = layer.compute_output_shape(input_shapes[0])\n else:\n output_shape = layer.compute_output_shape(input_shapes)\n\n output_shapes = 
_to_list(output_shape)\n node_index = layer.inbound_nodes.index(node)\n for j in range(len(output_shapes)):\n shape_key = layer.name + '_%s_%s' % (node_index, j)\n layers_to_output_shapes[shape_key] = output_shapes[j]\n\n # Read final output shapes from layers_to_output_shapes.\n output_shapes = []\n output_shape_keys = []\n for i in range(len(self.output_layers)):\n layer = self.output_layers[i]\n node_index = self.output_layers_node_indices[i]\n tensor_index = self.output_layers_tensor_indices[i]\n shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)\n output_shape_keys.append(shape_key)\n\n for i, key in enumerate(output_shape_keys):\n assert key in layers_to_output_shapes\n output_shapes.append(layers_to_output_shapes[key])\n # Store in cache.\n self._output_shape_cache[cache_key] = output_shapes\n if isinstance(output_shapes, list) and len(output_shapes) == 1:\n return output_shapes[0]\n return output_shapes\n\n def run_internal_graph(self, inputs, masks=None):\n \"\"\"Computes output tensors for new inputs.\n\n # Note:\n - Expects `inputs` to be a list (potentially with 1 element).\n - Can be run on non-Keras tensors.\n\n # Arguments\n inputs: List of tensors\n masks: List of masks (tensors or None).\n\n # Returns\n Three lists: output_tensors, output_masks, output_shapes\n \"\"\"\n if masks is None:\n masks = [None for _ in range(len(inputs))]\n\n # Dictionary mapping reference tensors to tuples\n # (computed tensor, compute mask)\n # we assume a 1:1 mapping from tensor to mask\n # TODO: raise exception when a `.compute_mask()` call\n # does not return a list the same size as `call`\n tensor_map = {}\n for x, y, mask in zip(self.inputs, inputs, masks):\n tensor_map[str(id(x))] = (y, mask)\n\n depth_keys = list(self.nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n for depth in depth_keys:\n nodes = self.nodes_by_depth[depth]\n for node in nodes:\n # This is always a single layer, never a list.\n layer = node.outbound_layer\n\n reference_input_tensors = node.input_tensors\n reference_output_tensors = node.output_tensors\n\n # If all previous input tensors are available in tensor_map,\n # then call node.inbound_layer on them.\n computed_data = [] # List of tuples (input, mask).\n for x in reference_input_tensors:\n if str(id(x)) in tensor_map:\n computed_data.append(tensor_map[str(id(x))])\n\n if len(computed_data) == len(reference_input_tensors):\n # call layer\n with K.name_scope(layer.name):\n if node.arguments:\n kwargs = node.arguments\n else:\n kwargs = {}\n if len(computed_data) == 1:\n computed_tensor, computed_mask = computed_data[0]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_mask\n output_tensors = _to_list(layer.call(computed_tensor, **kwargs))\n output_masks = _to_list(layer.compute_mask(computed_tensor,\n computed_mask))\n computed_tensors = [computed_tensor]\n computed_masks = [computed_mask]\n else:\n computed_tensors = [x[0] for x in computed_data]\n computed_masks = [x[1] for x in computed_data]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_masks\n output_tensors = _to_list(layer.call(computed_tensors, **kwargs))\n output_masks = _to_list(layer.compute_mask(computed_tensors,\n computed_masks))\n\n # Apply activity regularizer if any:\n if hasattr(layer, 'activity_regularizer') and layer.activity_regularizer is not None:\n regularization_losses = [layer.activity_regularizer(x) for x in computed_tensors]\n layer.add_loss(regularization_losses, computed_tensors)\n\n 
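# Illustrative sketch, assuming the Keras 2.0.x API above: a Container is
# itself a Layer, so calling a built Model on a fresh tensor re-runs
# run_internal_graph() over the new inputs; the result is memoized in
# _output_tensor_cache, keyed by the ids of the input tensors.
from keras.layers import Input, Dense
from keras.models import Model

x = Input(shape=(8,))
m = Model(x, Dense(4)(x))
x2 = Input(shape=(8,))
y2 = m(x2)            # graph reapplied to x2, weights shared with m
assert m(x2) is y2    # second identical call is served from the cache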
# Update model updates and losses:\n # Keep track of updates that depend on the inputs\n # (e.g. BN updates).\n self.add_update(layer.get_updates_for(computed_tensors), inputs)\n # Keep track of unconditional updates (e.g. a counter).\n self.add_update(layer.get_updates_for(None), None)\n # Keep track of losses that depend on the inputs\n # (e.g. activity regularizers).\n self.add_loss(layer.get_losses_for(computed_tensors), inputs)\n # Keep track of unconditional losses\n # (e.g. weight regularizers).\n self.add_loss(layer.get_losses_for(None), None)\n\n # Update _keras_shape.\n if all([hasattr(x, '_keras_shape') for x in computed_tensors]):\n if len(computed_tensors) == 1:\n shapes = _to_list(layer.compute_output_shape(computed_tensors[0]._keras_shape))\n uses_learning_phase = computed_tensors[0]._uses_learning_phase\n else:\n shapes = _to_list(layer.compute_output_shape([x._keras_shape for x in computed_tensors]))\n uses_learning_phase = any([x._uses_learning_phase for x in computed_tensors])\n for x, s in zip(output_tensors, shapes):\n x._keras_shape = s\n x._uses_learning_phase = getattr(x, '_uses_learning_phase', False) or uses_learning_phase\n\n # Update tensor_map.\n for x, y, mask in zip(reference_output_tensors, output_tensors, output_masks):\n tensor_map[str(id(x))] = (y, mask)\n\n output_tensors = []\n output_masks = []\n output_shapes = []\n for x in self.outputs:\n assert str(id(x)) in tensor_map, 'Could not compute output ' + str(x)\n tensor, mask = tensor_map[str(id(x))]\n if hasattr(tensor, '_keras_shape') and output_shapes is not None:\n shape = tensor._keras_shape\n output_shapes.append(shape)\n else:\n output_shapes = None\n output_tensors.append(tensor)\n output_masks.append(mask)\n\n # Update cache;\n # keys are based on ids on input tensors and inputs masks.\n cache_key = ','.join([str(id(x)) for x in inputs])\n cache_key += '_' + ','.join([str(id(x)) for x in masks])\n\n if len(output_tensors) == 1:\n output_tensors = output_tensors[0]\n self._output_tensor_cache[cache_key] = output_tensors\n else:\n self._output_tensor_cache[cache_key] = output_tensors\n\n if len(output_masks) == 1:\n output_masks = output_masks[0]\n self._output_mask_cache[cache_key] = output_masks\n else:\n self._output_mask_cache[cache_key] = output_masks\n\n if output_shapes is not None:\n input_shapes = [x._keras_shape for x in inputs]\n cache_key = ','.join([str(x) for x in input_shapes])\n if len(output_shapes) == 1:\n output_shapes = output_shapes[0]\n self._output_shape_cache[cache_key] = output_shapes\n else:\n self._output_shape_cache[cache_key] = output_shapes\n return output_tensors, output_masks, output_shapes\n\n def get_config(self):\n config = {\n 'name': self.name,\n }\n node_conversion_map = {}\n for layer in self.layers:\n if issubclass(layer.__class__, Container):\n # Containers start with a pre-existing node\n # linking their input to output.\n kept_nodes = 1\n else:\n kept_nodes = 0\n for original_node_index, node in enumerate(layer.inbound_nodes):\n node_key = layer.name + '_ib-' + str(original_node_index)\n if node_key in self.container_nodes:\n node_conversion_map[node_key] = kept_nodes\n kept_nodes += 1\n layer_configs = []\n for layer in self.layers: # From the earliest layers on.\n layer_class_name = layer.__class__.__name__\n layer_config = layer.get_config()\n filtered_inbound_nodes = []\n for original_node_index, node in enumerate(layer.inbound_nodes):\n node_key = layer.name + '_ib-' + str(original_node_index)\n if node_key in self.container_nodes:\n # The node is 
relevant to the model:\n # add to filtered_inbound_nodes.\n if node.arguments:\n try:\n json.dumps(node.arguments)\n kwargs = node.arguments\n except TypeError:\n warnings.warn(\n 'Layer ' + layer.name +\n ' was passed non-serializable keyword arguments: ' +\n str(node.arguments) + '. They will not be included '\n 'in the serialized model (and thus will be missing '\n 'at deserialization time).')\n kwargs = {}\n else:\n kwargs = {}\n if node.inbound_layers:\n node_data = []\n for i in range(len(node.inbound_layers)):\n inbound_layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n tensor_index = node.tensor_indices[i]\n node_key = inbound_layer.name + '_ib-' + str(node_index)\n new_node_index = node_conversion_map.get(node_key, 0)\n node_data.append([inbound_layer.name,\n new_node_index,\n tensor_index,\n kwargs])\n filtered_inbound_nodes.append(node_data)\n layer_configs.append({\n 'name': layer.name,\n 'class_name': layer_class_name,\n 'config': layer_config,\n 'inbound_nodes': filtered_inbound_nodes,\n })\n config['layers'] = layer_configs\n\n # Gather info about inputs and outputs.\n model_inputs = []\n for i in range(len(self.input_layers)):\n layer = self.input_layers[i]\n node_index = self.input_layers_node_indices[i]\n node_key = layer.name + '_ib-' + str(node_index)\n new_node_index = node_conversion_map[node_key]\n tensor_index = self.input_layers_tensor_indices[i]\n model_inputs.append([layer.name, new_node_index, tensor_index])\n config['input_layers'] = model_inputs\n model_outputs = []\n for i in range(len(self.output_layers)):\n layer = self.output_layers[i]\n node_index = self.output_layers_node_indices[i]\n node_key = layer.name + '_ib-' + str(node_index)\n new_node_index = node_conversion_map[node_key]\n tensor_index = self.output_layers_tensor_indices[i]\n model_outputs.append([layer.name, new_node_index, tensor_index])\n config['output_layers'] = model_outputs\n return copy.deepcopy(config)\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n \"\"\"Instantiates a Model from its config (output of `get_config()`).\n\n # Arguments\n config: Model config dictionary.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n # Returns\n A model instance.\n\n # Raises\n ValueError: In case of improperly formatted config dict.\n \"\"\"\n # layer instances created during\n # the graph reconstruction process\n created_layers = {}\n\n def process_layer(layer_data):\n \"\"\"Deserialize a layer, then call it on appropriate inputs.\n\n # Arguments\n layer_data: layer config dict.\n\n # Raises\n ValueError: In case of improperly formatted `layer_data` dict.\n \"\"\"\n layer_name = layer_data['name']\n\n # Instantiate layer.\n from ..layers import deserialize as deserialize_layer\n layer = deserialize_layer(layer_data,\n custom_objects=custom_objects)\n created_layers[layer_name] = layer\n\n # Gather layer inputs.\n inbound_nodes_data = layer_data['inbound_nodes']\n for node_data in inbound_nodes_data:\n input_tensors = []\n for input_data in node_data:\n inbound_layer_name = input_data[0]\n inbound_node_index = input_data[1]\n inbound_tensor_index = input_data[2]\n if len(input_data) == 3:\n kwargs = {}\n elif len(input_data) == 4:\n kwargs = input_data[3]\n else:\n raise ValueError('Improperly formatted model config.')\n if inbound_layer_name not in created_layers:\n raise ValueError('Missing layer: ' + inbound_layer_name)\n inbound_layer = 
created_layers[inbound_layer_name]\n inbound_node = inbound_layer.inbound_nodes[inbound_node_index]\n input_tensors.append(inbound_node.output_tensors[inbound_tensor_index])\n # Call layer on its inputs, thus creating the node\n # and building the layer if needed.\n if input_tensors:\n if len(input_tensors) == 1:\n layer(input_tensors[0], **kwargs)\n else:\n layer(input_tensors, **kwargs)\n\n for layer_data in config['layers']:\n process_layer(layer_data)\n\n name = config.get('name')\n input_tensors = []\n output_tensors = []\n for layer_data in config['input_layers']:\n layer_name, node_index, tensor_index = layer_data\n assert layer_name in created_layers\n layer = created_layers[layer_name]\n layer_output_tensors = layer.inbound_nodes[node_index].output_tensors\n input_tensors.append(layer_output_tensors[tensor_index])\n for layer_data in config['output_layers']:\n layer_name, node_index, tensor_index = layer_data\n assert layer_name in created_layers\n layer = created_layers[layer_name]\n layer_output_tensors = layer.inbound_nodes[node_index].output_tensors\n output_tensors.append(layer_output_tensors[tensor_index])\n return cls(inputs=input_tensors, outputs=output_tensors, name=name)\n\n def save(self, filepath, overwrite=True, include_optimizer=True):\n \"\"\"Save the model to a single HDF5 file.\n\n The savefile includes:\n - The model architecture, allowing to re-instantiate the model.\n - The model weights.\n - The state of the optimizer, allowing to resume training\n exactly where you left off.\n\n This allows you to save the entirety of the state of a model\n in a single file.\n\n Saved models can be reinstantiated via `keras.models.load_model`.\n The model returned by `load_model`\n is a compiled model ready to be used (unless the saved model\n was never compiled in the first place).\n\n # Arguments\n filepath: String, path to the file to save the weights to.\n overwrite: Whether to silently overwrite any existing file at the\n target location, or provide the user with a manual prompt.\n include_optimizer: If True, save optimizer's state together.\n\n # Example\n\n ```python\n from keras.models import load_model\n\n model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'\n del model # deletes the existing model\n\n # returns a compiled model\n # identical to the previous one\n model = load_model('my_model.h5')\n ```\n \"\"\"\n from ..models import save_model\n save_model(self, filepath, overwrite, include_optimizer)\n\n def save_weights(self, filepath, overwrite=True):\n \"\"\"Dumps all layer weights to a HDF5 file.\n\n The weight file has:\n - `layer_names` (attribute), a list of strings\n (ordered names of model layers).\n - For every layer, a `group` named `layer.name`\n - For every such layer group, a group attribute `weight_names`,\n a list of strings\n (ordered names of weights tensor of the layer).\n - For every weight in the layer, a dataset\n storing the weight value, named after the weight tensor.\n\n # Arguments\n filepath: String, path to the file to save the weights to.\n overwrite: Whether to silently overwrite any existing file at the\n target location, or provide the user with a manual prompt.\n\n # Raises\n ImportError: If h5py is not available.\n \"\"\"\n if h5py is None:\n raise ImportError('`save_weights` requires h5py.')\n # If file exists and should not be overwritten:\n if not overwrite and os.path.isfile(filepath):\n proceed = ask_to_proceed_with_overwrite(filepath)\n if not proceed:\n return\n f = h5py.File(filepath, 'w')\n 
save_weights_to_hdf5_group(f, self.layers)\n        f.flush()\n        f.close()\n\n    def load_weights(self, filepath, by_name=False):\n        \"\"\"Loads all layer weights from a HDF5 save file.\n\n        If `by_name` is False (default) weights are loaded\n        based on the network's topology, meaning the architecture\n        should be the same as when the weights were saved.\n        Note that layers that don't have weights are not taken\n        into account in the topological ordering, so adding or\n        removing layers is fine as long as they don't have weights.\n\n        If `by_name` is True, weights are loaded into layers\n        only if they share the same name. This is useful\n        for fine-tuning or transfer-learning models where\n        some of the layers have changed.\n\n        # Arguments\n            filepath: String, path to the weights file to load.\n            by_name: Boolean, whether to load weights by name\n                or by topological order.\n\n        # Raises\n            ImportError: If h5py is not available.\n        \"\"\"\n        if h5py is None:\n            raise ImportError('`load_weights` requires h5py.')\n        f = h5py.File(filepath, mode='r')\n        if 'layer_names' not in f.attrs and 'model_weights' in f:\n            f = f['model_weights']\n        if by_name:\n            load_weights_from_hdf5_group_by_name(f, self.layers)\n        else:\n            load_weights_from_hdf5_group(f, self.layers)\n\n        if hasattr(f, 'close'):\n            f.close()\n\n    def _updated_config(self):\n        \"\"\"Util shared between different serialization methods.\n\n        # Returns\n            Model config with Keras version information added.\n        \"\"\"\n        from .. import __version__ as keras_version\n\n        config = self.get_config()\n        model_config = {\n            'class_name': self.__class__.__name__,\n            'config': config,\n            'keras_version': keras_version,\n            'backend': K.backend()\n        }\n        return model_config\n\n    def to_json(self, **kwargs):\n        \"\"\"Returns a JSON string containing the network configuration.\n\n        To load a network from a JSON save file, use\n        `keras.models.model_from_json(json_string, custom_objects={})`.\n\n        # Arguments\n            **kwargs: Additional keyword arguments\n                to be passed to `json.dumps()`.\n\n        # Returns\n            A JSON string.\n        \"\"\"\n        def get_json_type(obj):\n            # If obj is any numpy type\n            if type(obj).__module__ == np.__name__:\n                return obj.item()\n\n            # If obj is a python 'type'\n            if type(obj).__name__ == type.__name__:\n                return obj.__name__\n\n            raise TypeError('Not JSON Serializable:', obj)\n\n        model_config = self._updated_config()\n        return json.dumps(model_config, default=get_json_type, **kwargs)\n\n    def to_yaml(self, **kwargs):\n        \"\"\"Returns a yaml string containing the network configuration.\n\n        To load a network from a yaml save file, use\n        `keras.models.model_from_yaml(yaml_string, custom_objects={})`.\n\n        `custom_objects` should be a dictionary mapping\n        the names of custom losses / layers / etc to the corresponding\n        functions / classes.\n\n        # Arguments\n            **kwargs: Additional keyword arguments\n                to be passed to `yaml.dump()`.\n\n        # Returns\n            A YAML string.\n        \"\"\"\n        return yaml.dump(self._updated_config(), **kwargs)\n\n    def summary(self, line_length=None, positions=None, print_fn=print):\n        \"\"\"Prints a string summary of the network.\n\n        # Arguments\n            line_length: Total length of printed lines\n                (e.g. set this to adapt the display to different\n                terminal window sizes).\n            positions: Relative or absolute positions of log elements\n                in each line. 
If not provided,\n defaults to `[.33, .55, .67, 1.]`.\n print_fn: Print function to use.\n It will be called on each line of the summary.\n You can set it to a custom function\n in order to capture the string summary.\n \"\"\"\n return print_layer_summary(self,\n line_length=line_length,\n positions=positions,\n print_fn=print_fn)\n\n\ndef get_source_inputs(tensor, layer=None, node_index=None):\n \"\"\"Returns the list of input tensors necessary to compute `tensor`.\n\n Output will always be a list of tensors\n (potentially with 1 element).\n\n # Arguments\n tensor: The tensor to start from.\n layer: Origin layer of the tensor. Will be\n determined via tensor._keras_history if not provided.\n node_index: Origin node index of the tensor.\n\n # Returns\n List of input tensors.\n \"\"\"\n if not hasattr(tensor, '_keras_history'):\n return tensor\n\n if layer is None or node_index:\n layer, node_index, _ = tensor._keras_history\n if not layer.inbound_nodes:\n return [tensor]\n else:\n node = layer.inbound_nodes[node_index]\n if not node.inbound_layers:\n # Reached an Input layer, stop recursion.\n return node.input_tensors\n else:\n source_tensors = []\n for i in range(len(node.inbound_layers)):\n x = node.input_tensors[i]\n layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n previous_sources = get_source_inputs(x,\n layer,\n node_index)\n # Avoid input redundancy.\n for x in previous_sources:\n if x not in source_tensors:\n source_tensors.append(x)\n return source_tensors\n\n\ndef _to_list(x):\n \"\"\"Normalizes a list/tensor into a list.\n\n If a tensor is passed, we return\n a list of size 1 containing the tensor.\n\n # Arguments\n x: target object to be normalized.\n\n # Returns\n A list.\n \"\"\"\n if isinstance(x, list):\n return x\n return [x]\n\n\ndef _object_list_uid(object_list):\n object_list = _to_list(object_list)\n return ', '.join([str(abs(id(x))) for x in object_list])\n\n\ndef _is_all_none(iterable_or_element):\n if not isinstance(iterable_or_element, (list, tuple)):\n iterable = [iterable_or_element]\n else:\n iterable = iterable_or_element\n for element in iterable:\n if element is not None:\n return False\n return True\n\n\ndef _collect_previous_mask(input_tensors):\n \"\"\"Retrieves the output mask(s) of the previous node.\n\n # Arguments\n input_tensors: A tensor or list of tensors.\n\n # Returns\n A mask tensor or list of mask tensors.\n \"\"\"\n input_tensors = _to_list(input_tensors)\n masks = []\n for x in input_tensors:\n if hasattr(x, '_keras_history'):\n inbound_layer, node_index, tensor_index = x._keras_history\n node = inbound_layer.inbound_nodes[node_index]\n mask = node.output_masks[tensor_index]\n masks.append(mask)\n else:\n masks.append(None)\n if len(masks) == 1:\n return masks[0]\n return masks\n\n\ndef _to_snake_case(name):\n intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\\1_\\2', name)\n insecure = re.sub('([a-z])([A-Z])', r'\\1_\\2', intermediate).lower()\n # If the class is private the name starts with \"_\" which is not secure\n # for creating scopes. 
We prefix the name with \"private\" in this case.\n if insecure[0] != '_':\n return insecure\n return 'private' + insecure\n\n\ndef _collect_input_shape(input_tensors):\n \"\"\"Collects the output shape(s) of a list of Keras tensors.\n\n # Arguments\n input_tensors: list of input tensors (or single input tensor).\n\n # Returns\n List of shape tuples (or single tuple), one tuple per input.\n \"\"\"\n input_tensors = _to_list(input_tensors)\n shapes = []\n for x in input_tensors:\n try:\n shapes.append(K.int_shape(x))\n except TypeError:\n shapes.append(None)\n if len(shapes) == 1:\n return shapes[0]\n return shapes\n\n\ndef save_weights_to_hdf5_group(f, layers):\n from .. import __version__ as keras_version\n\n f.attrs['layer_names'] = [layer.name.encode('utf8') for layer in layers]\n f.attrs['backend'] = K.backend().encode('utf8')\n f.attrs['keras_version'] = str(keras_version).encode('utf8')\n\n for layer in layers:\n g = f.create_group(layer.name)\n symbolic_weights = layer.weights\n weight_values = K.batch_get_value(symbolic_weights)\n weight_names = []\n for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):\n if hasattr(w, 'name') and w.name:\n name = str(w.name)\n else:\n name = 'param_' + str(i)\n weight_names.append(name.encode('utf8'))\n g.attrs['weight_names'] = weight_names\n for name, val in zip(weight_names, weight_values):\n param_dset = g.create_dataset(name, val.shape,\n dtype=val.dtype)\n if not val.shape:\n # scalar\n param_dset[()] = val\n else:\n param_dset[:] = val\n\n\ndef preprocess_weights_for_loading(layer, weights,\n original_keras_version=None,\n original_backend=None):\n \"\"\"Converts layers weights from Keras 1 format to Keras 2.\n\n # Arguments\n layer: Layer instance.\n weights: List of weights values (Numpy arrays).\n original_keras_version: Keras version for the weights, as a string.\n original_backend: Keras backend the weights were trained with,\n as a string.\n\n # Returns\n A list of weights values (Numpy arrays).\n \"\"\"\n if original_keras_version == '1':\n if layer.__class__.__name__ == 'Bidirectional':\n num_weights_per_layer = len(weights) // 2\n\n forward_weights = preprocess_weights_for_loading(layer.forward_layer,\n weights[:num_weights_per_layer],\n original_keras_version,\n original_backend)\n backward_weights = preprocess_weights_for_loading(layer.backward_layer,\n weights[num_weights_per_layer:],\n original_keras_version,\n original_backend)\n weights = forward_weights + backward_weights\n\n if layer.__class__.__name__ == 'TimeDistributed':\n weights = preprocess_weights_for_loading(layer.layer,\n weights,\n original_keras_version,\n original_backend)\n\n if layer.__class__.__name__ == 'Conv1D':\n shape = weights[0].shape\n # Handle Keras 1.1 format\n if shape[:2] != (layer.kernel_size[0], 1) or shape[3] != layer.filters:\n # Legacy shape:\n # (filters, input_dim, filter_length, 1)\n assert shape[0] == layer.filters and shape[2:] == (layer.kernel_size[0], 1)\n weights[0] = np.transpose(weights[0], (2, 3, 1, 0))\n weights[0] = weights[0][:, 0, :, :]\n\n if layer.__class__.__name__ == 'Conv2D':\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, kernel_rows, kernel_cols)\n # new: (kernel_rows, kernel_cols, stack_size, filters)\n weights[0] = np.transpose(weights[0], (2, 3, 1, 0))\n\n if layer.__class__.__name__ == 'Conv2DTranspose':\n if layer.data_format == 'channels_last':\n # old: (kernel_rows, kernel_cols, stack_size, filters)\n # new: (kernel_rows, kernel_cols, filters, stack_size)\n weights[0] = 
np.transpose(weights[0], (0, 1, 3, 2))\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, kernel_rows, kernel_cols)\n # new: (kernel_rows, kernel_cols, filters, stack_size)\n weights[0] = np.transpose(weights[0], (2, 3, 0, 1))\n\n if layer.__class__.__name__ == 'Conv3D':\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, ...)\n # new: (..., stack_size, filters)\n weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))\n\n if layer.__class__.__name__ == 'GRU':\n if len(weights) == 9:\n kernel = np.concatenate([weights[0],\n weights[3],\n weights[6]], axis=-1)\n recurrent_kernel = np.concatenate([weights[1],\n weights[4],\n weights[7]], axis=-1)\n bias = np.concatenate([weights[2],\n weights[5],\n weights[8]], axis=-1)\n weights = [kernel, recurrent_kernel, bias]\n\n if layer.__class__.__name__ == 'LSTM':\n if len(weights) == 12:\n # old: i, c, f, o\n # new: i, f, c, o\n kernel = np.concatenate([weights[0],\n weights[6],\n weights[3],\n weights[9]], axis=-1)\n recurrent_kernel = np.concatenate([weights[1],\n weights[7],\n weights[4],\n weights[10]], axis=-1)\n bias = np.concatenate([weights[2],\n weights[8],\n weights[5],\n weights[11]], axis=-1)\n weights = [kernel, recurrent_kernel, bias]\n\n if layer.__class__.__name__ == 'ConvLSTM2D':\n if len(weights) == 12:\n kernel = np.concatenate([weights[0],\n weights[6],\n weights[3],\n weights[9]], axis=-1)\n recurrent_kernel = np.concatenate([weights[1],\n weights[7],\n weights[4],\n weights[10]], axis=-1)\n bias = np.concatenate([weights[2],\n weights[8],\n weights[5],\n weights[11]], axis=-1)\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, kernel_rows, kernel_cols)\n # new: (kernel_rows, kernel_cols, stack_size, filters)\n kernel = np.transpose(kernel, (2, 3, 1, 0))\n recurrent_kernel = np.transpose(recurrent_kernel,\n (2, 3, 1, 0))\n weights = [kernel, recurrent_kernel, bias]\n\n if layer.__class__.__name__ in ['Model', 'Sequential']:\n new_weights = []\n # trainable weights\n for sublayer in layer.layers:\n num_weights = len(sublayer.trainable_weights)\n if num_weights > 0:\n new_weights.extend(preprocess_weights_for_loading(\n layer=sublayer,\n weights=weights[:num_weights],\n original_keras_version=original_keras_version,\n original_backend=original_backend))\n weights = weights[num_weights:]\n\n # non-trainable weights\n for sublayer in layer.layers:\n num_weights = len([l for l in sublayer.weights if l not in sublayer.trainable_weights])\n if num_weights > 0:\n new_weights.extend(preprocess_weights_for_loading(\n layer=sublayer,\n weights=weights[:num_weights],\n original_keras_version=original_keras_version,\n original_backend=original_backend))\n weights = weights[num_weights:]\n weights = new_weights\n\n conv_layers = ['Conv1D',\n 'Conv2D',\n 'Conv3D',\n 'Conv2DTranspose',\n 'ConvLSTM2D']\n if layer.__class__.__name__ in conv_layers:\n if original_backend and K.backend() != original_backend:\n weights[0] = conv_utils.convert_kernel(weights[0])\n if layer.__class__.__name__ == 'ConvLSTM2D':\n weights[1] = conv_utils.convert_kernel(weights[1])\n if K.int_shape(layer.weights[0]) != weights[0].shape:\n weights[0] = np.transpose(weights[0], (3, 2, 0, 1))\n if layer.__class__.__name__ == 'ConvLSTM2D':\n weights[1] = np.transpose(weights[1], (3, 2, 0, 1))\n return weights\n\n\ndef load_weights_from_hdf5_group(f, layers):\n \"\"\"Implements topological (order-based) weight loading.\n\n # Arguments\n f: A pointer to a HDF5 group.\n layers: a list of target layers.\n\n # 
Raises\n ValueError: in case of mismatch between provided layers\n and weights file.\n \"\"\"\n if 'keras_version' in f.attrs:\n original_keras_version = f.attrs['keras_version'].decode('utf8')\n else:\n original_keras_version = '1'\n if 'backend' in f.attrs:\n original_backend = f.attrs['backend'].decode('utf8')\n else:\n original_backend = None\n\n filtered_layers = []\n for layer in layers:\n weights = layer.weights\n if weights:\n filtered_layers.append(layer)\n\n layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]\n filtered_layer_names = []\n for name in layer_names:\n g = f[name]\n weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]\n if weight_names:\n filtered_layer_names.append(name)\n layer_names = filtered_layer_names\n if len(layer_names) != len(filtered_layers):\n raise ValueError('You are trying to load a weight file '\n 'containing ' + str(len(layer_names)) +\n ' layers into a model with ' +\n str(len(filtered_layers)) + ' layers.')\n\n # We batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for k, name in enumerate(layer_names):\n g = f[name]\n weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]\n weight_values = [g[weight_name] for weight_name in weight_names]\n layer = filtered_layers[k]\n symbolic_weights = layer.weights\n weight_values = preprocess_weights_for_loading(layer,\n weight_values,\n original_keras_version,\n original_backend)\n if len(weight_values) != len(symbolic_weights):\n raise ValueError('Layer #' + str(k) +\n ' (named \"' + layer.name +\n '\" in the current model) was found to '\n 'correspond to layer ' + name +\n ' in the save file. '\n 'However the new layer ' + layer.name +\n ' expects ' + str(len(symbolic_weights)) +\n ' weights, but the saved weights have ' +\n str(len(weight_values)) +\n ' elements.')\n weight_value_tuples += zip(symbolic_weights, weight_values)\n K.batch_set_value(weight_value_tuples)\n\n\ndef load_weights_from_hdf5_group_by_name(f, layers):\n \"\"\"Implements name-based weight loading.\n\n (instead of topological weight loading).\n\n Layers that have no matching name are skipped.\n\n # Arguments\n f: A pointer to a HDF5 group.\n layers: a list of target layers.\n\n # Raises\n ValueError: in case of mismatch between provided layers\n and weights file.\n \"\"\"\n if 'keras_version' in f.attrs:\n original_keras_version = f.attrs['keras_version'].decode('utf8')\n else:\n original_keras_version = '1'\n if 'backend' in f.attrs:\n original_backend = f.attrs['backend'].decode('utf8')\n else:\n original_backend = None\n\n # New file format.\n layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]\n\n # Reverse index of layer name to list of layers with name.\n index = {}\n for layer in layers:\n if layer.name:\n index.setdefault(layer.name, []).append(layer)\n\n # We batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for k, name in enumerate(layer_names):\n g = f[name]\n weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]\n weight_values = [g[weight_name] for weight_name in weight_names]\n\n for layer in index.get(name, []):\n symbolic_weights = layer.weights\n weight_values = preprocess_weights_for_loading(\n layer,\n weight_values,\n original_keras_version,\n original_backend)\n if len(weight_values) != len(symbolic_weights):\n raise ValueError('Layer #' + str(k) +\n ' (named \"' + layer.name +\n '\") expects ' 
+\n str(len(symbolic_weights)) +\n ' weight(s), but the saved weights' +\n ' have ' + str(len(weight_values)) +\n ' element(s).')\n # Set values.\n for i in range(len(weight_values)):\n weight_value_tuples.append((symbolic_weights[i],\n weight_values[i]))\n K.batch_set_value(weight_value_tuples)\n",
"# Authors: Emmanuelle Gouillart <[email protected]>\n# Gael Varoquaux <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nimport scipy as sp\nfrom scipy import ndimage\nfrom scipy import misc\n\nfrom nose.tools import assert_equal, assert_true\nfrom numpy.testing import assert_raises\n\nfrom sklearn.feature_extraction.image import (\n img_to_graph, grid_to_graph, extract_patches_2d,\n reconstruct_from_patches_2d, PatchExtractor, extract_patches)\nfrom sklearn.utils.graph import connected_components\nfrom sklearn.utils.testing import SkipTest\nfrom sklearn.utils.fixes import sp_version\n\nif sp_version < (0, 12):\n raise SkipTest(\"Skipping because SciPy version earlier than 0.12.0 and \"\n \"thus does not include the scipy.misc.face() image.\")\n\ndef test_img_to_graph():\n x, y = np.mgrid[:4, :4] - 10\n grad_x = img_to_graph(x)\n grad_y = img_to_graph(y)\n assert_equal(grad_x.nnz, grad_y.nnz)\n # Negative elements are the diagonal: the elements of the original\n # image. Positive elements are the values of the gradient, they\n # should all be equal on grad_x and grad_y\n np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],\n grad_y.data[grad_y.data > 0])\n\n\ndef test_grid_to_graph():\n #Checking that the function works with graphs containing no edges\n size = 2\n roi_size = 1\n # Generating two convex parts with one vertex\n # Thus, edges will be empty in _to_graph\n mask = np.zeros((size, size), dtype=np.bool)\n mask[0:roi_size, 0:roi_size] = True\n mask[-roi_size:, -roi_size:] = True\n mask = mask.reshape(size ** 2)\n A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)\n assert_true(connected_components(A)[0] == 2)\n\n # Checking that the function works whatever the type of mask is\n mask = np.ones((size, size), dtype=np.int16)\n A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)\n assert_true(connected_components(A)[0] == 1)\n\n # Checking dtype of the graph\n mask = np.ones((size, size))\n A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)\n assert_true(A.dtype == np.bool)\n A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)\n assert_true(A.dtype == np.int)\n A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)\n assert_true(A.dtype == np.float)\n\n\ndef test_connect_regions():\n try:\n face = sp.face(gray=True)\n except AttributeError:\n # Newer versions of scipy have face in misc\n from scipy import misc\n face = misc.face(gray=True)\n for thr in (50, 150):\n mask = face > thr\n graph = img_to_graph(face, mask)\n assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])\n\n\ndef test_connect_regions_with_grid():\n try:\n face = sp.face(gray=True)\n except AttributeError:\n # Newer versions of scipy have face in misc\n from scipy import misc\n face = misc.face(gray=True)\n mask = face > 50\n graph = grid_to_graph(*face.shape, mask=mask)\n assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])\n\n mask = face > 150\n graph = grid_to_graph(*face.shape, mask=mask, dtype=None)\n assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])\n\n\ndef _downsampled_face():\n try:\n face = sp.face(gray=True)\n except AttributeError:\n # Newer versions of scipy have face in misc\n from scipy import misc\n face = misc.face(gray=True)\n face = face.astype(np.float32)\n face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]\n + face[1::2, 1::2])\n face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]\n + face[1::2, 1::2])\n face = 
face.astype(np.float32)\n face /= 16.0\n return face\n\n\ndef _orange_face(face=None):\n face = _downsampled_face() if face is None else face\n face_color = np.zeros(face.shape + (3,))\n face_color[:, :, 0] = 256 - face\n face_color[:, :, 1] = 256 - face / 2\n face_color[:, :, 2] = 256 - face / 4\n return face_color\n\n\ndef _make_images(face=None):\n face = _downsampled_face() if face is None else face\n # make a collection of faces\n images = np.zeros((3,) + face.shape)\n images[0] = face\n images[1] = face + 1\n images[2] = face + 2\n return images\n\ndownsampled_face = _downsampled_face()\norange_face = _orange_face(downsampled_face)\nface_collection = _make_images(downsampled_face)\n\n\ndef test_extract_patches_all():\n face = downsampled_face\n i_h, i_w = face.shape\n p_h, p_w = 16, 16\n expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)\n patches = extract_patches_2d(face, (p_h, p_w))\n assert_equal(patches.shape, (expected_n_patches, p_h, p_w))\n\n\ndef test_extract_patches_all_color():\n face = orange_face\n i_h, i_w = face.shape[:2]\n p_h, p_w = 16, 16\n expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)\n patches = extract_patches_2d(face, (p_h, p_w))\n assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))\n\n\ndef test_extract_patches_all_rect():\n face = downsampled_face\n face = face[:, 32:97]\n i_h, i_w = face.shape\n p_h, p_w = 16, 12\n expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)\n\n patches = extract_patches_2d(face, (p_h, p_w))\n assert_equal(patches.shape, (expected_n_patches, p_h, p_w))\n\n\ndef test_extract_patches_max_patches():\n face = downsampled_face\n i_h, i_w = face.shape\n p_h, p_w = 16, 16\n\n patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)\n assert_equal(patches.shape, (100, p_h, p_w))\n\n expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))\n patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)\n assert_equal(patches.shape, (expected_n_patches, p_h, p_w))\n\n assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),\n max_patches=2.0)\n assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),\n max_patches=-1.0)\n\n\ndef test_reconstruct_patches_perfect():\n face = downsampled_face\n p_h, p_w = 16, 16\n\n patches = extract_patches_2d(face, (p_h, p_w))\n face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)\n np.testing.assert_array_equal(face, face_reconstructed)\n\n\ndef test_reconstruct_patches_perfect_color():\n face = orange_face\n p_h, p_w = 16, 16\n\n patches = extract_patches_2d(face, (p_h, p_w))\n face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)\n np.testing.assert_array_equal(face, face_reconstructed)\n\n\ndef test_patch_extractor_fit():\n faces = face_collection\n extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)\n assert_true(extr == extr.fit(faces))\n\n\ndef test_patch_extractor_max_patches():\n faces = face_collection\n i_h, i_w = faces.shape[1:3]\n p_h, p_w = 8, 8\n\n max_patches = 100\n expected_n_patches = len(faces) * max_patches\n extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,\n random_state=0)\n patches = extr.transform(faces)\n assert_true(patches.shape == (expected_n_patches, p_h, p_w))\n\n max_patches = 0.5\n expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)\n * max_patches)\n extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,\n random_state=0)\n patches = extr.transform(faces)\n assert_true(patches.shape == (expected_n_patches, p_h, 
p_w))\n\n\ndef test_patch_extractor_max_patches_default():\n faces = face_collection\n extr = PatchExtractor(max_patches=100, random_state=0)\n patches = extr.transform(faces)\n assert_equal(patches.shape, (len(faces) * 100, 19, 25))\n\n\ndef test_patch_extractor_all_patches():\n faces = face_collection\n i_h, i_w = faces.shape[1:3]\n p_h, p_w = 8, 8\n expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)\n extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)\n patches = extr.transform(faces)\n assert_true(patches.shape == (expected_n_patches, p_h, p_w))\n\n\ndef test_patch_extractor_color():\n faces = _make_images(orange_face)\n i_h, i_w = faces.shape[1:3]\n p_h, p_w = 8, 8\n expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)\n extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)\n patches = extr.transform(faces)\n assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))\n\n\ndef test_extract_patches_strided():\n\n image_shapes_1D = [(10,), (10,), (11,), (10,)]\n patch_sizes_1D = [(1,), (2,), (3,), (8,)]\n patch_steps_1D = [(1,), (1,), (4,), (2,)]\n\n expected_views_1D = [(10,), (9,), (3,), (2,)]\n last_patch_1D = [(10,), (8,), (8,), (2,)]\n\n image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]\n patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]\n patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]\n\n expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]\n last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]\n\n image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]\n patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]\n patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]\n\n expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]\n last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]\n\n image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D\n patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D\n patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D\n expected_views = expected_views_1D + expected_views_2D + expected_views_3D\n last_patches = last_patch_1D + last_patch_2D + last_patch_3D\n\n for (image_shape, patch_size, patch_step, expected_view,\n last_patch) in zip(image_shapes, patch_sizes, patch_steps,\n expected_views, last_patches):\n image = np.arange(np.prod(image_shape)).reshape(image_shape)\n patches = extract_patches(image, patch_shape=patch_size,\n extraction_step=patch_step)\n\n ndim = len(image_shape)\n\n assert_true(patches.shape[:ndim] == expected_view)\n last_patch_slices = [slice(i, i + j, None) for i, j in\n zip(last_patch, patch_size)]\n assert_true((patches[[slice(-1, None, None)] * ndim] ==\n image[last_patch_slices].squeeze()).all())\n\n\ndef test_extract_patches_square():\n # test same patch size for all dimensions\n face = downsampled_face\n i_h, i_w = face.shape\n p = 8\n expected_n_patches = ((i_h - p + 1), (i_w - p + 1))\n patches = extract_patches(face, patch_shape=p)\n assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],\n p, p))\n\n\ndef test_width_patch():\n # width and height of the patch should be less than the image\n x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n assert_raises(ValueError, extract_patches_2d, x, (4, 1))\n assert_raises(ValueError, extract_patches_2d, x, (1, 4))\n"
] | [
[
"numpy.dual.register_func",
"numpy.testing.Tester"
],
[
"numpy.log",
"numpy.log2",
"scipy.sparse.issparse",
"numpy.sqrt",
"numpy.unique",
"numpy.reshape",
"numpy.ascontiguousarray",
"numpy.atleast_1d",
"numpy.copy",
"numpy.argmax",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute",
"tensorflow.python.eager.execute.make_str",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.framework.ops.convert_n_to_tensor",
"tensorflow.python.eager.execute.make_int",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.op_def_library.OpDefLibrary",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.python.eager.execute.execute"
],
[
"numpy.linspace",
"numpy.ma.transpose",
"numpy.ma.squeeze",
"numpy.transpose"
],
[
"tensorflow.python.eager.execute.convert_to_mixed_eager_tensors",
"tensorflow.python.eager.execute.make_type",
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute",
"tensorflow.python.eager.execute.make_bool",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.python.framework.ops.RegisterShape",
"tensorflow.python.framework.ops.convert_n_to_tensor",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.eager.execute.make_int",
"tensorflow.python.framework.op_def_library.OpDefLibrary",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.python.eager.execute.execute"
],
[
"scipy.optimize.OptimizeResult",
"numpy.linspace",
"numpy.isfinite",
"numpy.finfo",
"numpy.ones",
"numpy.size",
"numpy.copy",
"numpy.zeros_like",
"numpy.argmin",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.random.RandomState",
"numpy.where",
"numpy.isinf",
"numpy.fabs"
],
[
"numpy.min",
"numpy.arange",
"numpy.around",
"numpy.round",
"numpy.max",
"numpy.argmax",
"numpy.mod",
"numpy.array"
],
[
"numpy.sqrt",
"numpy.issubdtype",
"numpy.mean",
"numpy.exp",
"numpy.where",
"numpy.ones_like",
"scipy.sparse.issparse",
"numpy.unique",
"numpy.argmax",
"numpy.zeros",
"scipy.sparse.csc_matrix",
"numpy.log",
"scipy.sparse.csr_matrix",
"scipy.stats.scoreatpercentile",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.logaddexp",
"numpy.log2",
"numpy.abs",
"numpy.ones",
"numpy.sign",
"numpy.average",
"numpy.empty"
],
[
"numpy.loadtxt"
],
[
"numpy.distutils.misc_util.Configuration",
"numpy.get_include"
],
[
"scipy.misc.imresize",
"numpy.unique",
"numpy.asarray",
"numpy.arange",
"sklearn.utils.deprecated",
"numpy.searchsorted",
"numpy.array",
"scipy.misc.pilutil.imread",
"numpy.zeros",
"numpy.random.RandomState"
],
[
"numpy.split",
"numpy.linspace",
"numpy.min",
"numpy.asarray",
"numpy.unique",
"numpy.issubdtype",
"numpy.clip",
"numpy.nonzero",
"numpy.max",
"numpy.array"
],
[
"numpy.ndarray",
"numpy.dtype",
"numpy.zeros_like",
"numpy.tri",
"numpy.arange",
"numpy.eye",
"numpy.finfo",
"numpy.insert",
"numpy.zeros",
"numpy.delete",
"numpy.testing.assert_raises",
"numpy.require",
"numpy.testing.assert_allclose",
"numpy.testing.assert_",
"numpy.array",
"scipy.linalg._decomp_update._form_qTu",
"numpy.testing.run_module_suite",
"numpy.random.random",
"scipy.linalg.qr",
"numpy.random.seed",
"numpy.ones"
],
[
"tensorflow.python.framework.ops.RegisterShape",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.framework.op_def_library.OpDefLibrary"
],
[
"sklearn.utils.testing.assert_array_almost_equal",
"scipy.linalg.block_diag",
"sklearn.utils.testing.assert_almost_equal",
"numpy.logspace",
"numpy.empty_like",
"sklearn.utils.testing.assert_greater_equal",
"sklearn.externals.six.moves.xrange",
"scipy.sparse.csr_matrix",
"numpy.ones",
"scipy.special.psi",
"sklearn.decomposition.LatentDirichletAllocation",
"sklearn.decomposition._online_lda._dirichlet_expectation_2d",
"sklearn.decomposition._online_lda._dirichlet_expectation_1d",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.sum",
"sklearn.utils.testing.assert_raises_regexp",
"numpy.random.randint"
],
[
"numpy.concatenate",
"numpy.transpose"
],
[
"sklearn.utils.testing.SkipTest",
"sklearn.utils.graph.connected_components",
"sklearn.feature_extraction.image.extract_patches",
"sklearn.feature_extraction.image.img_to_graph",
"sklearn.feature_extraction.image.PatchExtractor",
"scipy.face",
"scipy.misc.face",
"numpy.ones",
"numpy.testing.assert_array_equal",
"sklearn.feature_extraction.image.reconstruct_from_patches_2d",
"sklearn.feature_extraction.image.grid_to_graph",
"sklearn.feature_extraction.image.extract_patches_2d",
"numpy.testing.assert_raises",
"scipy.ndimage.label",
"numpy.prod",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.10",
"0.16",
"0.19",
"0.18",
"0.12",
"1.0",
"0.17",
"1.2"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
desikan95/ludwig | [
"c6b5594db98b6eaadc7bcd32e38983fe3cb4c3f2",
"c6b5594db98b6eaadc7bcd32e38983fe3cb4c3f2"
] | [
"tests/integration_tests/test_combiners.py",
"tests/integration_tests/scripts/run_train_horovod.py"
] | [
"import logging\n\nimport pytest\nimport tensorflow as tf\n\nfrom ludwig.combiners.combiners import (\n ConcatCombiner,\n SequenceConcatCombiner,\n SequenceCombiner,\n ComparatorCombiner,\n sequence_encoder_registry,\n)\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nlogging.getLogger(\"ludwig\").setLevel(logging.INFO)\n\nBATCH_SIZE = 16\nSEQ_SIZE = 12\nHIDDEN_SIZE = 128\nOTHER_HIDDEN_SIZE = 32\nFC_SIZE = 64\nBASE_FC_SIZE = 256\n\n\n# set up simulated encoder outputs\[email protected]\ndef encoder_outputs():\n # generates simulated encoder outputs dictionary:\n # feature_1: shape [b, h1] tensor\n # feature_2: shape [b, h2] tensor\n # feature_3: shape [b, s, h1] tensor\n # feature_4: shape [b, sh, h2] tensor\n\n encoder_outputs = {}\n shapes_list = [\n [BATCH_SIZE, HIDDEN_SIZE],\n [BATCH_SIZE, OTHER_HIDDEN_SIZE],\n [BATCH_SIZE, SEQ_SIZE, HIDDEN_SIZE],\n [BATCH_SIZE, SEQ_SIZE, OTHER_HIDDEN_SIZE],\n ]\n feature_names = [\"feature_\" + str(i + 1) for i in range(len(shapes_list))]\n\n for feature_name, batch_shape in zip(feature_names, shapes_list):\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(batch_shape, dtype=tf.float32)\n }\n if len(batch_shape) > 2:\n encoder_outputs[feature_name][\n \"encoder_output_state\"] = tf.random.normal(\n [batch_shape[0], batch_shape[2]], dtype=tf.float32\n )\n\n return encoder_outputs\n\n\n# setup encoder outputs for ComparatorCombiner\[email protected]\ndef encoder_comparator_outputs():\n # generates simulated encoder outputs dictionary:\n # feature_1: shape [b, h1] tensor\n # feature_2: shape [b, h2] tensor\n # feature_3: shape [b, s, h1] tensor\n # feature_4: shape [b, sh, h2] tensor\n\n encoder_outputs = {}\n shapes_list = [\n [BATCH_SIZE, HIDDEN_SIZE],\n [BATCH_SIZE, OTHER_HIDDEN_SIZE],\n [BATCH_SIZE, SEQ_SIZE, HIDDEN_SIZE],\n [BATCH_SIZE, SEQ_SIZE, OTHER_HIDDEN_SIZE],\n ]\n text_feature_names = [\"text_feature_\" + str(i + 1) for i in\n range(len(shapes_list))]\n image_feature_names = [\n \"image_feature_\" + str(i + 1) for i in range(len(shapes_list))\n ]\n for i, (feature_name, batch_shape) in enumerate(\n zip(text_feature_names, shapes_list)\n ):\n # is there a better way to do this?\n if i == 0 or i == 3:\n dot_product_shape = [batch_shape[0], BASE_FC_SIZE]\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(dot_product_shape,\n dtype=tf.float32)\n }\n else:\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(batch_shape,\n dtype=tf.float32)\n }\n\n for i, (feature_name, batch_shape) in enumerate(\n zip(image_feature_names, shapes_list)\n ):\n if i == 0 or i == 3:\n dot_product_shape = [batch_shape[0], BASE_FC_SIZE]\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(dot_product_shape,\n dtype=tf.float32)\n }\n else:\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(batch_shape,\n dtype=tf.float32)\n }\n\n return encoder_outputs\n\n\n# test for simple concatenation combiner\[email protected](\"fc_layer\",\n [None, [{\"fc_size\": 64}, {\"fc_size\": 64}]])\ndef test_concat_combiner(encoder_outputs, fc_layer):\n # clean out unneeded encoder outputs\n del encoder_outputs[\"feature_3\"]\n del encoder_outputs[\"feature_4\"]\n\n # setup combiner to test\n combiner = ConcatCombiner(fc_layers=fc_layer)\n\n # concatenate encoder outputs\n results = combiner(encoder_outputs)\n\n # required key present\n assert \"combiner_output\" in results\n\n # confirm correct output shapes\n if fc_layer:\n assert 
results[\"combiner_output\"].shape.as_list() == [BATCH_SIZE,\n FC_SIZE]\n else:\n # calculate expected hidden size for concatenated tensors\n hidden_size = 0\n for k in encoder_outputs:\n hidden_size += encoder_outputs[k][\"encoder_output\"].shape[1]\n\n assert results[\"combiner_output\"].shape.as_list() == [BATCH_SIZE,\n hidden_size]\n\n\n# test for sequence concatenation combiner\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"main_sequence_feature\", [None, \"feature_3\"])\ndef test_sequence_concat_combiner(\n encoder_outputs, main_sequence_feature, reduce_output\n):\n combiner = SequenceConcatCombiner(\n main_sequence_feature=main_sequence_feature,\n reduce_output=reduce_output\n )\n\n # calculate expected hidden size for concatenated tensors\n hidden_size = 0\n for k in encoder_outputs:\n hidden_size += encoder_outputs[k][\"encoder_output\"].shape[-1]\n\n # concatenate encoder outputs\n results = combiner(encoder_outputs)\n\n # required key present\n assert \"combiner_output\" in results\n\n # confirm correct shape\n if reduce_output is None:\n assert results[\"combiner_output\"].shape.as_list() == [\n BATCH_SIZE,\n SEQ_SIZE,\n hidden_size,\n ]\n else:\n assert results[\"combiner_output\"].shape.as_list() == [BATCH_SIZE,\n hidden_size]\n\n\n# test for sequence combiner\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"encoder\", sequence_encoder_registry)\[email protected](\"main_sequence_feature\", [None, \"feature_3\"])\ndef test_sequence_combiner(\n encoder_outputs, main_sequence_feature, encoder, reduce_output\n):\n combiner = SequenceCombiner(\n main_sequence_feature=main_sequence_feature,\n encoder=encoder,\n reduce_output=reduce_output,\n )\n\n # calculate expected hidden size for concatenated tensors\n hidden_size = 0\n for k in encoder_outputs:\n hidden_size += encoder_outputs[k][\"encoder_output\"].shape[-1]\n\n # concatenate encoder outputs\n results = combiner(encoder_outputs)\n\n # required key present\n assert \"combiner_output\" in results\n\n combiner_shape = results[\"combiner_output\"].shape\n # test for correct dimension\n if reduce_output:\n assert len(combiner_shape) == 2\n else:\n assert len(combiner_shape) == 3\n\n # Shape test assumes on Ludwig sequence encoder defaults\n # parallel encoders: # layers = 4, fc_size=256\n # non-parallel encoders: fc_size=256\n # if defaults change, then this test has to be updated\n default_layer = 4\n default_fc_size = 256\n\n if \"parallel\" in encoder:\n combiner_shape[-1] == default_layer * default_fc_size\n else:\n combiner_shape[-1] == default_fc_size\n\n\[email protected](\"fc_layer\",\n [None, [{\"fc_size\": 64}, {\"fc_size\": 64}]])\[email protected](\"entity_1\", [[\"text_feature_1\", \"text_feature_2\"]])\[email protected](\"entity_2\", [[\"image_feature_1\", \"image_feature_2\"]])\ndef test_comparator_combiner(encoder_comparator_outputs, fc_layer, entity_1,\n entity_2):\n # clean out unneeded encoder outputs since we only have 2 layers\n del encoder_comparator_outputs[\"text_feature_3\"]\n del encoder_comparator_outputs[\"image_feature_3\"]\n del encoder_comparator_outputs[\"text_feature_4\"]\n del encoder_comparator_outputs[\"image_feature_4\"]\n\n # setup combiner to test set to 256 for case when none as it's the default size\n fc_size = fc_layer[0][\"fc_size\"] if fc_layer else 256\n combiner = ComparatorCombiner(\n entity_1, entity_2, fc_layers=fc_layer, fc_size=fc_size\n )\n\n # concatenate encoder outputs\n results = combiner(encoder_comparator_outputs)\n\n # 
required key present\n assert \"combiner_output\" in results\n\n # confirm correct output shapes\n # concat on axis=1\n # because of dot products, 2 of the shapes added will be the fc_size\n # other 2 will be of shape BATCH_SIZE\n # this assumes dimensionality = 2\n size = BATCH_SIZE * 2 + fc_size * 2\n assert results[\"combiner_output\"].shape.as_list() == [BATCH_SIZE, size]\n",
"# -*- coding: utf-8 -*-\n# Copyright (c) 2020 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport argparse\nimport json\nimport os\nimport shutil\nimport sys\n\nimport horovod.tensorflow as hvd\nimport numpy as np\n\nPATH_HERE = os.path.abspath(os.path.dirname(__file__))\nPATH_ROOT = os.path.join(PATH_HERE, '..', '..', '..')\nsys.path.insert(0, os.path.abspath(PATH_ROOT))\n\nimport ludwig.utils.horovod_utils\n\nfrom ludwig.api import LudwigModel\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--rel-path', required=True)\nparser.add_argument('--input-features', required=True)\nparser.add_argument('--output-features', required=True)\nparser.add_argument('--ludwig-kwargs', required=True)\n\n\ndef run_api_experiment(input_features, output_features, dataset, **kwargs):\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {'type': 'concat', 'fc_size': 14},\n 'training': {'epochs': 2}\n }\n\n model = LudwigModel(config)\n output_dir = None\n\n try:\n # Training with csv\n _, _, output_dir = model.train(\n dataset=dataset,\n **kwargs\n )\n\n model.predict(dataset=dataset)\n\n # Attempt loading saved model, should broadcast successfully\n model_dir = os.path.join(output_dir, 'model') if output_dir else None\n loaded_model = LudwigModel.load(model_dir)\n\n # Model loading should broadcast weights from coordinator\n loaded_weights = loaded_model.model.get_weights()\n bcast_weights = hvd.broadcast_object(loaded_weights)\n for loaded, bcast in zip(loaded_weights, bcast_weights):\n assert np.allclose(loaded, bcast)\n finally:\n if output_dir:\n shutil.rmtree(output_dir, ignore_errors=True)\n\n\ndef test_horovod_intent_classification(rel_path, input_features,\n output_features, **kwargs):\n run_api_experiment(input_features,\n output_features,\n dataset=rel_path,\n **kwargs)\n\n # Horovod should be initialized following training. If not, this will raise an exception.\n assert hvd.size() == 2\n assert ludwig.utils.horovod_utils._HVD.rank() == hvd.rank()\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n test_horovod_intent_classification(args.rel_path,\n json.loads(args.input_features),\n json.loads(args.output_features),\n **json.loads(args.ludwig_kwargs))\n"
] | [
[
"tensorflow.random.normal"
],
[
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
taijihagino/tensorflow-hangul-recognition | [
"8c39444ea8accd8b3d935f277e34fa2797564114"
] | [
"tools/classify-hangul.py"
] | [
"#!/usr/bin/env python\n\nimport argparse\nimport io\nimport os\nimport sys\n\nimport tensorflow as tf\n\nSCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Default paths.\nDEFAULT_LABEL_FILE = os.path.join(\n SCRIPT_PATH, '../labels/2350-common-hangul.txt'\n)\nDEFAULT_GRAPH_FILE = os.path.join(\n SCRIPT_PATH, '../saved-model/optimized_hangul_tensorflow.pb'\n)\n\n\ndef read_image(file):\n \"\"\"Read an image file and convert it into a 1-D floating point array.\"\"\"\n file_content = tf.read_file(file)\n image = tf.image.decode_jpeg(file_content, channels=1)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image = tf.reshape(image, (1, 64*64))\n return image\n\n\ndef classify(args):\n \"\"\"Classify a character.\n\n This method will import the saved model from the given graph file, and will\n pass in the given image pixels as input for the classification. The top\n five predictions will be printed.\n \"\"\"\n labels = io.open(args.label_file,\n 'r', encoding='utf-8').read().splitlines()\n\n if not os.path.isfile(args.image):\n print('Error: Image %s not found.' % args.image)\n sys.exit(1)\n\n # Load graph and parse file.\n with tf.gfile.GFile(args.graph_file, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(\n graph_def,\n input_map=None,\n return_elements=None,\n name='hangul-model',\n producer_op_list=None\n )\n\n # Get relevant nodes.\n x = graph.get_tensor_by_name('hangul-model/input:0')\n y = graph.get_tensor_by_name('hangul-model/output:0')\n keep_prob = graph.get_tensor_by_name('hangul-model/keep_prob:0')\n\n image = read_image(args.image)\n sess = tf.InteractiveSession()\n image_array = sess.run(image)\n sess.close()\n with tf.Session(graph=graph) as graph_sess:\n predictions = graph_sess.run(y, feed_dict={x: image_array,\n keep_prob: 1.0})\n prediction = predictions[0]\n\n # Get the indices that would sort the array, then only get the indices that\n # correspond to the top 5 predictions.\n sorted_indices = prediction.argsort()[::-1][:5]\n for index in sorted_indices:\n label = labels[index]\n confidence = prediction[index]\n print('%s (confidence = %.5f)' % (label, confidence))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('image', type=str,\n help='Image to pass to model for classification.')\n parser.add_argument('--label-file', type=str, dest='label_file',\n default=DEFAULT_LABEL_FILE,\n help='File containing newline delimited labels.')\n parser.add_argument('--graph-file', type=str, dest='graph_file',\n default=DEFAULT_GRAPH_FILE,\n help='The saved model graph file to use for '\n 'classification.')\n classify(parser.parse_args())\n"
] | [
[
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.InteractiveSession",
"tensorflow.read_file",
"tensorflow.gfile.GFile",
"tensorflow.reshape",
"tensorflow.image.convert_image_dtype",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.image.decode_jpeg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
neuml/txtai | [
"b25173a8650a73dbcae43caa278f32020fef0236"
] | [
"src/python/txtai/vectors/words.py"
] | [
"\"\"\"\nWord Vectors module\n\"\"\"\n\nimport os\nimport pickle\nimport tempfile\n\nfrom errno import ENOENT\nfrom multiprocessing import Pool\n\nimport numpy as np\n\n# Conditionally import Word Vector libraries as they aren't installed by default\ntry:\n import fasttext\n from pymagnitude import converter, Magnitude\n\n WORDS = True\nexcept ImportError:\n WORDS = False\n\nfrom .base import Vectors\nfrom ..pipeline import Tokenizer\n\n# Multiprocessing helper methods\n# pylint: disable=W0603\nVECTORS = None\n\n\ndef create(config, scoring):\n \"\"\"\n Multiprocessing helper method. Creates a global embeddings object to be accessed in a new subprocess.\n\n Args:\n config: vector configuration\n scoring: scoring instance\n \"\"\"\n\n global VECTORS\n\n # Create a global embedding object using configuration and saved\n VECTORS = WordVectors(config, scoring)\n\n\ndef transform(document):\n \"\"\"\n Multiprocessing helper method. Transforms document into an embeddings vector.\n\n Args:\n document: (id, data, tags)\n\n Returns:\n (id, embedding)\n \"\"\"\n\n return (document[0], VECTORS.transform(document))\n\n\nclass SerialPool:\n \"\"\"\n Custom pool to execute vector transforms serially.\n \"\"\"\n\n def __init__(self, vectors):\n global VECTORS\n VECTORS = vectors\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n def imap(self, func, iterable):\n \"\"\"\n Single process version of imap.\n\n Args:\n func: function to run\n iterable: iterable to use\n \"\"\"\n\n for x in iterable:\n yield func(x)\n\n\nclass WordVectors(Vectors):\n \"\"\"\n Builds sentence embeddings/vectors using weighted word embeddings.\n \"\"\"\n\n def load(self, path):\n # Ensure that vector path exists\n if not path or not os.path.isfile(path):\n raise IOError(ENOENT, \"Vector model file not found\", path)\n\n # Load magnitude model. If this is a training run (uninitialized config), block until vectors are fully loaded\n return Magnitude(path, case_insensitive=True, blocking=not self.initialized)\n\n def index(self, documents):\n ids, dimensions, stream = [], None, None\n\n # Shared objects with Pool\n args = (self.config, self.scoring)\n\n # Convert all documents to embedding arrays, stream embeddings to disk to control memory usage\n with SerialPool(self) if \"parallel\" in self.config and not self.config[\"parallel\"] else Pool(\n os.cpu_count(), initializer=create, initargs=args\n ) as pool:\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".npy\", delete=False) as output:\n stream = output.name\n for uid, embedding in pool.imap(transform, documents):\n if not dimensions:\n # Set number of dimensions for embeddings\n dimensions = embedding.shape[0]\n\n ids.append(uid)\n pickle.dump(embedding, output, protocol=4)\n\n return (ids, dimensions, len(ids), stream)\n\n def transform(self, document):\n # Convert to tokens if necessary\n if isinstance(document[1], str):\n document = (document[0], Tokenizer.tokenize(document[1]), document[2])\n\n # Generate weights for each vector using a scoring method\n weights = self.scoring.weights(document) if self.scoring else None\n\n # pylint: disable=E1133\n if weights and [x for x in weights if x > 0]:\n # Build weighted average embeddings vector. 
Create weights array as float32 to match embeddings precision.\n            embedding = np.average(self.lookup(document[1]), weights=np.array(weights, dtype=np.float32), axis=0)\n        else:\n            # If no weights, use mean\n            embedding = np.mean(self.lookup(document[1]), axis=0)\n\n        return embedding\n\n    def lookup(self, tokens):\n        \"\"\"\n        Queries word vectors for given list of input tokens.\n\n        Args:\n            tokens: list of tokens to query\n\n        Returns:\n            word vectors array\n        \"\"\"\n\n        return self.model.query(tokens)\n\n    @staticmethod\n    def isdatabase(path):\n        \"\"\"\n        Checks if this is a SQLite database file which is the file format used for word vectors databases.\n\n        Args:\n            path: path to check\n\n        Returns:\n            True if this is a SQLite database\n        \"\"\"\n\n        if isinstance(path, str) and os.path.isfile(path) and os.path.getsize(path) >= 100:\n            # Read 100 byte SQLite header\n            with open(path, \"rb\") as f:\n                header = f.read(100)\n\n            # Check for SQLite header\n            return header.startswith(b\"SQLite format 3\\000\")\n\n        return False\n\n    @staticmethod\n    def build(data, size, mincount, path):\n        \"\"\"\n        Builds fastText vectors from a file.\n\n        Args:\n            data: path to input data file\n            size: number of vector dimensions\n            mincount: minimum number of occurrences required to register a token\n            path: path to output file\n        \"\"\"\n\n        # Train on data file using largest dimension size\n        model = fasttext.train_unsupervised(data, dim=size, minCount=mincount)\n\n        # Output file path\n        print(f\"Building {size} dimension model\")\n\n        # Output vectors in vec/txt format\n        with open(path + \".txt\", \"w\", encoding=\"utf-8\") as output:\n            words = model.get_words()\n            output.write(f\"{len(words)} {model.get_dimension()}\\n\")\n\n            for word in words:\n                # Skip end of line token\n                if word != \"</s>\":\n                    vector = model.get_word_vector(word)\n                    data = \"\"\n                    for v in vector:\n                        data += \" \" + str(v)\n\n                    output.write(word + data + \"\\n\")\n\n        # Build magnitude vectors database\n        print(\"Converting vectors to magnitude format\")\n        converter.convert(path + \".txt\", path + \".magnitude\", subword=True)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
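The weighted-average step in `WordVectors.transform` above is worth isolating: per-token vectors are combined with `np.average` when scoring weights exist, with a plain `np.mean` as the fallback. A minimal self-contained sketch of just that step, assuming a toy in-memory `lookup` dict in place of the Magnitude model (the dict and its 2-dimensional vectors are illustrative only):

import numpy as np

# Hypothetical token -> vector table standing in for the Magnitude model
lookup = {
    "the": np.array([0.0, 0.0], dtype=np.float32),
    "flood": np.array([0.1, 0.3], dtype=np.float32),
    "warning": np.array([0.2, 0.1], dtype=np.float32),
}

def sentence_embedding(tokens, weights=None):
    # Stack per-token vectors into an (n, d) array
    vectors = np.stack([lookup[t] for t in tokens])
    if weights and any(w > 0 for w in weights):
        # np.average normalizes the weights, so only their ratios matter
        return np.average(vectors, weights=np.array(weights, dtype=np.float32), axis=0)
    # No usable weights: unweighted mean, as in the module's fallback branch
    return np.mean(vectors, axis=0)

print(sentence_embedding(["the", "flood", "warning"], weights=[0.1, 2.0, 1.5]))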
rok20/flood-warning-rok20 | [
"660b0e87de60732df2ee85bf8bdd83c9806168b7"
] | [
"floodsystem/plot.py"
] | [
"\nfrom .analysis import polyfit\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\n\ndef plot_water_levels(station, dates, levels):\n #plot the dates and water levels\n plt.plot(dates, levels, label = 'Water level')\n\n # Add axis labels, rotate date labels and add plot title\n plt.xlabel('date')\n plt.ylabel('water level (m)')\n plt.xticks(rotation=45)\n plt.title(\"Station \" + station.name)\n\n #add typical high and low\n low, high = station.typical_range[0], station.typical_range[1]\n plt.axhline(y = low, color = 'b', label = 'Typical low')\n plt.axhline(y = high, color = 'r', label = 'Typical High')\n\n\n # Display plot\n plt.tight_layout() \n # This makes sure plot does not cut off date labels\n\n plt.show()\n\ndef plot_water_level_with_fit(station, dates, levels, p):\n list = polyfit(dates, levels, p)\n \n dates2 = matplotlib.dates.date2num(dates) \n\n #adjust dates so values aren't so high\n dates2 = dates2 - dates2[0]\n \n #provide points at set intervals for the polynomial\n points = np.linspace(dates2[0], dates2[-1], 30)\n \n #plot data in hours for each curve, label the axis and provide a title.\n plt.plot(24*dates2, levels, label = \"Actual data\")\n plt.plot(24*points, list[0](points), label = \"Best fit polynomial\")\n plt.xlabel(\"Hours in the past\")\n plt.ylabel(\"Water Level\")\n plt.title(station)\n \n\n plt.show\n\n\n"
] | [
[
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.plot",
"matplotlib.dates.date2num",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
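One detail in plot_water_level_with_fit deserves a note: matplotlib.dates.date2num returns day counts from an epoch (values around 7e5), and fitting a polynomial directly to such large x-values is numerically ill-conditioned, which is why the code subtracts dates2[0] before fitting. A standalone sketch of the same offset-then-fit idea, using np.polyfit as a stand-in for the module's own polyfit helper (which is not shown here):

import numpy as np

# Synthetic stand-ins: day numbers as date2num would return them, plus levels
dates2 = 737000.0 + np.linspace(0.0, 2.0, 30)      # two days of readings
levels = 1.0 + 0.2 * np.sin(np.linspace(0.0, 6.0, 30))

shifted = dates2 - dates2[0]                        # small x -> stable fit
poly = np.poly1d(np.polyfit(shifted, levels, deg=4))

# Evaluate the fitted curve at evenly spaced points, as the plotting code does
points = np.linspace(shifted[0], shifted[-1], 30)
print(poly(points)[:5])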
jokteur/ASMA | [
"25ac8a0455c680232d56c18d31de62c3188b7153",
"25ac8a0455c680232d56c18d31de62c3188b7153"
] | [
"plots/exploration/finite_size_fluct.py",
"plots/exploration/interspike_corr.py"
] | [
"import time\nimport copy\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.animation as animation\n\nimport flowrect\nfrom flowrect.simulations.util import calculate_age, calculate_mt, eta_SRM\nfrom flowrect.simulations import particle_population, flow_rectification, FR_finite_fluctuations\n\n\n# Plot saving parameters\nsave = False\nsave_path = \"\"\nsave_name = \"\"\n\n\ndef moving_average(x, w):\n return np.convolve(x, np.ones(w), \"valid\") / w\n\n\n# Simulation parameters\nN = 500\ndt = 1e-2\nI_ext = 5\nparams = dict(\n time_end=10,\n dt=dt,\n Lambda=[1.0, 2.5],\n Gamma=[-5.5, 1.0],\n # Lambda=[67.0, 7.0, 3.0],\n # Gamma=[-3.0, 4.0, -3.0],\n c=10,\n lambda_kappa=1,\n I_ext=5,\n I_ext_time=5,\n interaction=0.3,\n)\n\na_cutoff = 7\n\nprint(f\"FR with finite size fluctuations\")\nt = time.time()\nts, a_grid, rho_tN, m_t_exact, x_t, en_cons, A_orig, A1, Abar, S = FR_finite_fluctuations(\n a_cutoff=a_cutoff, **params\n)\nprint(f\"{time.time() - t:.2f}\")\n\nprint(f\"Particle simulation\")\nt = time.time()\nts, M, spikes, A, X = particle_population(**params, N=N, Gamma_ext=True)\nm_t = calculate_mt(M, spikes)\n# m_ts = np.zeros(m_t.T.shape)\n# w = 50\n# m_ts[: -w + 1, 0] = moving_average(m_t.T[:, 0], w)\n# m_ts[: -w + 1, 1] = moving_average(m_t.T[:, 1], w)\n# m_ts[-w + 1 :, :] = m_ts[-w, :]\nprint(f\"{time.time() - t:.2f}\")\n\nprint(f\"Flow rectification approximation\")\nt = time.time()\nts, a_grid, rho_t, m_t_exact, x_t, en_cons, A_t = flow_rectification(a_cutoff=a_cutoff, **params)\nprint(f\"{time.time() - t:.2f}s\")\n\n\nI_ext_vec = np.concatenate((np.zeros(int(len(ts) / 2)), I_ext * np.ones(int(len(ts) / 2))))\nages = calculate_age(spikes.T) * params[\"dt\"]\nA_av = moving_average(A, 100)\n\n# Animated plots\n\n\nclass AnimatedPlot:\n def __init__(self, xlim=10, ylim=10):\n self.fig = plt.figure(figsize=(5.5, 9))\n gs = gridspec.GridSpec(2, 1, height_ratios=[5, 1])\n self.fig.suptitle(fr\"PDE vs particle simulation $N=${N}\")\n\n self.ax1 = plt.subplot(gs[0])\n self.ax2 = plt.subplot(gs[1])\n self.xlim, self.ylim = xlim, ylim\n self.plots = {}\n\n def init_plot(self):\n\n self.plots[\"title\"] = self.ax1.text(\n 0.5,\n 0.85,\n \"\",\n bbox={\"facecolor\": \"w\", \"alpha\": 0.5, \"pad\": 5},\n transform=self.ax1.transAxes,\n ha=\"center\",\n )\n\n # density plot (PDE)\n self.plots[\"p_rho\"] = self.ax1.plot([], [], \"-k\", label=\"Particle\")[0]\n self.plots[\"rho\"] = self.ax1.plot(a_grid, rho_t[0], \"--r\", linewidth=1, label=\"PDE\")[0]\n self.plots[\"rhoN\"] = self.ax1.plot(a_grid, rho_tN[0], \"-b\", linewidth=1, label=\"Finite\")[0]\n self.plots[\"S\"] = self.ax1.plot(a_grid, S[0], \"g\", linewidth=1)[0]\n self.ax1.set_ylim(0, 4)\n self.ax1.set_title(\"Probability density distribution\")\n self.ax1.legend(handles=[self.plots[\"rho\"], self.plots[\"p_rho\"], self.plots[\"rhoN\"]])\n self.ax1.set_xlabel(\"Age a (s)\")\n self.ax1.set_ylabel(r\"$\\rho_t$\")\n\n self.ax2.plot()\n self.ax2.set_title(\"External input\")\n self.plots[\"vline\"] = self.ax2.plot([], [], \"-r\", linewidth=1)[0]\n self.ax2.set_ylim(0, 6)\n self.ax2.plot(ts, I_ext_vec, \"-k\")\n self.ax2.set_ylabel(r\"$I^{ext}$ (a.u.)\")\n self.ax2.set_xlabel(r\"$t$ (s)\")\n\n return tuple(self.plots.values())\n\n def calculate_hist(self, i):\n hist, bins = np.histogram(ages[:, i], bins=50, density=True)\n bins = (bins[1:] + bins[:-1]) / 2\n # w = 2\n return bins, hist # moving_average(hist, w)\n\n def 
animate(self, i):\n t = dt * i\n # Scatter\n self.plots[\"title\"].set_text(fr\"Time $t=${t:.2f}s\")\n # Particule rho\n bins, hist = self.calculate_hist(i)\n self.plots[\"p_rho\"].set_data(bins, hist)\n self.plots[\"vline\"].set_data(np.array([t, t]), np.array([0, 6]))\n\n # PDE rho\n self.plots[\"rho\"].set_data(a_grid, rho_t[i])\n self.plots[\"rhoN\"].set_data(a_grid, rho_tN[i])\n self.plots[\"S\"].set_data(a_grid, S[i])\n return tuple(self.plots.values())\n\n\n# Scatter plot\nlim = 20\npl = AnimatedPlot(xlim=lim, ylim=lim)\nanim_int = 4 # Want every 10ms\nprint(anim_int)\n\nani = animation.FuncAnimation(\n pl.fig,\n func=pl.animate,\n frames=range(0, len(M), anim_int),\n init_func=pl.init_plot,\n interval=5,\n blit=True,\n)\n\nif save:\n ani.save(os.path.join(save_path, save_name))\n\nplt.figure()\nA_av = moving_average(A, 50)\nplt.plot(ts, A, \"--k\", label=\"Particle\")\nplt.plot(ts[: len(A_av)], A_av, \"--r\", label=\"P. rolling av.\")\nplt.plot(ts, A_t, \"-b\", linewidth=1.5, label=\"PDE\")\nplt.plot(ts, A1, \"-c\", label=\"A1\")\nplt.ylim(0, 10)\nplt.plot(ts, Abar, \"-.g\", label=\"Abar\")\nplt.legend()\nplt.show()",
"import time\nimport copy\nimport os\nfrom multiprocessing import Pool\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport flowrect\nfrom flowrect.simulations import ISIC_particle, ISIC_2nd_order, particle_population\nfrom flowrect.simulations.util import calculate_mt, calculate_nt\n\nparams = dict(\n time_end=40,\n dt=1e-2,\n Lambda=np.array([15.3, 2.5]),\n Gamma=np.array([-5.0, 0.5]),\n c=1,\n lambda_kappa=2,\n I_ext=2.0,\n I_ext_time=20,\n interaction=0.0,\n)\n\nI_vec = np.arange(0, 10, 0.2)\n\nN = 500\n_, M_P, spikes_P, _, _ = particle_population(N=N, use_LambdaGamma=True, **params)\nm_tp = calculate_mt(M_P, spikes_P)\nn_tp = calculate_nt(m_tp)\npre_input_idx = int(int(params[\"time_end\"] / params[\"dt\"]) / 2) - 1\n\n_, _, ISIC, left, right, T_n, T_nplusone, Tbar = ISIC_particle(K=25000, **params)\nISIC_2nd_order(n_t0=n_tp[pre_input_idx], m_t0=m_tp[pre_input_idx], **params)\nprint(left, ISIC, right, Tbar)\n\n\n# def multiproc(I):\n# cparams = copy.copy(params)\n# cparams[\"I_ext\"] = I\n# _, _, ISIC, left, right, _, _, _ = ISIC_particle(K=25000, **cparams)\n\n# return ISIC, left, right\n\n\n# if __name__ == \"__main__\":\n# p = Pool(12)\n# t = time.time()\n# res = p.map(multiproc, I_vec)\n\n# ISICS = []\n# ISIC_lefts = []\n# ISIC_rights = []\n# i = 0\n# for el in res:\n# ISIC, left, right = el\n# ISICS.append(ISIC)\n# ISIC_lefts.append(ISIC - left)\n# ISIC_rights.append(right - ISIC)\n\n# plt.errorbar(I_vec, ISICS, fmt=\"kx\", yerr=[ISIC_lefts, ISIC_rights])\n# plt.xlabel(r\"I_{ext}\")\n# plt.ylim(-1, 1)\n# plt.ylabel(r\"Correlation\")\n# plt.show()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylim",
"numpy.ones",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.histogram",
"matplotlib.pyplot.figure"
],
[
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
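The moving_average helper used throughout finite_size_fluct.py is a box-kernel convolution in "valid" mode, so its output is w - 1 samples shorter than its input; that is why the plotting code slices ts[: len(A_av)] before plotting the rolling average. A quick check of that length contract:

import numpy as np

def moving_average(x, w):
    # Box kernel of width w; "valid" mode drops the w - 1 edge samples
    return np.convolve(x, np.ones(w), "valid") / w

x = np.arange(10, dtype=float)
av = moving_average(x, 4)
print(len(x), len(av))   # 10 7
print(av[:3])            # [1.5 2.5 3.5]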
igor-krawczuk/pytorch-lightning | [
"7de51f78ac2ec09b230e1cb8a786f872de3b861f"
] | [
"tests/models/test_gpu.py"
] | [
"import os\n\nimport pytest\nimport torch\n\nimport tests.base.utils as tutils\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.core import memory\nfrom pytorch_lightning.trainer.distrib_parts import (\n parse_gpu_ids,\n determine_root_gpu_device,\n)\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.base import LightningTestModel\n\nPRETEND_N_OF_GPUS = 16\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_model_ddp2(tmpdir):\n \"\"\"Make sure DDP2 works.\"\"\"\n\n tutils.reset_seed()\n tutils.set_random_master_port()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(\n default_save_path=tmpdir,\n show_progress_bar=True,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=2,\n weights_summary=None,\n distributed_backend='ddp2'\n )\n\n tutils.run_model_test(trainer_options, model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_model_ddp(tmpdir):\n \"\"\"Make sure DDP works.\"\"\"\n\n tutils.reset_seed()\n tutils.set_random_master_port()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(\n default_save_path=tmpdir,\n show_progress_bar=False,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=[0, 1],\n distributed_backend='ddp'\n )\n\n tutils.run_model_test(trainer_options, model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_ddp_all_dataloaders_passed_to_fit(tmpdir):\n \"\"\"Make sure DDP works with dataloaders passed to fit()\"\"\"\n\n tutils.reset_seed()\n tutils.set_random_master_port()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(default_save_path=tmpdir,\n show_progress_bar=False,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=[0, 1],\n distributed_backend='ddp')\n\n fit_options = dict(train_dataloader=model.train_dataloader(),\n val_dataloaders=model.val_dataloader())\n\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model, **fit_options)\n assert result == 1, \"DDP doesn't work with dataloaders passed to fit().\"\n\n\ndef test_optimizer_return_options():\n tutils.reset_seed()\n\n trainer = Trainer()\n model, hparams = tutils.get_default_model()\n\n # single optimizer\n opt_a = torch.optim.Adam(model.parameters(), lr=0.002)\n opt_b = torch.optim.SGD(model.parameters(), lr=0.002)\n scheduler_a = torch.optim.lr_scheduler.StepLR(opt_a, 10)\n scheduler_b = torch.optim.lr_scheduler.StepLR(opt_b, 10)\n\n # single optimizer\n optim, lr_sched, freq = trainer.init_optimizers(opt_a)\n assert len(optim) == 1 and len(lr_sched) == 0 and len(freq) == 0\n\n # opt tuple\n opts = (opt_a, opt_b)\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 2 and optim[0] == opts[0] and optim[1] == opts[1]\n assert len(lr_sched) == 0 and len(freq) == 0\n\n # opt list\n opts = [opt_a, opt_b]\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 2 and optim[0] == opts[0] and optim[1] == opts[1]\n assert len(lr_sched) == 0 and len(freq) == 0\n\n # opt tuple of 2 lists\n opts = ([opt_a], [scheduler_a])\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 1 and len(lr_sched) == 1 and len(freq) == 0\n assert optim[0] == opts[0][0]\n assert lr_sched[0] == dict(scheduler=scheduler_a, 
interval='epoch',\n frequency=1, reduce_on_plateau=False, monitor='val_loss')\n\n # opt single dictionary\n opts = {\"optimizer\": opt_a, \"lr_scheduler\": scheduler_a}\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 1 and len(lr_sched) == 1 and len(freq) == 0\n assert optim[0] == opt_a\n assert lr_sched[0] == dict(scheduler=scheduler_a, interval='epoch',\n frequency=1, reduce_on_plateau=False, monitor='val_loss')\n\n # opt multiple dictionaries with frequencies\n opts = (\n {\"optimizer\": opt_a, \"lr_scheduler\": scheduler_a, \"frequency\": 1},\n {\"optimizer\": opt_b, \"lr_scheduler\": scheduler_b, \"frequency\": 5},\n )\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 2 and len(lr_sched) == 2 and len(freq) == 2\n assert optim[0] == opt_a\n assert lr_sched[0] == dict(scheduler=scheduler_a, interval='epoch',\n frequency=1, reduce_on_plateau=False, monitor='val_loss')\n assert freq == [1, 5]\n\n\ndef test_cpu_slurm_save_load(tmpdir):\n \"\"\"Verify model save/load/checkpoint on CPU.\"\"\"\n tutils.reset_seed()\n\n hparams = tutils.get_default_hparams()\n model = LightningTestModel(hparams)\n\n # logger file to get meta\n logger = tutils.get_default_testtube_logger(tmpdir, False)\n version = logger.version\n\n trainer_options = dict(\n max_epochs=1,\n logger=logger,\n checkpoint_callback=ModelCheckpoint(tmpdir)\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n real_global_step = trainer.global_step\n\n # traning complete\n assert result == 1, 'amp + ddp model failed to complete'\n\n # predict with trained model before saving\n # make a prediction\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n for batch in dataloader:\n break\n\n x, y = batch\n x = x.view(x.size(0), -1)\n\n model.eval()\n pred_before_saving = model(x)\n\n # test HPC saving\n # simulate snapshot on slurm\n saved_filepath = trainer.hpc_save(tmpdir, logger)\n assert os.path.exists(saved_filepath)\n\n # new logger file to get meta\n logger = tutils.get_default_testtube_logger(tmpdir, False, version=version)\n\n trainer_options = dict(\n max_epochs=1,\n logger=logger,\n checkpoint_callback=ModelCheckpoint(tmpdir),\n )\n trainer = Trainer(**trainer_options)\n model = LightningTestModel(hparams)\n\n # set the epoch start hook so we can predict before the model does the full training\n def assert_pred_same():\n assert trainer.global_step == real_global_step and trainer.global_step > 0\n\n # predict with loaded model to make sure answers are the same\n trainer.model.eval()\n new_pred = trainer.model(x)\n assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1\n\n model.on_epoch_start = assert_pred_same\n\n # by calling fit again, we trigger training, loading weights from the cluster\n # and our hook to predict using current model before any more weight updates\n trainer.fit(model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_none_backend(tmpdir):\n \"\"\"Make sure when using multiple GPUs the user can't use `distributed_backend = None`.\"\"\"\n tutils.reset_seed()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(\n default_save_path=tmpdir,\n show_progress_bar=False,\n max_epochs=1,\n train_percent_check=0.1,\n val_percent_check=0.1,\n gpus='-1'\n )\n\n with pytest.warns(UserWarning):\n tutils.run_model_test(trainer_options, 
model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_model_dp(tmpdir):\n \"\"\"Make sure DP works.\"\"\"\n tutils.reset_seed()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(\n default_save_path=tmpdir,\n show_progress_bar=False,\n distributed_backend='dp',\n max_epochs=1,\n train_percent_check=0.1,\n val_percent_check=0.1,\n gpus='-1'\n )\n\n tutils.run_model_test(trainer_options, model)\n\n # test memory helper functions\n memory.get_memory_profile('min_max')\n\n\[email protected]\ndef mocked_device_count(monkeypatch):\n def device_count():\n return PRETEND_N_OF_GPUS\n\n monkeypatch.setattr(torch.cuda, 'device_count', device_count)\n\n\[email protected]\ndef mocked_device_count_0(monkeypatch):\n def device_count():\n return 0\n\n monkeypatch.setattr(torch.cuda, 'device_count', device_count)\n\n\[email protected]_param_tests\[email protected]([\"gpus\", \"expected_num_gpus\", \"distributed_backend\"], [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(0, 0, None, id=\"Oth gpu, expect 1 gpu to use.\"),\n pytest.param(1, 1, None, id=\"1st gpu, expect 1 gpu to use.\"),\n pytest.param(-1, PRETEND_N_OF_GPUS, \"ddp\", id=\"-1 - use all gpus\"),\n pytest.param('-1', PRETEND_N_OF_GPUS, \"ddp\", id=\"'-1' - use all gpus\"),\n pytest.param(3, 3, \"ddp\", id=\"3rd gpu - 1 gpu to use (backend:ddp)\")\n])\ndef test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus\n\n\[email protected]_param_tests\[email protected]([\"gpus\", \"expected_num_gpus\", \"distributed_backend\"], [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(None, 0, \"ddp\", id=\"None - expect 0 gpu to use.\"),\n])\ndef test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"O gpus, expect gpu root device to be None.\"),\n pytest.param(1, 0, \"ddp\", id=\"1 gpu, expect gpu root device to be 0.\"),\n pytest.param(-1, 0, \"ddp\", id=\"-1 - use all gpus, expect gpu root device to be 0.\"),\n pytest.param('-1', 0, \"ddp\", id=\"'-1' - use all gpus, expect gpu root device to be 0.\"),\n pytest.param(3, 0, \"ddp\", id=\"3 gpus, expect gpu root device to be 0.(backend:ddp)\")\n])\ndef test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu\n\n\[email protected]_param_tests\[email protected]([\n 'gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(None, None, None, id=\"None is None\"),\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"None is None\"),\n])\ndef test_root_gpu_property_0_passing(\n mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu\n\n\n# Asking for a gpu when non are available will result in a MisconfigurationException\[email protected]_param_tests\[email protected]([\n 'gpus', 
'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(1, None, \"ddp\"),\n pytest.param(3, None, \"ddp\"),\n pytest.param(3, None, \"ddp\"),\n pytest.param([1, 2], None, \"ddp\"),\n pytest.param([0, 1], None, \"ddp\"),\n pytest.param(-1, None, \"ddp\"),\n pytest.param('-1', None, \"ddp\")\n])\ndef test_root_gpu_property_0_raising(\n mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):\n with pytest.raises(MisconfigurationException):\n Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu'], [\n pytest.param(None, None, id=\"No gpus, expect gpu root device to be None\"),\n pytest.param([0], 0, id=\"Oth gpu, expect gpu root device to be 0.\"),\n pytest.param([1], 1, id=\"1st gpu, expect gpu root device to be 1.\"),\n pytest.param([3], 3, id=\"3rd gpu, expect gpu root device to be 3.\"),\n pytest.param([1, 2], 1, id=\"[1, 2] gpus, expect gpu root device to be 1.\"),\n])\ndef test_determine_root_gpu_device(gpus, expected_root_gpu):\n assert determine_root_gpu_device(gpus) == expected_root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_gpu_ids'], [\n pytest.param(None, None),\n pytest.param(0, None),\n pytest.param(1, [0]),\n pytest.param(3, [0, 1, 2]),\n pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id=\"-1 - use all gpus\"),\n pytest.param([0], [0]),\n pytest.param([1, 3], [1, 3]),\n pytest.param('0', [0]),\n pytest.param('3', [3]),\n pytest.param('1, 3', [1, 3]),\n pytest.param('-1', list(range(PRETEND_N_OF_GPUS)), id=\"'-1' - use all gpus\"),\n])\ndef test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):\n assert parse_gpu_ids(gpus) == expected_gpu_ids\n\n\[email protected]_param_tests\[email protected](['gpus'], [\n pytest.param(0.1),\n pytest.param(-2),\n pytest.param(False),\n pytest.param([]),\n pytest.param([-1]),\n pytest.param([None]),\n pytest.param(['0']),\n pytest.param((0, 1)),\n])\ndef test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\[email protected](\"gpus\", [''])\ndef test_parse_gpu_fail_on_empty_string(mocked_device_count, gpus):\n # This currently results in a ValueError instead of MisconfigurationException\n with pytest.raises(ValueError):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\[email protected](\"gpus\", [[1, 2, 19], -1, '-1'])\ndef test_parse_gpu_fail_on_non_existant_id(mocked_device_count_0, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\ndef test_parse_gpu_fail_on_non_existant_id_2(mocked_device_count):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids([1, 2, 19])\n\n\[email protected]_param_tests\[email protected](\"gpus\", [-1, '-1'])\ndef test_parse_gpu_returns_None_when_no_devices_are_available(mocked_device_count_0, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\n# if __name__ == '__main__':\n# pytest.main([__file__])\n"
] | [
[
"torch.eq",
"torch.cuda.device_count",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
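Taken together, the parametrized cases above pin down a small contract for GPU selection: None or 0 means no GPUs, a positive int n means the first n device ids, -1 (int or string) means every visible device, and strings such as '1, 3' are comma-separated id lists, with errors raised for unsupported or unavailable inputs. A rough sketch of just the happy-path semantics — not pytorch-lightning's actual implementation, with device_count standing in for torch.cuda.device_count() and all validation omitted:

def parse_gpu_ids_sketch(gpus, device_count=16):
    # None or 0 -> no GPUs requested
    if gpus is None or gpus == 0:
        return None
    # -1 as int or str -> use every visible device
    if gpus in (-1, "-1"):
        return list(range(device_count))
    # positive int n -> the first n device ids
    if isinstance(gpus, int):
        return list(range(gpus))
    # "1, 3" style string -> explicit id list
    if isinstance(gpus, str):
        return [int(x) for x in gpus.split(",")]
    # already a list of ids
    return list(gpus)

print(parse_gpu_ids_sketch(3))        # [0, 1, 2]
print(parse_gpu_ids_sketch("1, 3"))   # [1, 3]
print(parse_gpu_ids_sketch("-1"))     # [0, 1, ..., 15]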
friggog/trimesh | [
"8ba65f6f19664b15bec1d13115af5040365254da",
"8ba65f6f19664b15bec1d13115af5040365254da"
] | [
"trimesh/visual/texture.py",
"trimesh/path/entities.py"
] | [
"import copy\n\nimport numpy as np\n\nfrom .base import Visuals\nfrom . import color\n\nfrom .. import util\nfrom .. import caching\nfrom .. import grouping\n\nfrom .material import SimpleMaterial, PBRMaterial, empty_material # NOQA\n\n\nclass TextureVisuals(Visuals):\n def __init__(self,\n uv=None,\n material=None,\n image=None,\n face_materials=None):\n \"\"\"\n Store a single material and per-vertex UV coordinates\n for a mesh.\n\n If passed UV coordinates and a single image it will\n create a SimpleMaterial for the image.\n\n Parameters\n --------------\n uv : (n, 2) float\n UV coordinates for the mesh\n material : Material\n Store images and properties\n image : PIL.Image\n Can be passed to automatically create material\n \"\"\"\n\n # store values we care about enough to hash\n self.vertex_attributes = caching.DataStore()\n # cache calculated values\n self._cache = caching.Cache(self.vertex_attributes.fast_hash)\n\n # should be (n, 2) float\n self.uv = uv\n\n if material is None:\n if image is None:\n self.material = empty_material()\n else:\n # if an image is passed create a SimpleMaterial\n self.material = SimpleMaterial(image=image)\n else:\n # if passed assign\n self.material = material\n\n self.face_materials = face_materials\n\n def _verify_crc(self):\n \"\"\"\n Dump the cache if anything in self.vertex_attributes has changed.\n \"\"\"\n self._cache.verify()\n\n @property\n def kind(self):\n \"\"\"\n Return the type of visual data stored\n\n Returns\n ----------\n kind : str\n What type of visuals are defined\n \"\"\"\n return 'texture'\n\n @property\n def defined(self):\n \"\"\"\n Check if any data is stored\n\n Returns\n ----------\n defined : bool\n Are UV coordinates and images set?\n \"\"\"\n ok = self.material is not None\n return ok\n\n def crc(self):\n \"\"\"\n Get a CRC of the stored data.\n\n Returns\n --------------\n crc : int\n Hash of items in self.vertex_attributes\n \"\"\"\n return self.vertex_attributes.crc()\n\n @property\n def uv(self):\n \"\"\"\n Get the stored UV coordinates.\n\n Returns\n ------------\n uv : (n, 2) float\n Pixel position per- vertex\n \"\"\"\n return self.vertex_attributes.get('uv', None)\n\n @uv.setter\n def uv(self, values):\n \"\"\"\n Set the UV coordinates.\n\n Parameters\n --------------\n values : (n, 2) float\n Pixel locations on a texture per- vertex\n \"\"\"\n if values is None:\n self.vertex_attributes['uv'] = None\n else:\n self.vertex_attributes['uv'] = np.asanyarray(\n values, dtype=np.float64)\n\n def copy(self, uv=None):\n \"\"\"\n Return a copy of the current TextureVisuals object.\n\n Returns\n ----------\n copied : TextureVisuals\n Contains the same information in a new object\n \"\"\"\n if uv is None:\n uv = self.uv\n if uv is not None:\n uv = uv.copy()\n copied = TextureVisuals(\n uv=uv,\n material=self.material.copy(),\n face_materials=copy.copy(self.face_materials))\n\n return copied\n\n def to_color(self):\n \"\"\"\n Convert textured visuals to a ColorVisuals with vertex\n color calculated from texture.\n\n Returns\n -----------\n vis : trimesh.visuals.ColorVisuals\n Contains vertex color from texture\n \"\"\"\n # find the color at each UV coordinate\n colors = self.material.to_color(self.uv)\n # create ColorVisuals from result\n vis = color.ColorVisuals(vertex_colors=colors)\n return vis\n\n def face_subset(self, face_index):\n \"\"\"\n Get a copy of\n \"\"\"\n if self.uv is not None:\n indices = np.unique(self.mesh.faces[face_index].flatten())\n return self.copy(self.uv[indices])\n else:\n return self.copy()\n\n def 
update_vertices(self, mask):\n \"\"\"\n Apply a mask to remove or duplicate vertex properties.\n\n Parameters\n ------------\n mask : (len(vertices),) bool or (n,) int\n Mask which can be used like: `vertex_attribute[mask]`\n \"\"\"\n # collect updated masked values\n updates = {}\n for key, value in self.vertex_attributes.items():\n # DataStore will convert None to zero-length array\n if len(value) == 0:\n continue\n try:\n # store the update\n updates[key] = value[mask]\n except BaseException:\n # usual reason is an incorrect size or index\n util.log.warning('failed to update visual: `{}`'.format(key))\n # clear all values from the vertex attributes\n self.vertex_attributes.clear()\n # apply the updated values\n self.vertex_attributes.update(updates)\n\n def update_faces(self, mask):\n \"\"\"\n Apply a mask to remove or duplicate face properties,\n not applicable to texture visuals.\n \"\"\"\n pass\n\n def concatenate(self, others):\n \"\"\"\n Concatenate this TextureVisuals object with others\n and return the result without modifying this visual.\n\n Parameters\n -----------\n others : (n,) Visuals\n Other visual objects to concatenate\n\n Returns\n -----------\n concatenated : TextureVisuals\n Concatenated visual objects\n \"\"\"\n util.log.warning('concatenating texture: may result in visual artifacts')\n from .objects import concatenate\n return concatenate(self, others)\n\n\ndef unmerge_faces(faces, *args, **kwargs):\n \"\"\"\n Textured meshes can come with faces referencing vertex\n indices (`v`) and an array the same shape which references\n vertex texture indices (`vt`) and sometimes even normal (`vn`).\n\n Vertex locations with different values of any of these can't\n be considered the \"same\" vertex, and for our simple data\n model we need to not combine these vertices.\n\n Parameters\n -------------\n faces : (n, d) int\n References vertex indices\n *args : (n, d) int\n Various references of corresponding values\n This is usually UV coordinates or normal indexes\n maintain_faces : bool\n Do not alter original faces and return no-op masks.\n\n Returns\n -------------\n new_faces : (m, d) int\n New faces for masked vertices\n mask_v : (p,) int\n A mask to apply to vertices\n mask_* : (p,) int\n A mask to apply to vt array to get matching UV coordinates\n Returns as many of these as args were passed\n \"\"\"\n # unfortunately Python2 doesn't let us put named kwargs\n # after an `*args` sequence so we have to do this ugly get\n maintain_faces = kwargs.get('maintain_faces', False)\n\n # don't alter faces\n if maintain_faces:\n # start with not altering faces at all\n result = [faces]\n # find the maximum index referenced by faces\n max_idx = faces.max()\n # add a vertex mask which is just ordered\n result.append(np.arange(max_idx + 1))\n\n # now given the order is fixed do our best on the rest of the order\n for arg in args:\n # create a mask of the attribute-vertex mapping\n # note that these might conflict since we're not unmerging\n masks = np.zeros((3, max_idx + 1), dtype=np.int64)\n # set the mask using the unmodified face indexes\n for i, f, a in zip(range(3), faces.T, arg.T):\n masks[i][f] = a\n # find the most commonly occurring attribute (i.e. 
UV coordinate)\n # and use that index note that this is doing a float conversion\n # and then median before converting back to int: could also do this as\n # a column diff and sort but this seemed easier and is fast enough\n result.append(np.median(masks, axis=0).astype(np.int64))\n\n return result\n\n # stack into pairs of (vertex index, texture index)\n stackable = [np.asanyarray(faces).reshape(-1)]\n # append multiple args to the correlated stack\n # this is usually UV coordinates (vt) and normals (vn)\n for arg in args:\n stackable.append(np.asanyarray(arg).reshape(-1))\n\n # unify them into rows of a numpy array\n stack = np.column_stack(stackable)\n # find unique pairs: we're trying to avoid merging\n # vertices that have the same position but different\n # texture coordinates\n unique, inverse = grouping.unique_rows(stack)\n\n # only take the unique pairs\n pairs = stack[unique]\n # try to maintain original vertex order\n order = pairs[:, 0].argsort()\n # apply the order to the pairs\n pairs = pairs[order]\n\n # we re-ordered the vertices to try to maintain\n # the original vertex order as much as possible\n # so to reconstruct the faces we need to remap\n remap = np.zeros(len(order), dtype=np.int64)\n remap[order] = np.arange(len(order))\n\n # the faces are just the inverse with the new order\n new_faces = remap[inverse].reshape((-1, 3))\n\n # the mask for vertices and masks for other args\n result = [new_faces]\n result.extend(pairs.T)\n\n return result\n\n\ndef power_resize(image, resample=1, square=False):\n \"\"\"\n Resize a PIL image so every dimension is a power of two.\n\n Parameters\n ------------\n image : PIL.Image\n Input image\n resample : int\n Passed to Image.resize\n square : bool\n If True, upsize to a square image\n\n Returns\n -------------\n resized : PIL.Image\n Input image resized\n \"\"\"\n # what is the current resolution of the image in pixels\n size = np.array(image.size, dtype=np.int64)\n # what is the resolution of the image upsized to the nearest\n # power of two on each axis: allow rectangular textures\n new_size = (2 ** np.ceil(np.log2(size))).astype(np.int64)\n\n # make every dimension the largest\n if square:\n new_size = np.ones(2, dtype=np.int64) * new_size.max()\n\n # if we're not powers of two upsize\n if (size != new_size).any():\n return image.resize(new_size, resample=resample)\n\n return image.copy()\n",
"\"\"\"\nentities.py\n--------------\n\nBasic geometric primitives which only store references to\nvertex indices rather than vertices themselves.\n\"\"\"\nimport numpy as np\n\nfrom copy import deepcopy\nfrom .arc import discretize_arc, arc_center\nfrom .curve import discretize_bezier, discretize_bspline\n\nfrom .. import util\nfrom ..util import ABC\n\n\nclass Entity(ABC):\n\n def __init__(self,\n points,\n closed=None,\n layer=None,\n metadata=None,\n color=None,\n **kwargs):\n # points always reference vertex indices and are int\n self.points = np.asanyarray(points, dtype=np.int64)\n # save explicit closed\n if closed is not None:\n self.closed = closed\n # save the passed layer\n if layer is not None:\n self.layer = layer\n if metadata is not None:\n self.metadata.update(metadata)\n\n self._cache = {}\n\n # save the passed color\n self.color = color\n # save any other kwargs for general use\n self.kwargs = kwargs\n\n @property\n def metadata(self):\n \"\"\"\n Get any metadata about the entity.\n\n Returns\n ---------\n metadata : dict\n Bag of properties.\n \"\"\"\n if not hasattr(self, '_metadata'):\n self._metadata = {}\n # note that we don't let a new dict be assigned\n return self._metadata\n\n @property\n def layer(self):\n \"\"\"\n Set the layer the entity resides on as a shortcut\n to putting it in the entity metadata.\n\n Returns\n ----------\n layer : any\n Hashable layer identifier.\n \"\"\"\n return self.metadata.get('layer')\n\n @layer.setter\n def layer(self, value):\n \"\"\"\n Set the current layer of the entity.\n\n Returns\n ----------\n layer : any\n Hashable layer indicator\n \"\"\"\n self.metadata['layer'] = value\n\n def to_dict(self):\n \"\"\"\n Returns a dictionary with all of the information\n about the entity.\n\n Returns\n -----------\n as_dict : dict\n Has keys 'type', 'points', 'closed'\n \"\"\"\n return {'type': self.__class__.__name__,\n 'points': self.points.tolist(),\n 'closed': self.closed}\n\n @property\n def closed(self):\n \"\"\"\n If the first point is the same as the end point\n the entity is closed\n\n Returns\n -----------\n closed : bool\n Is the entity closed or not?\n \"\"\"\n closed = (len(self.points) > 2 and\n self.points[0] == self.points[-1])\n return closed\n\n @property\n def nodes(self):\n \"\"\"\n Returns an (n,2) list of nodes, or vertices on the path.\n Note that this generic class function assumes that all of the\n reference points are on the path which is true for lines and\n three point arcs.\n\n If you were to define another class where that wasn't the case\n (for example, the control points of a bezier curve),\n you would need to implement an entity- specific version of this\n function.\n\n The purpose of having a list of nodes is so that they can then be\n added as edges to a graph so we can use functions to check\n connectivity, extract paths, etc.\n\n The slicing on this function is essentially just tiling points\n so the first and last vertices aren't repeated. Example:\n\n self.points = [0,1,2]\n returns: [[0,1], [1,2]]\n \"\"\"\n return np.column_stack((self.points,\n self.points)).reshape(\n -1)[1:-1].reshape((-1, 2))\n\n @property\n def end_points(self):\n \"\"\"\n Returns the first and last points. 
Also note that if you\n define a new entity class where the first and last vertices\n in self.points aren't the endpoints of the curve you need to\n implement this function for your class.\n\n Returns\n -------------\n ends : (2,) int\n Indices of the two end points of the entity\n \"\"\"\n return self.points[[0, -1]]\n\n @property\n def is_valid(self):\n \"\"\"\n Is the current entity valid.\n\n Returns\n -----------\n valid : bool\n Is the current entity well formed\n \"\"\"\n return True\n\n def reverse(self, direction=-1):\n \"\"\"\n Reverse the current entity in place.\n\n Parameters\n ----------------\n direction : int\n If positive will not touch direction\n If negative will reverse self.points\n \"\"\"\n if direction < 0:\n self._direction = -1\n else:\n self._direction = 1\n\n def _orient(self, curve):\n \"\"\"\n Reverse a curve if a flag is set.\n\n Parameters\n --------------\n curve : (n, dimension) float\n Curve made up of line segments in space\n\n Returns\n ------------\n orient : (n, dimension) float\n Original curve, but possibly reversed\n \"\"\"\n if hasattr(self, '_direction') and self._direction < 0:\n return curve[::-1]\n return curve\n\n def bounds(self, vertices):\n \"\"\"\n Return the AABB of the current entity.\n\n Parameters\n -----------\n vertices : (n, dimension) float\n Vertices in space\n\n Returns\n -----------\n bounds : (2, dimension) float\n Coordinates of AABB, in (min, max) form\n \"\"\"\n bounds = np.array([vertices[self.points].min(axis=0),\n vertices[self.points].max(axis=0)])\n return bounds\n\n def length(self, vertices):\n \"\"\"\n Return the total length of the entity.\n\n Parameters\n --------------\n vertices : (n, dimension) float\n Vertices in space\n\n Returns\n ---------\n length : float\n Total length of entity\n \"\"\"\n diff = np.diff(self.discrete(vertices), axis=0) ** 2\n length = (np.dot(diff, [1] * vertices.shape[1]) ** 0.5).sum()\n return length\n\n def explode(self):\n \"\"\"\n Split the entity into multiple entities.\n\n Returns\n ------------\n explode : list of Entity\n Current entity split into multiple entities.\n \"\"\"\n return [self.copy()]\n\n def copy(self):\n \"\"\"\n Return a copy of the current entity.\n\n Returns\n ------------\n copied : Entity\n Copy of current entity\n \"\"\"\n copied = deepcopy(self)\n # only copy metadata if set\n if hasattr(self, '_metadata'):\n copied._metadata = deepcopy(self._metadata)\n # check for very annoying subtle copy failures\n assert id(copied._metadata) != id(self._metadata)\n assert id(copied.points) != id(self.points)\n return copied\n\n def __hash__(self):\n \"\"\"\n Return a hash that represents the current entity.\n\n Returns\n ----------\n hashed : int\n Hash of current class name, points, and closed\n \"\"\"\n return hash(self._bytes())\n\n def _bytes(self):\n \"\"\"\n Get hashable bytes that define the current entity.\n\n Returns\n ------------\n data : bytes\n Hashable data defining the current entity\n \"\"\"\n # give consistent ordering of points for hash\n if self.points[0] > self.points[-1]:\n return (self.__class__.__name__.encode('utf-8') +\n self.points.tobytes())\n else:\n return (self.__class__.__name__.encode('utf-8') +\n self.points[::-1].tobytes())\n\n\nclass Text(Entity):\n \"\"\"\n Text to annotate a 2D or 3D path.\n \"\"\"\n\n def __init__(self,\n origin,\n text,\n height=None,\n vector=None,\n normal=None,\n align=None,\n layer=None,\n color=None,\n metadata=None):\n \"\"\"\n An entity for text labels.\n\n Parameters\n --------------\n origin : int\n Index 
of a single vertex for text origin\n text : str\n The text to label\n height : float or None\n The height of text\n vector : int or None\n An vertex index for which direction text\n is written along unitized: vector - origin\n normal : int or None\n A vertex index for the plane normal:\n vector is along unitized: normal - origin\n align : (2,) str or None\n Where to draw from for [horizontal, vertical]:\n 'center', 'left', 'right'\n \"\"\"\n # where is text placed\n self.origin = origin\n # what direction is the text pointing\n self.vector = vector\n # what is the normal of the text plane\n self.normal = normal\n # how high is the text entity\n self.height = height\n # what layer is the entity on\n if layer is not None:\n self.layer = layer\n\n if metadata is not None:\n self.metadata.update(metadata)\n\n # what color is the entity\n self.color = color\n\n # None or (2,) str\n if align is None:\n # if not set make everything centered\n align = ['center', 'center']\n elif util.is_string(align):\n # if only one is passed set for both\n # horizontal and vertical\n align = [align, align]\n elif len(align) != 2:\n # otherwise raise rror\n raise ValueError('align must be (2,) str')\n\n if any(i not in ['left', 'right', 'center']\n for i in align):\n print('nah')\n\n self.align = align\n\n # make sure text is a string\n if hasattr(text, 'decode'):\n self.text = text.decode('utf-8')\n else:\n self.text = str(text)\n\n @property\n def origin(self):\n \"\"\"\n The origin point of the text.\n\n Returns\n -----------\n origin : int\n Index of vertices\n \"\"\"\n return self.points[0]\n\n @origin.setter\n def origin(self, value):\n value = int(value)\n if not hasattr(self, 'points') or self.points.ptp() == 0:\n self.points = np.ones(3, dtype=np.int64) * value\n else:\n self.points[0] = value\n\n @property\n def vector(self):\n \"\"\"\n A point representing the text direction\n along the vector: vertices[vector] - vertices[origin]\n\n Returns\n ----------\n vector : int\n Index of vertex\n \"\"\"\n return self.points[1]\n\n @vector.setter\n def vector(self, value):\n if value is None:\n return\n self.points[1] = int(value)\n\n @property\n def normal(self):\n \"\"\"\n A point representing the plane normal along the\n vector: vertices[normal] - vertices[origin]\n\n Returns\n ------------\n normal : int\n Index of vertex\n \"\"\"\n return self.points[2]\n\n @normal.setter\n def normal(self, value):\n if value is None:\n return\n self.points[2] = int(value)\n\n def plot(self, vertices, show=False):\n \"\"\"\n Plot the text using matplotlib.\n\n Parameters\n --------------\n vertices : (n, 2) float\n Vertices in space\n show : bool\n If True, call plt.show()\n \"\"\"\n if vertices.shape[1] != 2:\n raise ValueError('only for 2D points!')\n\n import matplotlib.pyplot as plt\n\n # get rotation angle in degrees\n angle = np.degrees(self.angle(vertices))\n\n # TODO: handle text size better\n plt.text(*vertices[self.origin],\n s=self.text,\n rotation=angle,\n ha=self.align[0],\n va=self.align[1],\n size=18)\n\n if show:\n plt.show()\n\n def angle(self, vertices):\n \"\"\"\n If Text is 2D, get the rotation angle in radians.\n\n Parameters\n -----------\n vertices : (n, 2) float\n Vertices in space referenced by self.points\n\n Returns\n ---------\n angle : float\n Rotation angle in radians\n \"\"\"\n\n if vertices.shape[1] != 2:\n raise ValueError('angle only valid for 2D points!')\n\n # get the vector from origin\n direction = vertices[self.vector] - vertices[self.origin]\n # get the rotation angle in radians\n 
angle = np.arctan2(*direction[::-1])\n\n return angle\n\n def length(self, vertices):\n return 0.0\n\n def discrete(self, *args, **kwargs):\n return np.array([])\n\n @property\n def closed(self):\n return False\n\n @property\n def is_valid(self):\n return True\n\n @property\n def nodes(self):\n return np.array([])\n\n @property\n def end_points(self):\n return np.array([])\n\n def _bytes(self):\n data = b''.join([b'Text',\n self.points.tobytes(),\n self.text.encode('utf-8')])\n return data\n\n\nclass Line(Entity):\n \"\"\"\n A line or poly-line entity\n \"\"\"\n\n def discrete(self, vertices, scale=1.0):\n \"\"\"\n Discretize into a world- space path.\n\n Parameters\n ------------\n vertices: (n, dimension) float\n Points in space\n scale : float\n Size of overall scene for numerical comparisons\n\n Returns\n -------------\n discrete: (m, dimension) float\n Path in space composed of line segments\n \"\"\"\n return self._orient(vertices[self.points])\n\n @property\n def is_valid(self):\n \"\"\"\n Is the current entity valid.\n\n Returns\n -----------\n valid : bool\n Is the current entity well formed\n \"\"\"\n valid = np.any((self.points - self.points[0]) != 0)\n return valid\n\n def explode(self):\n \"\"\"\n If the current Line entity consists of multiple line\n break it up into n Line entities.\n\n Returns\n ----------\n exploded: (n,) Line entities\n \"\"\"\n # copy over the current layer\n layer = self.layer\n points = np.column_stack((\n self.points,\n self.points)).ravel()[1:-1].reshape((-1, 2))\n exploded = [Line(i, layer=layer) for i in points]\n return exploded\n\n def _bytes(self):\n # give consistent ordering of points for hash\n if self.points[0] > self.points[-1]:\n return b'Line' + self.points.tobytes()\n else:\n return b'Line' + self.points[::-1].tobytes()\n\n\nclass Arc(Entity):\n\n @property\n def closed(self):\n \"\"\"\n A boolean flag for whether the arc is closed (a circle) or not.\n\n Returns\n ----------\n closed : bool\n If set True, Arc will be a closed circle\n \"\"\"\n return getattr(self, '_closed', False)\n\n @closed.setter\n def closed(self, value):\n \"\"\"\n Set the Arc to be closed or not, without\n changing the control points\n\n Parameters\n ------------\n value : bool\n Should this Arc be a closed circle or not\n \"\"\"\n self._closed = bool(value)\n\n @property\n def is_valid(self):\n \"\"\"\n Is the current Arc entity valid.\n\n Returns\n -----------\n valid : bool\n Does the current Arc have exactly 3 control points\n \"\"\"\n return len(np.unique(self.points)) == 3\n\n def _bytes(self):\n # give consistent ordering of points for hash\n order = int(self.points[0] > self.points[-1]) * 2 - 1\n return b'Arc' + bytes(self.closed) + self.points[::order].tobytes()\n\n def length(self, vertices):\n \"\"\"\n Return the arc length of the 3-point arc.\n\n Parameter\n ----------\n vertices : (n, d) float\n Vertices for overall drawing.\n\n Returns\n -----------\n length : float\n Length of arc.\n \"\"\"\n # find the actual radius and angle span\n if self.closed:\n # we don't need the angular span as\n # it's indicated as a closed circle\n fit = self.center(\n vertices, return_normal=False, return_angle=False)\n return np.pi * fit['radius'] * 4\n # get the angular span of the circular arc\n fit = self.center(\n vertices, return_normal=False, return_angle=True)\n return fit['span'] * fit['radius'] * 2\n\n def discrete(self, vertices, scale=1.0):\n \"\"\"\n Discretize the arc entity into line sections.\n\n Parameters\n ------------\n vertices : (n, dimension) 
float\n Points in space\n scale : float\n Size of overall scene for numerical comparisons\n\n Returns\n -------------\n discrete : (m, dimension) float\n Path in space made up of line segments\n \"\"\"\n\n return self._orient(discretize_arc(\n vertices[self.points],\n close=self.closed,\n scale=scale))\n\n def center(self, vertices, **kwargs):\n \"\"\"\n Return the center information about the arc entity.\n\n Parameters\n -------------\n vertices : (n, dimension) float\n Vertices in space\n\n Returns\n -------------\n info : dict\n With keys: 'radius', 'center'\n \"\"\"\n return arc_center(vertices[self.points], **kwargs)\n\n def bounds(self, vertices):\n \"\"\"\n Return the AABB of the arc entity.\n\n Parameters\n -----------\n vertices: (n, dimension) float\n Vertices in space\n\n Returns\n -----------\n bounds : (2, dimension) float\n Coordinates of AABB in (min, max) form\n \"\"\"\n if util.is_shape(vertices, (-1, 2)) and self.closed:\n # if we have a closed arc (a circle), we can return the actual bounds\n # this only works in two dimensions, otherwise this would return the\n # AABB of an sphere\n info = self.center(vertices, return_normal=False, return_angle=False)\n bounds = np.array([info['center'] - info['radius'],\n info['center'] + info['radius']],\n dtype=np.float64)\n else:\n # since the AABB of a partial arc is hard, approximate\n # the bounds by just looking at the discrete values\n discrete = self.discrete(vertices)\n bounds = np.array([discrete.min(axis=0),\n discrete.max(axis=0)],\n dtype=np.float64)\n return bounds\n\n\nclass Curve(Entity):\n \"\"\"\n The parent class for all wild curves in space.\n \"\"\"\n @property\n def nodes(self):\n # a point midway through the curve\n mid = self.points[len(self.points) // 2]\n return [[self.points[0], mid],\n [mid, self.points[-1]]]\n\n\nclass Bezier(Curve):\n \"\"\"\n An open or closed Bezier curve\n \"\"\"\n\n def discrete(self, vertices, scale=1.0, count=None):\n \"\"\"\n Discretize the Bezier curve.\n\n Parameters\n -------------\n vertices : (n, 2) or (n, 3) float\n Points in space\n scale : float\n Scale of overall drawings (for precision)\n count : int\n Number of segments to return\n\n Returns\n -------------\n discrete : (m, 2) or (m, 3) float\n Curve as line segments\n \"\"\"\n return self._orient(discretize_bezier(\n vertices[self.points],\n count=count,\n scale=scale))\n\n\nclass BSpline(Curve):\n \"\"\"\n An open or closed B- Spline.\n \"\"\"\n\n def __init__(self,\n points,\n knots,\n layer=None,\n metadata=None,\n color=None,\n **kwargs):\n self.points = np.asanyarray(points, dtype=np.int64)\n self.knots = np.asanyarray(knots, dtype=np.float64)\n if layer is not None:\n self.layer = layer\n if metadata is not None:\n self.metadata.update(metadata)\n self._cache = {}\n self.kwargs = kwargs\n self.color = color\n\n def discrete(self, vertices, count=None, scale=1.0):\n \"\"\"\n Discretize the B-Spline curve.\n\n Parameters\n -------------\n vertices : (n, 2) or (n, 3) float\n Points in space\n scale : float\n Scale of overall drawings (for precision)\n count : int\n Number of segments to return\n\n Returns\n -------------\n discrete : (m, 2) or (m, 3) float\n Curve as line segments\n \"\"\"\n discrete = discretize_bspline(\n control=vertices[self.points],\n knots=self.knots,\n count=count,\n scale=scale)\n return self._orient(discrete)\n\n def _bytes(self):\n # give consistent ordering of points for hash\n if self.points[0] > self.points[-1]:\n return (b'BSpline' +\n self.knots.tobytes() +\n self.points.tobytes())\n 
else:\n return (b'BSpline' +\n self.knots[::-1].tobytes() +\n self.points[::-1].tobytes())\n\n def to_dict(self):\n \"\"\"\n Returns a dictionary with all of the information\n about the entity.\n \"\"\"\n return {'type': self.__class__.__name__,\n 'points': self.points.tolist(),\n 'knots': self.knots.tolist(),\n 'closed': self.closed}\n"
] | [
[
"numpy.log2",
"numpy.arange",
"numpy.median",
"numpy.ones",
"numpy.asanyarray",
"numpy.column_stack",
"numpy.array",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.unique",
"numpy.ones",
"numpy.arctan2",
"numpy.asanyarray",
"numpy.any",
"numpy.column_stack",
"matplotlib.pyplot.text",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
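power_resize in texture.py above rounds every image dimension up to the nearest power of two via 2 ** ceil(log2(size)), and resizes only when that actually changes a dimension. The arithmetic in isolation, assuming nothing beyond numpy:

import numpy as np

def next_power_of_two(size):
    # Round each dimension up to the nearest power of two
    size = np.asarray(size, dtype=np.int64)
    return (2 ** np.ceil(np.log2(size))).astype(np.int64)

print(next_power_of_two([300, 120]))   # [512 128]
print(next_power_of_two([256, 256]))   # [256 256] (unchanged: already powers of two)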
itsameercat/polychrom | [
"a3a39290857bc889627f9d437faa050248bbdc13",
"a3a39290857bc889627f9d437faa050248bbdc13"
] | [
"polychrom/polymer_analyses.py",
"polychrom/legacy/forces.py"
] | [
"# Code written by: Maksim Imakaev ([email protected])\n\"\"\"\nAnalyses of polymer conformations\n=================================\n\n\nThis module presents a collection of utils to work with polymer conformations.\n\n\nTools for calculating contacts\n------------------------------\n\nThe main function calculating contacts is: :py:func:`polychrom.polymer_analyses.calculate_contacts`\nRight now it is a simple wrapper around scipy.cKDTree. \n\nAnother function :py:func:`polychrom.polymer_analyses.smart_contacts` was added recently\nto help build contact maps with a large contact radius. \nIt randomly sub-samples the monomers; by default selecting N/cutoff monomers. It then \ncalculates contacts from sub-sampled monomers only. It is especially helpful when the same code \nneeds to calculate contacts at large and small contact radii.Because of sub-sampling at large\ncontact radius, it avoids the problem of having way-too-many-contacts at a large contact radius. \nFor ordinary contacts, the number of contacts scales as contact_radius^3; however, with smart_contacts \nit would only scale linearly with contact radius, which leads to significant speedsups. \n\n\nTools to calculate P(s) and R(s) \n----------------------------------\n\nWe provide functions to calculate P(s), Rg^2(s) and R^2(s) for polymers. \nBy default, they use log-spaced bins on the X axis, with about 10 bins per order of magnitude, \nbut aligned such that the last bins ends exactly at (N-1). They output (bin, scaling) \nfor Rg^2 and R^2, and (bin_mid, scaling) for contacts. In either case, the \nreturned values are ready to plot. The difference is that Rg and R^2 are evaluated\nat a given value of s, while contacts are aggregated for (bins[0].. bins[1]), (bins[1]..bins[2]). \nTherefore, we have to return bin mids for contacts. \n\n\"\"\"\n\nfrom math import sqrt\n\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.spatial import ckdtree\n\ntry:\n from . import _polymer_math\nexcept:\n pass\n\n\ndef calculate_contacts(data, cutoff=1.7):\n \"\"\"Calculates contacts between points give the contact radius (cutoff)\n\n Parameters\n ----------\n data : Nx3 array\n Coordinates of points\n cutoff : float , optional\n Cutoff distance (contact radius)\n\n Returns\n -------\n k by 2 array of contacts. Each row corresponds to a contact.\n \"\"\"\n if data.shape[1] != 3:\n raise ValueError(\"Incorrect polymer data shape. Must be Nx3.\")\n\n if np.isnan(data).any():\n raise RuntimeError(\"Data contains NANs\")\n\n tree = ckdtree.cKDTree(data)\n pairs = tree.query_pairs(cutoff, output_type=\"ndarray\")\n return pairs\n\n\ndef smart_contacts(data, cutoff=1.7, min_cutoff=2.1, percent_func=lambda x: 1 / x):\n \"\"\"Calculates contacts for a polymer, give the contact radius (cutoff)\n This method takes a random fraction of the monomers that is equal to (\n 1/cutoff).\n\n This is done to make contact finding faster, and because if cutoff radius\n is R, and monomer (i,j) are in contact, then monomers (i+a), and (j+b)\n are likely in contact if |a| + |b| <~ R (the polymer could not run away\n by more than R in R steps)\n\n This method will have # of contacts grow approximately linearly with\n contact radius, not cubically, which should drastically speed up\n computations of contacts for large (5+) contact radii. 
This should allow\n using the same code both for small and large contact radius, without the\n need to reduce the # of conformations, subsample the data, or both at\n very large contact radii.\n\n\n Parameters\n ----------\n data : Nx3 array\n Polymer coordinates\n cutoff : float , optional\n Cutoff distance that defines contact\n min_cutoff : float, optional\n Apply the \"smart\" reduction of contacts only when cutoff\n is less than this value\n percent_func : callable, optional \n Function that calculates fraction of monomers to use, as a function of cutoff\n Default is 1/cutoff \n\n Returns\n -------\n k by 2 array of contacts. Each row corresponds to a contact.\n \"\"\"\n if data.shape[1] != 3:\n raise ValueError(\"Incorrect polymer data shape. Must be Nx3.\")\n\n if np.isnan(data).any():\n raise RuntimeError(\"Data contains NANs\")\n\n if cutoff > min_cutoff:\n frac = percent_func(cutoff)\n inds = np.nonzero(np.random.random(len(data)) < frac)[0]\n\n conts = calculate_contacts(data[inds], cutoff)\n conts[:, 0] = inds[conts[:, 0]]\n conts[:, 1] = inds[conts[:, 1]]\n return conts\n\n else:\n return calculate_contacts(data, cutoff)\n\n\ndef generate_bins(N, start=4, bins_per_order_magn=10):\n lstart = np.log10(start)\n lend = np.log10(N - 1) + 1e-6\n num = int(np.ceil((lend - lstart) * bins_per_order_magn))\n bins = np.unique(np.logspace(lstart, lend, dtype=int, num=max(num, 0)))\n if len(bins) > 0:\n assert bins[-1] == N - 1\n return bins\n\n\ndef contact_scaling(data, bins0=None, cutoff=1.1, integrate=False, ring=False):\n \"\"\"\n Returns contact probability scaling for a given polymer conformation\n Contact between monomers X and X+1 is counted as s=1 \n \n\n Parameters\n ----------\n data : Nx3 array of ints/floats\n Input polymer conformation\n bins0 : list or None\n Bins to calculate scaling.\n Bins should probably be log-spaced; log-spaced bins can be quickly\n calculated using mirnylib.numtuis.logbinsnew.\n If None, bins will be calculated automatically\n cutoff : float, optional\n Cutoff to calculate scaling\n integrate : bool, optional\n if True, will return cumulative probability\n ring : bool, optional\n If True, will calculate contacts for the ring\n intContacts : bool, optional\n If True, will speed up calculation of contacts for a cubit lattice case.\n verbose : bool, optional\n If True, print some information.\n\n Returns\n -------\n (mids, contact probabilities) where \"mids\" contains\n geometric means of bin start/end\n \n\n \"\"\"\n data = np.asarray(data)\n N = data.shape[0]\n assert data.shape[1] == 3\n\n if bins0 is None:\n bins0 = generate_bins(N)\n\n bins0 = np.array(bins0)\n bins = [(bins0[i], bins0[i + 1]) for i in range(len(bins0) - 1)]\n contacts = np.array(calculate_contacts(data, cutoff))\n\n contacts = contacts[:, 1] - contacts[:, 0] # contact lengthes\n\n if ring:\n mask = contacts > N // 2\n contacts[mask] = N - contacts[mask]\n\n scontacts = np.sort(contacts) # sorted contact lengthes\n # count of contacts\n connumbers = np.diff(np.searchsorted(scontacts, bins0, side=\"left\"))\n\n if ring:\n possible = np.diff(N * bins0)\n else:\n possible = np.diff(N * bins0 + 0.5 * bins0 - 0.5 * (bins0 ** 2))\n\n connumbers = connumbers / possible\n\n a = [sqrt(i[0] * (i[1] - 1)) for i in bins]\n return a, connumbers\n\n\n\ndef slope_contact_scaling(mids, cp, sigma=2):\n \n smooth=lambda x: gaussian_filter1d(x, sigma)\n \n # P(s) has to be smoothed in logspace, and both P and s have to be smoothed. 
\n # It is discussed in detail here\n # https://gist.github.com/mimakaev/4becf1310ba6ee07f6b91e511c531e73\n \n # Values sigma=1.5-2 look reasonable for reasonable simulations\n \n slope = np.diff(smooth(np.log(cp))) / np.diff(\n smooth(np.log(mids)))\n \n return mids[1:], slope\n\n\ndef Rg2_scaling(data, bins=None, ring=False):\n \"\"\"Calculates average gyration radius of subchains a function of s\n \n Parameters\n ----------\n \n data: Nx3 array\n bins: subchain lengths at which to calculate Rg\n ring: treat polymer as a ring (default: False) \n \"\"\"\n\n data = np.asarray(data, float)\n N = data.shape[0]\n assert data.shape[1] == 3\n\n data = np.concatenate([[[0, 0, 0]], data])\n\n if bins is None:\n bins = generate_bins(N)\n\n coms = np.cumsum(data, 0) # cumulative sum of locations to calculate COM\n coms2 = np.cumsum(data ** 2, 0) # cumulative sum of locations^2 to calculate RG\n\n def radius_gyration(len2):\n data\n if ring:\n comsadd = coms[1:len2, :].copy()\n coms2add = coms2[1:len2, :].copy()\n comsadd += coms[-1, :][None, :]\n coms2add += coms2[-1, :][None, :]\n comsw = np.concatenate([coms, comsadd], axis=0)\n coms2w = np.concatenate([coms2, coms2add], axis=0)\n else:\n comsw = coms\n coms2w = coms2\n\n coms2d = (-coms2w[:-len2, :] + coms2w[len2:, :]) / len2\n comsd = ((comsw[:-len2, :] - comsw[len2:, :]) / len2) ** 2\n diffs = coms2d - comsd\n sums = np.sum(diffs, 1)\n return np.mean(sums)\n\n rads = [0.0 for i in range(len(bins))]\n for i in range(len(bins)):\n rads[i] = radius_gyration(int(bins[i]))\n return np.array(bins), rads\n\n\ndef R2_scaling(data, bins=None, ring=False):\n \"\"\"\n Returns end-to-end distance scaling of a given polymer conformation.\n ..warning:: This method averages end-to-end scaling over all possible\n subchains of given length\n\n Parameters\n ----------\n\n data: Nx3 array\n bins: the same as in giveCpScaling\n\n \"\"\"\n data = np.asarray(data, float)\n N = data.shape[0]\n assert data.shape[1] == 3\n data = data.T\n\n if bins is None:\n bins = generate_bins(N)\n if ring:\n data = np.concatenate([data, data], axis=1)\n\n rads = [0.0 for i in range(len(bins))]\n for i in range(len(bins)):\n length = bins[i]\n if ring:\n rads[i] = np.mean(\n (np.sum((data[:, :N] - data[:, length : length + N]) ** 2, 0))\n )\n else:\n rads[i] = np.mean((np.sum((data[:, :-length] - data[:, length:]) ** 2, 0)))\n return np.array(bins), rads\n\n\ndef Rg2(data):\n \"\"\"\n Simply calculates gyration radius of a polymer chain.\n \"\"\"\n data = np.asarray(data)\n assert data.shape[1] == 3\n return np.mean((data - np.mean(data, axis=0)) ** 2) * 3\n\n\ndef Rg2_matrix(data):\n \"\"\"\n Uses dynamic programming and vectorizing to calculate Rg for each subchain of the polymer. 
\n Returns a matrix for which an element [i,j] is Rg of a subchain from i to j including i and j\n \"\"\"\n\n data = np.asarray(data, float)\n assert data.shape[1] == 3\n N = data.shape[0]\n data = np.concatenate([[[0, 0, 0]], data])\n\n coms = np.cumsum(data, 0) # cumulative sum of locations to calculate COM\n coms2 = np.cumsum(data ** 2, 0) # cumulative sum of locations^2 to calculate RG\n\n dists = np.abs(np.arange(N)[:, None] - np.arange(N)[None, :]) + 1\n coms2d = (-coms2[:-1, None, :] + coms2[None, 1::, :]) / dists[:, :, None]\n comsd = ((coms[:-1, None, :] - coms[None, 1:, :]) / dists[:, :, None]) ** 2\n sums = np.sum(coms2d - comsd, 2)\n np.fill_diagonal(sums, 0)\n mask = np.arange(N)[:, None] > np.arange(N)[None, :]\n sums[mask] = sums.T[mask]\n return sums\n\n\ndef ndarray_groupby_aggregate(\n df,\n ndarray_cols,\n aggregate_cols,\n value_cols=[],\n sample_cols=[],\n preset=\"sum\",\n ndarray_agg=lambda x: np.sum(x, axis=0),\n value_agg=lambda x: x.sum(),\n):\n \"\"\"\n A version of pd.groupby that is aware of numpy arrays as values of columns \n \n * aggregates columns ndarray_cols using ndarray_agg aggregator,\n * aggregates value_cols using value_agg aggregator,\n * takes the first element in sample_cols,\n * aggregates over aggregate_cols\n \n It has presets for sum, mean and nanmean. \n \"\"\"\n\n if preset == \"sum\":\n ndarray_agg = lambda x: np.sum(x, axis=0)\n value_agg = lambda x: x.sum()\n elif preset == \"mean\":\n ndarray_agg = lambda x: np.mean(x, axis=0)\n value_agg = lambda x: x.mean()\n elif preset == \"nanmean\":\n ndarray_agg = lambda x: np.nanmean(x, axis=0)\n value_agg = lambda x: x.mean()\n\n def combine_values(in_df):\n \"\"\"\n splits into ndarrays, 'normal' values, and samples;\n performs aggregation, and returns a Series\n \"\"\"\n average_arrs = pd.Series(\n index=ndarray_cols,\n data=[\n ndarray_agg([np.asarray(j) for j in in_df[i].values])\n for i in ndarray_cols\n ],\n )\n average_values = value_agg(in_df[value_cols])\n sample_values = in_df[sample_cols].iloc[0]\n agg_series = pd.concat([average_arrs, average_values, sample_values])\n return agg_series\n\n return df.groupby(aggregate_cols).apply(combine_values)\n\n\ndef streaming_ndarray_agg(\n in_stream,\n ndarray_cols,\n aggregate_cols,\n value_cols=[],\n sample_cols=[],\n chunksize=30000,\n add_count_col=False,\n divide_by_count=False,\n):\n \"\"\"\n Takes in_stream of dataframes\n \n Applies ndarray-aware groupby-sum or groupby-mean: treats ndarray_cols as numpy arrays, \n value_cols as normal values, for sample_cols takes the first element. \n \n Does groupby over aggregate_cols \n \n if add_count_col is True, adds column \"count\", if it's a string - adds column with add_count_col name \n\n if divide_by_counts is True, divides result by column \"count\". \n If it's a string, divides by divide_by_count column\n \n This function can be used for automatically aggregating P(s), R(s) etc. \n for a set of conformations that is so large that all P(s) won't fit in RAM,\n and when averaging needs to be done over so many parameters \n that for-loops are not an issue. Examples may include simulations in which sweep\n over many parameters has been performed. 
\n \n \"\"\"\n value_cols_orig = [i for i in value_cols]\n ndarray_cols, value_cols = list(ndarray_cols), list(value_cols)\n aggregate_cols, sample_cols = list(aggregate_cols), list(sample_cols)\n if add_count_col is not False:\n if add_count_col is True:\n add_count_col = \"count\"\n value_cols.append(add_count_col)\n\n def agg_one(dfs, aggregate):\n \"\"\"takes a list of DataFrames and old aggregate\n performs groupby and aggregation and returns new aggregate\"\"\"\n if add_count_col is not False:\n for i in dfs:\n i[add_count_col] = 1\n\n df = pd.concat(dfs + ([aggregate] if aggregate is not None else []), sort=False)\n aggregate = ndarray_groupby_aggregate(\n df,\n ndarray_cols=ndarray_cols,\n aggregate_cols=aggregate_cols,\n value_cols=value_cols,\n sample_cols=sample_cols,\n preset=\"sum\",\n )\n return aggregate.reset_index()\n\n aggregate = None\n cur = []\n count = 0\n for i in in_stream:\n cur.append(i)\n count += len(i)\n if count > chunksize:\n aggregate = agg_one(cur, aggregate)\n cur = []\n count = 0\n if len(cur) > 0:\n aggregate = agg_one(cur, aggregate)\n\n if divide_by_count is not False:\n if divide_by_count is True:\n divide_by_count = \"count\"\n for i in ndarray_cols + value_cols_orig:\n aggregate[i] = aggregate[i] / aggregate[divide_by_count]\n\n return aggregate\n\n\ndef kabsch_msd(P, Q):\n \"\"\"\n Calculates MSD between two vectors using Kabash alcorithm \n Borrowed from https://github.com/charnley/rmsd with some changes \n \n rmsd is licenced with a 2-clause BSD licence \n \n Copyright (c) 2013, Jimmy Charnley Kromann <[email protected]> & Lars Bratholm\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n 2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n \n \"\"\"\n P = P - np.mean(P, axis=0)\n Q = Q - np.mean(Q, axis=0)\n C = np.dot(np.transpose(P), Q)\n\n V, S, W = np.linalg.svd(C)\n d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0\n if d:\n S[-1] = -S[-1]\n V[:, -1] = -V[:, -1]\n\n # Create Rotation matrix U\n U = np.dot(V, W)\n dist = np.mean((np.dot(P, U) - Q) ** 2) * 3\n return dist\n\n\nkabsch_rmsd = kabsch_msd\n\n\ndef mutualSimplify(a, b, verbose=False):\n \"\"\"\n Ported here from openmmlib.\n\n Given two polymer rings, it attempts to reduce the number of monomers in each of\n them while preserving the linking between them. It does so by trying to remove\n monomers one-by-one. 
If no other bonds pass through the triangle formed by the 2\n old bonds and 1 new bond, it accepts removal of the monomer. It does so until no\n monomers in either of the rings can be removed.\n\n \"\"\"\n if verbose:\n print(\"Starting mutual simplification of polymers\")\n while True:\n la, lb = len(a), len(b)\n if verbose:\n print(len(a), len(b), \"before; \", end=\" \")\n a, b = _polymer_math.mutualSimplify(a, b)\n if verbose:\n print(len(a), len(b), \"after one; \", end=\" \")\n b, a = _polymer_math.mutualSimplify(b, a)\n if verbose:\n print(len(a), len(b), \"after two; \")\n\n if (len(a) == la) and (len(b) == lb):\n if verbose:\n print(\"Mutual simplification finished\")\n return a, b\n\n\ndef getLinkingNumber(data1, data2, simplify=True, randomOffset=True, verbose=False):\n \"\"\"\n Ported here from openmmlib as well.\n\n \"\"\"\n if simplify:\n data1, data2 = mutualSimplify(a=data1, b=data2, verbose=verbose)\n return _polymer_math.getLinkingNumber(data1, data2, randomOffset=randomOffset)\n\n\ndef calculate_cistrans(data, chains, chain_id=0, cutoff=5, pbc_box=False, box_size=None):\n \n \"\"\"\n Analysis of the territoriality of polymer chains from simulations, using the cis/trans ratio.\n Cis signal is computed for the marked chain ('chain_id') as amount of contacts of the chain with itself\n Trans signal is the total amount of trans contacts for the marked chain with other chains from 'chains' \n (and with all the replicas for 'pbc_box'=True)\n \n \"\"\"\n if data.shape[1] != 3:\n raise ValueError(\"Incorrect polymer data shape. Must be Nx3.\")\n\n if np.isnan(data).any():\n raise RuntimeError(\"Data contains NANs\")\n \n N = len(data)\n \n if pbc_box == True:\n if box_size is None:\n raise ValueError(\"Box size is not given\")\n else:\n data_scaled = np.mod(data, box_size)\n \n else:\n box_size = None\n data_scaled = np.copy(data)\n \n if chains is None:\n chains = [[0, N]]\n chain_id = 0\n\n chain_start = chains[chain_id][0]\n chain_end = chains[chain_id][1]\n \n # all contact pairs available in the scaled data\n tree = ckdtree.cKDTree(data_scaled, boxsize=box_size)\n pairs = tree.query_pairs(cutoff, output_type=\"ndarray\")\n \n # total number of contacts of the marked chain:\n # each contact is counted twice if both monomers belong to the marked chain and \n # only once if just one of the monomers in the pair belong to the marked chain\n all_signal = len(pairs[pairs<chain_end])-len(pairs[pairs<chain_start])\n \n # contact pairs of the marked chain with itself\n tree = ckdtree.cKDTree(data[chain_start:chain_end], boxsize=None)\n pairs = tree.query_pairs(cutoff, output_type=\"ndarray\")\n \n # doubled number of contacts of the marked chain with itself (i.e. cis signal)\n cis_signal = 2*len(pairs)\n \n assert all_signal >= cis_signal\n \n trans_signal = all_signal - cis_signal\n \n return cis_signal, trans_signal\n",
"import simtk.openmm as openmm\nimport numpy as np\n\n\n\"\"\"\nThis is a collection of old forces that are likely no longer used \nThese were a part of openmmlib before April 2019, but were removed during spring cleaning. \n\nThey should be importable and may or may not just work. It should not be difficult to make them compatible \nwith the new library. \n\n\"\"\"\n\n\ndef minimizing_repulsive_Force(sim_object):\n \"\"\"\n Adds a special force which could be use for very efficient resolution of crossings\n Use this force to perform (local) energy minimization if your monomers are all \"on top of each other\"\n E.g. if you start your simulations with fractional brownyan motion with h < 0.4\n Then switch to a normal force, and re-do energy minimization. \n \"\"\"\n radius = sim_object.conlen * 1.3\n\n nbCutOffDist = radius * 1.0\n repul_energy = \"1000* REPe * (1-r/REPr)^2 \"\n\n sim_object.force_dict[\"Nonbonded_minimizing_Force\"] = openmm.CustomNonbondedForce(\n repul_energy\n )\n repulforceGr = sim_object.force_dict[\"Nonbonded_minimizing_Force\"]\n repulforceGr.addGlobalParameter(\"REPe\", sim_object.kT)\n repulforceGr.addGlobalParameter(\"REPr\", sim_object.kT)\n for _ in range(sim_object.N):\n repulforceGr.addParticle(())\n repulforceGr.setCutoffDistance(nbCutOffDist)\n\n\ndef fix_particles_Z_coordinate(\n sim_object, particles, zCoordinates, k=0.3, useOtherAxis=\"z\", mode=\"abs\", gap=None\n):\n \"\"\"Limits position of a set of particles in z coordinate\n\n Parameters\n ----------\n particles : list\n List of particles to be fixed.\n zCoordinates : list, or tuple of length 2\n If has length of particles, then should contain all Z coordinates\n If has length 2, then contains z coordinates of first and\n Nth particles, and the rest is approximated linearly.\n k : float, optional\n Strength of attraction, measured in kT/(bondlength)\n useOtherAxis : \"x\",\"y\" or \"z\", optional\n Apply the same in the other dimension\n gap: float or None\n if gap is not None, then the force creates a gap of the width \"gap\" \n (+- 0.5 * gap) during which the force is not acting. The force starts acting \n after the particle moved 0.5 * gap. 
\n \"\"\"\n\n if not len(particles) == len(zCoordinates):\n assert len(zCoordinates) == 2\n start, stop = tuple(zCoordinates)\n zCoordinates = []\n for par in particles:\n zCoordinates.append(\n start + float(stop - start) * (par / float(sim_object.N))\n )\n\n if (mode == \"abs\") and (gap is None):\n zFixForce = openmm.CustomExternalForce(\n \"ZFIXk * (sqrt((%s - ZFIXr0)^2 + ZFIXa^2) - ZFIXa)\" % (useOtherAxis,)\n )\n zFixForce.addGlobalParameter(\"ZFIXk\", k * sim_object.kT / (sim_object.conlen))\n elif (mode == \"abs\") and (gap is not None):\n zFixForce = openmm.CustomExternalForce(\n \"ZFIXk * step(%s - ZFIXr0 - ZFIXgap * 0.5) *\"\n \" (sqrt((%s - ZFIXr0 - ZFIXgap * 0.5)^2 + ZFIXa^2) - ZFIXa) + \"\n \"ZFIXk * step(-%s + ZFIXr0 - ZFIXgap * 0.5) * \"\n \"(sqrt((-%s + ZFIXr0 - ZFIXgap * 0.5)^2 + ZFIXa^2) - ZFIXa)\"\n % (useOtherAxis, useOtherAxis, useOtherAxis, useOtherAxis)\n )\n\n zFixForce.addGlobalParameter(\"ZFIXk\", k * sim_object.kT / (sim_object.conlen))\n zFixForce.addGlobalParameter(\"ZFIXgap\", sim_object.conlen * gap)\n\n elif (mode == \"quadratic\") and (gap is None):\n zFixForce = openmm.CustomExternalForce(\n \"ZFIXk * ((%s - ZFIXr0)^2)\" % (useOtherAxis,)\n )\n zFixForce.addGlobalParameter(\n \"ZFIXk\", k * sim_object.kT / (sim_object.conlen * sim_object.conlen)\n )\n elif (mode == \"quadratic\") and (gap is not None):\n zFixForce = openmm.CustomExternalForce(\n \"ZFIXk * (step(%s - ZFIXr0 - ZFIXgap * 0.5) * \"\n \"(%s - ZFIXr0 - ZFIXgap * 0.5)^2 + \"\n \"step(-%s + ZFIXr0 - ZFIXgap * 0.5) * \"\n \"(-%s + ZFIXr0 - ZFIXgap * 0.5)^2)\"\n % (useOtherAxis, useOtherAxis, useOtherAxis, useOtherAxis)\n )\n\n zFixForce.addGlobalParameter(\n \"ZFIXk\", k * sim_object.kT / (sim_object.conlen * sim_object.conlen)\n )\n zFixForce.addGlobalParameter(\"ZFIXgap\", sim_object.conlen * gap)\n else:\n raise RuntimeError(\"Not implemented\")\n\n zFixForce.addPerParticleParameter(\"ZFIXr0\")\n\n zFixForce.addGlobalParameter(\"ZFIXa\", 0.05 * sim_object.conlen)\n for par, zcoor in zip(particles, zCoordinates):\n zFixForce.addParticle(int(par), [float(zcoor)])\n sim_object.force_dict[\"fixZCoordinates\"] = zFixForce\n\n\ndef lamina_attraction(sim_object, width=1, depth=1, r=None):\n \"\"\"Attracts one domain to the lamina. Infers radius\n from spherical confinement, that has to be initialized already.\n\n Parameters\n ----------\n\n width : float, optional\n Width of attractive layer next to the lamina, nm.\n depth : float, optional\n Depth of attractive potential in kT\n note- depth < 0 for attractive! >0 is repulsive\n r : float, optional\n Radius of an attractive cage. If not specified, inferred\n from previously defined spherical potential.\n \"\"\"\n\n sim_object.metadata[\"laminaAttraction\"] = repr(\n {\"width\": width, \"depth\": depth, \"r\": r}\n )\n laminaForce = openmm.CustomExternalForce(\n \"step(LAMr-LAMaa + LAMwidth) * step(LAMaa + LAMwidth - LAMr) \"\n \"* LAMdepth * (LAMr-LAMaa + LAMwidth) * (LAMaa + LAMwidth - LAMr) \"\n \"/ (LAMwidth * LAMwidth);\"\n \"LAMr = sqrt(x^2 + y^2 + z^2 + LAMtt^2)\"\n )\n sim_object.force_dict[\"Lamina attraction\"] = laminaForce\n\n # adding all the particles on which force acts\n for i in range(sim_object.N):\n if sim_object.domains[i] > 0.5:\n laminaForce.addParticle(i, [])\n if r is None:\n try:\n r = sim_object.sphericalConfinementRadius\n except:\n raise ValueError(\n \"No spherical confinement radius defined\"\n \" yet. 
Apply spherical confinement first!\"\n )\n if sim_object.verbose == True:\n print(\"Lamina attraction added with r = %d\" % r)\n\n laminaForce.addGlobalParameter(\"LAMaa\", r * nm)\n laminaForce.addGlobalParameter(\"LAMwidth\", width * nm)\n laminaForce.addGlobalParameter(\"LAMdepth\", depth * sim_object.kT)\n laminaForce.addGlobalParameter(\"LAMtt\", 0.01 * nm)\n\n\ndef useDomains(sim_object, domains=None, filename=None):\n \"\"\"\n Sets up domains for the simulation.\n Also, pickles domain vector to \"domains.dat\".\n\n Parameters\n ----------\n\n domains : boolean array or None\n N-long array with domain vector\n filename : str or None\n Filename with pickled domain vector\n\n \"\"\"\n\n if domains is not None:\n sim_object.domains = domains\n\n elif filename is not None:\n sim_object.domains = pickle.load(open(domains))\n else:\n sim_object.exit(\"You have to specify domain vector or filename!\")\n\n if len(sim_object.domains) != sim_object.N:\n sim_object._exitProgram(\"Wrong domain lengths\")\n\n pickle.dump(\n sim_object.domains, open(os.path.join(sim_object.folder, \"domains.dat\"), \"wb\")\n )\n\n\ndef lennard_jones_force(\n sim_object,\n cutoff=2.5,\n domains=False,\n epsilonRep=0.24,\n epsilonAttr=0.27,\n blindFraction=(-1),\n sigmaRep=None,\n sigmaAttr=None,\n):\n\n \"\"\"\n Adds a lennard-jones force, that allows for mutual attraction.\n This is the slowest force out of all repulsive.\n\n .. note ::\n This is the only force that allows for so-called \"exceptions'.\n Exceptions allow you to change parameters of the force\n for a specific pair of particles.\n This can be used to create short-range attraction between\n pairs of particles.\n See manual for Openmm.NonbondedForce.addException.\n\n Parameters\n ----------\n\n cutoff : float, optional\n Radius cutoff value. Default is good.\n domains : bool, optional\n Use domains, defined by\n :py:func:'setDomains <Simulation.setDomains>'\n epsilonRep : float, optional\n Epsilon (attraction strength) for LJ-force for all particles\n (except for domain) in kT\n epsilonAttr : float, optional\n Epsilon for attractive domain (if domains are used) in kT\n blindFraction : float, 0<x<1\n Fraction of particles that are \"transparent\" -\n used here instead of truncation\n sigmaRep, sigmaAttr: float, optional\n Radius of particles in the LJ force. 
For advanced fine-tuning.\n\n \"\"\"\n sim_object.metadata[\"LennardJonesForce\"] = repr(\n {\n \"cutoff\": cutoff,\n \"domains\": domains,\n \"epsilonRep\": epsilonRep,\n \"epsilonAttr\": epsilonAttr,\n \"blindFraction\": blindFraction,\n }\n )\n\n if blindFraction > 0.99:\n sim_object._exitProgram(\n \"why do you need this force without particles???\"\n \" set blindFraction between 0 and 1\"\n )\n if (sigmaRep is None) and (sigmaAttr is None):\n sigmaAttr = sigmaRep = sim_object.conlen\n else:\n sigmaAttr = sigmaAttr * sim_object.conlen\n sigmaRep = sigmaRep * sim_object.conlen\n\n epsilonRep = epsilonRep * sim_object.kT\n epsilonAttr = epsilonAttr * sim_object.kT\n\n nbCutOffDist = sim_object.conlen * cutoff\n sim_object.epsilonRep = epsilonRep\n repulforce = openmm.NonbondedForce()\n\n sim_object.force_dict[\"Nonbonded\"] = repulforce\n for i in range(sim_object.N):\n particleParameters = [0.0, 0.0, 0.0]\n\n if np.random.random() > blindFraction:\n particleParameters[1] = sigmaRep\n particleParameters[2] = epsilonRep\n\n if domains == True:\n if sim_object.domains[i] != 0:\n particleParameters[1] = sigmaAttr\n particleParameters[2] = epsilonAttr\n\n repulforce.addParticle(*particleParameters)\n\n repulforce.setCutoffDistance(nbCutOffDist)\n\n\ndef soft_lennard_jones_force(sim_object, epsilon=0.42, trunc=2, cutoff=2.5):\n \"\"\"A softened version of lennard-Jones force.\n Now we're moving to polynomial forces, so go there instead.\n \"\"\"\n\n nbCutOffDist = sim_object.conlen * cutoff\n\n repul_energy = (\n \"step(REPcut2 - REPU) * REPU +\"\n \" step(REPU - REPcut2) * REPcut2 * (1 + tanh(REPU/REPcut2 - 1));\"\n \"REPU = 4 * REPe * ((REPsigma/r2)^12 - (REPsigma/r2)^6);\"\n \"r2 = (r^10. + (REPsigma03)^10.)^0.1\"\n )\n sim_object.force_dict[\"Nonbonded\"] = openmm.CustomNonbondedForce(repul_energy)\n repulforceGr = sim_object.force_dict[\"Nonbonded\"]\n repulforceGr.addGlobalParameter(\"REPe\", sim_object.kT * epsilon)\n\n repulforceGr.addGlobalParameter(\"REPsigma\", sim_object.conlen)\n repulforceGr.addGlobalParameter(\"REPsigma03\", 0.3 * sim_object.conlen)\n repulforceGr.addGlobalParameter(\"REPcut\", sim_object.kT * trunc)\n repulforceGr.addGlobalParameter(\"REPcut2\", 0.5 * trunc * sim_object.kT)\n\n for _ in range(sim_object.N):\n repulforceGr.addParticle(())\n\n repulforceGr.setCutoffDistance(nbCutOffDist)\n\n\ndef attractive_interaction(sim_object, i, j, epsilon, sigma=None, length=3):\n \"\"\"Adds attractive short-range interaction of strength epsilon\n between particles i,j and a few neighboring particles\n requires :py:func:'LennardJones Force<Simulation.addLennardJonesForce>'\n\n Parameters\n ----------\n i,j : int\n Interacting particles\n epsilon : float\n LJ strength\n sigma : float, optional\n LJ length. 
If you increase it past 1.5, note the cutoff!\n length : int, optional, default = 3\n Number of particles around i,j that also attract each other\n\n \"\"\"\n\n if type(sim_object.force_dict[\"Nonbonded\"]) != openmm.NonbondedForce:\n sim_object.exit(\n \"Cannot add interactions\" \" without Lennard-Jones nonbonded force\"\n )\n\n if sigma is None:\n sigma = 1.1 * sim_object.conlen\n epsilon = epsilon * units.kilocalorie_per_mole\n if (min(i, j) < length) or (max(i, j) > sim_object.N - length):\n print(\"!!!!!!!!!bond with %d and %d is out of range!!!!!\" % (i, j))\n return\n repulforce = sim_object.force_dict[\"Nonbonded\"]\n for t1 in range(\n int(np.ceil(i - length / 2)), int(np.ceil(i + (length - length / 2)))\n ):\n for t2 in range(\n int(np.ceil(j - length / 2)), int(np.ceil(j + (length - length / 2)))\n ):\n repulforce.addException(t1, t2, 0, sigma, epsilon, True)\n if sim_object.verbose == True:\n print(\"Exception added between\" \" particles %d and %d\" % (t1, t2))\n\n for tt in range(i - length, i + length):\n repulforce.setParticleParameters(\n tt, 0, sim_object.conlen, sim_object.epsilonRep\n )\n for tt in range(j - length, j + length):\n repulforce.setParticleParameters(\n tt, 0, sim_object.conlen, sim_object.epsilonRep\n )\n\n\ndef gravity(sim_object, k=0.1, cutoff=None):\n \"\"\"adds force pulling downwards in z direction\n When using cutoff, acts only when z>cutoff\"\"\"\n sim_object.metadata[\"gravity\"] = repr({\"k\": k, \"cutoff\": cutoff})\n if cutoff is None:\n gravity = openmm.CustomExternalForce(\"kG * z\")\n else:\n gravity = openmm.CustomExternalForce(\"kG * (z - cutoffG) * step(z - cutoffG)\")\n gravity.addGlobalParameter(\"cutoffG\", cutoff * nm)\n gravity.addGlobalParameter(\"kG\", k * sim_object.kT / (nm))\n\n for i in range(sim_object.N):\n gravity.addParticle(i, [])\n sim_object.force_dict[\"Gravity\"] = gravity\n\n\ndef exclude_sphere(sim_object, r=5, position=(0, 0, 0)):\n \"\"\"Excludes particles from a sphere of radius r at certain position.\n \"\"\"\n\n spherForce = openmm.CustomExternalForce(\n \"step(EXaa-r) * EXkb * (sqrt((r-EXaa)*(r-EXaa) + EXt*EXt) - EXt) ;\"\n \"r = sqrt((x-EXx)^2 + (y-EXy)^2 + (z-EXz)^2 + EXtt^2)\"\n )\n sim_object.force_dict[\"ExcludeSphere\"] = spherForce\n\n for i in range(sim_object.N):\n spherForce.addParticle(i, [])\n\n sim_object.sphericalConfinementRadius = r\n if sim_object.verbose == True:\n print(\"Spherical confinement with radius = %lf\" % r)\n # assigning parameters of the force\n spherForce.addGlobalParameter(\"EXkb\", 2 * sim_object.kT / nm)\n spherForce.addGlobalParameter(\"EXaa\", (r - 1.0 / 3) * nm)\n spherForce.addGlobalParameter(\"EXt\", (1.0 / 3) * nm / 10.0)\n spherForce.addGlobalParameter(\"EXtt\", 0.01 * nm)\n spherForce.addGlobalParameter(\"EXx\", position[0] * sim_object.conlen)\n spherForce.addGlobalParameter(\"EXy\", position[1] * sim_object.conlen)\n spherForce.addGlobalParameter(\"EXz\", position[2] * sim_object.conlen)\n\n\ndef attraction_to_the_core(sim_object, k, r0, coreParticles=[]):\n\n \"\"\"Attracts a subset of particles to the core,\n repells the rest from the core\"\"\"\n\n attractForce = openmm.CustomExternalForce(\n \" COREk * ((COREr - CORErn) ^ 2) ; \" \"COREr = sqrt(x^2 + y^2 + COREtt^2)\"\n )\n attractForce.addGlobalParameter(\n \"COREk\", k * sim_object.kT / (sim_object.conlen * sim_object.conlen)\n )\n attractForce.addGlobalParameter(\"CORErn\", r0 * sim_object.conlen)\n attractForce.addGlobalParameter(\"COREtt\", 0.001 * sim_object.conlen)\n 
sim_object.force_dict[\"CoreAttraction\"] = attractForce\n for i in coreParticles:\n attractForce.addParticle(int(i), [])\n\n if r0 > 0.1:\n\n excludeForce = openmm.CustomExternalForce(\n \" CORE2k * ((CORE2r - CORE2rn) ^ 2) * step(CORE2rn - CORE2r) ;\"\n \"CORE2r = sqrt(x^2 + y^2 + CORE2tt^2)\"\n )\n excludeForce.addGlobalParameter(\n \"CORE2k\", k * sim_object.kT / (sim_object.conlen * sim_object.conlen)\n )\n excludeForce.addGlobalParameter(\"CORE2rn\", r0 * sim_object.conlen)\n excludeForce.addGlobalParameter(\"CORE2tt\", 0.001 * sim_object.conlen)\n sim_object.force_dict[\"CoreExclusion\"] = excludeForce\n for i in range(sim_object.N):\n excludeForce.addParticle(i, [])\n\n\ndef create_walls(sim_object, left=None, right=None, k=0.5):\n \"creates walls at x = left, x = right, x direction only\"\n if left is None:\n left = sim_object.data[0][0] + 1.0 * nm\n else:\n left = 1.0 * nm * left\n if right is None:\n right = sim_object.data[-1][0] - 1.0 * nm\n else:\n right = 1.0 * nm * right\n\n if sim_object.verbose == True:\n print(\"left wall created at \", left / (1.0 * nm))\n print(\"right wall created at \", right / (1.0 * nm))\n\n extforce2 = openmm.CustomExternalForce(\n \" WALLk * (sqrt((x - WALLright) * (x-WALLright) + WALLa * WALLa ) - WALLa) * step(x-WALLright) \"\n \"+ WALLk * (sqrt((x - WALLleft) * (x-WALLleft) + WALLa * WALLa ) - WALLa) * step(WALLleft - x) \"\n )\n extforce2.addGlobalParameter(\"WALLk\", k * sim_object.kT / nm)\n extforce2.addGlobalParameter(\"WALLleft\", left)\n extforce2.addGlobalParameter(\"WALLright\", right)\n extforce2.addGlobalParameter(\"WALLa\", 1 * nm)\n for i in range(sim_object.N):\n extforce2.addParticle(i, [])\n sim_object.force_dict[\"WALL Force\"] = extforce2\n\n\ndef spherical_well(sim_object, r=10, depth=1):\n \"\"\"pushes particles towards a boundary\n of a cylindrical well to create uniform well coverage\"\"\"\n\n extforce4 = openmm.CustomExternalForce(\n \"WELLdepth * (((sin((WELLr * 3.141592 * 0.5) / WELLwidth)) ^ 10) -1) * step(-WELLr + WELLwidth);\"\n \"WELLr = sqrt(x^2 + y^2 + z^2 + WELLtt^2)\"\n )\n sim_object.force_dict[\"Well attraction\"] = extforce4\n\n # adding all the particles on which force acts\n for i in range(sim_object.N):\n if sim_object.domains[i] > 0.5:\n extforce4.addParticle(i, [])\n if r is None:\n try:\n r = sim_object.sphericalConfinementRadius * 0.5\n except:\n exit(\n \"No spherical confinement radius defined yet.\"\n \" Apply spherical confinement first!\"\n )\n if sim_object.verbose == True:\n print(\"Well attraction added with r = %d\" % r)\n\n # assigning parameters of the force\n extforce4.addGlobalParameter(\"WELLwidth\", r * nm)\n extforce4.addGlobalParameter(\"WELLdepth\", depth * sim_object.kT)\n extforce4.addGlobalParameter(\"WELLtt\", 0.01 * nm)\n\n\n## from class \"yeast simulation\"\n\n\ndef add_nucleolus(sim_object, k=1, r=None):\n \"method\"\n if r is None:\n r = sim_object.sphericalConfinementRadius\n\n extforce3 = openmm.CustomExternalForce(\n \"step(r-NUCaa) * NUCkb * (sqrt((r-NUCaa)*(r-NUCaa) + NUCt*NUCt) - NUCt);\"\n \"r = sqrt(x^2 + y^2 + (z + NUCoffset )^2 + NUCtt^2)\"\n )\n\n sim_object.force_dict[\"NucleolusConfinement\"] = extforce3\n # adding all the particles on which force acts\n if sim_object.verbose == True:\n print(\"NUCleolus confinement from radius = %lf\" % r)\n # assigning parameters of the force\n extforce3.addGlobalParameter(\"NUCkb\", k * sim_object.kT / nm)\n extforce3.addGlobalParameter(\"NUCaa\", (r - 1.0 / k) * nm * 1.75)\n extforce3.addGlobalParameter(\"NUCoffset\", (r - 
1.0 / k) * nm * 1.1)\n extforce3.addGlobalParameter(\"NUCt\", (1.0 / k) * nm / 10.0)\n extforce3.addGlobalParameter(\"NUCtt\", 0.01 * nm)\n for i in range(sim_object.N):\n extforce3.addParticle(i, [])\n\n\ndef add_lamina_attraction(sim_object, width=1, depth=1, r=None, particles=None):\n extforce3 = openmm.CustomExternalForce(\n \"-1 * step(LAMr-LAMaa + LAMwidth) * step(LAMaa + LAMwidth - LAMr) * LAMdepth\"\n \"* abs( (LAMr-LAMaa + LAMwidth) * (LAMaa + LAMwidth - LAMr)) / (LAMwidth * LAMwidth);\"\n \"LAMr = sqrt(x^2 + y^2 + z^2 + LAMtt^2)\"\n )\n sim_object.force_dict[\"Lamina attraction\"] = extforce3\n\n # re-defines lamina attraction based on particle index instead of domains.\n\n # adding all the particles on which force acts\n if particles is None:\n for i in range(sim_object.N):\n extforce3.addParticle(i, [])\n if sim_object.verbose == True:\n print(\"particle %d laminated! \" % i)\n\n else:\n for i in particles:\n extforce3.addParticle(i, [])\n if sim_object.verbose == True:\n print(\"particle %d laminated! \" % i)\n\n if r is None:\n try:\n r = sim_object.sphericalConfinementRadius\n except:\n exit(\n \"No spherical confinement radius defined yet.\"\n \"Apply spherical confinement first!\"\n )\n\n if sim_object.verbose == True:\n print(\"Lamina attraction added with r = %d\" % r)\n\n # assigning parameters of the force\n extforce3.addGlobalParameter(\"LAMaa\", r * nm)\n extforce3.addGlobalParameter(\"LAMwidth\", width * nm)\n extforce3.addGlobalParameter(\"LAMdepth\", depth * sim_object.kT)\n extforce3.addGlobalParameter(\"LAMtt\", 0.01 * nm)\n\n\n# old energy minimization\n\n\ndef old_energy_minimization(\n sim_object, stepsPerIteration=100, maxIterations=1000, failNotConverged=True\n):\n \"\"\"Runs system at smaller timestep and higher collision\n rate to resolve possible conflicts.\n\n Now we're moving towards local energy minimization,\n this is here for backwards compatibility.\n \"\"\"\n\n print(\"Performing energy minimization\")\n sim_object._apply_forces()\n if (maxIterations is True) or (maxIterations is False):\n raise ValueError(\n \"Please stop using the old notation and read the new energy minimization code\"\n )\n if (failNotConverged is not True) and (failNotConverged is not False):\n raise ValueError(\n \"Please stop using the old notation and read the new energy minimization code\"\n )\n\n def_step = sim_object.integrator.getStepSize()\n def_fric = sim_object.integrator.getFriction()\n\n def minimizeDrop():\n drop = 10.0\n for dummy in range(maxIterations):\n if drop < 1:\n drop = 1.0\n if drop > 10000:\n raise RuntimeError(\"Timestep too low. 
Perhaps, \" \"something is wrong!\")\n\n sim_object.integrator.setStepSize(def_step / float(drop))\n sim_object.integrator.setFriction(def_fric * drop)\n # sim_object.reinitialize()\n numAttempts = 3\n maxEk = 4 * sim_object.kT\n for attempt in range(numAttempts):\n a = sim_object.do_block(stepsPerIteration, save=False)\n # sim_object.initVelocities()\n if a[\"eK\"] > maxEk:\n drop *= 2\n print(\"Timestep decreased {0}\".format(1.0 / drop))\n sim_object.init_velocities()\n break\n if attempt == numAttempts - 1:\n if drop == 1.0:\n return 0\n drop /= 2\n print(\"Timestep decreased by {0}\".format(drop))\n sim_object.init_velocities()\n return -1\n\n if failNotConverged and (minimizeDrop() == -1):\n raise RuntimeError(\n \"Reached maximum number of iterations and still not converged\\n\"\n \"increase maxIterations or set failNotConverged=False\"\n )\n sim_object.integrator.setFriction(def_fric)\n sim_object.integrator.setStepSize(def_step)\n # sim_object.reinitialize()\n print(\"Finished energy minimization\")\n\n\ndef check_connectivity(sim_object, newcoords=None, maxBondSizeMultipler=10):\n \"\"\" checks connectivity of all harmonic (& abslim) bonds\n can be passed to doBlock as a checkFunction, in which case it will also trigger re-initialization\n to modify the maximum bond size multipler, pass this function to doBlock as, \n e.g. doBlock( 100,checkFunctions = [lambda x:a.checkConnectivity(x,6)])\n \"\"\"\n\n if not hasattr(sim_object, \"bondLengths\"):\n raise ValueError(\n \"must use either harmonic or abs bonds to use checkConnectivty\"\n )\n\n if newcoords == None:\n newcoords = sim_object.get_data()\n printPositiveResult = True\n else:\n printPositiveResult = False\n\n # sim_object.bondLengths is a list of lists (see above) [..., [int(i), int(j), float(distance), float(bondSize)], ...]\n bondArray = np.array(sim_object.bondLengths)\n bondDists = np.sqrt(\n np.sum(\n (\n newcoords[np.array(bondArray[:, 0], dtype=int)]\n - newcoords[np.array(bondArray[:, 1], dtype=int)]\n )\n ** 2,\n axis=1,\n )\n )\n bondDistsSorted = np.sort(bondDists)\n if (bondDists > (bondArray[:, 2] + maxBondSizeMultipler * bondArray[:, 3])).any():\n isConnected = False\n print(\"!! connectivity check failed !!\")\n print(\"median bond size is \", np.median(bondDists))\n print(\"longest 10 bonds are\", bondDistsSorted[-10:])\n\n else:\n isConnected = True\n if printPositiveResult:\n print(\"connectivity check passed.\")\n print(\"median bond size is \", np.median(bondDists))\n print(\"longest 10 bonds are\", bondDistsSorted[-10:])\n\n return isConnected\n"
] | [
[
"numpy.dot",
"numpy.asarray",
"numpy.cumsum",
"numpy.concatenate",
"numpy.mean",
"numpy.fill_diagonal",
"numpy.searchsorted",
"numpy.nanmean",
"numpy.linalg.svd",
"numpy.arange",
"numpy.linalg.det",
"numpy.ceil",
"numpy.copy",
"numpy.diff",
"pandas.concat",
"numpy.log",
"numpy.isnan",
"numpy.log10",
"scipy.spatial.ckdtree.cKDTree",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.sort",
"numpy.mod"
],
[
"numpy.random.random",
"numpy.median",
"numpy.sort",
"numpy.ceil",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
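The polymer_analyses module in the row above pairs cKDTree-based contact finding with log-binned P(s) and Rg^2(s) aggregation. A minimal usage sketch, assuming polychrom is installed and importable, with a Gaussian random walk standing in for a real conformation:

import numpy as np
from polychrom import polymer_analyses

# toy conformation: a 1000-monomer random walk in 3D
conf = np.cumsum(np.random.randn(1000, 3), axis=0)

# k x 2 array of monomer index pairs closer than the contact radius
contacts = polymer_analyses.calculate_contacts(conf, cutoff=1.7)

# contact probability scaling P(s): geometric bin mids and per-bin contact frequency
mids, cp = polymer_analyses.contact_scaling(conf, cutoff=1.7)

# mean squared gyration radius of subchains, Rg^2(s), on auto-generated log-spaced bins
bins, rg2 = polymer_analyses.Rg2_scaling(conf)
print(contacts.shape, len(mids), len(bins))

The returned (mids, cp) and (bins, rg2) pairs are ready to plot on log-log axes, which is how the module docstring intends them to be used.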
lemoner20/tensorlayer | [
"69bd591f247b4a67f8968bd29c3660b22dbffae4"
] | [
"example/tutorial_frozenlake_dqn.py"
] | [
"import gym, random, time\nimport numpy as np\nimport tensorflow as tf\nimport tensorlayer as tl\nfrom tensorlayer.layers import *\nimport matplotlib.pyplot as plt\n\n\"\"\" Q-Network Q(a, s) - TD Learning, Off-Policy, e-Greedy Exploration (GLIE)\n\nQ(S, A) <- Q(S, A) + alpha * (R + lambda * Q(newS, newA) - Q(S, A))\ndelta_w = R + lambda * Q(newS, newA)\n\nSee David Silver RL Tutorial Lecture 5 - Q-Learning for more details.\n\nEN: https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0#.5m3361vlw\nCN: https://zhuanlan.zhihu.com/p/25710327\n\nNote: Policy Network has been proved to be better than Q-Learning, see tutorial_atari_pong.py\n\"\"\"\n## The FrozenLake v0 environment\n# https://gym.openai.com/envs/FrozenLake-v0\n# The agent controls the movement of a character in a grid world. Some tiles of\n# the grid are walkable, and others lead to the agent falling into the water.\n# Additionally, the movement direction of the agent is uncertain and only partially\n# depends on the chosen direction. The agent is rewarded for finding a walkable\n# path to a goal tile.\n# SFFF (S: starting point, safe)\n# FHFH (F: frozen surface, safe)\n# FFFH (H: hole, fall to your doom)\n# HFFG (G: goal, where the frisbee is located)\n# The episode ends when you reach the goal or fall in a hole. You receive a reward\n# of 1 if you reach the goal, and zero otherwise.\nenv = gym.make('FrozenLake-v0')\n\ndef to_one_hot(i, n_classes=None):\n a = np.zeros(n_classes, 'uint8')\n a[i] = 1\n return a\n\nrender = False # display the game environment\nrunning_reward = None\n\ntf.reset_default_graph()\n## Define Q-network q(a,s) that ouput the rewards of 4 actions by given state, i.e. Action-Value Function.\n# 4x4 grid can be represented by one-hot vector with 16 integers.\ninputs = tf.placeholder(shape=[1, 16], dtype=tf.float32)\nnet = InputLayer(inputs, name='observation')\nnet = DenseLayer(net, n_units=4, act=tf.identity,\n W_init=tf.random_uniform_initializer(0, 0.01), b_init=None, name='q_a_s')\ny = net.outputs # action-value / rewards of 4 actions\npredict = tf.argmax(y, 1) # chose action greedily with reward. in Q-Learning, policy is greedy, so we use \"max\" to select the next action.\n\n## Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.\nnextQ = tf.placeholder(shape=[1, 4], dtype=tf.float32)\nloss = tl.cost.mean_squared_error(nextQ, y, is_mean=False) # tf.reduce_sum(tf.square(nextQ - y))\ntrain_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)\n\n## Set learning parameters\nlambd = .99 # decay factor\ne = 0.1 # e-Greedy Exploration, the larger the more random\nnum_episodes = 10000\nwith tf.Session() as sess:\n tl.layers.initialize_global_variables(sess)\n for i in range(num_episodes):\n ## Reset environment and get first new observation\n episode_time = time.time()\n s = env.reset() # observation is state, integer 0 ~ 15\n rAll = 0\n for j in range(99): # step index, maximum step is 99\n if render: env.render()\n ## Choose an action by greedily (with e chance of random action) from the Q-network\n a, allQ = sess.run([predict, y], feed_dict={inputs : [to_one_hot(s, 16)]})\n ## e-Greedy Exploration !!! 
sample random action\n if np.random.rand(1) < e:\n a[0] = env.action_space.sample()\n ## Get new state and reward from environment\n s1, r, d, _ = env.step(a[0])\n ## Obtain the Q' values by feeding the new state through our network\n Q1 = sess.run(y, feed_dict={inputs : [to_one_hot(s1, 16)]})\n ## Obtain maxQ' and set our target value for chosen action.\n maxQ1 = np.max(Q1) # in Q-Learning, policy is greedy, so we use \"max\" to select the next action.\n targetQ = allQ\n targetQ[0, a[0]] = r + lambd * maxQ1\n ## Train network using target and predicted Q values\n # it is not real target Q value, it is just an estimation,\n # but check the Q-Learning update formula:\n # Q'(s,a) <- Q(s,a) + alpha(r + lambd * maxQ(s',a') - Q(s, a))\n # minimizing |r + lambd * maxQ(s',a') - Q(s, a)|^2 equal to force\n # Q'(s,a) ≈ Q(s,a)\n _ = sess.run(train_op, {inputs : [to_one_hot(s, 16)], nextQ : targetQ})\n rAll += r\n s = s1\n ## Reduce chance of random action if an episode is done.\n if d == True:\n e = 1./((i/50) + 10) # reduce e, GLIE: Greey in the limit with infinite Exploration\n break\n\n ## Note that, the rewards here with random action\n running_reward = rAll if running_reward is None else running_reward * 0.99 + rAll * 0.01\n print(\"Episode [%d/%d] sum reward:%f running reward:%f took:%.5fs %s\" %\n (i, num_episodes, rAll, running_reward, time.time()-episode_time, '' if rAll == 0 else ' !!!!!!!!'))\n"
] | [
[
"tensorflow.random_uniform_initializer",
"tensorflow.placeholder",
"numpy.max",
"tensorflow.reset_default_graph",
"tensorflow.train.GradientDescentOptimizer",
"numpy.random.rand",
"tensorflow.Session",
"tensorflow.argmax",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
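The tutorial above learns Q(s, a) with a one-layer network, but the TD update in its docstring is exactly tabular Q-learning. A sketch of the tabular version under the same old gym reset/step API the script uses (alpha and the episode count are illustrative choices, not values taken from the tutorial):

import gym
import numpy as np

env = gym.make('FrozenLake-v0')
Q = np.zeros((env.observation_space.n, env.action_space.n))
alpha, lambd, e = 0.85, 0.99, 0.1  # step size, decay factor, exploration rate

for i in range(2000):
    s = env.reset()
    for _ in range(99):
        # e-greedy action selection straight from the Q table
        a = env.action_space.sample() if np.random.rand() < e else int(np.argmax(Q[s]))
        s1, r, d, _ = env.step(a)
        # same TD target as the network version: r + lambd * maxQ(s', a')
        Q[s, a] += alpha * (r + lambd * np.max(Q[s1]) - Q[s, a])
        s = s1
        if d:
            e = 1. / ((i / 50) + 10)  # GLIE decay, mirroring the script
            break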
oasislabs/ready-layer-2 | [
"dcfc5edd2a645ab7a0dfc54933a71a7dca923fff",
"dcfc5edd2a645ab7a0dfc54933a71a7dca923fff"
] | [
"demo/train_models.py",
"demo/evaluator.py"
] | [
"#!/usr/bin/env python3\n\nfrom contextlib import contextmanager\nfrom os import path as osp\n\nimport joblib\nimport pandas as pd\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.svm import SVC\n\n\nDEMO_DIR = osp.abspath(osp.dirname(__file__))\nDATA_DIR = osp.join(DEMO_DIR, \"data\")\nMODELS_DIR = osp.join(DEMO_DIR, \"models\")\n\n\n@contextmanager\ndef load_data(train=True):\n df = pd.read_csv(osp.join(DATA_DIR, f'iris_{\"train\" if train else \"test\"}.csv'), header=None)\n df.columns = [\"sepal length\", \"sepal width\", \"petal length\", \"petal width\", \"label\"]\n\n X = df.drop([\"label\"], axis=1)\n y = pd.factorize(df[\"label\"], sort=True)[0]\n\n yield X, y\n\n\ndef main():\n with load_data(train=True) as (X, y):\n model_a = SVC(gamma=\"scale\")\n model_a.fit(X, y)\n\n model_b = AdaBoostClassifier()\n model_b.fit(X, y)\n\n print(\"train\")\n print(f\"├─ model A score: {model_a.score(X, y):.3f}\")\n print(f\"└─ model B score: {model_b.score(X, y):.3f}\")\n\n with load_data(train=False) as (X, y):\n print(\"\\ntest (debugging only. you wouldn't see these irl)\")\n print(f\"├─ model A score: {model_a.score(X, y):.3f}\")\n print(f\"└─ model B score: {model_b.score(X, y):.3f}\")\n\n joblib.dump(model_a, osp.join(MODELS_DIR, \"model_a.joblib\"))\n joblib.dump(model_b, osp.join(MODELS_DIR, \"model_b.joblib\"))\n\n\nif __name__ == \"__main__\":\n main()\n",
"#!/usr/bin/env python3\n\nimport argparse\n\nimport joblib\nimport pandas as pd\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model-path\")\n parser.add_argument(\"--data-path\")\n args = parser.parse_args()\n\n X, y = load_data(args.data_path)\n model = joblib.load(args.model_path)\n print(model.score(X, y))\n\n\ndef load_data(data_path):\n df = pd.read_csv(data_path, header=None)\n df.columns = [\"sepal length\", \"sepal width\", \"petal length\", \"petal width\", \"label\"]\n\n X = df.drop([\"label\"], axis=1)\n y = pd.factorize(df[\"label\"], sort=True)[0]\n\n return X, y\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.ensemble.AdaBoostClassifier",
"pandas.factorize",
"sklearn.svm.SVC"
],
[
"pandas.factorize",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
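The two demo scripts above communicate only through joblib files: train_models.py dumps fitted models, and evaluator.py reloads one and scores it. A self-contained sketch of that round trip, substituting sklearn's bundled iris dataset for the repo's iris_train.csv / iris_test.csv (the model_a.joblib name is kept only for symmetry with the demo):

import joblib
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

model = SVC(gamma="scale").fit(X_tr, y_tr)
joblib.dump(model, "model_a.joblib")       # train_models.py side

reloaded = joblib.load("model_a.joblib")   # evaluator.py side
print(reloaded.score(X_te, y_te))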
ag88/Test-stock-prediction-algorithms | [
"b2d12dc71acd0ed201976549cab7d2076db4721c",
"b2d12dc71acd0ed201976549cab7d2076db4721c"
] | [
"StockMarketTimeSeriesAnomalies/FindInflectionPoints.py",
"StockMarketLinearRegression/LinearRegression_Change.py"
] | [
"\n\n# http://github.com/timestocome\n\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\n# use data leveled, log'd and otherwise smoothed in \n# https://github.com/timestocome/StockMarketData\n# to do some analysis\n\n# http://www.mdpi.com/1999-4893/5/4/588\n# after I started working through the algorithm\n# it became clear it's not so different than convolution\n# and convolution might be easier and faster so shifted \n# to using the built in scipy.signal library\n# The signal still needs to be stationary (rotated to x axis) in time\n# and for stocks because of inflation you'll need a log index or the \n# older ones will be too small to catch \n#\n# to find the bottoms of the Nasdad flip signal around the x axis and \n# repeat\n\n\n# import data that we've rotated to x axis to make stationary in time (see section 1 of above paper)\n# and scaled by taking the log\ndata = pd.read_csv('LeveledLogStockData.csv', index_col=0, parse_dates=True)\n\nfeatures = ['Nasdaq', 'S&P', 'Russell', 'DJIA', 'Gold', '1yr T', '10yr Bond']\ndata.columns = ['Nasdaq', 'S&P', 'Russell', 'DJIA', 'Gold', '1yr T', '10yr Bond']\n\n\n\nfor f in features:\n inverted_name = 'Flipped_' + f\n peaks_name = 'Peaks_' + f\n floors_name = 'Floors_' + f \n\n inverted_signal = data[f] * -1.\n\n peaks_ix = signal.find_peaks_cwt(data[f], np.arange(1, 253))\n peaks = np.zeros(len(data))\n for i in peaks_ix: peaks[i] = 1\n data[peaks_name] = peaks \n\n floor_ix = signal.find_peaks_cwt(inverted_signal, np.arange(1, 253))\n floors = np.zeros(len(data))\n for i in floor_ix: floors[i] = 1 \n data[floors_name] = floors \n\n\n\ninflection_dates = ['Peaks_Nasdaq', 'Floors_Nasdaq','Peaks_S&P', 'Floors_S&P', 'Peaks_Russell', 'Floors_Russell', 'Peaks_DJIA', \n 'Floors_DJIA', 'Peaks_Gold', 'Floors_Gold', 'Peaks_1yr T', 'Floors_1yr T', 'Peaks_10yr Bond', 'Floors_10yr Bond']\n\n\ndata[inflection_dates].to_csv(\"inflectionDates.csv\") \n\n\n\n\n\nplt.figure(figsize=(16,16))\nplt.plot(data['Nasdaq'])\nplt.plot(data['Peaks_Nasdaq'], c='green')\nplt.plot(data['Floors_Nasdaq'], c='red')\nplt.savefig('Inflection_dates_nasdaq.png')\nplt.show()\n\n\n",
"# http://github.com/timestocome\n\n\n# Attempt to use velocity, acceleration, momentum, energy, force, hooke's law\n# to predict changes 1 week, month, quarter into the future\n# This regression only uses data from the index itself\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport tensorflow as tf \n\n\n# pandas display options\npd.options.display.max_rows = 100\npd.options.display.max_columns = 25\npd.options.display.width = 1000\n\n\none_day = 1\none_week = 5\none_month = 21\none_quarter = 63\n\n\nlearning_rate = 0.01\ntraining_epochs = 2\n\n\n\n# read in file\ndef read_data(file_name):\n\n stock = pd.read_csv(file_name, parse_dates=True, index_col=0) # 31747 days of data \n n_samples = len(stock)\n\n \n # want to predict future gain/loss, these are the target values using in training the model\n stock['next_week'] = np.log( stock['Close'] / stock['Close'].shift(one_week) )\n stock['next_month'] = np.log( stock['Close'] / stock['Close'].shift(one_month) )\n stock['next_quarter'] = np.log( stock['Close'] / stock['Close'].shift(one_quarter) )\n \n\n # scale\n stock['Open'] = (stock['Open'] - stock['Open'].min()) / stock['Open'].max()\n\n \n # add in useful things\n stock['Velocity'] = stock['Open'] - stock['Open'].shift(1)\n stock['Acceleration'] = stock['Velocity'] - stock['Velocity'].shift(1)\n stock['Momentum'] = stock['Open'] * stock['Velocity']\n stock['Energy'] = stock['Open'] * stock['Velocity'] * stock['Velocity']\n stock['Force'] = stock['Open'] * stock['Acceleration']\n stock['Elastic'] = stock['Open'] * stock['Open']\n\n stock['VelocityAbs'] = stock['Velocity'].abs()\n stock['AccelerationAbs'] = stock['Acceleration'].abs()\n stock['MomentumAbs'] = stock['Momentum'].abs()\n stock['EnergyAbs'] = stock['Energy'].abs()\n stock['ForceAbs'] = stock['Force'].abs()\n stock['ElasticAbs'] = stock['Elastic'].abs()\n\n\n # scale volume\n stock['Volume'] = (stock['Volume'] - stock['Volume'].min()) / stock['Volume'].max()\n\n\n # ditch samples with NAN values\n stock = stock.dropna(axis=0)\n\n\n # flip order from newest to oldest to oldest to newest\n stock = stock.iloc[::-1]\n\n # shuffle data\n #stock = stock.sample(frac=1)\n\n\n # split data set into training and holdout\n # hold out all dates > 1/1/2017\n hold_out_stock = stock.loc[stock.index > '01-01-2016']\n\n # test and train on 1/1/85-12/31/2016\n train_stock = stock.loc[stock.index > '01-01-1985']\n train_stock = stock.loc[stock.index < '12-31-2015']\n\n\n\n # all stock is needed to walk back dates for testing hold out data\n return stock, train_stock, hold_out_stock\n\n\n\n#############################################################################################\n#############################################################################################\n# split into train, test, predict\n\nprint(\"Training scores:\")\n\nprint(\"DJIA\")\ndj_stock, dj_train, dj_hold_out = read_data('data/djia.csv')\n\nprint(\"S&P\")\nsp_stock, sp_train, sp_hold_out = read_data('data/S&P.csv')\n\nprint(\"Russell\")\nr_stock, r_train, r_hold_out = read_data('data/Russell2000.csv')\n\nprint(\"NASDAQ\")\nn_stock, n_train, n_hold_out = read_data('data/nasdaq.csv')\n\n#############################################################################################\n# check correlations\ndef check_features():\n print(\"**************** Correlations ********************************\")\n\n features = ['Open', 'Volume', 'next_week', 'next_month', 'next_quarter', 'Velocity', 'Momentum', 'Energy', 'Elastic', 'VelocityAbs', 
'AccelerationAbs', 'MomentumAbs', 'EnergyAbs', 'ForceAbs', 'ElasticAbs']\n\n #features = ['Open', 'Volume', 'Energy', 'Elastic', 'VelocityAbs', 'AccelerationAbs', 'MomentumAbs', 'EnergyAbs', 'ForceAbs', 'ElasticAbs']\n correlations = dj_stock[features].corr()\n\n print(correlations[['Open', 'next_week', 'next_month', 'next_quarter']])\n\n\n\n# ditch features that don't effect the future prices\ncheck_features()\n\n\n\n\n\n\n\n\n# use correlations function below to find good features\nfeatures = ['Open', 'Volume', 'Velocity', 'Acceleration', 'Momentum', 'Energy', 'Force', 'Elastic', 'VelocityAbs', 'AccelerationAbs', 'MomentumAbs', 'EnergyAbs', 'ForceAbs', 'ElasticAbs']\nfeatures = ['Open', 'Elastic', 'AccelerationAbs', 'Volume']\n\ntarget = ['next_quarter'] # next_week, next_month, next_quarter\n\n# convert current training set to numpy array\nx_train = dj_train.as_matrix(columns=[features])\ny_train = dj_train.as_matrix(columns=[target])\nn_features = len(features)\nn_out = 1\nn_samples = len(y_train)\n\nX = tf.placeholder('float')\nY = tf.placeholder('float')\n\ndef model(X, w):\n return tf.multiply(X, w)\n\nw = tf.Variable(tf.random_normal([n_features, n_out]), name='weights')\ny_model = model(X, w)\ncost = tf.square(Y - y_model)\npredict = tf.nn.relu(tf.multiply(w, X))\n\ntrain_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n\nfor epoch in range(training_epochs):\n for (x, y) in zip(x_train, y_train):\n sess.run(train_op, feed_dict={X: x, Y: y})\n\n\nw_value = sess.run(w)\n\nsess.close()\n\n\n################################################\n# plot training data\n################################################\nplt.figure(figsize=(15,15))\n\nplt.suptitle(\"Test Stock Predictions\")\n\nax1 = plt.subplot(211)\nax1.plot(y_train, 'b', label='Actual')\n\ny_learned = np.empty([n_samples])\ni = 0\nfor x in x_train:\n\n y_learned[i] = (x * w_value).sum()\n i += 1\n\nax1.plot(y_learned, 'r', label='Predicted')\nax1.set_title(\"Training data predictions\")\nax1.legend(loc='best')\n\n#############################################\n# plot hold out data\n##############################################\nx_test = dj_hold_out.as_matrix(columns=[features])\ny_test = dj_hold_out.as_matrix(columns=[target])\n\nz = range(len(y_learned), len(y_learned)+len(y_test))\n\nax2 = plt.subplot(212, sharex=ax1, sharey=ax1)\nax2.plot(z, y_test, 'b', label='Actual')\n\ny_predicted = np.empty([len(y_test)])\ni = 0\nfor x in x_test:\n y_predicted[i] = (x * w_value).sum()\n i += 1\n\nax2.plot(z, y_predicted, 'r', label='Prediction')\n\nax2.set_title(\"Hold out data predictions\")\nax2.legend(loc='best')\n\nplt.savefig('LinearRegression_Change.png')\n\nplt.show()\n\nprint(len(dj_hold_out))\n\n\n"
] | [
[
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"pandas.read_csv",
"tensorflow.multiply",
"tensorflow.placeholder",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.subplot",
"tensorflow.random_normal",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Session",
"tensorflow.square",
"matplotlib.pyplot.suptitle",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
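FindInflectionPoints.py above locates tops with find_peaks_cwt and bottoms by running the same detector on the inverted series. A compact sketch of that trick on synthetic data (the 1..252 width range mirrors the script's roughly one-trading-year window):

import numpy as np
from scipy import signal

# smoothed noise standing in for the leveled, log-scaled index series
series = np.cumsum(np.random.randn(2000))

peak_ix = signal.find_peaks_cwt(series, np.arange(1, 253))    # tops
floor_ix = signal.find_peaks_cwt(-series, np.arange(1, 253))  # bottoms, via inversion

# indicator vectors like the Peaks_/Floors_ columns built above
peaks = np.zeros(len(series)); peaks[peak_ix] = 1
floors = np.zeros(len(series)); floors[floor_ix] = 1
print(int(peaks.sum()), int(floors.sum()))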
codezakh/ALBEF | [
"16aee1da1b7682afcd5a5f1ded74fc8dc199a8cf"
] | [
"models/unified/contrastive.py"
] | [
"'''\n * Copyright (c) 2021, salesforce.com, inc.\n * All rights reserved.\n * SPDX-License-Identifier: BSD-3-Clause\n * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n'''\n\nfrom functools import partial\nfrom multiprocessing.sharedctypes import Value\nfrom models.vit import VisionTransformer, interpolate_pos_embed\nfrom models.med import BertConfig, BertForMaskedLM\n# from models.modality_wise_ln_med import BertConfig, BertForMaskedLM\nfrom enum import Enum\nfrom omegaconf import OmegaConf\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nimport numpy as np\nimport random\n\n\nclass VisionLanguageLearner(nn.Module):\n def __init__(self, \n text_encoder = None,\n tokenizer = None,\n config = None, \n temp = 0.07,\n init_deit = True\n ):\n super().__init__()\n \n self.tokenizer = tokenizer \n self.mlm_probability = config['mlm_probability']\n embed_dim = config['embed_dim']\n \n self.visual_encoder = VisionTransformer(\n img_size=config['image_res'], patch_size=16, embed_dim=768, depth=1, num_heads=12, \n mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) \n \n if init_deit:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth\",\n map_location=\"cpu\", check_hash=True)\n state_dict = checkpoint[\"model\"]\n pos_embed_reshaped = interpolate_pos_embed(state_dict['pos_embed'], self.visual_encoder)\n state_dict['pos_embed'] = pos_embed_reshaped\n msg = self.visual_encoder.load_state_dict(state_dict,strict=False)\n print(f'missing_keys={msg.missing_keys}\\tunexpected_keys={msg.unexpected_keys}') \n \n vision_width = config['vision_width'] \n bert_config = BertConfig.from_dict(OmegaConf.to_container(config.bert_config))\n \n self.text_encoder = BertForMaskedLM.from_pretrained(text_encoder, config=bert_config) \n\n text_width = self.text_encoder.config.hidden_size\n self.vision_proj = nn.Linear(vision_width, embed_dim)\n self.text_proj = nn.Linear(text_width, embed_dim) \n\n self.temp = nn.Parameter(torch.ones([]) * config['temp']) \n self.queue_size = config['queue_size']\n self.momentum = config['momentum'] \n\n # Hardcoded from DALL-E's D-VAE.\n vocab_size = 8192\n self.mim_head = nn.Linear(self.visual_encoder.embed_dim, vocab_size)\n\n # create momentum models\n self.text_encoder_m = BertForMaskedLM.from_pretrained(text_encoder, config=bert_config) \n self.vision_proj_m = nn.Linear(vision_width, embed_dim)\n self.text_proj_m = nn.Linear(text_width, embed_dim) \n self.visual_encoder_m = VisionTransformer(\n img_size=config['image_res'], patch_size=16, embed_dim=768, depth=1, num_heads=12, \n mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) \n \n self.model_pairs = [\n (self.visual_encoder, self.visual_encoder_m),\n (self.text_encoder, self.text_encoder_m),\n (self.text_proj, self.text_proj_m),\n (self.vision_proj, self.vision_proj_m)\n ]\n \n self.copy_params()\n\n # create the queue\n self.register_buffer(\"image_queue\", torch.randn(embed_dim, self.queue_size))\n self.register_buffer(\"text_queue\", torch.randn(embed_dim, self.queue_size))\n self.register_buffer(\"queue_ptr\", torch.zeros(1, dtype=torch.long)) \n \n self.image_queue = nn.functional.normalize(self.image_queue, dim=0)\n self.text_queue = nn.functional.normalize(self.text_queue, dim=0)\n\n\n def forward(self, image, text, visual_token_ids, masked_visual_token_pos, masked_visual_tok_labels, alpha=0, return_dict=False):\n 
with torch.no_grad():\n self.temp.clamp_(0.001,0.5)\n\n ## ================ ITA ====================== ##\n image_embeds = self.visual_encoder(image) \n image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)\n image_embeds = self.text_encoder.bert(\n inputs_embeds=image_embeds, \n attention_mask=image_atts,\n return_dict=True,\n mode='image'\n )\n image_embeds = image_embeds.last_hidden_state\n image_feat = F.normalize(self.vision_proj(image_embeds[:,0,:]),dim=-1) \n\n text_output = self.text_encoder.bert(text.input_ids, attention_mask = text.attention_mask, \n return_dict = True, mode = 'text') \n text_embeds = text_output.last_hidden_state\n text_feat = F.normalize(self.text_proj(text_embeds[:,0,:]),dim=-1) \n\n \n with torch.no_grad():\n self._momentum_update()\n\n image_embeds_m = self.visual_encoder_m(image) \n image_embeds_m = self.text_encoder.bert(\n inputs_embeds=image_embeds_m, \n attention_mask=image_atts,\n return_dict=True,\n mode='image'\n )\n image_embeds_m = image_embeds_m.last_hidden_state\n image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:,0,:]),dim=-1) \n image_feat_all = torch.cat([image_feat_m.t(),self.image_queue.clone().detach()],dim=1) \n\n text_output_m = self.text_encoder_m.bert(text.input_ids, attention_mask = text.attention_mask, \n return_dict = True, mode = 'text') \n text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:,0,:]),dim=-1) \n text_feat_all = torch.cat([text_feat_m.t(),self.text_queue.clone().detach()],dim=1)\n\n sim_i2t_m = image_feat_m @ text_feat_all / self.temp\n sim_t2i_m = text_feat_m @ image_feat_all / self.temp\n\n sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)\n sim_targets.fill_diagonal_(1)\n\n sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets\n sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets \n\n sim_i2t = image_feat @ text_feat_all / self.temp \n sim_t2i = text_feat @ image_feat_all / self.temp \n\n loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1)*sim_i2t_targets,dim=1).mean()\n loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1)*sim_t2i_targets,dim=1).mean() \n\n loss_ita = (loss_i2t+loss_t2i)/2\n self._dequeue_and_enqueue(image_feat_m, text_feat_m)\n\n \n ##================= MLM ========================## \n input_ids = text.input_ids.clone()\n labels = input_ids.clone()\n\n probability_matrix = torch.full(labels.shape, self.mlm_probability) \n input_ids, labels = self.mask(input_ids, self.text_encoder.config.vocab_size, image.device, targets=labels,\n probability_matrix = probability_matrix) \n \n with torch.no_grad():\n logits_m = self.text_encoder_m(input_ids, \n attention_mask = text.attention_mask,\n return_dict = True,\n return_logits = True,\n mode='text'\n ) \n mlm_output = self.text_encoder(input_ids, \n attention_mask = text.attention_mask,\n return_dict = True,\n labels = labels, \n soft_labels = F.softmax(logits_m,dim=-1),\n alpha = alpha,\n mode='text'\n ) \n loss_mlm = mlm_output.loss \n\n\n ##================= MIM ========================##\n post_mask_image_embeds = self.visual_encoder(image, masked_visual_token_pos)\n image_atts = torch.ones(post_mask_image_embeds.size()[:-1],dtype=torch.long).to(image.device)\n post_mask_cross_embeds = self.text_encoder.bert(\n inputs_embeds=post_mask_image_embeds, \n attention_mask=image_atts,\n return_dict=True,\n mode='image'\n )\n # Drop the CLS token, because we don't mask it.\n post_mask_cross_embeds = 
post_mask_cross_embeds.last_hidden_state[:, 1:]\n predicted_visual_tokens = self.mim_head(post_mask_cross_embeds)\n loss_mim = F.cross_entropy(\n input=predicted_visual_tokens[masked_visual_token_pos], \n target=masked_visual_tok_labels\n )\n\n if return_dict:\n return {\n 'losses': {\n 'loss_ita': loss_ita,\n 'loss_mlm': loss_mlm,\n 'loss_mim': loss_mim\n }\n }\n\n return loss_mlm, loss_mim, loss_ita\n\n \n\n @torch.no_grad() \n def copy_params(self):\n for model_pair in self.model_pairs: \n for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):\n param_m.data.copy_(param.data) # initialize\n param_m.requires_grad = False # not update by gradient \n\n \n @torch.no_grad() \n def _momentum_update(self):\n for model_pair in self.model_pairs: \n for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):\n param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)\n \n \n \n @torch.no_grad()\n def _dequeue_and_enqueue(self, image_feat, text_feat):\n # gather keys before updating queue\n image_feats = concat_all_gather(image_feat)\n text_feats = concat_all_gather(text_feat)\n\n batch_size = image_feats.shape[0]\n\n ptr = int(self.queue_ptr)\n assert self.queue_size % batch_size == 0 # for simplicity\n\n # replace the keys at ptr (dequeue and enqueue)\n self.image_queue[:, ptr:ptr + batch_size] = image_feats.T\n self.text_queue[:, ptr:ptr + batch_size] = text_feats.T\n ptr = (ptr + batch_size) % self.queue_size # move pointer\n\n self.queue_ptr[0] = ptr \n \n \n def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None):\n if masked_indices is None: \n masked_indices = torch.bernoulli(probability_matrix).bool()\n \n masked_indices[input_ids == self.tokenizer.pad_token_id] = False\n masked_indices[input_ids == self.tokenizer.cls_token_id] = False\n \n if targets is not None:\n targets[~masked_indices] = -100 # We only compute loss on masked tokens \n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices\n input_ids[indices_replaced] = self.tokenizer.mask_token_id\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device)\n input_ids[indices_random] = random_words[indices_random] \n # The rest of the time (10% of the time) we keep the masked input tokens unchanged \n \n if targets is not None:\n return input_ids, targets\n else:\n return input_ids\n \n\n@torch.no_grad()\ndef concat_all_gather(tensor):\n \"\"\"\n Performs all_gather operation on the provided tensors.\n *** Warning ***: torch.distributed.all_gather has no gradient.\n \"\"\"\n tensors_gather = [torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output\n\n"
] | [
[
"torch.nn.functional.normalize",
"torch.nn.functional.softmax",
"torch.ones",
"torch.randint",
"torch.full",
"torch.cat",
"torch.hub.load_state_dict_from_url",
"torch.randn",
"torch.zeros",
"torch.distributed.all_gather",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.no_grad",
"torch.bernoulli",
"torch.distributed.get_world_size",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jbrockmendel/sm2 | [
"c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf",
"c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf",
"c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf",
"c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf",
"c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf",
"c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf",
"c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf",
"c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf"
] | [
"sm2/regression/tests/test_cov.py",
"sm2/genmod/tests/results/glm_test_resids.py",
"sm2/tsa/stattools.py",
"sm2/discrete/tests/results/results_discrete.py",
"sm2/genmod/tests/test_constrained.py",
"sm2/discrete/tests/test_constrained.py",
"sm2/stats/tests/test_contrast.py",
"sm2/regression/tests/generate_lasso.py"
] | [
"\"\"\"Example: minimal OLS\n\n\"\"\"\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_allclose\n\nimport sm2.api as sm\n\n\[email protected]_vetted\ndef test_HC_use():\n np.random.seed(0)\n nsample = 100\n x = np.linspace(0, 10, 100)\n X = sm.add_constant(np.column_stack((x, x**2)), prepend=False)\n beta = np.array([1, 0.1, 10])\n y = np.dot(X, beta) + np.random.normal(size=nsample)\n\n results = sm.OLS(y, X).fit()\n\n # test cov_params\n idx = np.array([1, 2])\n # need to call HC0_se to have cov_HC0 available\n results.HC0_se\n cov12 = results.cov_params(column=[1, 2], cov_p=results.cov_HC0)\n assert_almost_equal(cov12,\n results.cov_HC0[idx[:, None], idx],\n decimal=15)\n\n # test t_test\n tvals = results.params / results.HC0_se\n ttest = results.t_test(np.eye(3), cov_p=results.cov_HC0)\n assert_almost_equal(ttest.tvalue,\n tvals,\n decimal=14)\n\n assert_almost_equal(ttest.sd,\n results.HC0_se,\n decimal=14)\n\n # test f_test\n ftest = results.f_test(np.eye(3)[:-1], cov_p=results.cov_HC0)\n slopes = results.params[:-1]\n idx = np.array([0, 1])\n cov_slopes = results.cov_HC0[idx[:, None], idx]\n fval = np.dot(slopes, np.dot(np.linalg.inv(cov_slopes), slopes)) / len(idx)\n assert_allclose(ftest.fvalue, fval, rtol=12)\n",
"\"\"\"\nThis file contains the residuals for testing GLM.\n\nAll residuals were obtained with Stata.\n\nThe residuals are column ordered as\n\nPearson, Deviance, Working, Anscombe, and Response Residuals\n\"\"\"\n\nimport numpy as np\n\nlbw = [-.67369007, -.86512534, -.06703079, -.93506245, -.31217507,\n -.38301082, -.52323205, -.01427243, -.55879206, -.12793027,\n -.68788286, -.88025592, -.07003063, -.95205459, -.32119762,\n -.96932399, -1.1510657, -.12098923, -1.2626094, -.48442686,\n -1.017216, -1.1919416, -.12709645, -1.3106868, -.50853393,\n -.56992387, -.75002865, -.04537362, -.80685236, -.24517662,\n -.38236113, -.52240243, -.01419429, -.5578939, -.12755193,\n -.70541551, -.89874491, -.07371959, -.97286469, -.33226988,\n -.53129095, -.70516948, -.03779124, -.75734471, -.22013309,\n -.59687896, -.78068461, -.05087588, -.84082824, -.26268069,\n -.72944938, -.92372831, -.07872676, -1.0010676, -.34729955,\n -.48089188, -.64503581, -.02865125, -.6913447, -.18782188,\n -1.7509231, -1.6748694, -.13984667, -1.9107428, -.75404181,\n -1.0993874, -1.2588746, -.13558784, -1.3901949, -.54723527,\n -.73406378, -.9284774, -.0796795, -1.0064397, -.35016393,\n -.73406378, -.9284774, -.0796795, -1.0064397, -.35016393,\n -.91970083, -1.1071944, -.11376204, -1.2113902, -.45824406,\n -.58253395, -.76443611, -.04792993, -.8228052, -.25336683,\n -.87010277, -1.0617464, -.10565948, -1.1587271, -.43087357,\n -.55091811, -.72809506, -.04159148, -.78261562, -.23284101,\n -.50228984, -.67078749, -.03241138, -.71955949, -.20146616,\n -.56681231, -.74645574, -.04474827, -.80290024, -.24315597,\n -.16739043, -.23509228, -.00072263, -.24969795, -.02725586,\n -.53429779, -.70869969, -.03836573, -.76123202, -.22207692,\n -.81310524, -1.0074762, -.09536044, -1.0963421, -.39800383,\n -.24319644, -.339003, -.00294417, -.36060255, -.05584178,\n -.62985338, -.81746346, -.05776093, -.88175202, -.28403447,\n -.27289888, -.37902784, -.00447115, -.403468, -.06931188,\n -.73980785, -.9343678, -.08086098, -1.0131078, -.35371946,\n -.79896487, -.99366645, -.09266053, -1.0805507, -.3896279,\n -.79896487, -.99366645, -.09266053, -1.0805507, -.3896279,\n -.95715693, -1.140454, -.11930111, -1.2501848, -.47812002,\n -1.2258645, -1.3545388, -.14405247, -1.5056478, -.60043853,\n -.27739012, -.38504179, -.00474005, -.40991694, -.07144772,\n -.65459114, -.84453257, -.06298676, -.91198968, -.29995988,\n -.46709275, -.62825848, -.02633193, -.67300059, -.17910031,\n -.54303849, -.71892471, -.04005189, -.77249964, -.22773411,\n -.54176628, -.71743989, -.03980502, -.77086265, -.22691015,\n -.25756493, -.35841857, -.00362961, -.38138453, -.06221253,\n -.55956791, -.73810992, -.04330137, -.79367479, -.2384528,\n -.69600671, -.88885063, -.07174285, -.96172188, -.32633864,\n -.23256318, -.32457262, -.00249768, -.34516994, -.05131047,\n -.64757466, -.83690034, -.06150185, -.90345369, -.2954536,\n -.28195528, -.39114407, -.00502406, -.41646289, -.07364416,\n -1.1570243, -1.3035317, -.14010172, -1.443813, -.57241299,\n -1.1570243, -1.3035317, -.14010172, -1.443813, -.57241299,\n -.30030789, -.41556527, -.00627721, -.44268348, -.08272435,\n -.55114605, -.72835967, -.04163627, -.78290767, -.23298882,\n -.3806923, -.52027022, -.01399469, -.5555858, -.12658158,\n -1.1987876, -1.33477, -.1426768, -1.4816055, -.58967487,\n -.94149929, -1.12666, -.11704805, -1.2340685, -.46989562,\n -.53813037, -.71318989, -.03910216, -.76617855, -.22455631,\n -.55398245, -.73164922, -.04219494, -.78653899, -.2348285,\n -.6479873, -.83735019, -.06158914, -.90395657, 
-.29571887,\n -.62689671, -.81419816, -.0571388, -.87811137, -.28212464,\n -.67810985, -.86985289, -.06796584, -.94036802, -.31499013,\n -1.4692211, -1.5166623, -.14786328, -1.7067836, -.68340511,\n -.40499087, -.55113968, -.01705699, -.58904006, -.14090647,\n -.67731506, -.86900381, -.06779774, -.93941488, -.31448425,\n -.62489302, -.81198168, -.0567176, -.87564093, -.28082972,\n -.62489302, -.81198168, -.0567176, -.87564093, -.28082972,\n -.57609901, -.75709845, -.04662119, -.81467722, -.24918728,\n -.60844593, -.79367679, -.0532758, -.85526404, -.27018297,\n -.29839126, -.41302333, -.00613785, -.43995243, -.08175784,\n -1.3570687, -1.4452542, -.14780947, -1.6172873, -.64808999,\n -.78708367, -.98195509, -.0903525, -1.0671844, -.38252574,\n -.5858308, -.76818375, -.04860366, -.82695911, -.25550797,\n -.31665807, -.43716753, -.00754852, -.46591063, -.09113411,\n -.82908669, -1.0229172, -.09834616, -1.1140383, -.40736693,\n -.69616804, -.88902083, -.07177681, -.96191342, -.32644055,\n -1.0915205, -1.2526359, -.13488154, -1.3827415, -.54367425,\n -.64853308, -.837945, -.0617046, -.90462156, -.29606968,\n -.94223393, -1.1273107, -.11715578, -1.2348278, -.47028421,\n -.816023, -1.0103085, -.09591088, -1.0995849, -.39972155,\n -.57856847, -.75991792, -.04712242, -.81779956, -.25079125,\n -.32734654, -.45120803, -.00846067, -.48102477, -.09678472,\n -.31077092, -.42940642, -.00707363, -.45756203, -.08807264,\n -.61538501, -.80142385, -.05472422, -.86388252, -.27467837,\n -1.1456594, -1.2948703, -.13930185, -1.4333765, -.5675742,\n -.21863111, -.30558898, -.00198616, -.32488425, -.045619,\n -.34650082, -.47620326, -.01025864, -.50796716, -.10719293,\n -.22518959, -.31453605, -.00221689, -.33444271, -.04826292,\n -.56093617, -.73968913, -.04357365, -.79541979, -.23934092,\n -.56471813, -.74404709, -.04432868, -.80023684, -.24179618,\n -.5589041, -.73734327, -.04316945, -.79282776, -.23802197,\n -.98405074, -1.1637857, -.12295866, -1.2775329, -.49196179,\n -.67623178, -.86784578, -.06756859, -.9381151, -.31379451,\n -.34443674, -.47352019, -.01005474, -.50507281, -.10605469,\n -.20538064, -.28745723, -.00157184, -.30552538, -.04047396,\n -.36261354, -.49705954, -.01193509, -.53048488, -.11620849,\n -.33216574, -.45751709, -.00889321, -.48782091, -.09937016,\n -.52834459, -.70170398, -.03723117, -.75353006, -.21822964,\n -.65107472, -.84071206, -.06224241, -.90771574, -.29770265,\n -2.2838855, -1.9116213, -.11327561, -2.2332026, -.83912829,\n -.57856847, -.75991792, -.04712242, -.81779956, -.25079125,\n -.30573921, -.4227577, -.00668307, -.45041336, -.08548557,\n -.26475363, -.36809496, -.0040096, -.39174994, -.06550308,\n -.50724876, -.67670909, -.03330933, -.72605772, -.2046457,\n -.50724876, -.67670909, -.03330933, -.72605772, -.2046457,\n -.29708892, -.41129496, -.00604429, -.43809571, -.08110349,\n -.39538836, -.53898588, -.01580695, -.57585866, -.13519643,\n -.95811737, -1.141295, -.11943637, -1.2511687, -.47862055,\n -.79777479, -.99249783, -.09243093, -1.0792158, -.38891913,\n -.4007824, -.54582043, -.01650278, -.58326946, -.1383964,\n -.59547213, -.77909775, -.05058538, -.83906658, -.26176764,\n -.67961214, -.87145655, -.06828352, -.9421685, -.31594589,\n -.35903387, -.49243968, -.01154966, -.52549403, -.11418612,\n -.25982794, -.36146745, -.00374651, -.38464992, -.06324112,\n -.63086237, -.8185763, -.0579734, -.88299313, -.28468594,\n -.52587777, -.6987977, -.03676448, -.75033205,\n -.21663702, -.19948442, -.27936589, -.00140862, -.29689136,\n -.03827107, -.61183026, -.7974596, 
-.05398148, -.85947135,\n -.27237604, -.50385167, -.67265442, -.03269316, -.72160779,\n -.20246694, -.48499858, -.65000311, -.02935791, -.69678155,\n -.19042999, -.42040434, -.57052211, -.01917023, -.61008878,\n -.15019447, -.48053382, -.64460216, -.02858999, -.69087017,\n -.18759474, -.37464721, -.51253189, -.01328506, -.54721232,\n -.12308435, -.49531764, -.66243224, -.03116534, -.71039719,\n -.19700616, -.36160926, -.49576422, -.01182621, -.52908537,\n -.11564002, -.75971278, -.95459728, -.08491171, -1.0360505,\n -.36595033, -.29016492,\n -.40209055, -.00556233, -.42821114, -.07765727, -.40546611,\n -.55173962, -.01712019, -.58969106, -.14119063, -.39648646,\n -.54037876, -.01594727, -.57736865, -.13584627, -.35179057,\n -.48306766, -.01079245, -.51537461, -.11012759, -.33522775,\n -.46151868, -.00917494, -.49213296, -.10102472, -.33337344,\n -.45909602, -.00900368, -.48952221, -.10002166, -.28349954,\n -.39320584, -.00512259, -.41867509, -.07439288, -.62237174,\n -.80918847, -.05618813, -.87252863, -.27919958, -.37781929,\n -.51659535, -.01365481, -.55160868, -.12491598, -.26957183,\n -.37456618, -.00427859, -.39868504, -.06774594, .61566918,\n .80174036, .05478366, .86423481, .27486236, 2.1552118, 1.8605156,\n .11994504, 2.1616254, .82285014, .6958925, .88873013, .07171881,\n .96158626, .32626648, .48936001, .65526554, .03011636, .70254431,\n .19320564, .94341916, 1.1283597, .11732917, 1.2360523, .47091059,\n 2.317558, 1.9244575, .11155378, 2.2513666, .84304062, 1.0055755,\n 1.1821359, .12569111, 1.2991214, .50277997, 1.1758629, 1.3177365,\n .14133315, 1.4609684, .58029986, .77665994, .97159873, .08829985,\n 1.0553839, .37624776, .92862761, 1.1152029, .11512854, 1.2207114,\n .46304378, 1.7104194, 1.6537851, .14148619, 1.8830756, .74525762,\n .51347062, .68411403, .03444926, .73418924, .20864293, 1.7302647,\n 1.6641798, .14069739, 1.8966961, .7496129, 1.7159282, 1.656683,\n .14127016, 1.886869, .74647663, 1.4097427, 1.4794559, .14814617,\n 1.659967, .66525789, 2.0442483, 1.8136166, .12572138, 2.0969416,\n .80691071, 1.7479484, 1.6733383, .13997094, 1.9087284, .75341056,\n 1.7094805, 1.6532902, .14152278, 1.8824281, .74504908, 1.4965596,\n 1.5332827, .14752494, 1.7278315, .69132856, 1.1936562, 1.330981,\n .14239054, 1.4770087, .58759741, .82652693, 1.0204559, .09787281,\n 1.1112146, .40587474, 2.2245013, 1.8884508, .11633994, 2.2006058,\n .83188774, 1.0349547, 1.206727, .12914052, 1.3281647, .51717209,\n 1.3170894, 1.4184715, .14713645, 1.5840959, .63433244, 3.1267629,\n 2.1805419, .07637108, 2.631322, .90720675, .57215628, .7525877,\n .04582366, .809684, .24662647, .86627248, 1.0581685, .10499903,\n 1.1545978, .42871115, .94218582, 1.1272681, .11714873, 1.2347781,\n .47025877, 1.1156811, 1.2716852, .13698079, 1.405528, .55451496,\n .53496988, .70948787, .03849454, .76210013, .22251157, 1.4497097,\n 1.5046175, .14802749, 1.6915825, .6775918, 1.2233611, 1.3527267,\n .14393463, 1.5034398, .59945723, 1.545073, 1.5620618, .14664053,\n 1.7644793, .70477532, 2.020635, 1.803272, .12694042, 2.0827983,\n .80326447, .59611095, .7798185, .05071725, .83986668, .26218226,\n .85655928, 1.0490513, .1033029, 1.144086, .42319688, 1.1157473,\n 1.2717369, .13698625, 1.40559, .55454427, 1.5187563, 1.5465618,\n .1471632, 1.7447092, .69757645, 1.2580261, 1.3775399, .14540261,\n 1.5337471, .61279777, 2.2387897, 1.8940894, .11559989, 2.2085158,\n .83367096, 1.4987735, 1.5346157, .14749226, 1.7295233, .69195908,\n .87949714, 1.0704799, .10725881, 1.1688167, .43614806, .81628943,\n 1.0105668, .09596102, 
1.0998807, .39987821, 1.0340841, 1.2060057,\n .12904294, 1.327311, .51675179, 1.7490122, 1.6738861, .13992657,\n 1.9094491, .75363655, 1.6803635, 1.6378022, .14262258, 1.8622063,\n .73846784, 1.8051113, 1.7022925, .13748913, 1.9469661, .7651715,\n 1.5637979, 1.5729326, .14621078, 1.7783904, .70976331, 1.1993095,\n 1.3351545, .14270545, 1.4820722, .58988546, 1.3197815, 1.4202979,\n .1471939, 1.5863531, .63527917, .71898726, .91290406, .07655565,\n .98883657, .34077931, 1.5103554, 1.5415584, .14730892, 1.7383434,\n .69523097, 3.0022063, 2.1465826, .08091665, 2.5788383, .90013225,\n .64440768, .83344364, .06083212, .89959036, .29341668, 1.5019795,\n 1.5365427, .14744358, 1.7319699, .69286925, 1.3305489, 1.4275696,\n .1474058, 1.5953487, .63903614, .90521582, 1.094089, .11148405,\n 1.1961638, .45037299, .6236701, .81062745, .05646071, .87413186,\n .28003914, .71234937, .90599551, .07517106, .9810397, .3366244]\n\nlbw_resids = np.array(lbw).astype(float).reshape(-1, 5)\n\ncpunish = [.29883413, .29637762, 62.478695, .29638095, 1.7736344,\n .280627, .27622019, 6.5853375, .27623151, .80342558,\n 4.0930531, 2.9777878, 6.1503069, 3.0157174, 4.6881034,\n .16338859, .16114971, 1.1563983, .16115474, .31370176,\n .63595872, .59618385, 1.9109264, .59656954, .91769973,\n .9059739, .8066189, .99577089, .80822353, .9349684,\n .0532905, .0529548, .14244545, .05295515, .07395759,\n -.26830664, -.2766384, -1.0082872, -.27668319, -.41714051,\n -.62341484, -.68349824, -1.5652763, -.68459104, -.84732188,\n -1.1015655, -1.2743561, -5.3400286, -1.279951, -1.8643241,\n -1.2006618, -1.4021282, -6.6206839, -1.40923, -2.1211989,\n -1.2797534, -1.505173, -7.8054171, -1.5136295, -2.3382067,\n -.960585, -1.0954134, -3.8587164, -1.0992211, -1.5269969,\n .10846917, .10649195, .0921891, .10649783, .1027458,\n .02088367, .02081086, .02023963, .0208109, .02066675,\n .63647875, .56713011, .24880139, .56824372, .4653791,\n -.69597083, -.77000601, -1.9377178, -.77151335, -.97909358]\n\ncpunish_resids = np.array(cpunish).astype(float).reshape(-1, 5)\n\nscotvote = [.04317472, .04256856, -8338.93, .04256786, 2.4956853,\n -.01827077, -.01838325, 2762.4019, -.01838319, -.97334528,\n .05609817, .05508221, -7252.03, .05508069, 2.8365188,\n -.02280193, -.02297758, 4525.3102, -.02297747, -1.3300374,\n -.02505649, -.02526888, 8767.023, -.02526873, -1.7656214,\n -.1421743, -.14953291, 26174.801, -.14950069, -8.0880132,\n -.01973673, -.01986809, 5888.0406, -.01986801, -1.318784,\n .06763015, .06616299, -19473.553, .06616036, 4.4658962,\n .0202078, .02007327, -3928.3996, .02007319, 1.1706255,\n -.00841611, -.00843983, 2127.7741, -.00843983, -.53216875,\n -.04429363, -.04496504, 6971.3512, -.04496419, -2.3914787,\n .01158536, .01154092, -2667.3324, .01154091, .71006619,\n .05538677, .05439602, -15077.849, .05439455, 3.5896364,\n .09018494, .08760809, -23064.91, .087602, 5.7245313,\n .06595122, .06455471, -14747.68, .06455226, 4.0030388,\n .00220373, .00220212, -923.58004, .00220212, .16491647,\n .09775671, .09474141, -17697.14, .09473373, 5.5300885,\n -.0669005, -.06845346, 24989.253, -.06845044, -4.8180428,\n .05194846, .05107522, -13846.987, .05107401, 3.3432347,\n .01298505, .01292927, -1828.3796, .01292925, .67554021,\n .02257874, .02241101, -5988.3279, .02241091, 1.4506691,\n .01474752, .01467564, -5311.9833, .01467562, 1.0492967,\n .03640993, .03597719, -3483.054, .03597677, 1.665201,\n -.06613827, -.06765534, 10963.702, -.06765242, -3.633186,\n -.05046726, -.05134215, 15165.308, -.05134088, -3.3803129,\n -.02546479, -.02568421, 
3585.6787, -.02568405, -1.3248006,\n -.08159133, -.08392239, 14470.556, -.08391679, -4.5841388,\n -.0330796, -.03345156, 6495.2667, -.03345121, -1.9226747,\n .00327289, .00326933, -1001.1863, .00326933, .22052594,\n -.02631155, -.02654593, 5824.1466, -.02654575, -1.5916286,\n .01183737, .01179098, -4763.0477, .01179097, .87390689,\n -.03325649, -.03363248, 11219.87, -.03363212, -2.3151557]\n\nscotvote_resids = np.array(scotvote).astype(float).reshape(-1, 5)\n\nstar98 = [-1.3375372, -1.3342565, -3674.3805, -1.3343393, -18.732624,\n .97808463, .99272841, 197.05157, .99291658, 5.7338226,\n 4.2825696, 4.29447, 7304.117, 4.2983582, 51.167264, .20665475,\n .20689409, 283.85261, .20689456, 2.2971775, -3.4397844,\n -3.7418743, -184.10712, -3.7732602, -12.963164, -8.8955127,\n -8.7070478, -94493.01, -8.7156529, -195.54523, 1.3093612,\n 1.3040684, 6904.5348, 1.3041169, 22.790356, -2.3354095,\n -2.3171211, -6891.6853, -2.3175198, -33.497868, -4.9509734,\n -4.9716533, -7444.4706, -4.9782445, -56.720276, -4.3896461,\n -4.3936251, -19119.121, -4.3958412, -71.687315, 2.6246727,\n 2.6002292, 33800.262, 2.6004266, 61.52101, 1.0381778,\n 1.0655404, 147.40536, 1.0658131, 5.4160865, -5.371452,\n -5.4255371, -8735.5422, -5.433774, -63.167122, -7.5162302,\n -7.6600055, -16610.436, -7.6793193, -97.902488, -4.3246609,\n -4.217551, -1806.9456, -4.2260371, -32.330796, .71999176,\n .72316703, 1710.1519, .7231808, 9.6064571, .83376421,\n .83436314, 761.12734, .83440607, 8.0881266, .41765273,\n .41667589, 28.903009, .41670531, 1.7147122, -.97120884,\n -.97003594, -4270.4621, -.97005936, -15.911094, -.78556304,\n -.78233445, -114.9327, -.78245171, -4.1393793, .4683723,\n .46647357, 82.954347, .46649502, 2.6303115, -3.16957,\n -3.1551738, -3676.7765, -3.1571205, -33.303456, -.38920026,\n -.38921763, -264.25346, -.38922293, -3.4207576, -1.0390527,\n -1.0501682, -142.6529, -1.0504701, -5.3602524, -.49574502,\n -.50267378, -24.102686, -.50273464, -1.80937, -2.2481933,\n -2.4896172, -69.551199, -2.5023778, -7.0576042, -1.3995564,\n -1.4016203, -7695.0826, -1.4016817, -24.701957, -.76201728,\n -.76276852, -248.87895, -.76283364, -5.2477435, -5.8106439,\n -5.7357935, -3949.2359, -5.7521318, -51.088192, -1.820624,\n -1.8167933, -25414.833, -1.816864, -43.837178, -2.7775306,\n -2.8101272, -5091.1243, -2.8111605, -33.992033, -3.1071576,\n -3.1348341, -28006.635, -3.1353206, -64.66399, 11.837296,\n 12.808605, 52779.155, 12.870791, 194.83, -3.9656907, -3.9744174,\n -4008.7749, -3.9787813, -39.800004, -4.5046818, -4.5023142,\n -3336.6423, -4.5100703, -40.757978, -.38346895, -.38304692,\n -125.48142, -.38305507, -2.6424896, 5.6273411, 5.681476,\n 71231.288, 5.6838456, 131.14722, -1.5145537, -1.5377442,\n -638.15567, -1.5381844, -11.35443, 2.2753821, 2.2209277,\n 373.39831, 2.2231916, 12.457389, -4.8882434, -4.9340401,\n -177334.75, -4.9347866, -161.82036, -1.4349869, -1.4382502,\n -490.99346, -1.438675, -10.03669, -.96129188, -.96925001,\n -1132.2858, -.96930259, -10.152092, -.59602753, -.61535222,\n -10.219633, -.61556693, -1.5369367, -1.6921098, -1.7682293,\n -209.62528, -1.7697022, -8.4352944, -2.065882, -2.0983974,\n -2413.6559, -2.0989602, -21.758492, 1.706241, 1.6722477,\n 619.3497, 1.6727942, 12.171354, -2.2661248, -2.4109784,\n -229.55882, -2.4155989, -10.563809, 1.813806, 1.7981115,\n 2640.4014, 1.7984068, 20.556565, 2.8637417, 2.7708527,\n 953.89903, 2.7734059, 19.851349, -1.8653504, -1.9698798,\n -174.47052, -1.9724197, -8.4673524, 3.0149566, 2.8208287,\n 242.43232, 2.82676, 13.013203, .45187706, .45323648,\n 89.545212, 
.45325533, 2.6344764, 2.7424901, 2.5741674,\n 100.25966, 2.5823554, 9.102041, .80475572, .80568002,\n 162.81099, .8057857, 4.7242981, -1.7931126, -1.8014064,\n -550.80513, -1.8023078, -12.098672, .41003106, .4090133,\n 40.285464, .40903525, 1.8920269, -8.1724325, -8.1912034,\n -1099.8395, -8.3432365, -41.880384, .33393717, .33561525,\n 16.178174, .33563459, 1.2173618, -4.2543897, -4.4474257,\n -2759.1931, -4.4560182, -36.825802, -3.0294284, -3.1254693,\n -2652.9286, -3.1277555, -28.983377, 2.6466251, 2.5383754,\n 209.60581, 2.5434964, 11.365696, -.53150465, -.53394445,\n -574.22139, -.53395369, -5.4537827, -4.5786254, -4.9826244,\n -850.48142, -5.0141021, -26.124321, .6145684, .61269984,\n 172.28365, .61273633, 4.0221801, -3.0290547, -3.4019779,\n -93.787279, -3.4386297, -9.5115812, 3.4030858, 3.4990013,\n 870.1726, 3.5068227, 21.599833, -1.814412, -1.8483025,\n -1673.4079, -1.8487382, -17.661376, -4.9709195, -5.139411,\n -1528.4106, -5.1619673, -33.550925, -2.8711722, -2.8576622,\n -758.19886, -2.8615509, -18.420463, -1.3502232, -1.3640438,\n -478.67408, -1.3643946, -9.5561658, -4.3706639, -4.4505896,\n -1448.8143, -4.4640268, -30.248407, -3.367544, -3.3462251,\n -2374.7355, -3.3494443, -29.974177, -1.8700967, -1.9265107,\n -533.05459, -1.9276199, -12.307408, -5.8411431, -5.7888752,\n -6974.89, -5.8005435, -61.969456, -3.0671616, -3.3185696,\n -465.64228, -3.3281753, -16.362208, .33866907, .33869449,\n 36.748824, .33870635, 1.6153433, .00332937, .0033292, .68084286,\n .0033292, .01961509, -2.1948692, -2.3506102, -201.54464,\n -2.3551288, -9.9021502, 1.2513472, 1.2525696, 331.50739,\n 1.2529014, 8.0367961, .2431842, .2423829, 57.23407, .24238525,\n 1.5014416, 2.3344863, 2.3363314, 519.08023, 2.3387547, 14.142919,\n 2.9247512, 2.9590047, 454.57521, 2.965438, 15.72514, -2.0556046,\n -2.0645602, -122.36309, -2.0686323, -8.026198, -3.6416917,\n -3.8107525, -127.30076, -3.8522025, -11.907277, -2.3465563,\n -2.3399588, -18062.323, -2.3401837, -46.331731, 3.1328605,\n 3.2237462, 4682.5244, 3.2254621, 35.819599, 2.6706612, 2.6160849,\n 4114.3045, 2.6168714, 30.844519, 2.3338673, 2.2998076, 5737.2307,\n 2.300211, 31.498137, 4.7426743, 4.4878081, 2137.1449, 4.4958392,\n 36.36025, -2.7264016, -2.7602159, -7855.6752, -2.7609288,\n -38.796069, -.25073542, -.24982419, -109.74435, -.24982577,\n -1.9037263, -1.8471557, -1.8428066, -5142.1808, -1.8430239,\n -25.984725, -2.15461, -2.1580564, -8379.8765, -2.1583395,\n -33.883764, -3.232046, -3.2299069, -4022.416, -3.2319278,\n -34.76541, 4.3258916, 4.4036403, 5374.9301, 4.4089414, 46.505897,\n 2.3506664, 2.3436886, 8472.7811, 2.3440641, 36.041483, -3.0103866,\n -3.0330376, -2827.8239, -3.0350909, -29.48259, 3.8880208,\n 3.8515615, 20380.935, 3.8528134, 67.539901, .30206136,\n .30102793, 111.56376, .30103128, 2.1672275, -.47552074,\n -.47774865, -535.52925, -.47775487, -4.947366, -1.2969297,\n -1.2873699, -448.58874, -1.2876585, -9.1038917, 5.4016685,\n 5.4129828, 75706.187, 5.4148884, 130.23541, -3.5207455,\n -3.4746651, -5392.0206, -3.4767282, -40.582638, 6.437813,\n 6.0694683, 9426.0253, 6.0779522, 73.102984, 2.9362577,\n 3.0932929, 995.57798, 3.0975282, 20.474872, 4.2903354,\n 4.4294349, 8449.8125, 4.4332812, 53.778645, 5.1739607,\n 5.1278401, 217916.35, 5.1285599, 180.01638, .04642506,\n .04642284, 27682.162, .04642284, 3.9075241, -4.2469413,\n -4.4455028, -6600.8666, -4.4499328, -49.194645, 3.5858353,\n 3.4955032, 2596.2005, 3.4987392, 32.198796, .74407075, .74065699,\n 3880.9731, .74066495, 12.903961, -4.087085, -4.1499899,\n -22604.866, -4.1515694, 
-72.278605, 4.9820244, 5.2840996,\n 5480.4086, 5.2940433, 51.429016, -1.4564105, -1.474377, -3103.33,\n -1.4744978, -18.741249, 2.7224042, 2.6837853, 10471.442,\n 2.6842648, 42.655062, 11.319421, 10.709754, 60422.779,\n 10.731002, 197.82585, 3.265598, 3.5595753, 597.22517, 3.5699894,\n 18.53622, -.9650115, -.96312035, -2380.073, -.96315384,\n -13.038223, 2.164339, 2.2153405, 1004.0559, 2.2165899,\n 16.754681, -.56780325, -.56618426, -454.15837, -.56619838,\n -5.2706926, -4.3053348, -4.2507666, -30890.39, -4.252092,\n -83.038422, -1.0564439, -1.0620748, -1721.3294, -1.0621333,\n -12.431369, -2.7316132, -2.7880892, -10933.338, -2.7886254,\n -43.370763, -1.5756134, -1.5805061, -12197.933, -1.5805756,\n -31.169406, 3.4853323, 3.4765582, 18127.864, 3.4775218,\n 60.387216, -3.0310116, -2.9837067, -9935.8247, -2.9844447,\n -45.025621, 5.4190658, 5.3833057, 3745.6544, 5.3969484,\n 47.913605, .836042, .83757213, 275.67062, .83765787, 5.7758454,\n 3.1916149, 3.3119053, 1458.2745, 3.3162779, 24.582162,\n -.13107013, -.13101089, -163.9307, -.13101103, -1.4121774,\n -.02167956, -.0216849, -4.2146913, -.0216849, -.12559013,\n 5.7062833, 5.5425845, 17395.771, 5.5473779, 82.74024,\n 3.9674132, 4.0510658, 5756.4774, 4.0547538, 44.914913,\n 1.0082364, 1.0081202, 580.6423, 1.0082229, 8.3883747,\n -.1607903, -.16067812, -7.4258153, -.16068035, -.57688327,\n -2.1794238, -2.168514, -892.76271, -2.1697763, -16.18601,\n 2.4158395, 2.3602945, 63.041821, 2.3699707, 7.1656397,\n .88352525, .8834159, 284.48156, .8835178, 6.0556954,\n 1.4023926, 1.4004412, 1876.6768, 1.4005975, 15.454071,\n -2.6481201, -2.6598485, -739.97358, -2.6629483, -17.312662,\n -.04596256, -.04598842, -1.0216786, -.04598851, -.12923324,\n -4.8706759, -4.9160841, -1506.657, -4.9347237, -32.940558,\n -.14989449, -.15037145, -4.8188818, -.15037368, -.47662029,\n -.70456375, -.70836868, -629.50443, -.70839399, -6.7859886,\n -2.9238357, -3.0439268, -353.87052, -3.0525781, -14.462724,\n -1.3816671, -1.416089, -245.24501, -1.4166886, -7.7648965,\n -2.6389059, -2.6411431, -299.06009, -2.6466421, -12.770329,\n 1.6796181, 1.6292513, 50.990814, 1.6319631, 5.2396749,\n -4.6907555, -4.0071153, -50.243176, -4.0598436, -10.340008,\n -2.4520787, -2.4202063, -255.88275, -2.424463, -11.544362,\n -.56336474, -.55902069, -125.96524, -.55905011, -3.4193484,\n -1.5439833, -1.5530797, -3645.9363, -1.5532245, -20.560393,\n -.35769835, -.35628075, -66.699031, -.3562898, -2.0435462,\n -5.8985411, -6.0079377, -4913.6259, -6.0258256, -55.500526,\n 4.7314097, 4.4310616, 685.04579, 4.4478122, 24.844689,\n -.83919065, -.84650215, -46.818948, -.84679291, -3.2066211,\n -.03089159, -.03089176, -160.02828, -.03089177, -.53451397,\n .88361004, .88295033, 130.47124, .8831211, 4.6703062, 2.3335326,\n 2.3548024, 2642.7267, 2.3556438, 24.323534, 4.4071647, 4.4439502,\n 69380.031, 4.444919, 110.45474, 3.2787794, 3.2745315, 66934.736,\n 3.2748563, 89.610449, 1.6362845, 1.6715687, 384.84329, 1.6724119,\n 10.10029, 6.3093651, 6.3182562, 32186.911, 6.3242009, 108.61343,\n 3.611326, 3.6092834, 40001.62, 3.6099395, 80.501442, 5.2158338,\n 5.266765, 49709.38, 5.2690472, 110.58476, 2.1901909, 2.1664396,\n 22261.835, 2.166564, 47.443309, -.52967183, -.52891298,\n -6096.2169, -.5289149, -11.958904, 1.7552088, 1.7533111,\n 7659.1081, 1.7534507, 28.682165, 9.3002312, 9.6458876,\n 70515.872, 9.6628501, 182.70824, -1.6751411, -1.6596802,\n -2489.8995, -1.6599057, -19.117372, .15967588, .15973638,\n 131.71812, .1597367, 1.4975272, -6.7038594, -6.8116189,\n -2053.7136, -6.8628644, -45.192173, 
3.2862183, 3.307876,\n 3814.1386, 3.3101871, 34.535289, -3.5011058, -3.4842254,\n -2091.8209, -3.4883111, -29.487977, -5.918473, -6.0585339,\n -22523.518, -6.065055, -92.402812, -5.6222517, -5.9653862,\n -3338.7985, -5.9887053, -47.257447, -3.5602535, -3.6713991,\n -1269.7128, -3.6786167, -25.247736, -.92430902, -.9254755,\n -10749.074, -.92548614, -20.941212, -5.2384521, -5.3357226,\n -29922.325, -5.3390592, -93.641227, 3.9600059, 3.9203361,\n 15722.094, 3.9219252, 62.704786, -6.5203802, -6.6896631,\n -28737.546, -6.6976513, -106.90512, -6.2106413, -6.3132951,\n -73040.278, -6.3167014, -141.2359, -.20342402, -.2036375,\n -744.52393, -.20363773, -3.134929, 3.0717208, 2.997104,\n 1083.974, 3.0004496, 21.706716, -2.578223, -2.5895842,\n -34704.883, -2.589796, -61.330595, 2.1154631, 2.0707695,\n 913.4727, 2.0717072, 15.989522, -4.4212059, -4.6762251,\n -5267.8305, -4.6824606, -46.871022, -6.0308154, -6.1288048,\n -22200.014, -6.1357161, -93.118315, 2.0010941, 2.0050502,\n 8179.9494, 2.0052698, 31.995965, -4.7215135, -4.7224521,\n -10299.541, -4.7268107, -61.234115, -.61478931, -.61724539,\n -936.8533, -.61725705, -7.0747006, -1.7242039, -1.7260096,\n -31850.535, -1.7260607, -45.578987, 4.2597077, 4.2708827,\n 15752.019, 4.2731622, 65.871661, .13878808, .13874551,\n 110.47553, .13874572, 1.2862443, .06198083, .0619749,\n 17.679469, .06197494, .40800109, -.51497256, -.5153046,\n -11904.695, -.51530576, -14.669956, -5.0832571, -5.0614461,\n -115983.47, -5.0625443, -144.17615, 4.8631068, 4.9376385,\n 4604.7975, 4.9466392, 47.754326, .41687368, .41579762,\n 354.31543, .41580291, 3.9488109, 1.5740145, 1.5668996,\n 233.56283, 1.5678388, 8.3331091, -1.0156158, -1.0212038,\n -1092.6454, -1.0212729, -10.406692, 2.7223636, 2.7077122,\n 1547.9597, 2.7096859, 22.553674, -1.7144195, -1.7151036,\n -21079.76, -1.7151695, -39.570052, -1.1877669, -1.1941703,\n -5141.3335, -1.1942127, -19.357357, -9.1433754, -9.5217086,\n -84411.705, -9.5357864, -191.8103, -2.4873675, -2.4944945,\n -5322.7872, -2.4951476, -32.053319, -.52446998, -.52526652,\n -67.958396, -.52530591, -2.6539626, 2.3976154, 2.3905626,\n 12988.353, 2.3908659, 42.108622, 2.1940034, 2.1651172,\n 9681.0591, 2.1653379, 35.98591, -6.109209, -6.0299691,\n -7699.0563, -6.0425009, -65.988636, 3.8079023, 3.7502112,\n 44419.258, 3.7508204, 86.360332, -1.8819326, -1.8918879,\n -6906.1083, -1.8920842, -29.027781, -5.8802911, -5.9010268,\n -11508.862, -5.910263, -73.554629, .3642416, .36357882,\n 103.13817, .36358642, 2.3918439, -.1875515, -.18754808,\n -248.0582, -.18754846, -2.0587245, -3.2885067, -3.3249747,\n -12471.058, -3.3260255, -51.282232, -3.4245938, -3.5766289,\n -292.02275, -3.5949103, -15.073413, 1.6999336, 1.6998475,\n 5830.6976, 1.6999974, 25.636662, 2.7145418, 2.692024,\n 7781.6473, 2.6926642, 38.561584, 2.3688876, 2.4300853,\n 676.72969, 2.4324331, 15.601561, 8.4394979, 8.3138999,\n 61224.725, 8.3239664, 163.37541, 4.634536, 4.6071471,\n 7022.4616, 4.6122045, 53.231327, .9590249, .97735056,\n 38.64247, .97790484, 3.2878214, -2.041165, -2.0676384,\n -416.51119, -2.0694518, -12.016951, 7.3712772, 7.5493285,\n 83124.524, 7.5553493, 165.29951, 1.0943632, 1.095498,\n 1564.5913, 1.0955701, 12.328435, 15.712534, 15.419989,\n 1246536, 15.432621, 675.14817, .75482542, .7538249,\n 6187.394, .75383208, 15.219496, 1.5411183, 1.5461771,\n 2717.0765, 1.5463534, 18.617639, .05192873, .05191976,\n 257.80525, .05191977, .88586806, 6.9659606, 6.7887171,\n 14554.668, 6.8007292, 89.054275, -1.3180532, -1.3173208,\n -35718.803, -1.3173384, -39.59019, 3.1417625, 
3.1357097,\n 1099.7541, 3.140007, 22.14186, -2.2540263, -2.2575945,\n -4491.5963, -2.2581012, -28.364354, -3.6364072, -3.6980171,\n -1193.2561, -3.7057156, -25.08185, -8.9597209, -9.0798549,\n -92417.588, -9.0911537, -195.03589, 1.3128072, 1.3119967,\n 6846.1057, 1.3120487, 22.765744, 5.0951244, 5.2515277,\n 1418.9377, 5.277164, 33.272924, -6.6168923, -6.7651375,\n -71965.581, -6.7695384, -146.60349, 1.4743967, 1.4749183,\n 3777.6522, 1.4750374, 20.175155, .88203319, .88612343,\n 873.71372, .88617114, 8.7925127, 10.232282, 10.227907,\n 24708.303, 10.269468, 137.27643, 2.4994204, 2.522306,\n 4996.1112, 2.52301, 31.484991, -.33601015, -.33565892,\n -94.016495, -.33566501, -2.1977061, 1.2014409, 1.201799,\n 1306.5399, 1.2019131, 12.354993, -.1917435, -.19167108,\n -12.795383, -.19167405, -.7777348, -1.1541729, -1.1510202,\n -841.79482, -1.1511491, -10.389213, -3.3155305, -3.322033,\n -7755.1242, -3.3234863, -44.011375, 1.638263, 1.6289084,\n 699.83365, 1.6294247, 12.338263, -2.3074323, -2.3163968,\n -5852.6398, -2.3168634, -31.468056, -2.9464432, -2.9898088,\n -4559.9816, -2.9912043, -34.081582, .64340142, .64392566,\n 1516.8902, .64393612, 8.5632652, 1.8216221, 1.8737544,\n 1093.3062, 1.8743269, 15.365716, -4.1484683, -4.1550817,\n -54011.578, -4.1559855, -97.59348, -2.0098146, -2.0067011,\n -6797.1668, -2.0069487, -30.168008, -3.0886434, -3.0641784,\n -6766.3444, -3.0653206, -40.114053, 3.1034238, 3.0604395,\n 12317.888, 3.0611532, 49.136717, -4.2798863, -4.165237,\n -2660.4643, -4.1713732, -36.526436, -2.7642694, -2.7673322,\n -601.62921, -2.7714243, -16.627612, -4.1354083, -4.150768,\n -3169.2294, -4.1567748, -37.843982, 2.085076, 2.110033,\n 337.53611, 2.1122867, 11.363741, 4.0312289, 4.1372091,\n 3906.6743, 4.1424073, 39.892756, -.2784614, -.27871515,\n -124.55464, -.27871771, -2.1295931, .91847241, .91775794,\n 6536.0012, .91777237, 17.666402, -6.5936505, -6.4164512,\n -4117.1251, -6.4394331, -56.3571, -3.7873428, -3.7775824,\n -15933.885, -3.7790071, -61.140686, -3.1255338, -3.1497479,\n -17261.446, -3.1504428, -55.246977, -.88895885, -.88950053,\n -146.0698, -.88966377, -4.8690165, 2.8107448, 2.8179615,\n 2663.2443, 2.8195846, 27.606919, -3.4316528, -3.4093229,\n -6258.9413, -3.4111217, -41.927843, .32348352, .32327145,\n 142.66038, .32327548, 2.4622713, 2.4586408, 2.4836465,\n 1053.3637, 2.4855562, 18.534868, -2.0863466, -2.1097879,\n -3798.3528, -2.1102179, -25.475501, 1.347211, 1.3375771,\n 207.73027, 1.3381379, 7.2242097, 1.2748403, 1.2586908,\n 390.71981, 1.2589676, 8.5952597, 4.663177, 4.6237577,\n 10949.742, 4.6275274, 61.980617, -1.0236763, -1.0387965,\n -129.94316, -1.0390997, -5.1446941, .65592571, .64964267,\n 88.122068, .64970828, 3.3594228, -2.8938186, -2.9083096,\n -7662.4106, -2.9092097, -40.034648, -.69787881, -.70020831,\n -245.99605, -.70025591, -4.929801, .91412968, .90636729,\n 27.986161, .90687785, 2.8596946, -.23737615, -.2370982,\n -8.2266245, -.23710685, -.77392452, 9.7240867, 8.4710294,\n 1709.1431, 8.5511321, 54.470164, 5.1143944, 4.797313,\n 1780.4902, 4.8086774, 35.978469, -1.3933966, -1.4279007,\n -342.37443, -1.428383, -8.7273744, -6.1643948, -6.2380461,\n -53226.802, -6.2421242, -126.46502, -1.1072716, -1.133251,\n -137.57627, -1.1336345, -5.5252331, 2.9131647, 2.6591858,\n 117.336, 2.6660231, 9.9858979, -.703409, -.69788492,\n -30.466814, -.69806848, -2.4702882, -1.3796104, -1.4028155,\n -502.94299, -1.4031774, -9.8554675, 3.4481209, 3.4947658,\n 2211.4348, 3.4988676, 29.735805, -5.1424068, -5.0932624,\n -28852.779, -5.0960527, -91.377689, 1.1658373, 
1.1641565,\n 5674.2865, 1.1641943, 19.757366, 7.2585799, 7.4634011,\n 40841.836, 7.4728428, 129.10296, -.29489292, -.29380271,\n -90.475766, -.2938062, -1.9889338, 2.1286688, 2.1307039,\n 3118.6032, 2.1312262, 24.176426, 6.4743187, 6.7041785,\n 10245.251, 6.7206537, 75.44612, -.18655471, -.1872321,\n -8.1187575, -.18723562, -.65619663, -1.7840986, -1.807568,\n -1161.9624, -1.8081095, -15.464763, -3.9600761, -4.0954915,\n -885.68765, -4.109497, -24.037865, -2.3068248, -2.3243811,\n -3915.5288, -2.3249968, -27.517346, -7.0204058, -7.1839731,\n -15774.818, -7.1997808, -91.952371, 3.1544948, 3.0861172,\n 655.79074, 3.091473, 18.687088]\n\nstar98_resids = np.array(star98).astype(float).reshape(-1, 5)\n\ninvgauss = [\n [0.21946682, 0.19838235, -0.13116093, 0.19804544, 0.2329114],\n [-0.68724239, -1.20786, 0.29727304, -1.1461406, -0.6548399],\n [-0.02508381, -0.02537231, 0.00493116, -0.02537176, -0.01837619],\n [0.13333854, 0.12526482, -0.06542915, 0.12518341, 0.13250661],\n [-0.1828634, -0.20290189, 0.11601121, -0.20253671, -0.19796778],\n [-0.18576541, -0.20700925, 0.14184774, -0.20660529, -0.21392459],\n [-0.01680541, -0.01694517, 0.00684574, -0.01694497, -0.01569578],\n [0.01580889, 0.01566406, -0.0337669, 0.01566384, 0.02565121],\n [0.40499508, 0.3419356, -0.19304143, 0.3403085, 0.39859025],\n [0.04500324, 0.04404018, -0.01918807, 0.04403675, 0.04267593],\n [-0.35003226, -0.43573938, 0.19717053, -0.43227509, -0.36421909],\n [-0.44886321, -0.59409509, 0.14983417, -0.58638517, -0.3923042],\n [0.35983219, 0.3103634, -0.12058593, 0.30923487, 0.3149021],\n [0.61634589, 0.48829837, -0.21846474, 0.48391242, 0.54956633],\n [-0.19586429, -0.21798637, 0.08445516, -0.21757089, -0.18643276],\n [-0.67768345, -1.0811333, 0.12589839, -1.0427999, -0.4871933],\n [-0.43106322, -0.55561146, 0.10528459, -0.54969041, -0.3394889],\n [0.27120489, 0.24173716, -0.08854081, 0.24120493, 0.23528243],\n [-0.05090118, -0.0522168, 0.02073681, -0.05221113, -0.04754183],\n [0.38145175, 0.32775094, -0.09872406, 0.32649672, 0.30627443],\n [-0.06122628, -0.06313667, 0.02402832, -0.06312674, -0.05647762],\n [-0.27729954, -0.32438509, 0.10631392, -0.3230591, -0.25380849],\n [-0.17498754, -0.19254711, 0.083475, -0.19225397, -0.17226626],\n [-0.04475333, -0.04570496, 0.01064837, -0.04570159, -0.03493987],\n [2.1079261, 1.2436278, -0.36382129, 1.1877369, 1.4786871],\n [-0.59050542, -0.85032246, 0.09040464, -0.83176757, -0.3980059],\n [-0.27481622, -0.32238792, 0.13093539, -0.32102242, -0.27043148],\n [-0.32072485, -0.38688683, 0.12794677, -0.38462801, -0.29746887],\n [-0.49304951, -0.6674332, 0.11797934, -0.65734004, -0.38566096],\n [0.06418319, 0.06232442, -0.01895976, 0.06231545, 0.05385613],\n [-0.15233039, -0.16429293, 0.03637778, -0.16413654, -0.11907294],\n [0.14306921, 0.13431256, -0.04142349, 0.1342233, 0.11924944],\n [0.50771239, 0.41922154, -0.11900574, 0.41666974, 0.39440688],\n [-0.33686723, -0.4055089, 0.07946929, -0.40319374, -0.26225008],\n [-0.12603683, -0.13365709, 0.01972519, -0.13358036, -0.08557535],\n [-0.28690375, -0.33635948, 0.09007555, -0.3349458, -0.24568],\n [-0.38061163, -0.49506721, 0.36307402, -0.48940939, -0.47205916],\n [-0.61802926, -0.99345317, 0.28705958, -0.95710777, -0.60303156],\n [0.33500504, 0.29312132, -0.0745651, 0.29225169, 0.25579345],\n [0.09993473, 0.09567799, -0.02142368, 0.09564778, 0.0753562],\n [-0.77390406, -1.5092293, 0.24944552, -1.4025957, -0.66853885],\n [-0.56372333, -0.81484888, 0.13961703, -0.79670113, -0.4460328],\n [-0.58464894, -0.89675035, 0.25143829, -0.86997468, 
-0.55601168],\n [-0.79699816, -1.5367111, 0.18308007, -1.4316205, -0.61498129],\n [1.2612303, 0.8616029, -0.25955307, 0.84110028, 0.93817023],\n [0.16274853, 0.15026277, -0.14167113, 0.15010333, 0.19578609],\n [-0.43961546, -0.5730583, 0.12125264, -0.56640134, -0.36054187],\n [0.11414296, 0.10838846, -0.03769438, 0.10834013, 0.09940349],\n [0.234557, 0.21059654, -0.14686052, 0.21018955, 0.25281953],\n [-0.58416576, -0.89122107, 0.23552975, -0.86525941, -0.5437292],\n [0.25647739, 0.23192464, -0.03632729, 0.23153369, 0.1684433],\n [-0.45074863, -0.58662315, 0.09520965, -0.57989069, -0.33821532],\n [0.19326576, 0.17842124, -0.03625637, 0.17823145, 0.13939313],\n [0.56053862, 0.45568773, -0.12652518, 0.45244681, 0.43000418],\n [-0.19469865, -0.21453127, 0.03993593, -0.2141952, -0.14466825],\n [-0.19716704, -0.21788899, 0.04601467, -0.21752674, -0.15294373],\n [-0.43999815, -0.58606522, 0.20411479, -0.57811493, -0.42914302],\n [0.01566139, 0.01554434, -0.00569429, 0.01554419, 0.01408348],\n [-0.2383123, -0.26955511, 0.05428065, -0.26887452, -0.18336762],\n [-0.64722983, -0.99421314, 0.113679, -0.96432537, -0.45667696],\n [-0.19029977, -0.21185782, 0.10882145, -0.21145172, -0.19900944],\n [0.52360533, 0.42760203, -0.17110953, 0.42469226, 0.45439907],\n [0.53511334, 0.43209168, -0.26125299, 0.42881606, 0.53087744],\n [0.06765918, 0.06581012, -0.00721243, 0.0658017, 0.04041926],\n [-0.33830509, -0.4080903, 0.08346023, -0.40570789, -0.26732662],\n [0.25358176, 0.2261832, -0.14274032, 0.2256911, 0.26379764],\n [0.64897923, 0.50000076, -0.52550319, 0.49437774, 0.76211817],\n [-0.40741312, -0.51148167, 0.07673901, -0.50709672, -0.2942425],\n [-0.36424337, -0.45859178, 0.20539539, -0.45456159, -0.37914151],\n [0.03219191, 0.03166734, -0.02244757, 0.03166592, 0.03596645],\n [0.09131316, 0.08775474, -0.01880635, 0.08773163, 0.06794128],\n [0.09544249, 0.09147182, -0.02468448, 0.0914443, 0.07661477],\n [-0.26565384, -0.30975165, 0.12756023, -0.30853737, -0.26209526],\n [-0.67698377, -1.2354946, 0.45589181, -1.1640047, -0.74762319],\n [-0.5849103, -0.89192114, 0.2316015, -0.86599792, -0.54114872],\n [-0.41648676, -0.52673306, 0.08059767, -0.52192252, -0.30351836],\n [0.60186248, 0.4774551, -0.247836, 0.47321483, 0.56415222],\n [-0.48771891, -0.68555533, 0.27302358, -0.67248587, -0.5064343],\n [-0.23870823, -0.27432787, 0.13922256, -0.27344544, -0.25128012],\n [-0.46352127, -0.61110291, 0.10542772, -0.60338992, -0.3564851],\n [-0.15315845, -0.16768992, 0.15638745, -0.16746051, -0.19431423],\n [-0.11028172, -0.11661361, 0.03746978, -0.11655305, -0.09695557],\n [-0.09766739, -0.10260147, 0.03416514, -0.10255995, -0.08670367],\n [-0.25865969, -0.3031103, 0.20195439, -0.30184354, -0.30008683],\n [-0.29530742, -0.35636228, 0.25145118, -0.35427322, -0.3526502],\n [-0.43655156, -0.59436743, 0.35324208, -0.58503774, -0.51253616],\n [-0.0260924, -0.02641737, 0.00727071, -0.0264167, -0.02147229],\n [-0.68007447, -1.2304932, 0.41095371, -1.1612362, -0.72440028],\n [-0.55540409, -0.806563, 0.16968559, -0.78815111, -0.47130029],\n [0.08496324, 0.08153547, -0.0466319, 0.08151243, 0.08764458],\n [0.25291318, 0.2275826, -0.06526919, 0.22716071, 0.20287431],\n [-0.3459101, -0.43509049, 0.30138239, -0.43129855, -0.41625371],\n [-0.17190125, -0.1890793, 0.09316763, -0.18879372, -0.17658387],\n [-0.20852324, -0.23414686, 0.09714721, -0.23362349, -0.20366813],\n [0.59134795, 0.48223124, -0.06984852, 0.47890327, 0.36555927],\n [-0.25589175, -0.30711218, 0.63498647, -0.30541476, -0.43648469],\n [-0.03186028, -0.03232117, 
0.005511, -0.03232006, -0.02236591],\n [-0.25594521, -0.28948219, 0.03054559, -0.28875201, -0.158766],\n [-0.51359911, -0.70176826, 0.10370339, -0.69049933, -0.37962706],\n [0.19992662, 0.18358969, -0.05191884, 0.18336752, 0.16070599],\n [-0.03252799, -0.03304344, 0.01041198, -0.03304208, -0.02803448],\n [0.87784986, 0.65610395, -0.17025957, 0.64692494, 0.6402172],\n [0.25557659, 0.22823559, -0.12059263, 0.22774936, 0.25068651],\n [-0.31197284, -0.37536793, 0.1441638, -0.37323551, -0.30388314],\n [-0.49409432, -0.69358795, 0.24006401, -0.68046772, -0.48939432],\n [0.29230454, 0.26153551, -0.03469097, 0.26099704, 0.18098358],\n [-0.43107557, -0.55170049, 0.08647818, -0.54614283, -0.31794191],\n [1.1243036, 0.79861683, -0.16621181, 0.78325509, 0.74900784],\n [0.94243785, 0.66779165, -0.7453546, 0.6547638, 1.098077],\n [-0.00547659, -0.00548998, 0.0009586, -0.00548998, -0.00385978],\n [-0.48125962, -0.64802831, 0.12750168, -0.63856528, -0.38943494],\n [0.08075752, 0.07765921, -0.04328822, 0.0776394, 0.08265236],\n [-0.01733649, -0.01747477, 0.003682, -0.01747458, -0.01303203],\n [0.37106276, 0.31699846, -0.18163582, 0.31569208, 0.36844733],\n [0.08082038, 0.0777816, -0.03548928, 0.07776256, 0.0773968],\n [0.04414503, 0.04321061, -0.02021886, 0.04320732, 0.04287296],\n [0.29116544, 0.25845228, -0.07178838, 0.25784145, 0.23003183],\n [0.28895977, 0.2544519, -0.1475, 0.25376729, 0.29095931],\n [-0.17460846, -0.18979305, 0.02755812, -0.18957327, -0.11888764],\n [-0.2784603, -0.3217463, 0.05463276, -0.32062952, -0.20386214],\n [-0.48120414, -0.65896058, 0.18281081, -0.64823028, -0.43910009],\n [0.43919416, 0.36448122, -0.28435028, 0.3623776, 0.47870238],\n [0.29138147, 0.25700789, -0.12079888, 0.25633418, 0.27374041],\n [-0.62673306, -1.0166031, 0.28405348, -0.9780172, -0.6065479],\n [-0.38350618, -0.48318203, 0.13895444, -0.47891006, -0.34446813],\n [-0.15554825, -0.16871708, 0.05613268, -0.16853152, -0.13952714],\n [-0.5004515, -0.69034345, 0.15487883, -0.67857984, -0.42649638],\n [0.01693351, 0.01677943, -0.01832247, 0.0167792, 0.02190295],\n [-0.2479185, -0.28563098, 0.1182882, -0.28467869, -0.24407863],\n [2.2482611, 1.2402532, -1.3060192, 1.1700227, 2.3635113],\n [0.20602966, 0.18306043, -1.0509977, 0.18263483, 0.44685205],\n [0.58486763, 0.47239982, -0.12643067, 0.46882798, 0.44224799],\n [-0.22978171, -0.26822207, 0.43025548, -0.2671554, -0.35683089],\n [-0.04893299, -0.05008781, 0.01291673, -0.05008327, -0.03954838],\n [-0.31992822, -0.38629891, 0.13558557, -0.38402033, -0.30277234],\n [0.23281539, 0.21056172, -0.07755681, 0.21020792, 0.20334075],\n [-0.40389696, -0.50231576, 0.06081306, -0.49835653, -0.27072146],\n [-0.4149099, -0.53505755, 0.14485935, -0.52933329, -0.36809624],\n [-0.19484302, -0.21478077, 0.04112692, -0.21444139, -0.14616452],\n [-0.59354453, -0.92007961, 0.2558687, -0.89126164, -0.56491693],\n [-0.22649251, -0.25446097, 0.05203575, -0.25388691, -0.17477522],\n [0.11308923, 0.10712155, -0.06333083, 0.1070691, 0.11744356],\n [2.1570618, 1.2739476, -0.29396154, 1.2169135, 1.3985625],\n [-0.36661502, -0.45229019, 0.100595, -0.44898252, -0.30015274],\n [-0.13090653, -0.14049298, 0.06964604, -0.14037609, -0.13364407],\n [1.034831, 0.73728361, -0.28908361, 0.72334743, 0.85230971],\n [0.01234337, 0.01225937, -0.01630469, 0.01225927, 0.01706357],\n [-0.02041978, -0.02062125, 0.00657333, -0.02062092, -0.01763217],\n [0.25698133, 0.23093649, -0.06508868, 0.23049756, 0.20485482],\n [-0.18856169, -0.21000433, 0.12057066, -0.20959888, -0.20467251],\n [-0.61349066, -0.96438489, 
0.22267832, -0.52769928, -0.4208486],\n [-0.2342998, -0.26726459, 0.10501295, -0.26649427, -0.22591179],\n [2.4407724, 1.317305, -1.1861326, 1.237361, 2.4177196],\n [0.48137182, 0.39179562, -0.40147968, 0.38904091, 0.57088894],\n [0.88347144, 0.63016669, -0.98224625, 0.61833459, 1.1531305],\n [-0.37748634, -0.47241194, 0.12786077, -0.46847326, -0.33153033],\n [0.6029886, 0.47870953, -0.23536225, 0.4744856, 0.55521561],\n [0.25692875, 0.2299286, -0.09565444, 0.22945687, 0.23287356],\n [-0.02006736, -0.02023643, 0.00185917, -0.02023619, -0.01144046],\n [-0.10153493, -0.10651411, 0.01952647, -0.10647343, -0.07384054],\n [1.0620669, 0.75307903, -0.27944874, 0.73844545, 0.85745625],\n [0.30155225, 0.26708188, -0.06531001, 0.2664271, 0.22816287],\n [0.17414172, 0.16255392, -0.01972152, 0.16242552, 0.10615131],\n [0.04052029, 0.03973302, -0.01811988, 0.03973047, 0.03904003],\n [0.0995454, 0.09467985, -0.08338736, 0.09464023, 0.11822905],\n [-0.1098009, -0.11547485, 0.01617159, -0.11542601, -0.0730575],\n [-0.44427463, -0.6062158, 0.32152264, -0.59656536, -0.50255261],\n [-0.33979962, -0.40986904, 0.08038145, -0.40747774, -0.26477513],\n [-0.6619722, -0.98266748, 0.05866829, -0.95756575, -0.37185274],\n [0.04999817, 0.04877268, -0.02928264, 0.04876768, 0.05270472],\n [-0.1508781, -0.16430411, 0.10638183, -0.16410528, -0.16919335],\n [0.43989814, 0.37692724, -0.0381013, 0.37543202, 0.24522121],\n [0.33284499, 0.28780745, -0.18836734, 0.28679605, 0.34687517],\n [0.19602769, 0.18000773, -0.06084173, 0.17978985, 0.16722023],\n [-0.4337603, -0.57396928, 0.19590909, -0.56653304, -0.41930309],\n [0.00823044, 0.00819827, -0.00273524, 0.00819824, 0.00718275],\n [0.89286471, 0.65491122, -0.31419723, 0.64454243, 0.79420855],\n [-0.485029, -0.66434467, 0.17236992, -0.6535117, -0.43285486],\n [-0.19591362, -0.22008743, 0.16508003, -0.21959164, -0.23314061],\n [0.29846968, 0.2648503, -0.06051499, 0.26422096, 0.22091802],\n [-0.51177164, -0.7090097, 0.14101934, -0.69660593, -0.4195854],\n [-0.08107079, -0.08478746, 0.06715827, -0.08475907, -0.09592963],\n [-0.51330833, -0.7044032, 0.11384153, -0.6927808, -0.39146756],\n [-0.28434784, -0.34314381, 0.3405642, -0.34113181, -0.38046054],\n [-0.22041142, -0.25004037, 0.12217904, -0.24937866, -0.22812084],\n [-0.28288225, -0.33456338, 0.15166649, -0.33299857, -0.28954118],\n [-0.38045734, -0.48755831, 0.24374344, -0.48259472, -0.41322979],\n [0.02184406, 0.02163048, -0.00460415, 0.02163014, 0.0163788],\n [0.55876591, 0.45226093, -0.16342586, 0.44890758, 0.46730833],\n [-0.08268958, -0.08625577, 0.03425598, -0.08623014, -0.07766451],\n [-0.24595302, -0.28107306, 0.07941274, -0.28024022, -0.21258935],\n [-0.20771709, -0.23236015, 0.07678811, -0.23187411, -0.18782618],\n [0.07956144, 0.07648132, -0.05316748, 0.07646146, 0.08763827],\n [-0.11617647, -0.12358576, 0.05869661, -0.12350707, -0.11658064],\n [0.69686488, 0.5196206, -1.1872426, 0.51223505, 1.0486298],\n [-0.58188967, -0.86594781, 0.17081948, -0.84355226, -0.48724743],\n [2.0124807, 1.144415, -1.4286583, 1.0858262, 2.2619028],\n [0.59690036, 0.47855196, -0.15696886, 0.47467906, 0.4818173],\n [0.36272796, 0.31308576, -0.1075188, 0.31195833, 0.30471383],\n [0.03595038, 0.03534871, -0.01187403, 0.03534703, 0.03130966],\n [-0.22580188, -0.25143769, 0.02816139, -0.25095373, -0.14213895],\n [-0.38006128, -0.47956806, 0.15642695, -0.47527259, -0.356191],\n [1.1166041, 0.76410384, -0.64917324, 0.74608095, 1.1741665],\n [0.20199822, 0.18591811, -0.03661377, 0.18570506, 0.14403118],\n [-0.32867868, -0.4062999, 0.25813875, 
-0.40327202, -0.38206925],\n [0.05677936, 0.0553442, -0.01411033, 0.05533815, 0.04497619],\n [-0.36806787, -0.454488, 0.1005, -0.45113609, -0.30085043],\n [-0.53506292, -0.79438171, 0.32265847, -0.77407627, -0.56954441],\n [0.19071785, 0.17381953, -0.17391407, 0.17357042, 0.23301424],\n [-0.51589241, -0.74151215, 0.25824814, -0.72549233, -0.51609362],\n [-0.03413597, -0.03468197, 0.00768145, -0.03468051, -0.02615969],\n [0.08910428, 0.08519312, -0.07159331, 0.08516452, 0.1043678],\n [0.35987379, 0.30913411, -0.16000503, 0.30794728, 0.34606262],\n [0.18489821, 0.17040005, -0.06371003, 0.17021083, 0.16331813],\n [1.3405019, 0.88402, -0.50635538, 0.8589904, 1.2208813],\n [-0.46237474, -0.60429991, 0.08655541, -0.59714239, -0.3332505],\n [0.4514692, 0.38508298, -0.04198612, 0.38346423, 0.25770965],\n [0.41235874, 0.34998624, -0.11971637, 0.34842228, 0.34401548],\n [-0.30686678, -0.36928716, 0.16563904, -0.36718543, -0.31479717],\n [1.0163954, 0.70515236, -0.86009015, 0.68968476, 1.2112481],\n [-0.24796447, -0.28380914, 0.0814219, -0.28294869, -0.21553446],\n [0.24538016, 0.22206756, -0.04780098, 0.22169915, 0.17921812],\n [-0.4291345, -0.58654611, 0.43946985, -0.57710826, -0.54498178],\n [-0.39186361, -0.51057574, 0.30062526, -0.5046651, -0.45196879],\n [0.79404903, 0.60254896, -0.2020656, 0.59496774, 0.6339753],\n [-0.36717171, -0.44859111, 0.07036081, -0.44560604, -0.26670618],\n [-0.62796841, -0.97958516, 0.17653188, -0.94803646, -0.51829408],\n [-0.40685157, -0.52591145, 0.18066974, -0.52018042, -0.39107718],\n [0.31378064, 0.2751345, -0.1068019, 0.27434394, 0.27602839],\n [-0.5145653, -0.72052957, 0.16563167, -0.70709631, -0.44430867],\n [-0.44366138, -0.57130426, 0.08146556, -0.56526133, -0.31771357],\n [-0.09743412, -0.10210125, 0.02230239, -0.102064, -0.0750933],\n [-0.67463913, -1.1652336, 0.2955002, -1.1091912, -0.64552085],\n [-0.0532977, -0.05477754, 0.02666406, -0.0547707, -0.05330784],\n [0.01125144, 0.0111872, -0.0069488, 0.01118714, 0.01207217],\n [1.0424593, 0.7658664, -0.08546767, 0.75386482, 0.57058019],\n [-0.23966601, -0.27400878, 0.10024656, -0.27319153, -0.22582436],\n [-0.15102731, -0.16293533, 0.04024455, -0.16277902, -0.12244734],\n [0.10883711, 0.10360373, -0.03484788, 0.10356181, 0.09381093],\n [-0.63749651, -1.0657947, 0.34067459, -1.0202847, -0.65179114],\n [0.21462438, 0.19252866, -0.32449832, 0.19215043, 0.31036079],\n [0.28543484, 0.25359926, -0.07780585, 0.25300912, 0.23317684],\n [0.60037273, 0.47535329, -0.27759935, 0.47106133, 0.58492043],\n [-0.64629577, -1.0650438, 0.26333326, -1.0220094, -0.6036698],\n [-0.24043518, -0.272045, 0.05163505, -0.27135449, -0.18140818],\n [-0.43766527, -0.5607036, 0.07881634, -0.55500896, -0.31139393],\n [-0.10886067, -0.1144656, 0.01680504, -0.11441753, -0.07357617],\n [-0.21659505, -0.24163923, 0.04517702, -0.24115776, -0.16183839],\n [0.24558099, 0.21864978, -0.21619087, 0.21815888, 0.29654159],\n [0.87538756, 0.6420736, -0.36843874, 0.63190621, 0.82654279],\n [0.03268513, 0.03214741, -0.02171152, 0.03214594, 0.03593136],\n [-0.33085539, -0.39571874, 0.07053734, -0.39361288, -0.24902435],\n [-0.54252751, -0.80913275, 0.30893409, -0.78798062, -0.56656071],\n [-0.29883582, -0.34835188, 0.04910994, -0.34699085, -0.20623145],\n [0.82533579, 0.60997673, -0.43779671, 0.60078017, 0.84175935],\n [-0.08115594, -0.08467064, 0.04121902, -0.08464528, -0.08158108],\n [-0.30067698, -0.35725161, 0.11424508, -0.35548796, -0.27438221],\n [0.06741546, 0.06518607, -0.04474214, 0.06517379, 0.07408926],\n [1.003046, 0.72197825, -0.25434616, 
0.70913157, 0.79989373],\n [-0.58070163, -0.84229962, 0.11654776, -0.82319219, -0.42836436],\n [0.75923582, 0.58481682, -0.15124806, 0.5782288, 0.55867306],\n [0.0059764, 0.0059584, -0.00334034, 0.00595839, 0.0062025],\n [-0.53090015, -0.75600972, 0.17492552, -0.74049255, -0.46199395],\n [-0.22335222, -0.25544049, 0.17958825, -0.25467492, -0.26167535],\n [-0.22948451, -0.25824195, 0.05233589, -0.25764301, -0.17664938],\n [0.30963181, 0.27345875, -0.06687207, 0.27275659, 0.23405719],\n [1.0611246, 0.74180213, -0.46722051, 0.72619033, 1.0170951],\n [-0.29408863, -0.34962585, 0.13988706, -0.34788828, -0.28923754],\n [-0.25860882, -0.30081882, 0.14020602, -0.29967581, -0.26568138],\n [-0.22592201, -0.25446235, 0.06302462, -0.25386314, -0.18598836],\n [-0.3826994, -0.47233224, 0.07239276, -0.4688642, -0.27678777],\n [0.01947673, 0.01928047, -0.01518198, 0.01928014, 0.02258379],\n [-0.38497259, -0.48883332, 0.17038615, -0.48421616, -0.36963631],\n [-0.08148694, -0.08518986, 0.05985012, -0.08516183, -0.09263114],\n [-0.04295891, -0.04403539, 0.06160084, -0.0440309, -0.06103436],\n [-0.01471251, -0.0148122, 0.00320525, -0.01481209, -0.01115378],\n [0.71449982, 0.53710627, -0.69745866, 0.52988413, 0.89299886],\n [0.08013108, 0.07729919, -0.02108105, 0.07728251, 0.06469063],\n [0.5593455, 0.44200533, -0.53719308, 0.43794786, 0.69530214],\n [-0.29161723, -0.34587494, 0.13500442, -0.34420219, -0.28422882],\n [-0.3444663, -0.41820437, 0.09269602, -0.41559328, -0.28019574],\n [0.81332359, 0.62275131, -0.11332632, 0.61541416, 0.53124621],\n [-0.23053501, -0.26431372, 0.16104974, -0.2634919, -0.25772384],\n [-0.61349953, -0.9545587, 0.20593769, -0.92416238, -0.53719435],\n [0.9104401, 0.6520594, -0.66452249, 0.64010651, 1.032795],\n [-0.05516897, -0.05661534, 0.01231837, -0.05660902, -0.04216878],\n [-0.56883995, -0.87662871, 0.33321132, -0.84988566, -0.59966676],\n [-0.36478423, -0.45367681, 0.13740888, -0.45010064, -0.33192424],\n [-0.1586423, -0.17112781, 0.02675512, -0.17096423, -0.11043115],\n [-0.38205142, -0.4887402, 0.22432994, -0.48383445, -0.40307602],\n [0.49535731, 0.41246085, -0.08944291, 0.41016421, 0.35275325],\n [-0.06313745, -0.06522226, 0.03054947, -0.06521079, -0.0624505],\n [0.34982979, 0.30320269, -0.10692491, 0.30217114, 0.29689815],\n [-0.67251333, -1.1316017, 0.2367678, -1.0821006, -0.59829882],\n [-0.59172716, -0.97634716, 0.54039043, -0.93670332, -0.72331402],\n [1.8266434, 1.0903129, -0.82764403, 1.0433883, 1.7676396],\n [-0.24588016, -0.28664541, 0.23477248, -0.28552426, -0.3050526],\n [-0.5845798, -0.88896881, 0.22408002, -0.86346038, -0.53502439],\n [0.13148771, 0.12328602, -0.09816614, 0.12320082, 0.15028675],\n [2.4346548, 1.3699725, -0.41157278, 1.2972916, 1.6960962],\n [0.36246503, 0.31062852, -0.17737731, 0.30939885, 0.35987645],\n [1.3994869, 0.91006275, -0.55827647, 0.88256762, 1.2979929],\n [-0.45624974, -0.60016264, 0.1132157, -0.59270962, -0.36122736],\n [-0.25496413, -0.29439606, 0.10658785, -0.29338385, -0.24019574],\n [-0.05767819, -0.05933659, 0.01916141, -0.05932864, -0.05032999],\n [-0.80431809, -1.602386, 0.2093638, -1.4822075, -0.64703695],\n [1.5617118, 1.0233385, -0.18272086, 0.99348606, 0.96236533],\n [-0.70968552, -1.3600232, 0.44467442, -1.2686575, -0.76512902],\n [0.64399969, 0.50980783, -0.15969945, 0.50519804, 0.50976292],\n [-0.42577082, -0.56001156, 0.19633848, -0.55306239, -0.4144407],\n [0.19852298, 0.18141046, -0.09500672, 0.18116502, 0.19564494],\n [-0.01755235, -0.01770992, 0.00953801, -0.01770969, -0.01804622],\n [0.71490024, 0.54777169, 
-0.30852084, 0.54135133, 0.68066767],\n [0.25055792, 0.22421576, -0.11747822, 0.22375532, 0.24524559],\n [0.7455487, 0.56404977, -0.37980282, 0.55679912, 0.7502052],\n [-0.46599585, -0.64242384, 0.27075063, -0.63151757, -0.48991551],\n [1.2948213, 0.83888233, -1.1669167, 0.81310832, 1.5757834],\n [0.37068888, 0.31372161, -0.33695531, 0.31227059, 0.45241863],\n [-0.40780079, -0.51476393, 0.09005722, -0.51013837, -0.31056193],\n [0.04838708, 0.04727737, -0.02046467, 0.04727313, 0.04576124],\n [-0.65958483, -1.0877235, 0.22545378, -1.04365, -0.5810448],\n [-0.41448109, -0.51966116, 0.06413362, -0.51525789, -0.28035506],\n [-0.53221661, -0.77934505, 0.27260226, -0.76076586, -0.53651117],\n [0.37895608, 0.32426337, -0.13780978, 0.32295415, 0.34079714],\n [-0.35423027, -0.44100622, 0.17977977, -0.4374971, -0.35599786],\n [0.43251987, 0.36024553, -0.26019905, 0.35824609, 0.46002624],\n [-0.1780144, -0.19759107, 0.14782681, -0.19723303, -0.21081321],\n [-0.00010377, -0.00010378, 2.836e-05, -0.00010378, -8.485e-05],\n [-0.15775022, -0.17083021, 0.04249013, -0.17064969, -0.12835694],\n [-0.05827104, -0.06000977, 0.02430356, -0.06000112, -0.05485318],\n [1.1849863, 0.83572134, -0.14128063, 0.818976, 0.73481794],\n [-0.59052793, -0.96984829, 0.52299476, -0.93117582, -0.71450083],\n [0.9399917, 0.68377155, -0.28694805, 0.67236428, 0.7974322],\n [-0.44002698, -0.57087568, 0.10634001, -0.56447783, -0.34532533],\n [-0.21863606, -0.24528179, 0.0624941, -0.24474199, -0.1814558],\n [0.56326566, 0.46109889, -0.08372362, 0.45803496, 0.37592526],\n [-0.07591432, -0.07896422, 0.03725001, -0.0789438, -0.07543992],\n [-0.77818641, -1.6795659, 0.42244103, -1.5246549, -0.79981177],\n [-0.04399159, -0.04503607, 0.03189906, -0.04503194, -0.04979462],\n [-0.1002894, -0.10618355, 0.09715338, -0.10612585, -0.12502571],\n [-0.46154278, -0.65298739, 0.47775225, -0.64006615, -0.58823684],\n [0.19297036, 0.17688747, -0.08435038, 0.17666441, 0.18451545],\n [1.2567799, 0.84235571, -0.50274465, 0.82029379, 1.1667164],\n [0.23807226, 0.21413629, -0.110251, 0.2137361, 0.23206521],\n [-0.63972256, -0.9226105, 0.04888754, -0.90231089, -0.34203496],\n [-0.65083021, -1.0839922, 0.27791095, -1.0383617, -0.61748113],\n [0.01714389, 0.01700946, -0.00427506, 0.01700929, 0.01359556],\n [-0.1753322, -0.19645065, 0.33425428, -0.1960278, -0.2739114],\n [1.1513607, 0.79169637, -0.43119065, 0.77348673, 1.0456204],\n [-0.48785477, -0.68179512, 0.24456376, -0.66922912, -0.48827865],\n [0.21072881, 0.19254487, -0.05959303, 0.19228378, 0.17427073],\n [0.35612601, 0.30303134, -0.3278876, 0.30171888, 0.43650323],\n [-0.27023169, -0.3144702, 0.10075437, -0.31326871, -0.24505043],\n [0.61318693, 0.48210992, -0.3274334, 0.47749362, 0.62677681],\n [0.41111143, 0.33301644, -1.8253585, 0.33056576, 0.85133226],\n [-0.19481353, -0.21503613, 0.04602194, -0.21468694, -0.15173218],\n [-0.03987817, -0.04063046, 0.0094143, -0.04062809, -0.03105247],\n [-0.10009869, -0.10514249, 0.027462, -0.10510015, -0.08194824],\n [-0.11716349, -0.12448007, 0.04615095, -0.12440397, -0.10820942],\n [-0.17640093, -0.1928184, 0.04341165, -0.19256414, -0.13927698],\n [-0.73754475, -1.344323, 0.22748136, -1.2668483, -0.62784271],\n [-0.06202451, -0.06398783, 0.02451098, -0.06397748, -0.05734634],\n [-0.21830102, -0.24652744, 0.09873847, -0.24592095, -0.21112644],\n [-0.42028158, -0.55096001, 0.20057009, -0.54428635, -0.41380172],\n [0.27692872, 0.24786499, -0.05178603, 0.24735786, 0.19952295],\n [-0.16056965, -0.17558293, 0.09551896, -0.17534935, -0.17014355],\n [-0.06538395, 
-0.06758339, 0.02707714, -0.06757106, -0.0614033],\n [0.40472269, 0.34596206, -0.08711496, 0.34454716, 0.30559479],\n [-0.01948248, -0.01970221, 0.0312953, -0.01970179, -0.02874744],\n [-0.74044391, -1.4313084, 0.33547729, -1.332702, -0.71651588],\n [-0.00959026, -0.00963341, 0.00254421, -0.00963338, -0.00776392],\n [0.58290382, 0.46866275, -0.16430017, 0.46496644, 0.48152696],\n [0.78990971, 0.5990838, -0.21558183, 0.59151686, 0.64555366],\n [-0.60878353, -0.904121, 0.11544082, -0.88097363, -0.44066146],\n [-0.19234382, -0.2135101, 0.07997263, -0.21312274, -0.18087392],\n [0.09349877, 0.08945988, -0.04175847, 0.08943082, 0.09004552],\n [-0.56760591, -0.82023065, 0.13154508, -0.80199044, -0.43927128],\n [-0.18688935, -0.20644692, 0.06830505, -0.20610648, -0.16835162],\n [0.13621984, 0.12805562, -0.04984456, 0.12797412, 0.12275603],\n [1.0490177, 0.72713028, -0.68918444, 0.71110319, 1.1489744],\n [-0.45100013, -0.59470485, 0.13184149, -0.58718889, -0.37711905],\n [0.13526372, 0.12645504, -0.12005451, 0.12635952, 0.16377856],\n [-0.28818669, -0.33167944, 0.03350251, -0.33058978, -0.17720872],\n [-0.13060375, -0.1398183, 0.0524589, -0.13971005, -0.12140973],\n [0.9378045, 0.67753716, -0.3788192, 0.66574952, 0.87343154],\n [0.27098326, 0.24004014, -0.15183279, 0.23945297, 0.28146658],\n [-0.33485349, -0.3970462, 0.04412781, -0.39513215, -0.21469273],\n [0.6194375, 0.49775346, -0.10398783, 0.49380734, 0.43052909],\n [-0.14296602, -0.15368801, 0.04171745, -0.15355413, -0.11947338],\n [0.43258663, 0.35935961, -0.30280431, 0.35730782, 0.48392651],\n [0.37175276, 0.31861271, -0.14411522, 0.31735271, 0.34151995],\n [-0.32665016, -0.38884006, 0.06387642, -0.38687864, -0.23887944],\n [-0.42561232, -0.55795914, 0.18142126, -0.55119973, -0.40356688],\n [-0.35105423, -0.42195308, 0.05378071, -0.41958271, -0.23666551],\n [-0.02320392, -0.02345492, 0.00534224, -0.02345447, -0.01791811],\n [-0.33682582, -0.40497783, 0.0758356, -0.4026951, -0.25816932],\n [-0.12414077, -0.13332723, 0.11586673, -0.13321404, -0.1528525],\n [2.3019877, 1.257505, -1.3917948, 1.1840291, 2.4524702],\n [-0.17247345, -0.18734391, 0.02840135, -0.18713052, -0.11910715],\n [0.08731105, 0.08419405, -0.01168084, 0.08417551, 0.05626188],\n [-0.26263879, -0.30489683, 0.11180706, -0.30376866, -0.24892718],\n [-0.2910695, -0.35171965, 0.29755042, -0.34962839, -0.36942645],\n [-0.72868997, -1.3252187, 0.2462431, -1.2493896, -0.63947939],\n [-0.12607099, -0.13517712, 0.08419329, -0.13506761, -0.13883936],\n [-0.303988, -0.36124323, 0.1053936, -0.35945659, -0.269062],\n [-0.4509924, -0.60030839, 0.16495918, -0.59220241, -0.40636377],\n [0.34727781, 0.30690118, -0.02535243, 0.30612116, 0.18286666],\n [-0.64879747, -0.98327508, 0.09287628, -0.95550747, -0.42761313],\n [0.06005547, 0.05833297, -0.02946512, 0.05832474, 0.05967803],\n [0.11741206, 0.11047067, -0.14296951, 0.11040233, 0.15796697],\n [-0.08041936, -0.08379401, 0.03396093, -0.08377041, -0.07601692],\n [1.7225341, 1.03418, -1.1239065, 0.99064126, 1.8823424],\n [-0.10868496, -0.11435385, 0.01896727, -0.11430459, -0.07652287],\n [-0.31637263, -0.37839743, 0.10091835, -0.37638371, -0.2723533],\n [-0.20938853, -0.23213658, 0.0367331, -0.23172556, -0.14768321],\n [-0.28102192, -0.32840365, 0.09088428, -0.32707863, -0.24303364],\n [-0.13782043, -0.14790602, 0.04587562, -0.14778313, -0.12034084],\n [0.10434784, 0.09932503, -0.04931338, 0.09928476, 0.10240486],\n [0.45772778, 0.38254781, -0.13504668, 0.380503, 0.38392184],\n [-0.11546231, -0.12233721, 0.03505027, -0.12226903, -0.09776887],\n 
[-0.44973591, -0.58508369, 0.09599035, -0.57838802, -0.33862907],\n [0.16888733, 0.15759416, -0.02645707, 0.15746842, 0.11470666],\n [0.32593828, 0.28649726, -0.06498227, 0.28570446, 0.2399009],\n [0.27186575, 0.24233951, -0.08641709, 0.24180647, 0.23376503],\n [0.12746131, 0.1202375, -0.04891426, 0.12016931, 0.11670087],\n [-0.5525436, -0.79779884, 0.16088526, -0.78013652, -0.4614165],\n [-0.33745452, -0.41627007, 0.19449844, -0.41322893, -0.353828],\n [-0.38523263, -0.47686709, 0.07595916, -0.47326717, -0.2825002],\n [-0.29919216, -0.35416709, 0.10080245, -0.35249308, -0.26230134],\n [-0.32859982, -0.39187088, 0.06583404, -0.38985297, -0.24225443],\n [1.2230258, 0.85610896, -0.14322376, 0.83822182, 0.7538856],\n [-0.59484282, -0.91957581, 0.24273238, -0.89112381, -0.55588811],\n [0.22316516, 0.20054443, -0.20153153, 0.20016315, 0.27177443],\n [-0.34353237, -0.42417631, 0.17430392, -0.42104902, -0.34521593],\n [1.0013204, 0.70813496, -0.49237704, 0.69416568, 0.99576753],\n [1.286023, 0.85588024, -0.52633007, 0.83267972, 1.2029896],\n [-0.14232183, -0.15297857, 0.0427016, -0.15284572, -0.12004362],\n [-0.11923524, -0.12690099, 0.05092533, -0.12681891, -0.11313345],\n [0.07967895, 0.07688418, -0.02053429, 0.07686785, 0.06388501],\n [-0.5240686, -0.76737404, 0.30816235, -0.74908509, -0.55317424],\n [-0.57784358, -0.86110674, 0.1843253, -0.83868528, -0.49744512],\n [-0.32958216, -0.40011504, 0.13174724, -0.39761812, -0.30589072],\n [0.41448584, 0.34574843, -0.33455967, 0.343861, 0.48622915],\n [-0.32944976, -0.41067335, 0.33465253, -0.40736804, -0.41725431],\n [-0.09503026, -0.09919597, 0.012917, -0.09916555, -0.06156092],\n [-0.42105309, -0.55299201, 0.20696821, -0.54620281, -0.41866737],\n [-0.07923867, -0.08247432, 0.03034594, -0.08245231, -0.07249949],\n [-0.16577435, -0.17985874, 0.03469311, -0.17965957, -0.12400407],\n [0.19949934, 0.1827653, -0.06902931, 0.18253172, 0.17646103],\n [-0.43920686, -0.57872277, 0.16003694, -0.57144777, -0.39524175],\n [-0.56513879, -0.84885229, 0.24699657, -0.82588278, -0.54035235],\n [-0.06286303, -0.06491442, 0.02858863, -0.06490327, -0.06090758],\n [0.92549746, 0.67323857, -0.32473992, 0.6620081, 0.82244219],\n [-0.38988726, -0.47979843, 0.05636151, -0.47637231, -0.2578083],\n [0.09979014, 0.09573364, -0.01384403, 0.09570617, 0.06508626],\n [0.09280064, 0.0889795, -0.0279896, 0.08895329, 0.07841086],\n [-0.46107866, -0.59994216, 0.07899674, -0.59306763, -0.32264856],\n [0.83347527, 0.60020541, -1.0995866, 0.58955563, 1.1517236],\n [0.27905983, 0.24569371, -0.19753739, 0.24503095, 0.31334636],\n [-0.63810006, -1.0087097, 0.18285623, -0.97429886, -0.5300365],\n [-0.17291354, -0.18879898, 0.04598416, -0.18855612, -0.14009805],\n [-0.51416831, -0.73501209, 0.24166843, -0.71959983, -0.50367861],\n [-0.11603496, -0.12331911, 0.05205844, -0.12324295, -0.11191791],\n [0.25114639, 0.22198601, -0.33394605, 0.22142343, 0.34795258],\n [-0.35430121, -0.44161092, 0.18647538, -0.43805958, -0.36041173],\n [-0.4297934, -0.56245513, 0.16054818, -0.55572832, -0.38998834],\n [-0.24832551, -0.28509038, 0.09585265, -0.28418665, -0.22780274],\n [-0.5430595, -0.8236341, 0.38944718, -0.80029361, -0.61243189],\n [0.14951981, 0.14066735, -0.02096909, 0.14058006, 0.09787438],\n [-0.26030761, -0.30529244, 0.19969441, -0.30400333, -0.3002317],\n [-0.41187027, -0.5222738, 0.09534316, -0.51739668, -0.31862522],\n [0.40355256, 0.33678618, -0.40298382, 0.33495712, 0.5082054],\n [-0.75255175, -1.3894944, 0.21360879, -1.3060837, -0.62312324],\n [-0.35507276, -0.43263695, 0.08326328, 
-0.42983496, -0.27587102],\n [-0.72634868, -1.3135867, 0.24178343, -1.2397681, -0.63423315],\n [1.1376892, 0.77830075, -0.56460273, 0.75991464, 1.1348542],\n [0.0124268, 0.01235574, -0.00318039, 0.01235567, 0.00994053],\n [-0.17977795, -0.19706609, 0.04822297, -0.19678947, -0.14607826],\n [0.13164445, 0.12436625, -0.02962392, 0.12429923, 0.10088484],\n [1.6590308, 1.0177654, -0.82102101, 0.97836656, 1.6533484],\n [0.35423886, 0.30348345, -0.21771826, 0.30227719, 0.37946561],\n [-0.39745053, -0.50183363, 0.1115254, -0.49731392, -0.327836],\n [0.16236895, 0.15037561, -0.09838355, 0.15022813, 0.17310893],\n [0.6958186, 0.53635626, -0.29431717, 0.53034734, 0.6580808],\n [-0.46773645, -0.6351434, 0.19230175, -0.62534201, -0.43819951],\n [0.10162678, 0.09719379, -0.0235417, 0.09716157, 0.0786372],\n [1.2254236, 0.83616209, -0.33996754, 0.81614444, 1.0069625],\n [-0.44736095, -0.58481213, 0.11337566, -0.57787362, -0.35668813],\n [-0.53040638, -0.77530138, 0.27230976, -0.75698926, -0.53510239],\n [-0.20596997, -0.22932212, 0.05814066, -0.22888187, -0.17023123],\n [0.52440326, 0.4238883, -0.29085357, 0.42070618, 0.54284818],\n [-0.27039535, -0.31848645, 0.18037432, -0.31706852, -0.29766953],\n [-0.18413097, -0.20127687, 0.03093094, -0.20101119, -0.12800461],\n [0.2078039, 0.18889883, -0.11508173, 0.1886127, 0.21500469],\n [-0.55328036, -0.78200973, 0.11006953, -0.76662156, -0.4069389],\n [-0.31123454, -0.37880878, 0.22941672, -0.37638232, -0.3542231],\n [-0.65927093, -0.99075659, 0.0725532, -0.96387989, -0.39805047],\n [0.07618853, 0.07372323, -0.01368779, 0.07370993, 0.05416444],\n [-0.32168303, -0.39510162, 0.24521041, -0.39233234, -0.37023295],\n [-0.52044113, -0.72706987, 0.1447913, -0.71369831, -0.42806058],\n [-0.19292174, -0.21699692, 0.20331068, -0.21649757, -0.24735319],\n [-0.37437819, -0.47321524, 0.1859494, -0.46891375, -0.37354989],\n [0.58338252, 0.47340146, -0.10114556, 0.46997579, 0.40985278],\n [-0.31178488, -0.3671897, 0.05737334, -0.36555753, -0.22343443],\n [-0.44979931, -0.62005397, 0.35883752, -0.60953178, -0.52559974],\n [0.29013402, 0.25421375, -0.21161293, 0.25347515, 0.32904591],\n [-0.0471581, -0.04825397, 0.01517686, -0.04824973, -0.04071692],\n [-0.39545673, -0.49608618, 0.09505087, -0.49186199, -0.30978568],\n [0.54435985, 0.45148952, -0.05282739, 0.44886723, 0.31517645],\n [0.13502896, 0.12765483, -0.02143779, 0.12758775, 0.09212014],\n [-0.3240423, -0.3860241, 0.07033396, -0.38406021, -0.24535759],\n [0.0219324, 0.02171037, -0.00612701, 0.02170999, 0.01806413],\n [-0.13135208, -0.1397574, 0.02259516, -0.13966782, -0.09203926],\n [0.35984601, 0.3131645, -0.06318754, 0.31215909, 0.25388182],\n [-0.53692255, -0.78844182, 0.26544966, -0.76937333, -0.53490725],\n [0.99283801, 0.71110543, -0.33171189, 0.69807369, 0.86799243],\n [-0.30811339, -0.36535786, 0.08606506, -0.36359554, -0.25376166],\n [0.35941228, 0.30921429, -0.14533974, 0.30805106, 0.33486267],\n [0.46119255, 0.38419514, -0.15408706, 0.38206689, 0.40319974],\n [0.25617752, 0.22887855, -0.11355209, 0.22839493, 0.24609477],\n [0.3881024, 0.33229798, -0.10916489, 0.33096708, 0.32038232],\n [-0.01142538, -0.01148719, 0.00325243, -0.01148713, -0.00946949],\n [-0.0663644, -0.06831127, 0.00741754, -0.06830176, -0.04027665],\n [-0.31479598, -0.3845116, 0.23897448, -0.38195885, -0.36180949],\n [0.13188749, 0.12507852, -0.01495072, 0.12501997, 0.08042043],\n [0.05560787, 0.05414142, -0.02479536, 0.05413498, 0.05352512],\n [0.030057, 0.02968142, -0.00338286, 0.02968064, 0.01828388],\n [-0.60764607, -0.92798542, 
0.17422059, -0.90082532, -0.50482821],\n [-0.49611384, -0.65425635, 0.06163571, -0.64598206, -0.31189496],\n [0.19595611, 0.17969821, -0.07121758, 0.17947373, 0.17618879],\n [-0.53300603, -0.79124424, 0.33222942, -0.77102963, -0.57364618],\n [0.21233509, 0.19291969, -0.10206777, 0.19262435, 0.20956595],\n [-0.12690115, -0.1343304, 0.01431673, -0.13425796, -0.07725649],\n [-0.32844644, -0.40427048, 0.22379228, -0.40137809, -0.36413974],\n [-0.14290203, -0.15448587, 0.07820742, -0.15432956, -0.14727134],\n [0.79360616, 0.58955236, -0.48783917, 0.58096063, 0.8501696],\n [-0.02949369, -0.02992309, 0.01081226, -0.02992205, -0.0265951],\n [0.27831796, 0.24559747, -0.16801173, 0.24495834, 0.29635848],\n [1.0478477, 0.76214648, -0.12083589, 0.7494232, 0.64260005],\n [0.06339373, 0.06158523, -0.0181729, 0.06157663, 0.05266421],\n [-0.5938279, -0.89957497, 0.18744172, -0.87422318, -0.50941588],\n [0.03644298, 0.03580721, -0.01562572, 0.03580536, 0.0346231],\n [-0.49944896, -0.6882761, 0.15428926, -0.67661922, -0.42538546],\n [-0.68855705, -1.1334562, 0.15684956, -1.0878507, -0.52982329],\n [-0.08804256, -0.09199457, 0.02906246, -0.09196501, -0.07666242],\n [1.9829382, 1.1470288, -1.0173005, 1.0916852, 2.0000117],\n [0.16729735, 0.15551127, -0.04810313, 0.15537303, 0.13912136],\n [-0.6912127, -1.1040963, 0.10905673, -1.0647441, -0.47058199],\n [-0.65126541, -1.1420625, 0.44827384, -1.0841523, -0.72448568],\n [-0.18475545, -0.20375577, 0.06541737, -0.20343072, -0.16467937],\n [-0.07713194, -0.07963294, 0.00526849, -0.07961942, -0.03972479],\n [0.01740949, 0.01725609, -0.0109489, 0.01725586, 0.01879278],\n [1.2196292, 0.85291889, -0.15144619, 0.83500453, 0.7666221],\n [-0.22496394, -0.25705245, 0.16083384, -0.25629232, -0.25344151],\n [-0.36823714, -0.46294364, 0.17993716, -0.45892631, -0.36542816],\n [1.2520815, 0.81943768, -1.1127505, 0.7953967, 1.5166929],\n [0.5370179, 0.42882249, -0.44677811, 0.42522593, 0.63635551],\n [-0.45893868, -0.62841097, 0.26653623, -0.6181842, -0.48242728],\n [2.0020663, 1.154226, -1.0311665, 1.0978758, 2.021957],\n [-0.66655028, -1.1886375, 0.42913662, -1.1248503, -0.72515437],\n [-0.5154665, -0.75240451, 0.33467992, -0.73476249, -0.56236746],\n [0.11044802, 0.10446565, -0.09872336, 0.10441168, 0.13404643],\n [2.1970641, 1.2481317, -0.72708268, 1.1840148, 1.9146957],\n [-0.25675981, -0.2990706, 0.15898926, -0.29791395, -0.27572996],\n [-0.25669536, -0.29556425, 0.08661654, -0.29458722, -0.22515881],\n [0.52513525, 0.4277973, -0.19081609, 0.42481538, 0.47213092],\n [0.15974061, 0.14921456, -0.03568551, 0.14909907, 0.12211926],\n [-0.52570614, -0.79242904, 0.4633627, -0.77061806, -0.63505653],\n [0.80623824, 0.58389749, -1.1584851, 0.57388804, 1.1462581],\n [0.03130321, 0.03083991, -0.01157539, 0.03083876, 0.02830834],\n [-0.10999839, -0.11631162, 0.03812945, -0.11625126, -0.09735414],\n [-0.46938615, -0.62540527, 0.12257532, -0.61690292, -0.37800528],\n [-0.45977529, -0.62819148, 0.25115588, -0.61810743, -0.47353804],\n [-0.24400514, -0.29017483, 0.63139896, -0.28872754, -0.42206299],\n [-0.22238072, -0.25076942, 0.07736622, -0.25016715, -0.197057],\n [0.41225613, 0.35139209, -0.09092106, 0.34990208, 0.31381684],\n [0.90104234, 0.65879176, -0.33084962, 0.64814686, 0.81292475],\n [1.4268219, 0.9417433, -0.29915977, 0.91518567, 1.0679664],\n [-0.01131892, -0.01138296, 0.0052341, -0.0113829, -0.01102793],\n [-0.26064096, -0.29777716, 0.04913562, -0.29689842, -0.18829434],\n [-0.35225204, -0.43891739, 0.19332433, -0.43539782, -0.3633634],\n [0.27755578, 0.24944605, 
-0.03488256, 0.24897266, 0.17516458],\n [-0.05249628, -0.05408881, 0.06457515, -0.05408076, -0.07086811],\n [0.15270366, 0.14295055, -0.03751237, 0.14284682, 0.12049466],\n [1.6640434, 1.0233537, -0.74645815, 0.98412553, 1.6049257],\n [0.40173966, 0.34399464, -0.08248412, 0.34261792, 0.29860467],\n [-0.2012955, -0.22591292, 0.12425093, -0.22541249, -0.21593975],\n [-0.55241944, -0.81052705, 0.20869867, -0.79100738, -0.50314813],\n [1.2167787, 0.81596409, -0.64037158, 0.79464724, 1.2377371],\n [0.31859384, 0.2793104, -0.0957355, 0.2785059, 0.26885983],\n [0.33521642, 0.29137558, -0.12189482, 0.29042371, 0.30145462],\n [0.35923737, 0.31063323, -0.10219573, 0.30954184, 0.29767469],\n [0.49244921, 0.3958823, -0.63755836, 0.39275613, 0.67622457],\n [1.0662749, 0.74466625, -0.46536687, 0.72890881, 1.0190323],\n [0.23051376, 0.20778465, -0.11628489, 0.20741193, 0.23119707],\n [-0.05407944, -0.05548359, 0.01328741, -0.05547751, -0.04267546],\n [-0.62813954, -0.98581439, 0.18994331, -0.95321309, -0.53119677],\n [-0.18373719, -0.2011591, 0.03627741, -0.20088423, -0.13479904],\n [0.18184522, 0.16675393, -0.12985588, 0.1665455, 0.20478515],\n [-0.31872585, -0.37518492, 0.04704902, -0.37352689, -0.21222916],\n [0.84887233, 0.62383422, -0.43685389, 0.61407755, 0.85707136],\n [-0.05261579, -0.05387151, 0.00796703, -0.05386652, -0.03533347],\n [-0.50035039, -0.71049219, 0.26736812, -0.69613862, -0.51155936],\n [-0.46632367, -0.61629176, 0.1065549, -0.60837729, -0.35919155],\n [-0.41396762, -0.52929026, 0.11631992, -0.52400002, -0.34161659],\n [0.56533161, 0.45487973, -0.20191777, 0.45131692, 0.50536329],\n [-0.25961398, -0.30166258, 0.12862217, -0.30053261, -0.25882168],\n [0.14486225, 0.13424498, -0.22657748, 0.13411544, 0.21186167],\n [-0.56682674, -0.86925212, 0.32130125, -0.84331965, -0.59103657],\n [0.5047258, 0.41021238, -0.29671218, 0.40728788, 0.53271157],\n [-0.43155757, -0.55417099, 0.09413043, -0.5484369, -0.32729987],\n [0.34146008, 0.29564578, -0.1397251, 0.29462552, 0.31939499],\n [0.59808647, 0.48391487, -0.09662502, 0.48031482, 0.41040803],\n [0.2006377, 0.183359, -0.08645239, 0.18311139, 0.1909314],\n [0.44366264, 0.36673917, -0.32881307, 0.36453234, 0.50585757],\n [-0.35790142, -0.43681098, 0.08310658, -0.43393424, -0.27716013],\n [-0.49074253, -0.68009252, 0.1985023, -0.66817166, -0.45726466],\n [0.20408678, 0.18766665, -0.03758255, 0.18744678, 0.14629023],\n [0.92820989, 0.65798031, -0.82939554, 0.64517405, 1.1264053],\n [-0.14206865, -0.15283617, 0.047763, -0.15270031, -0.12446275],\n [-0.17969653, -0.19767623, 0.06598571, -0.19737695, -0.16212616],\n [-0.47137633, -0.61977806, 0.0862388, -0.61210679, -0.33714968],\n [-0.28911174, -0.34384457, 0.16312479, -0.34212796, -0.30099582],\n [-0.00763281, -0.00766442, 0.00752714, -0.0076644, -0.00957215],\n [0.34143269, 0.29614602, -0.12301585, 0.29514894, 0.30610265],\n [2.0719664, 1.2126712, -0.52023762, 1.1565606, 1.6468943],\n [-0.11800533, -0.12555996, 0.05344356, -0.12547942, -0.11417636],\n [-0.66888109, -1.1786054, 0.37739535, -1.1178522, -0.69637225],\n [0.00530837, 0.0052956, -0.00113352, 0.00529559, 0.00399755],\n [0.15362842, 0.1433758, -0.05533638, 0.14326187, 0.13771924],\n [0.42707403, 0.35920849, -0.15986646, 0.35742182, 0.38779115],\n [-0.46487744, -0.65646177, 0.43587838, -0.64361114, -0.57326763],\n [-0.01626688, -0.01640278, 0.00924814, -0.01640259, -0.01697843],\n [-0.027476, -0.02784189, 0.00868304, -0.02784108, -0.0235796],\n [0.19189844, 0.17681613, -0.04846485, 0.17661883, 0.15282694],\n [0.22330594, 0.20058569, 
-0.20863098, 0.2002013, 0.27504459],\n [0.11559231, 0.10934324, -0.06731977, 0.10928697, 0.12162145],\n [-0.47747168, -0.62350706, 0.06552056, -0.61616792, -0.31028988],\n [0.8575602, 0.61127476, -1.2773277, 0.5997524, 1.2339205],\n [-0.24769378, -0.28838884, 0.20749059, -0.28727965, -0.29418433],\n [-0.16802838, -0.18300456, 0.04553388, -0.18278242, -0.13699666],\n [0.15586551, 0.14451023, -0.11872882, 0.14437251, 0.17934753],\n [-0.39223346, -0.50201694, 0.1843519, -0.49695755, -0.38422796],\n [0.80113557, 0.59491977, -0.45929526, 0.58622787, 0.83851595],\n [-0.24191424, -0.27667143, 0.09486526, -0.27584213, -0.22309338],\n [0.18432121, 0.16997179, -0.0607232, 0.16978585, 0.16039042],\n [-0.40278614, -0.52507026, 0.24440944, -0.51896897, -0.42963435],\n [0.19255907, 0.17757771, -0.04252925, 0.17738371, 0.14664994],\n [-0.0972408, -0.10219865, 0.03820438, -0.10215654, -0.08973187],\n [-0.30184289, -0.35995233, 0.12973145, -0.35809935, -0.28699813],\n [-0.80655346, -1.6474722, 0.23707035, -1.5153139, -0.67565426],\n [-0.36549402, -0.45446948, 0.13440814, -0.45089353, -0.32991751],\n [-0.35756411, -0.45140667, 0.25876731, -0.44734612, -0.40446637],\n [0.47443587, 0.39805103, -0.0802688, 0.39601394, 0.33060589],\n [-0.64139492, -1.0346158, 0.22190249, -0.99622053, -0.56730211],\n [-0.35327017, -0.4485241, 0.33766234, -0.4442919, -0.43843867],\n [0.38580316, 0.32347057, -0.43729991, 0.32180248, 0.50681211],\n [-0.45169795, -0.60091255, 0.1608026, -0.5928297, -0.40334168],\n [0.99548762, 0.72054504, -0.2191243, 0.7081499, 0.75729325],\n [1.8464371, 1.1114614, -0.59910169, 1.0651319, 1.5985756],\n [0.39118858, 0.33428279, -0.1169012, 0.33290991, 0.32951392],\n [-0.26737891, -0.31403027, 0.17241748, -0.31268067, -0.29104169],\n [-0.35472347, -0.43250975, 0.08609581, -0.42968906, -0.2787815],\n [0.20292213, 0.18425357, -0.15632118, 0.18396786, 0.2343701],\n [0.38315112, 0.3261751, -0.17731209, 0.3247703, 0.37339587],\n [-0.4203798, -0.53885631, 0.11049479, -0.5333593, -0.33927498],\n [-0.39280184, -0.50479736, 0.20418445, -0.49954177, -0.39792378],\n [0.01409337, 0.01399707, -0.00585487, 0.01399697, 0.01324928],\n [-0.6652952, -1.0134974, 0.08156506, -0.98417555, -0.41640983],\n [0.34348687, 0.29084513, -0.60061687, 0.28950793, 0.52137387],\n [0.09977508, 0.09541015, -0.02804863, 0.09537833, 0.08234969],\n [0.32918421, 0.28875075, -0.07066353, 0.28792585, 0.24833301],\n [-0.49353903, -0.6871202, 0.20734724, -0.67473867, -0.46571878],\n [0.77392484, 0.60155546, -0.08642675, 0.59523764, 0.46956088],\n [0.11597648, 0.10966911, -0.06949822, 0.10961197, 0.12319163],\n [-0.22418407, -0.24900933, 0.02464014, -0.24855216, -0.13529889],\n [0.0048323, 0.00481904, -0.00792244, 0.00481903, 0.00717903],\n [1.5612954, 1.0050419, -0.31845894, 0.97326169, 1.157936],\n [-0.04439821, -0.04534558, 0.0116988, -0.04534221, -0.03586197],\n [-0.18287636, -0.20228267, 0.09051079, -0.20194015, -0.182256],\n [0.79991917, 0.59627529, -0.39894571, 0.58778217, 0.79924268],\n [0.00045295, 0.00045286, -0.00012532, 0.00045286, 0.00037187],\n [-0.35045702, -0.43580879, 0.18862591, -0.43237702, -0.3591702],\n [0.48168833, 0.40506154, -0.06181568, 0.40304207, 0.30611667],\n [1.2102909, 0.82323981, -0.41739621, 0.803211, 1.0693487],\n [-0.59017082, -0.91634263, 0.27345193, -0.88743255, -0.57538096],\n [-0.37675981, -0.47045792, 0.12095176, -0.46661229, -0.32503025],\n [0.06803516, 0.06595983, -0.01939384, 0.06594928, 0.056414],\n [-0.06959904, -0.07232338, 0.05983358, -0.07230561, -0.08337973],\n [0.21772388, 0.1987227, 
-0.05156152, 0.19844678, 0.16971593],\n [1.6833678, 1.0531665, -0.41182571, 1.0155263, 1.3264794],\n [-0.34527398, -0.42174351, 0.11413824, -0.4189433, -0.30078969],\n [-0.62314999, -0.96597584, 0.1733497, -0.93571986, -0.51252229],\n [0.6459985, 0.5037073, -0.31753546, 0.49854781, 0.64233524],\n [0.81444581, 0.60869635, -0.31055794, 0.60017873, 0.74410178],\n [-0.4908851, -0.63379759, 0.03898724, -0.62695296, -0.26585044],\n [0.62883787, 0.49981679, -0.15887697, 0.49545104, 0.5008674],\n [-0.47123649, -0.64525963, 0.2157823, -0.63475768, -0.45762303],\n [-0.43045432, -0.56726321, 0.18859397, -0.56012565, -0.41191156],\n [-0.27800518, -0.32605338, 0.11807445, -0.32467635, -0.2632877],\n [0.02531747, 0.02500971, -0.01037265, 0.02500908, 0.02369118],\n [0.8807169, 0.65205874, -0.24272019, 0.64234114, 0.72210916],\n [-0.47195945, -0.64065297, 0.1776242, -0.63078898, -0.42931939],\n [0.49944805, 0.41097781, -0.16469959, 0.40838569, 0.43474479],\n [1.5756966, 0.961067, -1.4644167, 0.92300166, 1.9373732],\n [-0.62905627, -1.0322172, 0.31250029, -0.99119942, -0.62770146],\n [-0.43233827, -0.55683313, 0.10072474, -0.55093416, -0.33517466],\n [1.2574772, 0.8430771, -0.49561225, 0.82102893, 1.1616022],\n [-0.48191348, -0.65985959, 0.18026216, -0.64912206, -0.43747936],\n [0.96451145, 0.70037123, -0.24978434, 0.68855868, 0.77458662],\n [0.10390185, 0.09900718, -0.04141969, 0.09896877, 0.0963447],\n [0.17782342, 0.1633702, -0.12549832, 0.16317469, 0.199472],\n [0.90103943, 0.64142578, -0.9021328, 0.62924252, 1.1356975],\n [-0.31685644, -0.37539119, 0.06613533, -0.37359927, -0.23680791],\n [-0.36576202, -0.45473746, 0.13299164, -0.45116409, -0.32891512],\n [0.3946375, 0.33626288, -0.13179614, 0.33483111, 0.34496614],\n [-0.05564079, -0.05737795, 0.05191923, -0.05736891, -0.06850384],\n [0.07746079, 0.07480482, -0.0207498, 0.07478964, 0.06291235],\n [-0.62763543, -0.9961985, 0.21906113, -0.96162467, -0.55676262],\n [-0.67538326, -1.2113217, 0.40482453, -1.1450707, -0.71746238],\n [-0.19995235, -0.22180501, 0.05561306, -0.22140782, -0.16444472],\n [0.03704984, 0.03639979, -0.01443181, 0.03639789, 0.03409112],\n [-0.51650857, -0.76312406, 0.39772971, -0.74408523, -0.59647333],\n [0.98171505, 0.70792313, -0.2820108, 0.6954651, 0.81612286],\n [-0.2277101, -0.25742923, 0.07554275, -0.25678472, -0.19860757],\n [-0.6677642, -1.1054215, 0.21255838, -1.0599649, -0.57444902],\n [0.82422176, 0.61613989, -0.27973166, 0.60753116, 0.724358],\n [0.25536475, 0.22807412, -0.12009092, 0.22758927, 0.25020003],\n [0.13012242, 0.12202695, -0.10400311, 0.12194308, 0.15214588],\n [0.09729097, 0.0929002, -0.04630879, 0.09286719, 0.09570754],\n [0.05400697, 0.05259462, -0.0289588, 0.05258847, 0.05528026],\n [0.02411581, 0.02383889, -0.00908, 0.02383836, 0.02194016],\n [-0.49513117, -0.69291152, 0.22291771, -0.68003738, -0.4781216],\n [-0.46304413, -0.61997017, 0.15285491, -0.61125577, -0.40319771],\n [-0.18833279, -0.20618262, 0.02966943, -0.20590112, -0.12815347],\n [-0.5643813, -0.85531534, 0.28315697, -0.83116451, -0.56502478],\n [-0.08363191, -0.08715286, 0.02552147, -0.08712817, -0.07094034],\n [-0.47398851, -0.65328525, 0.23517562, -0.64221102, -0.47277297],\n [-0.26479649, -0.30521694, 0.07162969, -0.30419273, -0.21576575],\n [-0.19257033, -0.21268103, 0.05290569, -0.21233169, -0.15772616],\n [-0.42206694, -0.53104639, 0.06193518, -0.52640571, -0.28048524],\n [0.27418477, 0.24533224, -0.05768892, 0.24482746, 0.20546428],\n [-0.32017836, -0.38448403, 0.10816775, -0.38234583, -0.28095531],\n [-0.11235979, -0.11823478, 
0.01483804, -0.11818361, -0.07209018],\n [0.0987543, 0.09404778, -0.06959045, 0.09401042, 0.11072114],\n [-0.01475371, -0.0148639, 0.00745724, -0.01486376, -0.01480711],\n [-0.31418685, -0.38182471, 0.20098822, -0.37941624, -0.34108205],\n [0.11799586, 0.11146733, -0.07163686, 0.11140716, 0.12588289],\n [-0.30637716, -0.3637908, 0.09558214, -0.36200816, -0.26180213],\n [-0.10448745, -0.11077222, 0.0847169, -0.11070926, -0.12275602],\n [1.2126604, 0.82725729, -0.37262864, 0.8074287, 1.0310068],\n [-0.39751424, -0.50920053, 0.1695837, -0.50403422, -0.37702762],\n [0.41730243, 0.35889083, -0.04510874, 0.35753432, 0.25045566],\n [-0.73638278, -1.3570324, 0.25051513, -1.2760582, -0.6476749],\n [0.55540527, 0.45719388, -0.06897275, 0.45432127, 0.3491209],\n [-0.30721199, -0.36229287, 0.0687354, -0.36065586, -0.23497893],\n [0.10505872, 0.10057482, -0.01463721, 0.10054293, 0.06862002],\n [-0.45382873, -0.60844735, 0.18506012, -0.5998169, -0.42400951],\n [0.39661504, 0.33831796, -0.11781938, 0.336897, 0.33342259],\n [-0.69147439, -1.1808634, 0.21853097, -1.1263227, -0.59342395],\n [-0.24741436, -0.28045005, 0.04610236, -0.27971718, -0.17804686],\n [-0.36931264, -0.45432927, 0.08596562, -0.45109528, -0.28622949],\n [1.3455919, 0.89213672, -0.41397432, 0.86750981, 1.1444848],\n [-0.47951866, -0.66313969, 0.22918427, -0.65166375, -0.47236235],\n [0.71520118, 0.54505314, -0.38833921, 0.53840534, 0.73513296],\n [-0.25160521, -0.28897301, 0.0883052, -0.28805159, -0.22360689],\n [-0.21586978, -0.24025429, 0.03882649, -0.23979626, -0.15352553],\n [1.9366852, 1.1229873, -1.1507855, 1.0692654, 2.0513864],\n [-0.54191503, -0.81867924, 0.37498983, -0.79590578, -0.60390778],\n [-0.60797965, -0.93124681, 0.18049629, -0.90361689, -0.51100536],\n [0.35254522, 0.30358918, -0.15896092, 0.30246118, 0.34060421],\n [-0.24892482, -0.28657929, 0.10995939, -0.28563373, -0.23885421],\n [-0.73517526, -1.2292916, 0.10907361, -1.176769, -0.49035486],\n [-0.09636889, -0.10089282, 0.0205596, -0.10085744, -0.07255035],\n [-0.38028061, -0.49216014, 0.32033143, -0.4867466, -0.45249381],\n [-0.18871952, -0.20901105, 0.07789747, -0.20864818, -0.1770363],\n [-0.06614495, -0.06841276, 0.02905101, -0.0683998, -0.06334732],\n [-0.62411409, -0.97774934, 0.19561035, -0.94566456, -0.53413365],\n [0.27120071, 0.24281427, -0.05949607, 0.24232028, 0.20607879],\n [0.85585921, 0.61733359, -0.8366515, 0.60648742, 1.0701872],\n [-0.16357957, -0.17802555, 0.05246843, -0.17781323, -0.1410789],\n [-0.17076048, -0.18878456, 0.15029066, -0.18846812, -0.20617955],\n [0.52263905, 0.42910139, -0.12966468, 0.42633288, 0.41376314],\n [0.08575312, 0.08269529, -0.01334224, 0.08267713, 0.05811026],\n [-0.14597352, -0.15759015, 0.05691445, -0.15743625, -0.13435904],\n [-0.12458284, -0.13295752, 0.05161702, -0.13286377, -0.11701632],\n [0.11494773, 0.10876673, -0.06680556, 0.10871137, 0.1208596],\n [-0.04256321, -0.04346356, 0.01520077, -0.04346039, -0.03804709],\n [-0.45536737, -0.58326447, 0.0570685, -0.57735027, -0.28711117],\n [0.47626078, 0.39404241, -0.17708947, 0.39169372, 0.43149006],\n [-0.24311822, -0.27871269, 0.10466917, -0.27784737, -0.23129235],\n [1.3476211, 0.87277805, -0.85701703, 0.8459194, 1.4601082],\n [-0.37467502, -0.46905263, 0.13720983, -0.4651302, -0.33773409],\n [-0.07708257, -0.08018609, 0.03357444, -0.08016527, -0.07361794],\n [-0.01385604, -0.01394893, 0.00470857, -0.01394883, -0.01218239],\n [-0.72719396, -1.3517217, 0.29532086, -1.2688687, -0.67848703],\n [0.44571038, 0.37514843, -0.10875349, 0.37329763, 0.3509077],\n 
[-0.57534299, -0.86862449, 0.23022732, -0.84453497, -0.53417083],\n [-0.0754074, -0.07835789, 0.03140045, -0.07833865, -0.07094655],\n [0.75644486, 0.57398438, -0.29853542, 0.5667599, 0.69907988],\n [-0.6168851, -0.94901454, 0.17024164, -0.92029119, -0.50602051],\n [-0.46962782, -0.64565932, 0.24080845, -0.63488224, -0.47359064],\n [0.72610874, 0.56301499, -0.16076249, 0.55698784, 0.55344273],\n [-0.10814219, -0.11418864, 0.03518046, -0.11413233, -0.09370741],\n [0.46426286, 0.38453431, -0.20407356, 0.38226845, 0.44474889],\n [-0.31314903, -0.38903031, 0.44926615, -0.38599444, -0.44498491],\n [-0.16918239, -0.18372589, 0.03236222, -0.1835178, -0.1228173],\n [0.01922199, 0.0190669, -0.00221987, 0.01906669, 0.01179374],\n [0.69393246, 0.54025406, -0.19213669, 0.5346523, 0.56984696],\n [-0.47386179, -0.65599758, 0.2572133, -0.64457328, -0.48701491],\n [0.1456625, 0.13616929, -0.06689435, 0.13606627, 0.14159187],\n [-0.70347527, -1.3807153, 0.55747473, -1.2813738, -0.82019595],\n [0.00383646, 0.00382933, -0.00148007, 0.00382933, 0.00351878],\n [-0.1768471, -0.19414626, 0.06286135, -0.19386472, -0.15783495],\n [-0.13496417, -0.14390135, 0.02406649, -0.1438028, -0.09571057],\n [0.37122856, 0.32016044, -0.09463934, 0.31899469, 0.29657063],\n [0.60928212, 0.48199407, -0.25714196, 0.47761034, 0.57581097],\n [-0.41880282, -0.56645291, 0.42755189, -0.55793354, -0.53130757],\n [-0.42053034, -0.55369854, 0.22244575, -0.54677554, -0.42849879],\n [-0.15715324, -0.17157183, 0.09775847, -0.17135169, -0.16902218],\n [-0.09228049, -0.09672915, 0.03634801, -0.09669343, -0.08522692],\n [-0.08995605, -0.09397528, 0.02353244, -0.09394536, -0.07248574],\n [-0.44885439, -0.58736729, 0.11330178, -0.58034539, -0.35740385],\n [-0.30551624, -0.37255546, 0.28556142, -0.37012293, -0.37635647],\n [-0.03486108, -0.03547902, 0.01599897, -0.03547719, -0.03387931],\n [-0.2946841, -0.35200734, 0.16882458, -0.35016057, -0.30836118],\n [0.13931681, 0.13093625, -0.043177, 0.1308523, 0.11878543],\n [-0.19643866, -0.21786169, 0.06304702, -0.21747313, -0.16945309],\n [0.97520508, 0.69987126, -0.35556831, 0.68719635, 0.87777185],\n [-0.17457684, -0.19174755, 0.07274891, -0.19146657, -0.16428954],\n [-0.20695989, -0.23150806, 0.07894116, -0.23102399, -0.18910454],\n [0.09175883, 0.0878384, -0.0436685, 0.08781049, 0.09026055],\n [-0.6726609, -1.1472366, 0.26916505, -1.0944988, -0.62452094],\n [-0.62009308, -0.98610435, 0.24679913, -0.95160491, -0.57468403],\n [0.9143014, 0.67645561, -0.18494444, 0.66632846, 0.67621274],\n [-0.40823923, -0.51398966, 0.0825203, -0.50947217, -0.30186082],\n [0.49533274, 0.39542199, -0.86503165, 0.39209712, 0.75153986],\n [-0.41646708, -0.57351884, 0.65089757, -0.56384747, -0.60893088],\n [-0.24832842, -0.28228688, 0.05344114, -0.28151545, -0.18749348],\n [-0.4291716, -0.56977636, 0.23074298, -0.56222093, -0.43968348],\n [-0.52430276, -0.75504356, 0.23433078, -0.73856158, -0.50505795],\n [-0.35502192, -0.43176704, 0.07773553, -0.42902312, -0.26960004],\n [0.73259343, 0.57037824, -0.12427138, 0.56446619, 0.51094683],\n [-0.17483804, -0.19012333, 0.02844008, -0.18990093, -0.12024792],\n [-0.47873773, -0.65338446, 0.17767818, -0.6429686, -0.4334641],\n [-0.42915684, -0.55172088, 0.10159999, -0.54596001, -0.33449162],\n [0.07254078, 0.07037383, -0.00953335, 0.07036304, 0.04646722],\n [0.13886356, 0.1304756, -0.04616793, 0.13039122, 0.12120364],\n [1.9186054, 1.103383, -1.5492848, 1.049048, 2.2510105],\n [0.81907795, 0.58317424, -1.9232965, 0.57210785, 1.3716466],\n [-0.44373902, -0.58942003, 0.17882664, 
-0.58157592, -0.41295794],\n [0.132741, 0.12487617, -0.05473605, 0.12479856, 0.12448142],\n [0.16258569, 0.15136781, -0.04905618, 0.15123894, 0.13739243],\n [0.33106334, 0.28830953, -0.11710154, 0.28739283, 0.29498839],\n [0.12174071, 0.11518179, -0.04300579, 0.11512293, 0.10842836],\n [0.18150841, 0.16802615, -0.04268629, 0.16785944, 0.1411575],\n [-0.55181879, -0.78094168, 0.11503991, -0.76546234, -0.41224668],\n [0.31092362, 0.27182326, -0.14438697, 0.2710067, 0.30335779],\n [0.4898463, 0.40123648, -0.24767545, 0.39858616, 0.49167434],\n [0.04020691, 0.03953763, -0.00461499, 0.03953577, 0.02461883],\n [0.69779258, 0.53039763, -0.52893557, 0.52380457, 0.80160759],\n [-0.41938765, -0.528942, 0.07014561, -0.52422331, -0.2911302],\n [1.5500093, 0.99414196, -0.37744285, 0.96219521, 1.2195041],\n [-0.23120537, -0.26211582, 0.08002466, -0.26142923, -0.20452665],\n [-0.5337862, -0.75033499, 0.13270693, -0.73602772, -0.42288223],\n [-0.34890203, -0.42568049, 0.10060014, -0.42288671, -0.29041019],\n [0.09510716, 0.09097446, -0.03856102, 0.09094454, 0.0886887],\n [-0.42043796, -0.53892978, 0.11036636, -0.5334321, -0.33917476],\n [-0.12595244, -0.13532881, 0.10828799, -0.13521259, -0.15089487],\n [-0.31032226, -0.37071943, 0.11203737, -0.36877261, -0.27840237],\n [0.16780569, 0.15577563, -0.05594822, 0.15563205, 0.14660312],\n [-0.31007558, -0.3713279, 0.12483464, -0.36932432, -0.28846962],\n [0.78557461, 0.5899677, -0.33984865, 0.58198177, 0.74856607],\n [1.0796956, 0.75859235, -0.34035392, 0.74306646, 0.92580842],\n [0.27211903, 0.24110835, -0.14370344, 0.24052108, 0.27712242],\n [-0.52540352, -0.73146819, 0.1256489, -0.71828911, -0.41088935],\n [-0.60747742, -0.97426243, 0.32098258, -0.9389528, -0.61876239],\n [-0.43523416, -0.56928385, 0.14331067, -0.56250104, -0.3786618],\n [-0.66214695, -1.0529658, 0.14597953, -1.0161294, -0.50397645],\n [-0.37668723, -0.47345942, 0.14901598, -0.46935893, -0.34839737],\n [0.27174319, 0.24340092, -0.05638588, 0.24290944, 0.20269322],\n [0.0586777, 0.05722555, -0.00890276, 0.05721956, 0.03943062],\n [0.27677671, 0.24694797, -0.06897282, 0.24641357, 0.21944331],\n [0.0637317, 0.06189966, -0.01870783, 0.06189089, 0.05336478],\n [1.0814839, 0.75717338, -0.38058322, 0.74136982, 0.96199595],\n [-0.65943831, -1.0712433, 0.19220718, -1.0303401, -0.55087024],\n [-0.18011189, -0.19751596, 0.0493431, -0.19723615, -0.14738289],\n [0.46231409, 0.37982102, -0.33361411, 0.3773866, 0.5224558],\n [-0.56942431, -0.85763161, 0.24175842, -0.83411749, -0.53921405],\n [-0.35493969, -0.44571691, 0.2330656, -0.44188734, -0.38869215],\n [-0.37534646, -0.48492178, 0.33926643, -0.47965973, -0.45724101],\n [-0.41212714, -0.53947528, 0.22608188, -0.53301102, -0.42506244],\n [-0.89109482, -1.8088793, 0.10306442, -1.6661471, -0.54701034],\n [0.84612011, 0.63633926, -0.17695167, 0.62780994, 0.63277537],\n [-0.12382645, -0.13164325, 0.03240452, -0.13156107, -0.0997901],\n [1.1076252, 0.76777951, -0.44561598, 0.75085943, 1.0302093],\n [0.3311406, 0.2893973, -0.08957051, 0.28852346, 0.26981948],\n [0.02920011, 0.02877719, -0.01654502, 0.02877617, 0.03044309],\n [0.07314809, 0.0706297, -0.03411092, 0.07061526, 0.0714677],\n [-0.35913427, -0.43718768, 0.07348821, -0.43438211, -0.26663703],\n [-0.18609667, -0.20641976, 0.09821248, -0.20605065, -0.18947764],\n [0.50919209, 0.4142026, -0.26402946, 0.41127427, 0.51540492],\n [-0.26690924, -0.30841311, 0.07685443, -0.30734196, -0.22206257],\n [0.02042216, 0.02021503, -0.01102855, 0.02021468, 0.02095321],\n [0.7002997, 0.53833717, -0.31546173, 
[... truncated array dump: several hundred rows of five floating-point values each; the fragment begins and ends mid-row and carries no recoverable structure beyond its 5-column shape, so the full data is omitted here ...]
0.0727166, -0.25108967, -0.19355618],\n [0.67370954, 0.53136698, -0.13281006, 0.52641064, 0.49400967],\n [-0.20243394, -0.22276238, 0.02616932, -0.22242277, -0.12896238],\n [-0.27365042, -0.32240267, 0.16586027, -0.32096284, -0.29177956],\n [-0.05991093, -0.06182501, 0.03502458, -0.06181482, -0.06311584],\n [0.07493902, 0.07205309, -0.08161657, 0.07203458, 0.09714224],\n [-0.57627448, -0.84764461, 0.15470794, -0.82696848, -0.46838228],\n [0.09873071, 0.09432918, -0.03659185, 0.09429649, 0.08935242],\n [0.27590214, 0.24526493, -0.09739294, 0.24469949, 0.24567212],\n [-0.38698872, -0.49057608, 0.15557606, -0.4860064, -0.35985134],\n [-0.06259506, -0.06461343, 0.02669906, -0.06460259, -0.05936567],\n [-0.06550517, -0.06768315, 0.02414352, -0.06767108, -0.05917349],\n [-0.55225301, -0.80145349, 0.17567744, -0.7832224, -0.47497861],\n [-0.00714523, -0.00717403, 0.01000028, -0.00717401, -0.0100699],\n [1.2602841, 0.82671822, -0.98172356, 0.80272251, 1.4610061],\n [1.043912, 0.73341473, -0.44646877, 0.71840001, 0.99094516],\n [-0.43132456, -0.5662489, 0.16893532, -0.55931775, -0.39760601],\n [0.39438992, 0.33470387, -0.17201905, 0.33320649, 0.37683629],\n [-0.50414255, -0.67732771, 0.0836691, -0.66758313, -0.34906041],\n [1.8470935, 1.1320365, -0.35763665, 1.0880446, 1.3463251],\n [0.6694033, 0.51885462, -0.3132809, 0.51328419, 0.65480684],\n [-0.2755584, -0.31846568, 0.06013792, -0.31735681, -0.20902677],\n [-0.53109785, -0.70632029, 0.04370179, -0.69684025, -0.29104452],\n [-0.38016339, -0.48416495, 0.20549193, -0.47947802, -0.39017112],\n [-0.21644843, -0.24806102, 0.26047917, -0.24729436, -0.29007076],\n [0.22464717, 0.204938, -0.04238525, 0.20465029, 0.16233627],\n [0.18255798, 0.16745986, -0.12147509, 0.16725206, 0.20080427],\n [0.18834064, 0.17271266, -0.09791501, 0.17249686, 0.19080473],\n [-0.35425951, -0.4428542, 0.20514244, -0.43919807, -0.37202848],\n [0.21969688, 0.1999514, -0.0650006, 0.19965615, 0.18444422],\n [0.122713, 0.11616948, -0.03648803, 0.11611136, 0.10319386],\n [-0.42723475, -0.55833711, 0.1626259, -0.5517272, -0.39010748],\n [-0.02451718, -0.02479725, 0.00557298, -0.02479671, -0.0188518],\n [-0.04687689, -0.04802464, 0.02506269, -0.04801996, -0.04793563],\n [0.05305057, 0.05167392, -0.03107604, 0.05166797, 0.05592577],\n [-0.31050077, -0.37292577, 0.13922555, -0.37084803, -0.29942733],\n [0.20887052, 0.1890275, -0.17334826, 0.18871394, 0.24730596],\n [0.12160078, 0.11432671, -0.12128972, 0.11425424, 0.15307663],\n [-0.4786629, -0.6404824, 0.11557347, -0.63151775, -0.37553397],\n [0.69538074, 0.5308133, -0.45327777, 0.52441604, 0.75964948],\n [0.07554328, 0.07283333, -0.03894072, 0.07281713, 0.07631476],\n [-0.16002451, -0.17249543, 0.02306356, -0.17233363, -0.10570847],\n [0.28745241, 0.25362713, -0.13105163, 0.25296582, 0.27874138],\n [-0.31900993, -0.38422716, 0.12564022, -0.38202034, -0.29461557],\n [0.15846756, 0.14444768, -0.75377929, 0.14424131, 0.33577754],\n [0.67802328, 0.53342129, -0.14224838, 0.52834012, 0.50760066],\n [0.32227808, 0.27988621, -0.17800649, 0.27896053, 0.33315203],\n [-0.689705, -1.2063613, 0.27705579, -1.1457302, -0.64117216],\n [0.50862172, 0.41457705, -0.23910062, 0.41170299, 0.4982724],\n [-0.55756118, -0.81008919, 0.16580611, -0.79154937, -0.46889096],\n [0.6659468, 0.51830268, -0.27094785, 0.51291508, 0.6217249],\n [0.54504791, 0.4418793, -0.18302942, 0.43865311, 0.47731683],\n [-0.23168171, -0.26098984, 0.05206213, -0.26037367, -0.17746463],\n [1.5564676, 0.98481622, -0.55095472, 0.95125094, 1.3872094],\n [-0.21817516, -0.24439497, 
0.05716511, -0.24387114, -0.1758965],\n [-0.11225338, -0.1190164, 0.04839388, -0.11894854, -0.10684144],\n [0.79514812, 0.59562841, -0.34278122, 0.58742223, 0.75679984],\n [-0.21647274, -0.24183736, 0.0502126, -0.24134324, -0.16757776],\n [-0.35997872, -0.44684088, 0.14337273, -0.44338005, -0.33369529],\n [0.40854687, 0.34382458, -0.21996117, 0.34212582, 0.4187485],\n [-0.1530492, -0.16604881, 0.06557914, -0.16586503, -0.14537374],\n [-0.14413865, -0.15581035, 0.07238166, -0.15565302, -0.14434657],\n [-0.09408929, -0.09840609, 0.02053462, -0.09837309, -0.07137273],\n [-0.39301475, -0.48727188, 0.06827468, -0.48353887, -0.27629252],\n [-0.24605572, -0.28276665, 0.10912952, -0.28185731, -0.23641755],\n [-0.74467796, -1.2573597, 0.10835057, -1.2016485, -0.49347603],\n [-0.4022203, -0.52233619, 0.22385208, -0.5164387, -0.4168435],\n [0.06682166, 0.06472335, -0.02944639, 0.06471237, 0.06406668],\n [0.57766322, 0.46671261, -0.13758116, 0.46319305, 0.45114152],\n [-0.01968855, -0.01987486, 0.00607468, -0.01987457, -0.01676204],\n [-0.72534761, -1.3803908, 0.35614986, -1.2895402, -0.72097197],\n [-0.39755281, -0.50932555, 0.17015997, -0.50415182, -0.37747861],\n [0.05541468, 0.0540372, -0.01466541, 0.05403149, 0.04482545],\n [-0.33090587, -0.40446483, 0.16434529, -0.40176137, -0.33016579],\n [-0.0854305, -0.08925077, 0.03581858, -0.08922231, -0.08056037],\n [-0.20923584, -0.23549068, 0.11068174, -0.23494313, -0.2132027],\n [-0.38497634, -0.48980343, 0.18049086, -0.48510088, -0.37680596],\n [-0.55067069, -0.77949325, 0.11744861, -0.76402275, -0.41452806],\n [0.3514565, 0.30426126, -0.11196786, 0.30320938, 0.30242813],\n [-0.36442407, -0.45596798, 0.1683269, -0.45217357, -0.35492169],\n [0.55862248, 0.45711991, -0.09137459, 0.45407064, 0.38491433],\n [0.98757975, 0.71248778, -0.26417743, 0.69998509, 0.80172117],\n [-0.17833263, -0.19639535, 0.07713576, -0.196091, -0.16992181],\n [-0.01980995, -0.02000314, 0.0075547, -0.02000282, -0.01809969],\n [0.01791458, 0.01776767, -0.00452259, 0.01776747, 0.01426517],\n [0.12051318, 0.11439804, -0.02599857, 0.11434635, 0.09106458],\n [-0.57040743, -0.83976402, 0.17023686, -0.81918687, -0.48026911],\n [-0.48418655, -0.65617892, 0.13978045, -0.64618233, -0.40318153],\n [-0.41376686, -0.53658243, 0.17206738, -0.53058805, -0.3891168],\n [-0.39736418, -0.52230491, 0.33561927, -0.51585445, -0.47324363],\n [0.789415, 0.61140354, -0.08671464, 0.60479997, 0.47633342],\n [-0.45273112, -0.61077915, 0.21682458, -0.6017464, -0.44627895],\n [0.04048982, 0.0397188, -0.01513165, 0.03971635, 0.03674537],\n [-0.63470698, -0.9674859, 0.11984384, -0.93941626, -0.45877254],\n [0.2618683, 0.2330883, -0.13239143, 0.23256256, 0.26283624],\n [0.19377173, 0.17770167, -0.07757214, 0.17747988, 0.17993081],\n [0.58723259, 0.47367768, -0.13114675, 0.47005153, 0.44888588],\n [0.69307845, 0.53813346, -0.219424, 0.53243351, 0.59514982],\n [-0.57764781, -0.86068256, 0.18440667, -0.83828911, -0.49740593],\n [-0.26782449, -0.31198634, 0.1139541, -0.31077833, -0.2537972],\n [-0.70868634, -1.1711819, 0.1297819, -1.1233345, -0.50704995],\n [0.4606641, 0.38328251, -0.16715777, 0.38113069, 0.41397594],\n [-0.19014589, -0.20787589, 0.02417353, -0.20760079, -0.12046142],\n [-0.37788839, -0.4836057, 0.24762906, -0.478736, -0.41354195],\n [-0.17801827, -0.19425086, 0.03434979, -0.19400454, -0.12960679],\n [-0.48308859, -0.65038326, 0.12323755, -0.64089631, -0.38601917],\n [0.25848821, 0.23234614, -0.06074086, 0.23190649, 0.2009698],\n [0.11192558, 0.10615829, -0.05454158, 0.10610878, 0.11096997],\n 
[2.2103467, 1.2399104, -0.96744817, 1.1734462, 2.1144277],\n [0.54133925, 0.44631372, -0.07756959, 0.44355424, 0.35690572],\n [-0.32947276, -0.39838731, 0.11327762, -0.39600213, -0.29080668],\n [-0.06956448, -0.07214855, 0.03847715, -0.07213256, -0.07194534],\n [0.34574032, 0.29872866, -0.1467757, 0.2976678, 0.32738698],\n [-0.36442537, -0.44325897, 0.06336404, -0.44043841, -0.25626936],\n [-0.52417636, -0.76654556, 0.30147977, -0.7483973, -0.54922168],\n [-0.10294317, -0.1082127, 0.02503518, -0.10816776, -0.08095778],\n [-0.44853287, -0.60675929, 0.24784799, -0.59762437, -0.46373298],\n [0.54811133, 0.43721149, -0.40008915, 0.43350971, 0.62178672],\n [-0.77339733, -1.4128057, 0.15830115, -1.3308055, -0.57425723],\n [0.00311698, 0.00311258, -0.00065575, 0.00311257, 0.00233568],\n [0.16546273, 0.15367022, -0.05901408, 0.1535303, 0.14784115],\n [1.0782845, 0.74165035, -0.72184466, 0.72461628, 1.1884479],\n [-0.14325199, -0.15333407, 0.02454582, -0.15321592, -0.10024659],\n [-0.54106483, -0.74855613, 0.0879214, -0.7355701, -0.37199839],\n [-0.01554362, -0.01565892, 0.00462261, -0.01565877, -0.01307195],\n [0.23435681, 0.21389756, -0.02870592, 0.21360038, 0.14663992],\n [-0.21589488, -0.24409962, 0.11654564, -0.24348735, -0.22148122],\n [-0.40602043, -0.51951841, 0.13874424, -0.51429434, -0.35764068],\n [0.01460532, 0.0145034, -0.0053337, 0.01450328, 0.01315306],\n [0.47378981, 0.39536301, -0.11230566, 0.39321344, 0.36943187],\n [0.17730636, 0.1633511, -0.09257247, 0.16316828, 0.17988164],\n [0.58137418, 0.47440216, -0.07731488, 0.4711487, 0.37388226],\n [-0.43695412, -0.55995752, 0.0805111, -0.55425701, -0.31327025],\n [0.62747558, 0.50448334, -0.09127002, 0.50050337, 0.41576756],\n [-0.37398313, -0.47031284, 0.16050385, -0.46622059, -0.35541833],\n [-0.38488016, -0.48811777, 0.16465811, -0.48355422, -0.36538837],\n [0.16882954, 0.15590683, -0.10322184, 0.15574218, 0.18053697],\n [-0.05819763, -0.05994739, 0.02620706, -0.05993863, -0.05620215],\n [-0.00378748, -0.00379413, 0.00093389, -0.00379412, -0.00299233],\n [-0.45740353, -0.60989175, 0.15335998, -0.60155781, -0.40035663],\n [0.96274755, 0.69683538, -0.28652439, 0.68484684, 0.80985166],\n [0.05958472, 0.05779674, -0.04800426, 0.0577878, 0.0698543],\n [-0.61316876, -0.97866302, 0.28179435, -0.94389586, -0.59617547],\n [-0.21870465, -0.24783391, 0.12138522, -0.24718931, -0.22644911],\n [1.2191264, 0.82944004, -0.39078432, 0.80928401, 1.0512069],\n [0.19790549, 0.18076257, -0.1024348, 0.18051548, 0.20020015],\n [-0.3915342, -0.49898009, 0.16549726, -0.49412317, -0.37021448],\n [-0.05989271, -0.06170904, 0.02243475, -0.06169987, -0.05439592],\n [-0.60835317, -0.9374616, 0.19454908, -0.90887105, -0.52415159],\n [0.93726962, 0.66527905, -0.73159374, 0.65242844, 1.0872841],\n [-0.77961717, -1.3707204, 0.10873488, -1.3005904, -0.50939407],\n [0.51118936, 0.41853336, -0.180392, 0.41575656, 0.45513157],\n [0.10664256, 0.10189676, -0.01941382, 0.10186158, 0.07614956],\n [0.54394214, 0.44253993, -0.15439524, 0.43941602, 0.45039107],\n [-0.04468064, -0.04568047, 0.01682746, -0.04567674, -0.04065329],\n [0.42232032, 0.35761426, -0.11465577, 0.35597103, 0.3445376],\n [-0.11373807, -0.12003518, 0.0216566, -0.1199771, -0.08244114],\n [-0.24970214, -0.28577578, 0.07665105, -0.28491036, -0.21222549],\n [0.14967316, 0.13887169, -0.14929294, 0.13874192, 0.18841655],\n [-0.04416694, -0.04517034, 0.02105742, -0.04516655, -0.04347203],\n [0.09550975, 0.09136204, -0.03706601, 0.09133203, 0.08777425],\n [-0.60345573, -0.90964065, 0.15374561, -0.88460156, 
-0.4819935],\n [-0.39335361, -0.4858555, 0.05967075, -0.48226216, -0.26431345],\n [0.91188654, 0.67179381, -0.22367346, 0.66145241, 0.71918731],\n [0.0893321, 0.08542113, -0.06841059, 0.08539261, 0.10297287],\n [1.0542231, 0.74207457, -0.38557647, 0.72704413, 0.94987954],\n [-0.80140281, -1.4468496, 0.10861552, -1.3659914, -0.51865018],\n [-0.65661096, -1.0909152, 0.25248002, -1.045425, -0.60157682],\n [0.77179132, 0.59281922, -0.15014697, 0.58599761, 0.56344143],\n [-0.07764923, -0.08065276, 0.02253154, -0.08063341, -0.06476868],\n [-0.06175517, -0.06377033, 0.03294705, -0.06375937, -0.06310509],\n [0.50650259, 0.41570612, -0.16991997, 0.41301447, 0.44341725],\n [-0.40926117, -0.52588389, 0.14652362, -0.52041418, -0.36613921],\n [-0.45927482, -0.61604101, 0.17002452, -0.60727483, -0.41549163],\n [1.0567886, 0.74734019, -0.3202118, 0.73259505, 0.89429581],\n [-0.11140128, -0.11790377, 0.03960864, -0.11784055, -0.09943366],\n [-0.30337178, -0.36215487, 0.13026612, -0.36026837, -0.2883615],\n [-0.42253305, -0.55498618, 0.20169408, -0.54816798, -0.41605253],\n [-0.25977024, -0.30503888, 0.21535438, -0.30373086, -0.3074592],\n [0.32950947, 0.28807868, -0.09057868, 0.28721359, 0.26993794],\n [-0.18003178, -0.19723229, 0.04531631, -0.19695886, -0.14321691],\n [0.09805314, 0.09330175, -0.08653726, 0.09326339, 0.1185001],\n [-0.49089533, -0.70725054, 0.399791, -0.69177447, -0.57758194],\n [1.1664585, 0.8161681, -0.21232685, 0.79907572, 0.83289675],\n [-0.4644781, -0.64735139, 0.34389256, -0.63561204, -0.52941278],\n [-0.41856481, -0.55305757, 0.25163428, -0.54596576, -0.44508375],\n [1.1278749, 0.7967161, -0.19805737, 0.78089615, 0.79575792],\n [0.34566979, 0.29986766, -0.1102072, 0.29886025, 0.29752327],\n [-0.37267293, -0.46647729, 0.14254855, -0.4625815, -0.34083917],\n [0.24961853, 0.22378361, -0.10251373, 0.22333903, 0.23376988],\n [0.53624914, 0.43791335, -0.14162413, 0.43493246, 0.43347777],\n [0.02897433, 0.02856922, -0.01272993, 0.02856828, 0.027752],\n [0.08015478, 0.07737286, -0.01771104, 0.07735678, 0.06105356],\n [-0.50476571, -0.70753251, 0.19691235, -0.69426216, -0.46468738],\n [-0.40522231, -0.52445365, 0.19283833, -0.51868373, -0.39859944],\n [-0.30219412, -0.35872948, 0.10546732, -0.35697701, -0.26806492],\n [-0.35942658, -0.45131409, 0.21023945, -0.44743922, -0.37872287],\n [0.26996379, 0.24234186, -0.04851345, 0.24187193, 0.19194099],\n [0.4646472, 0.38552663, -0.18330676, 0.38329676, 0.42935683],\n [-0.3275455, -0.38444346, 0.03295657, -0.38280459, -0.19194278],\n [-0.55860279, -0.78929956, 0.10141245, -0.77379405, -0.39851299],\n [-0.05774895, -0.05942333, 0.02039347, -0.05941524, -0.05142841],\n [-0.08416668, -0.08876627, 0.21614889, -0.0887244, -0.14521823],\n [-0.51076446, -0.69909851, 0.11233086, -0.6877496, -0.38843958],\n [-0.50295582, -0.68588962, 0.11774145, -0.67501102, -0.39054674],\n [-0.15478959, -0.16820163, 0.07018808, -0.16800822, -0.14982776],\n [0.23271758, 0.20738762, -0.3068774, 0.20692933, 0.32152723],\n [-0.37941033, -0.46960824, 0.08548247, -0.46606675, -0.29087638],\n [-0.25435962, -0.29035462, 0.05680228, -0.28950863, -0.19443034],\n [0.06588453, 0.06368361, -0.05917427, 0.06367135, 0.08008969],\n [0.01837002, 0.01819873, -0.01193611, 0.01819846, 0.02004645],\n [-0.40509825, -0.51115711, 0.09388126, -0.50657893, -0.3135041],\n [0.39800708, 0.33995085, -0.1050213, 0.33854642, 0.32163477],\n [-0.07725479, -0.08029924, 0.02749215, -0.08027925, -0.06897576],\n [-0.20637628, -0.22811815, 0.0325346, -0.22773717, -0.14046399],\n [0.08154315, 0.07840933, 
-0.04072179, 0.07838927, 0.08150995],\n [1.1910859, 0.80960517, -0.48571352, 0.78983713, 1.1128384],\n [0.26456618, 0.23553114, -0.12012076, 0.23500149, 0.25619594],\n [0.23971249, 0.21576538, -0.09755645, 0.21536754, 0.22381495],\n [0.12466272, 0.11807848, -0.02929845, 0.11802056, 0.09692801],\n [-0.04478352, -0.04579575, 0.01802714, -0.04579193, -0.04166114],\n [0.08461206, 0.08145068, -0.02316046, 0.081431, 0.06921718],\n [-0.11222889, -0.11827756, 0.01927437, -0.11822326, -0.0785971],\n [0.66826592, 0.5290729, -0.11819708, 0.52429317, 0.47261967],\n [-0.02497361, -0.02526647, 0.00606328, -0.0252659, -0.01962908],\n [-0.18560142, -0.20481407, 0.06635467, -0.20448324, -0.16596685],\n [-0.42062705, -0.55511035, 0.23495048, -0.54805336, -0.43644907],\n [0.37335652, 0.32316837, -0.06983945, 0.32204863, 0.26902507],\n [0.44395122, 0.37075775, -0.18027585, 0.36875953, 0.41420248],\n [-0.10149399, -0.10627658, 0.01402168, -0.10623904, -0.06610539],\n [1.6776906, 1.0774226, -0.19261374, 1.0429959, 1.0273387],\n [-0.03187361, -0.03248491, 0.06531512, -0.03248296, -0.05100766],\n [0.06182835, 0.06003111, -0.02647815, 0.0600224, 0.05871706],\n [-0.48307609, -0.64353785, 0.09701343, -0.63479909, -0.35642185],\n [-0.81096394, -1.6544539, 0.22537601, -1.5221645, -0.66677645],\n [-0.4026664, -0.51871387, 0.17880383, -0.51321067, -0.38704892],\n [-0.06926005, -0.07178871, 0.03438955, -0.07177333, -0.06909936],\n [-0.66360562, -1.0288145, 0.10497055, -0.9965726, -0.45217429],\n [-0.33803354, -0.41156213, 0.11995102, -0.40891704, -0.30152119],\n [0.22568917, 0.19695072, -2.1495738, 0.19634307, 0.60274637],\n [-0.10028386, -0.10594532, 0.06968719, -0.10589208, -0.1119133],\n [-0.33326482, -0.40322095, 0.10587404, -0.4007912, -0.2865054],\n [0.01988369, 0.0196859, -0.01143858, 0.01968557, 0.02083525],\n [-0.17025897, -0.1869918, 0.08834197, -0.1867182, -0.17237424],\n [0.23507155, 0.21421042, -0.03318566, 0.2139024, 0.15421509],\n [-0.28474643, -0.33233788, 0.07686481, -0.33101849, -0.2318594],\n [-0.58720124, -0.87273312, 0.15584735, -0.85030475, -0.47544591],\n [-0.61495582, -0.98260761, 0.27888785, -0.9475382, -0.59527248],\n [-0.56243197, -0.80025237, 0.10867358, -0.78390244, -0.40966732],\n [-0.14077564, -0.15085131, 0.03234317, -0.15073124, -0.10863152],\n [-0.18260914, -0.20079369, 0.05591265, -0.20049242, -0.15507021],\n [-0.24439112, -0.29069365, 0.62906119, -0.28924033, -0.42198587],\n [-0.73811111, -1.2279392, 0.10035927, -1.176477, -0.47820082],\n [-0.50644645, -0.72439785, 0.27553577, -0.70915905, -0.52090479],\n [-0.17399597, -0.19079487, 0.06462178, -0.19052501, -0.15757806],\n [-0.44498495, -0.60721357, 0.31775135, -0.59754437, -0.5011134],\n [0.07172271, 0.06957196, -0.01082252, 0.06956122, 0.04810873],\n [0.21509852, 0.19535678, -0.09567971, 0.19505536, 0.20687511],\n [0.02863796, 0.02827191, -0.00613238, 0.02827113, 0.02158645],\n [0.25546167, 0.22796332, -0.12927557, 0.22747127, 0.25648737],\n [-0.34061627, -0.41792931, 0.14965998, -0.4150289, -0.32625373],\n [0.77469004, 0.57754495, -0.51279666, 0.56932591, 0.85063591],\n [0.39144017, 0.32691917, -0.49215888, 0.32515813, 0.53229866],\n [0.59615035, 0.45419211, -1.6874646, 0.4486407, 1.0624914],\n [-0.17837085, -0.19626745, 0.07154096, -0.19596873, -0.16573371],\n [-0.1003708, -0.10577087, 0.04640926, -0.10572247, -0.09778746],\n [-0.09795612, -0.1029221, 0.03435635, -0.10288016, -0.08703623],\n [0.09295039, 0.08865096, -0.08461053, 0.08861783, 0.11349732],\n [-0.4072643, -0.54113581, 0.3577372, -0.53391891, -0.49141559],\n [0.65382402, 
0.50566575, -0.41751741, 0.5001435, 0.70937429],\n [-0.5575895, -0.85145524, 0.34610208, -0.82654647, -0.59926805],\n [-0.10217966, -0.10730779, 0.02252941, -0.10726492, -0.07777433],\n [-0.06366886, -0.06577556, 0.02907765, -0.06576394, -0.06177525],\n [-0.43525928, -0.57286957, 0.16729571, -0.56572687, -0.39872208],\n [-0.49811105, -0.67619761, 0.11554628, -0.6657818, -0.38560806],\n [0.50090974, 0.40769452, -0.29216005, 0.4048278, 0.52729857],\n [0.86268364, 0.61965343, -0.9182124, 0.60848892, 1.1097507],\n [-0.62052198, -0.92951709, 0.11244186, -0.90469252, -0.44240917],\n [-0.06257593, -0.06445174, 0.01429789, -0.06444237, -0.04819909],\n [-0.34703233, -0.42965766, 0.176219, -0.42640866, -0.34882494],\n [-0.05892985, -0.06083489, 0.04422308, -0.06082463, -0.0674709],\n [-0.24691258, -0.28168847, 0.06957238, -0.28087497, -0.20394721],\n [-0.36903059, -0.45514355, 0.09453386, -0.45182378, -0.29528907],\n [-0.18309023, -0.20412071, 0.16476016, -0.20371905, -0.22270887],\n [-0.44482225, -0.61975687, 0.4830332, -0.60853947, -0.57604975],\n [-0.08058226, -0.08362613, 0.01368654, -0.08360697, -0.05622561],\n [0.20211563, 0.18364076, -0.15093069, 0.18335982, 0.23103039],\n [-0.26383763, -0.30228513, 0.05257534, -0.30135479, -0.19416094],\n [0.50169006, 0.41084605, -0.20709964, 0.4081262, 0.47064499],\n [0.85690669, 0.62910114, -0.42150416, 0.619198, 0.85224863],\n [-0.67448662, -1.1321169, 0.2257037, -1.0830488, -0.58998158],\n [-0.24488697, -0.28097606, 0.10361773, -0.28009299, -0.23163195],\n [-0.39807196, -0.49524433, 0.06909688, -0.4913284, -0.27977175],\n [-0.33216003, -0.39639229, 0.06221035, -0.39433499, -0.2394395],\n [-0.38177713, -0.48617498, 0.19811154, -0.48147219, -0.38653296],\n [-0.08027696, -0.08357483, 0.02877727, -0.08355226, -0.07184896],\n [-0.36011292, -0.4494137, 0.1704529, -0.4457589, -0.35359322],\n [0.22393373, 0.20272089, -0.09636476, 0.20238664, 0.21300792],\n [0.01812136, 0.01797578, -0.00342167, 0.01797558, 0.01309834],\n [1.0445171, 0.75843336, -0.13225875, 0.74563807, 0.66083825],\n [-0.22269522, -0.25013243, 0.05856105, -0.24957055, -0.17975744],\n [-0.60386461, -0.91304686, 0.15964353, -0.88754515, -0.48830011],\n [0.10351565, 0.09820456, -0.09771299, 0.09815917, 0.12793758],\n [0.18780321, 0.17260011, -0.07782076, 0.17239528, 0.17640484],\n [0.39839468, 0.33369914, -0.35895544, 0.3319591, 0.48480363],\n [-0.56902707, -0.872537, 0.31107061, -0.84651874, -0.58620782],\n [0.52082414, 0.42577496, -0.16874438, 0.42290727, 0.45069269],\n [-0.27582253, -0.32232201, 0.10543888, -0.32102181, -0.25221065],\n [0.13055037, 0.12300944, -0.04868599, 0.12293689, 0.1183941],\n [-0.70347537, -1.2040031, 0.1943892, -1.1479507, -0.57729789],\n [0.31150776, 0.27687318, -0.03738351, 0.27623316, 0.19358986],\n [-0.26786344, -0.30662368, 0.04423454, -0.30569231, -0.18515671],\n [-0.10166485, -0.10788655, 0.12161541, -0.10782314, -0.13597323],\n [-0.28141867, -0.32648367, 0.06218693, -0.32528622, -0.2143606],\n [0.12675404, 0.11969532, -0.04304958, 0.11962985, 0.11142283],\n [0.00564854, 0.00563403, -0.00124235, 0.00563403, 0.00429585],\n [0.70315729, 0.55232744, -0.11447213, 0.54699757, 0.48373952],\n [-0.57315138, -0.83876186, 0.14921006, -0.81883073, -0.46109342],\n [-0.32012522, -0.38622687, 0.13072175, -0.38396798, -0.29923051],\n [0.19846144, 0.18343456, -0.02536435, 0.18324517, 0.12595111],\n [0.40129512, 0.33990812, -0.16938496, 0.33835163, 0.37926625],\n [-0.22856924, -0.25983375, 0.10393876, -0.25912335, -0.22145273],\n [-0.01674834, -0.01690274, 0.01757409, -0.0169025, 
-0.02144283],\n [0.451725, 0.37640179, -0.17925774, 0.37432235, 0.41823341],\n [-0.5069588, -0.73016766, 0.30740562, -0.7142168, -0.54062448],\n [0.14639697, 0.13762086, -0.02806901, 0.13753324, 0.10635889],\n [0.54156343, 0.43020691, -0.53963023, 0.42643087, 0.68151431],\n [-0.00369693, -0.00370369, 0.00164843, -0.00370369, -0.00355845],\n [-0.00518422, -0.0051979, 0.00293264, -0.0051979, -0.00540197],\n [0.52562602, 0.42180305, -0.41413477, 0.41841804, 0.61165772],\n [-0.34805066, -0.41861733, 0.0591539, -0.41624897, -0.24290294],\n [0.19213086, 0.17635487, -0.07498533, 0.17613929, 0.17690231],\n [0.59949237, 0.46575827, -0.68085713, 0.4608489, 0.78804516],\n [-0.269149, -0.31443131, 0.12642314, -0.31316769, -0.26360119],\n [-0.08055403, -0.08380109, 0.02383641, -0.08377928, -0.06763141],\n [-0.52710152, -0.74389095, 0.15786241, -0.72937786, -0.44432329],\n [0.23318428, 0.21130044, -0.06324005, 0.21095881, 0.19016923],\n [-0.347374, -0.41668089, 0.05385617, -0.41439143, -0.23511844],\n [0.73367753, 0.56669864, -0.17585896, 0.56044855, 0.57420678],\n [0.17500619, 0.16226928, -0.04652948, 0.16211496, 0.14178219],\n [0.94605452, 0.68612388, -0.30550686, 0.67446351, 0.81776358],\n [0.50546688, 0.41903062, -0.09826981, 0.4165844, 0.36893103],\n [-0.18998405, -0.20998685, 0.06278385, -0.20963655, -0.16548961],\n [0.37311908, 0.31743209, -0.22857165, 0.31605411, 0.39925383],\n [-0.01912069, -0.01932781, 0.02543713, -0.01932743, -0.02649527],\n [0.31556855, 0.27535671, -0.14827811, 0.27450587, 0.30909939],\n [0.39197774, 0.33229297, -0.19381106, 0.33078654, 0.39052046],\n [-0.6211055, -0.94356796, 0.13609918, -0.91661946, -0.47177916],\n [-0.39929942, -0.51206168, 0.16883709, -0.50681948, -0.37759979],\n [-0.50640822, -0.68089631, 0.08212162, -0.6710497, -0.34793357],\n [-0.05687068, -0.05829852, 0.00664011, -0.05829255, -0.03502091],\n [-0.14211899, -0.15451163, 0.14617386, -0.15433177, -0.1807457],\n [-0.20880173, -0.23566553, 0.13577928, -0.23509118, -0.22791726],\n [-0.12352715, -0.13063606, 0.01541625, -0.13056792, -0.07777581],\n [-0.43868933, -0.59652185, 0.33072572, -0.58723422, -0.50304176],\n [-0.29153569, -0.3398282, 0.05973137, -0.33850118, -0.21654026],\n [0.13964675, 0.13118796, -0.04536704, 0.13110264, 0.12095134],\n [-0.03966402, -0.04042777, 0.01176078, -0.04042532, -0.0333237],\n [0.75850883, 0.58647238, -0.12878309, 0.58005447, 0.52917993],\n [0.15005071, 0.14060256, -0.03747029, 0.14050349, 0.11905046],\n [-0.68233406, -1.0671377, 0.09382966, -1.0323789, -0.44373267],\n [-0.27654892, -0.3235601, 0.10951834, -0.32223473, -0.25587052],\n [-0.28516153, -0.33071434, 0.05497475, -0.32950686, -0.2075511],\n [-0.26369445, -0.30841731, 0.15767347, -0.30715932, -0.27989612],\n [0.3169408, 0.27673987, -0.13613017, 0.27589314, 0.30128688],\n [1.0952246, 0.78043927, -0.18254339, 0.76570135, 0.75939459],\n [0.13669411, 0.12921327, -0.01984136, 0.12914508, 0.0905108],\n [0.330727, 0.28661029, -0.16907958, 0.28563349, 0.33318605],\n [-0.63230002, -0.9027777, 0.04523618, -0.88397517, -0.33071515],\n [-0.52766834, -0.77382029, 0.29878069, -0.75523288, -0.55000698],\n [-0.05007151, -0.05137913, 0.02586399, -0.05137344, -0.05061769],\n [-0.42531462, -0.5499931, 0.12801344, -0.54398146, -0.35911653],\n [-0.1297289, -0.13839983, 0.03563857, -0.13830332, -0.10625301],\n [-0.21443397, -0.24183356, 0.10423078, -0.24125174, -0.2124244],\n [0.46625586, 0.38202951, -0.3615271, 0.3795137, 0.53968434],\n [1.2041013, 0.79553721, -1.1263728, 0.7732085, 1.4836999],\n [-0.26296011, -0.30456318, 0.09809341, 
-0.30347089, -0.23849715],\n [-0.31697151, -0.37666494, 0.07536004, -0.37480244, -0.24740237],\n [-0.22314465, -0.2540796, 0.13911348, -0.25336724, -0.24017293],\n [-0.31576921, -0.38059817, 0.14030666, -0.37839529, -0.30358662],\n [-0.35073082, -0.4215313, 0.05400701, -0.4191653, -0.23685142],\n [-0.09403073, -0.09858561, 0.03256135, -0.09854885, -0.08319374],\n [-0.27935476, -0.32542194, 0.08138768, -0.3241617, -0.23332805],\n [-0.62213469, -0.92903391, 0.1052494, -0.90459631, -0.43351702],\n [-0.14938662, -0.16149883, 0.05493428, -0.16133536, -0.1348442],\n [-0.20664992, -0.23418287, 0.19152947, -0.23357339, -0.25385089],\n [-0.26586532, -0.30991069, 0.12496911, -0.30870023, -0.26044659],\n [-0.25807377, -0.29778868, 0.09331687, -0.29677423, -0.23164662],\n [-0.30314598, -0.36319764, 0.15247232, -0.36122789, -0.30374435],\n [0.24775518, 0.22381456, -0.05283603, 0.22342979, 0.18649558],\n [-0.24843365, -0.28577432, 0.1066353, -0.28484257, -0.23611157],\n [-0.70001612, -1.1721137, 0.16366661, -1.1217738, -0.5433363],\n [-0.16368619, -0.1785629, 0.06550496, -0.1783379, -0.15197638],\n [-0.22438792, -0.25222303, 0.0580311, -0.25164911, -0.18012055],\n [1.4120916, 0.91321611, -0.61659021, 0.88493049, 1.3497421],\n [-0.08394263, -0.08770191, 0.04174102, -0.08767387, -0.08378882],\n [0.11246524, 0.10698612, -0.0304348, 0.10694165, 0.09165279],\n [0.73968257, 0.56947986, -0.19038664, 0.56304099, 0.59281486],\n [1.4090884, 0.91290803, -0.5933184, 0.88485893, 1.3306528],\n [-0.2440235, -0.28366336, 0.21559238, -0.28259501, -0.29501374],\n [-0.24924191, -0.28846526, 0.14492164, -0.28744088, -0.26210086],\n [-0.24698227, -0.28058286, 0.05372172, -0.27982347, -0.18714167],\n [-0.45857664, -0.62671937, 0.25786659, -0.61664209, -0.47688791],\n [-0.61800719, -1.0276645, 0.41359248, -0.98467025, -0.68107755],\n [-0.39285108, -0.49992802, 0.15444424, -0.4951199, -0.36259282],\n [-0.40121812, -0.53012827, 0.35290152, -0.52333129, -0.48433762],\n [-0.54848998, -0.78129082, 0.13547929, -0.76522807, -0.43359084],\n [-0.42025715, -0.56786194, 0.4070682, -0.55937626, -0.52389284],\n [0.34029902, 0.29960747, -0.04043193, 0.29879914, 0.21077801],\n [-0.80333773, -1.5357323, 0.16225583, -1.433301, -0.59384834],\n [0.35708367, 0.31125712, -0.05966209, 0.31028061, 0.24779325],\n [-0.5847827, -0.85951985, 0.13601524, -0.83863343, -0.45310849],\n [-0.40549831, -0.53574142, 0.32356278, -0.52887632, -0.473866],\n [-0.40787468, -0.51196956, 0.0756231, -0.50758728, -0.29303038],\n [0.0808268, 0.07791694, -0.02345714, 0.07789948, 0.06742257],\n [0.11103542, 0.10540773, -0.04950461, 0.10536021, 0.10687257],\n [-0.15895282, -0.17356782, 0.09076858, -0.1733442, -0.16615013],\n [-0.48207555, -0.64863346, 0.1236818, -0.63920946, -0.38594196],\n [-0.41828669, -0.54416709, 0.17141096, -0.53793991, -0.39144633],\n [-0.25512486, -0.2982931, 0.20198933, -0.29708165, -0.29736372],\n [-0.37029916, -0.46148307, 0.13034527, -0.45777687, -0.32941516],\n [0.47744139, 0.39992459, -0.08417699, 0.39784011, 0.33730376],\n [-0.00934437, -0.00938289, 0.00143207, -0.00938286, -0.00630035],\n [-0.02772804, -0.02810658, 0.0100532, -0.02810572, -0.02491098],\n [-0.10725828, -0.11332995, 0.04162304, -0.1132727, -0.09856933],\n [-0.59832789, -0.92792556, 0.24138858, -0.89880156, -0.55702549],\n [-0.57759115, -0.85775157, 0.17569523, -0.83579705, -0.4894147],\n [-0.33794661, -0.40552683, 0.06817076, -0.40328937, -0.24971317],\n [-0.62978867, -1.0370275, 0.32123178, -0.99525298, -0.6339856],\n [-0.68597933, -1.1991312, 0.28782311, -1.1389871, 
-0.64703221],\n [-0.49037436, -0.67011574, 0.15054169, -0.65934682, -0.41678694],\n [0.55890967, 0.45229089, -0.16472336, 0.44893128, 0.46862216],\n [0.32741952, 0.28625396, -0.09524658, 0.28539446, 0.27333558],\n [-0.35087844, -0.43207793, 0.13403453, -0.42897309, -0.32076479],\n [1.5813168, 0.97846186, -0.93921735, 0.94187025, 1.6747293],\n [-0.44018906, -0.55989131, 0.06126661, -0.55452844, -0.28741589],\n [0.06129037, 0.05954432, -0.02349401, 0.05953603, 0.05609497],\n [-0.46163541, -0.60840753, 0.10801341, -0.60074747, -0.35840053],\n [-0.29875723, -0.36047311, 0.2278336, -0.3583632, -0.34389686],\n [1.4552584, 0.95668254, -0.2900742, 0.92919281, 1.0710419],\n [0.53780487, 0.43727525, -0.17474037, 0.43416999, 0.46582655],\n [0.41170242, 0.34757772, -0.16990823, 0.34592261, 0.38619243],\n [-0.12477915, -0.13336909, 0.06188318, -0.13327062, -0.12444064],\n [-0.35026659, -0.42755531, 0.09928534, -0.42473541, -0.28989286],\n [-0.49730549, -0.6883825, 0.17444435, -0.67640159, -0.44188691],\n [-0.20168938, -0.22264205, 0.03508153, -0.22227998, -0.14184854],\n [-0.3716121, -0.4675208, 0.17101449, -0.46343851, -0.36147717],\n [0.10483886, 0.09985598, -0.04200895, 0.09981653, 0.09738053],\n [-0.31658804, -0.37002846, 0.03532148, -0.36853229, -0.19202272],\n [-0.47864818, -0.68117839, 0.39635932, -0.66724558, -0.5663053],\n [-0.62571462, -1.0136703, 0.28378283, -0.97539099, -0.60569814],\n [-0.43625711, -0.56745388, 0.1219175, -0.56096812, -0.35935776],\n [-0.3768037, -0.50466399, 0.80072945, -0.49755497, -0.61035434],\n [1.0621417, 0.75963392, -0.20403967, 0.74559242, 0.77215265],\n [0.0328903, 0.03226835, -0.07481728, 0.03226639, 0.05449908],\n [-0.17185771, -0.18674733, 0.03037659, -0.18653262, -0.12151663],\n [0.0236511, 0.02340173, -0.00486029, 0.02340129, 0.01758457],\n [-0.43862113, -0.57065469, 0.11743041, -0.56412149, -0.35617482],\n [0.46269548, 0.38383737, -0.19158832, 0.38161295, 0.43450674],\n [0.18951731, 0.17559938, -0.02694152, 0.17542923, 0.12461872],\n [-0.32747899, -0.41462936, 0.5704933, -0.41080661, -0.49645796],\n [0.25758844, 0.23049773, -0.09449579, 0.23002404, 0.23232632],\n [0.31758568, 0.27759082, -0.12377705, 0.2767544, 0.2922788],\n [-0.34218485, -0.42648063, 0.24580193, -0.423053, -0.38611109],\n [-0.4088183, -0.51638855, 0.09005913, -0.51172227, -0.3110805],\n [-0.80521678, -1.4050351, 0.07860527, -1.335001, -0.46712786],\n [-0.8546913, -2.0223329, 0.29883109, -1.7923761, -0.75862086],\n [0.69311103, 0.53298743, -0.33765129, 0.52690642, 0.6871233],\n [1.1947123, 0.81219341, -0.47164122, 0.7923775, 1.1042214],\n [-0.40009891, -0.49501237, 0.0551297, -0.49129354, -0.26036552],\n [-0.4081933, -0.521855, 0.12978439, -0.5166433, -0.35101693],\n [-0.45795604, -0.64026731, 0.40870837, -0.62843877, -0.55551659],\n [0.08164061, 0.07866174, -0.02459797, 0.07864363, 0.06895735],\n [-0.04035683, -0.04125175, 0.03510488, -0.04124844, -0.04853747],\n [0.14532344, 0.13313101, -0.87268265, 0.13296079, 0.33280197],\n [-0.50026008, -0.68636627, 0.13933804, -0.67505498, -0.41162063],\n [0.57392939, 0.47021874, -0.06886095, 0.46711995, 0.35664815],\n [-0.73827282, -1.4743277, 0.4215619, -1.3630265, -0.77168792],\n [-0.58281782, -0.84274163, 0.1075697, -0.82393788, -0.41808238],\n [-0.00065633, -0.00065654, 0.00029299, -0.00065654, -0.00063199],\n [0.75288704, 0.57253124, -0.28254898, 0.56543741, 0.68421794],\n [-0.29174285, -0.34598048, 0.13379398, -0.34430968, -0.28345819],\n [-0.01213181, -0.01219526, 0.00148818, -0.0121952, -0.00759473],\n [1.1875734, 0.80992775, -0.44442331, 
0.79048795, 1.0782412],\n [-0.59310681, -0.94491404, 0.35995501, -0.91160125, -0.63267598],\n [-0.49983984, -0.69911699, 0.2045385, -0.68616942, -0.46754376],\n [-0.43655652, -0.55970174, 0.08213568, -0.55398313, -0.31517198],\n [0.06577544, 0.06384116, -0.01799947, 0.06383169, 0.0538029],\n [0.65614772, 0.51896186, -0.14355175, 0.51423367, 0.49813522],\n [-0.73470229, -1.2324181, 0.11317359, -1.1791277, -0.4962105],\n [-0.45860398, -0.61939807, 0.20045514, -0.61016967, -0.43850473],\n [-0.35747689, -0.4564202, 0.36142831, -0.45190998, -0.45204607],\n [0.56119242, 0.44767566, -0.33020406, 0.4438875, 0.59248679],\n [0.63886023, 0.49972097, -0.29988187, 0.49473085, 0.6255528],\n [0.22006193, 0.19891531, -0.12866661, 0.19857732, 0.23184367],\n [-0.70226265, -1.2038878, 0.20010291, -1.1475115, -0.58222946],\n [0.31365132, 0.27515623, -0.10312727, 0.27437149, 0.27275073],\n [-0.25744216, -0.29712234, 0.09625626, -0.29610719, -0.23367169],\n [0.2909643, 0.25914074, -0.05412012, 0.25856219, 0.20926152],\n [1.448869, 0.96127786, -0.22405255, 0.93483027, 0.97981942],\n [-0.63537241, -0.98650244, 0.1514761, -0.95538216, -0.49637542],\n [0.06547446, 0.06338746, -0.03991762, 0.06337638, 0.06994868],\n [-0.45136775, -0.59744329, 0.14350211, -0.58968686, -0.38813527],\n [-0.18090053, -0.19645099, 0.01893559, -0.1962285, -0.10741448],\n [-0.01422981, -0.01433352, 0.00801671, -0.0143334, -0.01480726],\n [-0.28676242, -0.33361277, 0.06176315, -0.33234288, -0.2165715],\n [-0.03810031, -0.0389361, 0.05062011, -0.03893305, -0.05277198],\n [-0.45690859, -0.60877016, 0.15199966, -0.60049491, -0.39888137],\n [0.49643102, 0.41044933, -0.13213891, 0.4079852, 0.40233967],\n [-0.52586306, -0.76036553, 0.24477965, -0.74340229, -0.51347213],\n [-0.5924395, -0.87061468, 0.12222478, -0.8494784, -0.44105467],\n [0.28472938, 0.25311973, -0.07550306, 0.25253648, 0.23047281],\n [-0.80915386, -1.5112437, 0.13010558, -1.4172483, -0.5543653],\n [-0.34930171, -0.42981181, 0.13533621, -0.42674549, -0.32083505],\n [0.27165529, 0.24119811, -0.1222478, 0.24063059, 0.26228239],\n [-0.591751, -0.94386109, 0.3716657, -0.91042169, -0.63848911],\n [0.48929003, 0.40475457, -0.1440689, 0.40233778, 0.41012004],\n [-0.3686661, -0.47023772, 0.27450567, -0.46562852, -0.42100046],\n [0.0078351, 0.00780773, -0.00146312, 0.00780771, 0.00564243],\n [1.2654177, 0.83530349, -0.77888165, 0.8117597, 1.356198],\n [0.39521216, 0.3377571, -0.10694825, 0.33637183, 0.32207333],\n [-0.49057537, -0.67009995, 0.14870434, -0.65936082, -0.4151978],\n [-0.19877268, -0.22288743, 0.12935537, -0.22240111, -0.21702475],\n [-0.58216871, -0.87924513, 0.21055823, -0.85481939, -0.52259668],\n [-0.2297186, -0.25718772, 0.03643411, -0.25664167, -0.15666675],\n [-0.31661211, -0.37751449, 0.08802402, -0.37557404, -0.26035269],\n [-0.27206636, -0.32236391, 0.22499123, -0.32082303, -0.32174744],\n [0.16440459, 0.15357079, -0.02835786, 0.15345192, 0.11530384],\n [0.00103929, 0.0010388, -0.00021298, 0.0010388, 0.00077199],\n [-0.33083111, -0.39558536, 0.06980408, -0.39348639, -0.24814632],\n [-0.12046646, -0.12864033, 0.07287372, -0.12854797, -0.12836435],\n [-0.43725155, -0.55670313, 0.06651499, -0.55132732, -0.29408356],\n [-0.3054157, -0.35349905, 0.02859342, -0.35224275, -0.17472684],\n [1.1598261, 0.80217503, -0.33297464, 0.78428477, 0.96399688],\n [-0.59378871, -0.8659557, 0.10641497, -0.84574523, -0.42179251],\n [0.05247017, 0.05130234, -0.00809786, 0.051298, 0.03546024],\n [0.70684525, 0.55549519, -0.1072906, 0.55015624, 0.47505816],\n [0.31151861, 0.27466035, 
-0.07308972, 0.27393582, 0.24207582],\n [-0.6991559, -1.3535436, 0.53619384, -1.2598869, -0.80630612],\n [0.03458053, 0.0339866, -0.0205769, 0.0339849, 0.03664582],\n [0.82682948, 0.60682106, -0.56819092, 0.59724861, 0.919289],\n [-0.10284804, -0.10861681, 0.0539691, -0.10856291, -0.10451752],\n [-0.3197203, -0.38921434, 0.18637731, -0.38671619, -0.33650213],\n [0.97145615, 0.70037602, -0.30919677, 0.68803492, 0.83567443],\n [0.0025278, 0.00252482, -0.0006883, 0.00252482, 0.00206426],\n [0.05788055, 0.05636685, -0.01664403, 0.05636025, 0.04813392],\n [-0.52883627, -0.73940217, 0.1293742, -0.7257381, -0.41671588],\n [-0.39248162, -0.48165016, 0.04845533, -0.47830192, -0.24622761],\n [-0.3925012, -0.51354987, 0.33019403, -0.50741698, -0.46683176],\n [-0.56031347, -0.81448204, 0.16065483, -0.79579414, -0.46550959],\n [0.22330768, 0.2037657, -0.04331321, 0.20348115, 0.1628618],\n [1.2671633, 0.85776025, -0.33976017, 0.83637302, 1.029491],\n [-0.43456787, -0.5717868, 0.1681824, -0.56467313, -0.39900217],\n [-0.24206258, -0.28093407, 0.21175835, -0.27989831, -0.29168132],\n [-0.42873818, -0.5629535, 0.1780476, -0.55605376, -0.40301091],\n [-0.49242361, -0.6598997, 0.09536759, -0.65056734, -0.35895184],\n [0.13989783, 0.13073676, -0.09689245, 0.13063687, 0.1559483],\n [0.7407564, 0.55949554, -0.42875698, 0.55221846, 0.77779296],\n [-0.18645751, -0.20665062, 0.09067891, -0.2062869, -0.18474187],\n [-0.55840691, -0.85765763, 0.36963806, -0.8318906, -0.61315379],\n [0.31715448, 0.27514663, -0.21852665, 0.27422302, 0.35293281],\n [-0.7165565, -1.2868527, 0.25484644, -1.2161667, -0.63964128],\n [0.974585, 0.69060403, -0.56893569, 0.67713489, 1.0262288],\n [-0.32192358, -0.38577559, 0.09508929, -0.38367856, -0.27011937],\n [-0.5998119, -0.89087792, 0.13017601, -0.86805931, -0.45414761],\n [0.43858688, 0.3659991, -0.20787794, 0.3640099, 0.43084054],\n [-0.15953623, -0.17482537, 0.12207043, -0.17458157, -0.18384551],\n [-0.25385373, -0.2911425, 0.07588293, -0.29023302, -0.21385221],\n [-0.39446492, -0.49894205, 0.12492114, -0.49438067, -0.33876165],\n [0.0306346, 0.03018139, -0.01370242, 0.03018027, 0.02951779],\n [1.4738252, 0.93933028, -0.68459262, 0.90830075, 1.4380855],\n [-0.29050831, -0.34106826, 0.08722709, -0.33960921, -0.24509409],\n [0.59850856, 0.47340936, -0.29846146, 0.46909894, 0.59797954],\n [2.4104501, 1.2709452, -2.3575449, 1.1881433, 3.0145935],\n [-0.67469976, -1.0052526, 0.05320747, -0.97910221, -0.36453668],\n [-0.46940231, -0.66813162, 0.46465258, -0.65445295, -0.58940833],\n [-0.27271976, -0.32287399, 0.2127947, -0.32134546, -0.3163307],\n [-0.35825387, -0.45985633, 0.41275096, -0.45511323, -0.47318753],\n [0.18800499, 0.17256188, -0.08955579, 0.17235077, 0.18499251],\n [0.15566192, 0.1440027, -0.15870237, 0.14385733, 0.19739041],\n [-0.39539076, -0.49323855, 0.07964831, -0.48924201, -0.29202489],\n [0.59827041, 0.48136068, -0.12866286, 0.47758885, 0.4516057],\n [0.00012764, 0.00012763, -5.284e-05, 0.00012763, 0.00011986],\n [-0.60613341, -0.91044242, 0.14108794, -0.88580433, -0.46977018],\n [0.14661279, 0.13790178, -0.02540357, 0.13781558, 0.10298078],\n [1.1334289, 0.79676324, -0.22650754, 0.78050387, 0.83489861],\n [-0.2907386, -0.33234446, 0.02116708, -0.33135568, -0.15295562],\n [0.37710646, 0.3241227, -0.1058066, 0.32288766, 0.31104524],\n [-0.02883914, -0.02924234, 0.0090557, -0.0292414, -0.0246967],\n [-0.02005801, -0.02024287, 0.00414231, -0.02024259, -0.01493767],\n [0.23177615, 0.21016051, -0.06218017, 0.20982518, 0.1883388],\n [0.76446302, 0.58331524, -0.21613315, 
0.57626475, 0.6321519],\n [-0.48101958, -0.66892298, 0.24911215, -0.65695167, -0.48668744],\n [-0.04201194, -0.04284543, 0.00963797, -0.04284267, -0.03240313],\n [0.18774025, 0.17117493, -0.18893547, 0.17093174, 0.23703879],\n [0.74734106, 0.56677162, -0.33648461, 0.55961035, 0.72167938],\n [-0.6591305, -1.0723949, 0.19621188, -1.0311942, -0.55449721],\n [-0.22167818, -0.24704873, 0.03470137, -0.24656593, -0.15052461],\n [0.04388406, 0.04306384, -0.00668581, 0.04306129, 0.02953016],\n [-0.22977463, -0.26124438, 0.1006082, -0.2605284, -0.21983112],\n [-0.51127357, -0.75846885, 0.45884498, -0.73915675, -0.62134702],\n [-0.19452091, -0.21545657, 0.06160036, -0.21508181, -0.16705082],\n [0.58082574, 0.4560683, -0.53736887, 0.45165396, 0.71306875],\n [0.28466945, 0.2499898, -0.2065418, 0.24928803, 0.32228485],\n [0.14022023, 0.13136471, -0.06636631, 0.13127158, 0.13767857],\n [-0.39441849, -0.54777294, 1.1885606, -0.53804669, -0.71777547],\n [-0.2854344, -0.33754261, 0.14041345, -0.33596604, -0.28389023],\n [-0.10232612, -0.10792736, 0.04591567, -0.10787629, -0.09870094],\n [-0.22298031, -0.24819432, 0.03019943, -0.24772022, -0.14427373],\n [-0.49189513, -0.65191499, 0.07435375, -0.64337513, -0.33013581],\n [0.54129734, 0.44473221, -0.09421088, 0.44188313, 0.38077428],\n [-0.3141839, -0.38136968, 0.19219353, -0.37899305, -0.33603064],\n [0.52910353, 0.42971918, -0.21126421, 0.42663465, 0.49088423],\n [-0.62464995, -0.97954638, 0.19663399, -0.94726566, -0.53536996],\n [0.75256547, 0.56658394, -0.4322483, 0.55904583, 0.78816526],\n [-0.55772974, -0.84573248, 0.31387839, -0.82178593, -0.58015808],\n [0.15164157, 0.14298332, -0.01310932, 0.14290099, 0.08447903],\n [0.0153214, 0.01520807, -0.00617892, 0.01520793, 0.01426198],\n [0.47089409, 0.38773578, -0.25429813, 0.38530659, 0.48314024],\n [-0.05675135, -0.0582369, 0.00969293, -0.05823042, -0.03967155],\n [0.11061595, 0.10520062, -0.03643206, 0.10515646, 0.09624609],\n [0.01416898, 0.01406766, -0.00848756, 0.01406754, 0.01504862],\n [-0.24161691, -0.27095853, 0.02734499, -0.27036622, -0.1472496],\n [0.01455121, 0.0144392, -0.0134194, 0.01443906, 0.01784516],\n [0.46859083, 0.3934604, -0.08443875, 0.39146497, 0.33346719],\n [-0.22719214, -0.25977519, 0.15469994, -0.25899916, -0.25182697],\n [0.15358846, 0.14319541, -0.06368345, 0.14307831, 0.1442972],\n [0.3556438, 0.31156832, -0.04197952, 0.31066111, 0.21980252],\n [0.17859891, 0.16500587, -0.0621345, 0.16483366, 0.15826084],\n [-0.37956082, -0.48827711, 0.27626347, -0.48315254, -0.43016864],\n [0.3731618, 0.31932929, -0.15510458, 0.31804123, 0.35087276],\n [-0.18786051, -0.208419, 0.0925445, -0.20804484, -0.18693209],\n [0.81327041, 0.60721746, -0.32707028, 0.59866332, 0.75633409],\n [-0.16706276, -0.18441847, 0.15832612, -0.18411855, -0.20675084],\n [0.94132863, 0.70605769, -0.08417635, 0.69641767, 0.53035606],\n [-0.33784023, -0.41811617, 0.21561216, -0.41496577, -0.366473],\n [-0.13640498, -0.14617173, 0.04176048, -0.14605529, -0.11582935],\n [1.6127625, 1.014966, -0.48695071, 0.97957604, 1.3631761],\n [-0.53208943, -0.78188964, 0.28847348, -0.76291209, -0.54664039],\n [1.6146371, 1.0290688, -0.32997947, 0.99507393, 1.1982725],\n [-0.77581268, -1.3487554, 0.10245997, -1.2823816, -0.49777452],\n [-0.49310456, -0.7287983, 0.57941269, -0.71058407, -0.65558954],\n [0.1963589, 0.18089863, -0.0410606, 0.18069602, 0.14684262],\n [-0.69517518, -1.2632614, 0.3566579, -1.1911621, -0.7011702],\n [-0.03665689, -0.03735259, 0.01946688, -0.03735039, -0.03740066],\n [-0.09244805, -0.09707926, 0.04940959, 
-0.09704061, -0.09452476],\n [-0.31164636, -0.3733888, 0.12230389, -0.37136336, -0.28747371],\n [-0.1985375, -0.22173589, 0.09819529, -0.22128522, -0.19781926],\n [-0.26318965, -0.31195439, 0.29793548, -0.31045717, -0.34559168],\n [-0.34121537, -0.41643027, 0.12159344, -0.41368879, -0.30478887],\n [-0.30756931, -0.37637527, 0.30765907, -0.37383066, -0.38755074],\n [-0.19443444, -0.21632326, 0.08733718, -0.21591349, -0.18761109],\n [-0.54927006, -0.77451596, 0.11179131, -0.75947913, -0.40707082],\n [-0.13259001, -0.14222483, 0.05905857, -0.14210826, -0.1275787],\n [0.13708588, 0.12851652, -0.07184424, 0.12842731, 0.13925233],\n [-0.12015573, -0.12784866, 0.04628493, -0.12776663, -0.11015043],\n [-0.36949224, -0.44524532, 0.0395459, -0.44267477, -0.22102804],\n [0.70993199, 0.54609802, -0.27478998, 0.53988256, 0.6518607],\n [0.77662021, 0.58105767, -0.43460627, 0.5729868, 0.80633304],\n [-0.54946963, -0.91417902, 1.0649076, -0.87585537, -0.86313045],\n [0.02407643, 0.02380893, -0.0068007, 0.02380844, 0.01990318],\n [0.29569801, 0.25968919, -0.1517248, 0.25896081, 0.29825962],\n [-0.368871, -0.46224996, 0.16047575, -0.45835003, -0.35215145],\n [0.52276536, 0.42167726, -0.33247035, 0.41844923, 0.56641164],\n [-0.44749152, -0.63940798, 0.72845226, -0.62603375, -0.66323419],\n [-0.65579296, -1.0719928, 0.2150347, -1.0300301, -0.56975833],\n [-0.6022482, -0.93903725, 0.24527004, -0.90885475, -0.56243868],\n [0.4680247, 0.38561959, -0.25765354, 0.38321946, 0.48328267],\n [0.16268144, 0.15218796, -0.02493842, 0.15207525, 0.10969622],\n [0.19373071, 0.17705278, -0.11399762, 0.17681387, 0.20453811],\n [0.19253084, 0.17717772, -0.05471559, 0.17697395, 0.15948282],\n [-0.47507919, -0.64150947, 0.15042388, -0.63196539, -0.40796816],\n [0.30893445, 0.26958626, -0.17485314, 0.26875407, 0.3219675],\n [0.95742478, 0.68410067, -0.48450958, 0.67138558, 0.96127408],\n [-0.00020088, -0.0002009, 4.746e-05, -0.0002009, -0.00015646],\n [-0.41655101, -0.53772901, 0.14433722, -0.5319297, -0.36862227],\n [0.69049373, 0.5340279, -0.26978312, 0.52819607, 0.63599653],\n [-0.19829589, -0.2207681, 0.07851276, -0.22034462, -0.18345627],\n [-0.34670537, -0.41593531, 0.05499794, -0.41364655, -0.23646445],\n [-0.37843404, -0.47587802, 0.14570374, -0.47173978, -0.34686491],\n [-0.08398264, -0.08753776, 0.02584999, -0.08751269, -0.07144248],\n [0.76964305, 0.57764319, -0.41079793, 0.56979036, 0.78658498],\n [0.03622165, 0.03556509, -0.02337097, 0.03556311, 0.03943493],\n [-0.26755896, -0.31635395, 0.23388636, -0.31487911, -0.322323],\n [-0.01255189, -0.0126362, 0.01056077, -0.01263611, -0.01492959],\n [-0.30845422, -0.36666675, 0.09509106, -0.36484664, -0.26253288],\n [1.338161, 0.87983412, -0.56482266, 0.85457017, 1.2646964],\n [-0.33926235, -0.41223079, 0.10806819, -0.40963487, -0.29192172],\n [-0.15645295, -0.16877968, 0.03016566, -0.16861799, -0.1138772],\n [-0.20904858, -0.23396862, 0.0756621, -0.23347476, -0.18770144],\n [-0.44183262, -0.59607991, 0.26353094, -0.58726416, -0.46858932],\n [0.19758775, 0.18025388, -0.11825369, 0.18000086, 0.20979167],\n [-0.36086085, -0.44873655, 0.14915824, -0.44520369, -0.33867691],\n [-0.02459151, -0.02489064, 0.00947921, -0.02489003, -0.02254884],\n [-0.70225898, -1.1268567, 0.10123357, -1.0859291, -0.46392707],\n [0.16521037, 0.15188618, -0.2088498, 0.15170734, 0.22506755],\n [1.1967308, 0.81044147, -0.5290354, 0.79027901, 1.1486011],\n [0.14631725, 0.13723089, -0.03995947, 0.13713693, 0.11960433],\n [-0.07734385, -0.08035544, 0.02459876, -0.0803359, -0.06651684],\n [-0.64371163, 
-1.0402404, 0.22002504, -1.0013503, -0.56705903],\n [1.5524088, 0.97495273, -0.70290097, 0.94065939, 1.5019153],\n [-0.28060654, -0.32433074, 0.05227474, -0.32319997, -0.20191681],\n [-0.40252318, -0.52402258, 0.23730785, -0.51799455, -0.42524703],\n [0.19208406, 0.17463573, -0.21253742, 0.17437205, 0.25031258],\n [-0.44132255, -0.56914823, 0.0885403, -0.56305679, -0.32550755],\n [0.84461867, 0.6380791, -0.14874639, 0.62979241, 0.59648464],\n [0.43705352, 0.37012395, -0.08664179, 0.36842511, 0.32107677],\n [-0.41079452, -0.52282583, 0.10842605, -0.51779244, -0.33199966],\n [-0.09970087, -0.10475503, 0.02981724, -0.10471235, -0.08400371],\n [-0.30064167, -0.3554778, 0.09212965, -0.35382014, -0.25537356],\n [-0.85525691, -1.8855153, 0.219699, -1.7025483, -0.68498925],\n [1.1113579, 0.77408623, -0.36683635, 0.7574659, 0.96769112],\n [-0.44689844, -0.59398737, 0.17145775, -0.58604801, -0.40913654],\n [0.20610637, 0.18756321, -0.10950147, 0.18728566, 0.21031856],\n [0.55131358, 0.44116271, -0.32611823, 0.43753094, 0.58308807],\n [-0.17439685, -0.19111203, 0.05991796, -0.19084547, -0.15389391],\n [-0.013729, -0.0138198, 0.00449467, -0.0138197, -0.01192162],\n [-0.63042134, -0.99667387, 0.20170184, -0.96265864, -0.54325102],\n [-0.62553683, -0.9774335, 0.18596994, -0.9457211, -0.52600859],\n [-0.28148494, -0.3432292, 0.54867406, -0.34098958, -0.44301481],\n [0.17822118, 0.16438191, -0.07745754, 0.16420303, 0.17008687],\n [-0.36264389, -0.44273912, 0.075674, -0.43981411, -0.27100615],\n [-0.08051297, -0.08340306, 0.00892393, -0.08338577, -0.04872733],\n [-0.45501394, -0.63376352, 0.40062633, -0.6223134, -0.54946449],\n [-0.18714207, -0.20608581, 0.05220646, -0.20576681, -0.15406324],\n [0.43589437, 0.36399211, -0.20999187, 0.36202815, 0.43052495],\n [-0.21860384, -0.24612779, 0.07998934, -0.24555183, -0.19699683],\n [0.63163106, 0.47671587, -1.5831303, 0.47048265, 1.0809971],\n [0.27460516, 0.2445488, -0.08665917, 0.24400198, 0.23555228],\n [0.01103839, 0.01098239, -0.00276905, 0.01098235, 0.00877117],\n [-0.46884904, -0.63417799, 0.17350826, -0.6246366, -0.42410371],\n [-0.60840711, -0.91799413, 0.1460026, -0.89260785, -0.47635023],\n [0.27226217, 0.24193171, -0.11211278, 0.24137013, 0.25520347],\n [0.56206722, 0.45804978, -0.10865281, 0.45486822, 0.40946409],\n [2.2574775, 1.2763903, -0.66359112, 1.2097591, 1.8911493],\n [0.45320571, 0.37170034, -0.42998167, 0.36927644, 0.56107841],\n [0.42725895, 0.36167879, -0.10665048, 0.36001042, 0.33894174],\n [0.00697976, 0.00695695, -0.00202444, 0.00695693, 0.00582111],\n [-0.70044707, -1.2553482, 0.30065116, -1.1868559, -0.66570401],\n [-0.48982143, -0.67466904, 0.17847675, -0.66327803, -0.4407873],\n [0.15351535, 0.14273853, -0.09234347, 0.14261257, 0.16327258],\n [-0.43391792, -0.57738764, 0.22288376, -0.56960922, -0.43783237],\n [-0.54102287, -0.75076049, 0.0933284, -0.73749592, -0.37945332],\n [-0.63054991, -0.95239523, 0.11130689, -0.92592675, -0.44565317],\n [-0.04260425, -0.04347693, 0.01139051, -0.04347395, -0.0345801],\n [0.26530286, 0.23776463, -0.06493007, 0.23728936, 0.20908357],\n [-0.51412507, -0.72472348, 0.18861556, -0.71067954, -0.46371203],\n [0.34446129, 0.29870085, -0.11665038, 0.29769178, 0.30250484],\n [-0.29797033, -0.34568092, 0.03933844, -0.3444133, -0.19116032],\n [-0.60360127, -0.9037539, 0.139343, -0.87967138, -0.46652166],\n [0.40768739, 0.34950269, -0.06766524, 0.34812523, 0.28228213],\n [-0.37466846, -0.4792952, 0.26222624, -0.47448427, -0.41911526],\n [-0.46921854, -0.64741377, 0.26058549, -0.63636552, -0.48593351],\n 
[large unlabeled numeric array omitted: several hundred rows of 5 float columns (values roughly in the range -4 to 3), serialized with literal \n escapes; no variable name, headers, or caption are recoverable from the dump]
-0.20847818],\n [1.2125627, 0.7984311, -1.1857801, 0.77566244, 1.5164014],\n [-0.32860078, -0.40410853, 0.21625493, -0.40124137, -0.36011765],\n [-0.21919949, -0.24554758, 0.05514477, -0.24502109, -0.17434298],\n [0.39846564, 0.33805359, -0.16176033, 0.33653529, 0.37173027],\n [0.14472573, 0.13684479, -0.01196691, 0.13677331, 0.07943914],\n [-0.62022414, -0.92676789, 0.10887495, -0.90231422, -0.43754068],\n [-0.14261973, -0.1536374, 0.05400271, -0.1534957, -0.12999748],\n [-0.01461438, -0.01471576, 0.0041704, -0.01471564, -0.01212242],\n [0.99225736, 0.70501776, -0.44703681, 0.69147821, 0.95838641],\n [-0.48755881, -0.66838696, 0.16829922, -0.65742882, -0.43091212],\n [-0.3777599, -0.48123749, 0.2179296, -0.47656828, -0.3962105],\n [0.20800236, 0.18925086, -0.10391383, 0.18896962, 0.20794411],\n [-0.19079329, -0.213611, 0.16107615, -0.21315735, -0.22719345],\n [0.31904245, 0.27589917, -0.26252013, 0.2749309, 0.37667186],\n [0.04010878, 0.0393186, -0.02236594, 0.039316, 0.04159413],\n [-0.02494808, -0.02525158, 0.00845639, -0.02525096, -0.02191609],\n [0.8212072, 0.60966564, -0.38077433, 0.60074282, 0.80081903],\n [-0.24368044, -0.27772481, 0.07283286, -0.2769348, -0.2052735],\n [0.66968971, 0.52729017, -0.15116308, 0.5223009, 0.51373718],\n [-0.22175179, -0.25047303, 0.08820703, -0.24985487, -0.2054735],\n [-0.38644071, -0.47499015, 0.05802841, -0.47163706, -0.25878879],\n [0.01919507, 0.01902635, -0.00490186, 0.01902611, 0.01534347],\n [-0.3713334, -0.4571303, 0.08430312, -0.45385478, -0.28540871],\n [0.14357274, 0.13569087, -0.01381273, 0.13561879, 0.08288664],\n [-0.13841049, -0.1481505, 0.03228135, -0.14803637, -0.10734288],\n [0.87551684, 0.66655617, -0.08041144, 0.65836644, 0.49769004],\n [0.91127172, 0.66035816, -0.42523081, 0.64907921, 0.89053331],\n [-0.30803484, -0.37574199, 0.27045591, -0.3732811, -0.37162825],\n [-0.00678639, -0.00681114, 0.00619686, -0.00681113, -0.00829519],\n [-0.27461572, -0.32156385, 0.12059118, -0.32023279, -0.26298568],\n [-0.27504152, -0.31881524, 0.07147225, -0.31765917, -0.22113356],\n [0.73670468, 0.57617649, -0.09651916, 0.57041651, 0.47142231],\n [-0.47160338, -0.6449074, 0.20866342, -0.63449845, -0.45276903],\n [0.01841131, 0.01823494, -0.0149963, 0.01823466, 0.02166346],\n [-0.5864559, -0.87238967, 0.15950373, -0.84987263, -0.47872986],\n [-0.23419911, -0.26584045, 0.07806246, -0.26513025, -0.20458835],\n [-0.06042102, -0.06234982, 0.03249908, -0.06233956, -0.06190976],\n [-0.56688868, -0.98333402, 1.2357693, -0.93532088, -0.92609242],\n [-0.64334619, -0.99313137, 0.12726082, -0.96260133, -0.47228562],\n [-0.37870995, -0.50850668, 0.81436117, -0.50121973, -0.61586685],\n [-0.72441698, -1.1832351, 0.09617001, -1.1370623, -0.46560286],\n [-0.22634773, -0.25477515, 0.0594121, -0.25418177, -0.18259368],\n [0.42793116, 0.35498325, -0.35874136, 0.35292514, 0.50837762],\n [0.47199579, 0.39193388, -0.15444486, 0.38968604, 0.40978862],\n [1.1985705, 0.83253909, -0.22061912, 0.81439502, 0.85901372],\n [0.23502298, 0.2116479, -0.10906943, 0.21126127, 0.22925451],\n [-0.39761407, -0.50587964, 0.1393689, -0.50102296, -0.35321552],\n [-0.30317936, -0.36865385, 0.27304678, -0.36631514, -0.36888312],\n [0.52891415, 0.4286696, -0.23649281, 0.42553087, 0.50957264],\n [0.09077196, 0.08692412, -0.04414515, 0.08689695, 0.08993709],\n [-0.14416059, -0.1568803, 0.14318278, -0.15669352, -0.18121937],\n [0.73647886, 0.57278216, -0.12516194, 0.56679404, 0.51397376],\n [-0.30020097, -0.35901216, 0.15278503, -0.35710415, -0.3019801],\n [-0.30970783, -0.36488812, 0.0616491, 
-0.36325833, -0.22783496],\n [-0.13090644, -0.14142637, 0.14714733, -0.14128564, -0.17148842],\n [1.7228459, 1.0583109, -0.58431917, 1.0175577, 1.5137622],\n [-0.22877864, -0.25919887, 0.08362009, -0.25852681, -0.20609013],\n [-0.82510806, -1.6126582, 0.1515493, -1.4979969, -0.59092886],\n [-0.64003893, -1.0073362, 0.1692274, -0.97361338, -0.51757245],\n [-0.13130595, -0.14057566, 0.05050397, -0.1404667, -0.12031177],\n [-0.18244628, -0.19973106, 0.03812658, -0.19945858, -0.13640887],\n [0.02164936, 0.0214168, -0.01169166, 0.02141638, 0.02221257],\n [0.50690309, 0.41969105, -0.10328319, 0.41720801, 0.37581131],\n [1.0020303, 0.7351515, -0.12375838, 0.72352927, 0.62871736]]\n\n\ninvgauss_resids = np.asarray(invgauss).astype(float).reshape(-1, 5)\n\nwfs_resids = [-1.168096, -1.274198, -22.17336, -1.276003, -3.11599,\n 1.11769, 1.074522, 93.39722, 1.074787, 4.886437,\n .7295976, .715076, 143.1204, .7151231, 4.239254,\n .6471395, .6355711, 124.1729, .6356048, 3.732567,\n .5949094, .5789607, 24.55698, .5790298, 2.056012,\n -.8578523, -.8829973, -123.7017, -.8831246, -4.498492,\n .9996619, .9733517, 207.696, .9734635, 5.92077, -.3647809,\n -.3685094, -82.85051, -.3685159, -2.225631, -.5218114,\n -.5276356, -268.2977, -.5276466, -4.180369, -.9659918,\n -.9815653, -1084.813, -.981608, -10.04078, .3903325,\n .3878348, 390.8879, .3878375, 3.905176, -.1136672,\n -.1140306, -24.22441, -.1140308, -.6789552, 1.404348,\n 1.344308, 166.2025, 1.344715, 6.894916, .6979978, .6888638,\n 444.2878, .6888834, 6.004232, -.4536614, -.4585339, -171.4895,\n -.4585427, -3.280201, -.1426753, -.1432245, -34.60201,\n -.1432249, -.8897463, 4.041986, 3.678221, 839.28, 3.683146,\n 23.93493, -.4076838, -.4104721, -420.7155, -.4104753,\n -4.119822, 1.037925, 1.021274, 1152.838, 1.021318, 10.74897,\n 1.002495, .9780589, 268.3886, .9781554, 6.461147, -1.810334,\n -1.851575, -4981.573, -1.851736, -25.3685, -1.028229,\n -1.038076, -6328.413, -1.038092, -18.84335, -.3727405,\n -.3743742, -1096.784, -.3743754, -5.341272, .5779432,\n .5697695, 164.1083, .5697884, 3.798688, 1.083387, 1.055122,\n 294.6827, 1.055241, 7.019516, .2389214, .2379634, 227.0207,\n .237964, 2.348868, -.6575496, -.6669395, -330.597, -.6669623,\n -5.228599, -1.223007, -1.27265, -208.9083, -1.273003, -6.785854,\n .5404838, .534834, 319.6869, .5348437, 4.536944, -2.227986,\n -2.295954, -5036.329, -2.296313, -29.24016, .2379562,\n .2370335, 247.2598, .2370341, 2.410178, 1.05744, 1.030792,\n 298.8007, 1.030901, 6.939011, .3025656, .3017648, 2051.827,\n .3017652, 5.726993, -.1662367, -.1664337, -2143.633,\n -.1664338, -3.898237, .9615754, .9505365, 2399.173,\n .9505573, 13.04196, .6036977, .5925143, 84.08839, .5925481,\n 3.12938, -.4469533, -.4512659, -221.0249, -.451273,\n -3.534449, .9504169, .9381722, 1602.909, .9381981, 11.31303,\n -1.58407, -1.647805, -602.1467, -1.648254, -11.47494,\n -.5808498, -.5975259, -27.5484, -.5976085, -2.102526,\n -.6490194, -.6557381, -799.9641, -.6557499, -6.958708,\n .6957488, .6903502, 2190.347, .6903571, 10.19701, .1750624,\n .174529, 150.309, .1745293, 1.663886, .1572418, .1562937,\n 12.33672, .1562946, .6731502, -.6408126, -.6436554,\n -9244.279, -.6436575, -15.59954, 1.101677, 1.092417,\n 10778.35, 1.09243, 23.56244, -.4992968, -.5028596,\n -835.3172, -.5028639, -5.927292, -.9280973, -1.02178,\n -6.907707, -1.023576, -1.812064, .6339129, .6277187,\n 744.4614, .6277287, 6.688065, -.6707486,\n -.6785607, -647.0199, -.6785761, -6.627439, -1.38237,\n -1.427934, -602.4287, -1.428195, -10.48056, -1.459774,\n -1.554047, -125.0777, 
-1.555148, -6.435521, -1.093153,\n -1.111322, -1629.418, -1.111374, -12.48719, -.3837357,\n -.3857544, -717.1755, -.3857562, -4.726757, -.5606073,\n -.5670648, -326.075, -.5670773, -4.679632, .8169871, .7887753,\n 38.50973, .7889313, 2.951211, 1.275712, 1.265811, 24735.07,\n 1.265823, 34.27201, .0862263, .0861666, 766.7436, .0861666,\n 1.786392, .3344287, .3328024, 485.5152, .3328038, 3.786779,\n -1.345582, -1.471176, -36.61779, -1.473379, -4.047349,\n .4774571, .4752944, 2502.141, .475296, 8.29329, .349272,\n .3477617, 824.9081, .3477628, 4.651336, -.5002936,\n -.5066125, -158.1673, -.506626, -3.408177, -1.247884,\n -1.411017, -12.7456, -1.415246, -2.707506, -1.673559,\n -1.700277, -10051.77, -1.70035, -30.4211, .3986639,\n .3972206, 2398.038, .3972214, 7.250311, 1.482003, 1.444616,\n 1148.828, 1.444769, 13.61396, .0587717, .0587566, 3267.75,\n .0587566, 2.243168, .4706421, .4689196, 4507.782, .4689206,\n 9.994969, -1.157463, -1.186371, -642.0396, -1.186495, -9.510254]\nwfs_resids = np.asarray(wfs_resids).astype(float).reshape(-1, 5)\n",
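Both residual fixtures above use the same flattening convention: the raw list interleaves five residual variants per observation, and reshape(-1, 5) recovers one row per observation. A minimal sketch of that round trip, reusing the first two observations of wfs_resids; the mapping of columns to residual types is an assumption for illustration, not taken from the source:

import numpy as np

# First two observations of the wfs_resids fixture, still flattened:
# five residual variants per observation.
raw = [-1.168096, -1.274198, -22.17336, -1.276003, -3.11599,
       1.11769, 1.074522, 93.39722, 1.074787, 4.886437]
fixture = np.asarray(raw).astype(float).reshape(-1, 5)
assert fixture.shape == (2, 5)

# In a regression test each column is compared against one residual
# attribute of a fitted model; the column order here is hypothetical:
# np.testing.assert_allclose(res.resid_deviance, fixture[:, 0], rtol=1e-5)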
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nStatistical tools for time series analysis\n\"\"\"\nfrom __future__ import division\n\nfrom six.moves import range, zip\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom sm2.tools.tools import add_constant\nfrom sm2.compat.numpy import lstsq\n\nfrom sm2.regression.linear_model import OLS\n\nfrom sm2.tsa.tsatools import lagmat, lagmat2ds\n\n# upstream these autocov functions are implemented here in stattools\nfrom sm2.tsa.autocov import acf, ccovf, ccf, acovf, pacf_yw\n\nfrom sm2.tsa.adfvalues import mackinnonp, mackinnoncrit # noqa:F841\nfrom sm2.tsa._bds import bds\nfrom sm2.tsa.unit_root import (kpss, _sigma_est_kpss, coint, # noqa:F841\n adfuller, _autolag, q_stat)\n\n# TODO: bds is not used outside of tests; un-port?\n__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',\n 'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',\n 'adfuller', 'kpss', 'bds',\n 'innovations_algo', 'innovations_filter',\n 'levinson_durbin']\n\n\ndef pacf_ols(x, nlags=40, efficient=True, unbiased=False):\n \"\"\"\n Calculate partial autocorrelations via OLS\n\n Parameters\n ----------\n x : 1d array\n observations of time series for which pacf is calculated\n nlags : int\n Number of lags for which pacf is returned. Lag 0 is not returned.\n efficient : bool, optional\n If true, uses the maximum number of available observations to compute\n each partial autocorrelation. If not, uses the same number of\n observations to compute all pacf values.\n unbiased : bool, optional\n Adjust each partial autocorrelation by n / (n - lag)\n\n Returns\n -------\n pacf : 1d array\n partial autocorrelations, (maxlag,) array corresponding to lags\n 0, 1, ..., maxlag\n\n Notes\n -----\n This solves a separate OLS estimation for each desired lag. Setting\n efficient to True has two effects. First, it uses `nobs - lag`\n observations of estimate each pacf. Second, it re-estimates the mean in\n each regression. If efficient is False, then the data are first demeaned,\n and then `nobs - maxlag` observations are used to estimate each partial\n autocorrelation.\n\n The inefficient estimator appears to have better finite sample properties.\n This option should only be used in time series that are covariance\n stationary.\n\n OLS estimation of the pacf does not guarantee that all pacf values are\n between -1 and 1.\n\n See also\n --------\n sm2.tsa.stattools.pacf\n sm2.tsa.autocov.pacf_yw\n sm2.tsa.autocov.pacf_burg\n\n References\n ----------\n .. [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).\n Time series analysis: forecasting and control. John Wiley & Sons, p. 
66\n \"\"\"\n\n pacf = np.empty(nlags + 1)\n pacf[0] = 1.0\n x = np.squeeze(np.asarray(x))\n if x.ndim != 1:\n raise ValueError('x must be squeezable to a 1-d array')\n\n if efficient:\n xlags, x0 = lagmat(x, nlags, original='sep')\n xlags = add_constant(xlags)\n for k in range(1, nlags + 1):\n params = lstsq(xlags[k:, :k + 1], x0[k:], rcond=None)[0]\n pacf[k] = params[-1]\n\n else:\n x = x - np.mean(x)\n # Create a single set of lags for multivariate OLS\n xlags, x0 = lagmat(x, nlags, original='sep', trim='both')\n for k in range(1, nlags + 1):\n params = lstsq(xlags[:, :k], x0, rcond=None)[0]\n # Last coefficient corresponds to PACF value (see [1])\n pacf[k] = params[-1]\n\n if unbiased:\n n = len(x)\n pacf *= n / (n - np.arange(nlags + 1))\n\n return pacf\n\n\ndef pacf(x, nlags=40, method='ywunbiased', alpha=None):\n \"\"\"\n Partial autocorrelation estimated\n\n Parameters\n ----------\n x : 1d array\n observations of time series for which pacf is calculated\n nlags : int\n largest lag for which the pacf is returned\n method : str\n specifies which method for the calculations to use:\n\n - 'yw' or 'ywunbiased' : Yule-Walker with bias correction in\n denominator for acovf. Default.\n - 'ywm' or 'ywmle' : Yule-Walker without bias correction\n - 'ols' : regression of time series on lags of it and on constant\n - 'ols-inefficient' : regression of time series on lags using a single\n common sample to estimate all pacf coefficients\n - 'ols-unbiased' : regression of time series on lags with a bias\n adjustment\n - 'ld' or 'ldunbiased' : Levinson-Durbin recursion with bias correction\n - 'ldb' or 'ldbiased' : Levinson-Durbin recursion without bias\n correction\n alpha : float, optional\n If a number is given, the confidence intervals for the given level are\n returned. For instance if alpha=.05, 95 % confidence intervals are\n returned where the standard deviation is computed according to\n 1/sqrt(len(x))\n\n Returns\n -------\n pacf : 1d array\n partial autocorrelations, nlags elements, including lag zero\n confint : array, optional\n Confidence intervals for the PACF. Returned if confint is not None.\n\n See also\n --------\n sm2.tsa.autocov.acf\n sm2.tsa.autocov.pacf_yw\n sm2.tsa.autocov.pacf_burg\n sm2.tsa.stattools.pacf_ols\n\n Notes\n -----\n Based on simulation evidence across a range of low-order ARMA models,\n the best methods based on root MSE are Yule-Walker (MLW), Levinson-Durbin\n (MLE) and Burg, respectively. 
The estimators with the lowest bias included\n included these three in addition to OLS and OLS-unbiased.\n\n Yule-Walker (unbiased) and Levinson-Durbin (unbiased) performed\n consistently worse than the other options.\n \"\"\"\n if method in ('ols', 'ols-inefficient', 'ols-unbiased'):\n # GH#5153\n efficient = 'inefficient' not in method\n unbiased = 'unbiased' in method\n ret = pacf_ols(x, nlags=nlags, efficient=efficient, unbiased=unbiased)\n elif method in ('yw', 'ywu', 'ywunbiased', 'yw_unbiased'):\n ret = pacf_yw(x, nlags=nlags, method='unbiased')\n elif method in ('ywm', 'ywmle', 'yw_mle'):\n ret = pacf_yw(x, nlags=nlags, method='mle')\n elif method in ('ld', 'ldu', 'ldunbiased', 'ld_unbiased'):\n acv = acovf(x, unbiased=True, fft=False)\n ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)\n ret = ld_[2]\n\n # FIXME: inconsistent naming with ywmle\n elif method in ('ldb', 'ldbiased', 'ld_biased'):\n acv = acovf(x, unbiased=False, fft=False)\n ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)\n ret = ld_[2]\n else: # pragma: no cover\n raise ValueError('method not available')\n\n if alpha is not None:\n varacf = 1. / len(x) # for all lags >=1\n interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)\n confint = np.array(list(zip(ret - interval, ret + interval)))\n confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0\n return ret, confint\n else:\n return ret\n # TODO: Get rid of multiple-return\n\n\n# TODO: not tested; consider un-porting, as it isn't _really_ used upstream\ndef periodogram(X):\n \"\"\"\n Returns the periodogram for the natural frequency of X\n\n Parameters\n ----------\n X : array-like\n Array for which the periodogram is desired.\n\n Returns\n -------\n pgram : array\n 1./len(X) * np.abs(np.fft.fft(X))**2\n\n References\n ----------\n Brockwell and Davis.\n \"\"\"\n X = np.asarray(X)\n # if kernel == \"bartlett\":\n # w = 1 - np.arange(M + 1.) / M # JP removed integer division\n\n pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2\n pergr[0] = 0. # what are the implications of this?\n return pergr\n\n\n# TODO: belongs in autocov?\n# copied from nitime and sandbox\\tsa\\examples\\try_ld_nitime.py\n# TODO: check what to return, for testing and trying out returns everything\ndef levinson_durbin(s, nlags=10, isacov=False):\n \"\"\"\n Levinson-Durbin recursion for autoregressive processes\n\n Parameters\n ----------\n s : array_like\n If isacov is False, then this is the time series. 
If iasacov is true\n then this is interpreted as autocovariance starting with lag 0\n nlags : integer\n largest lag to include in recursion or order of the autoregressive\n process\n isacov : boolean\n flag to indicate whether the first argument, s, contains the\n autocovariances or the data series.\n\n Returns\n -------\n sigma_v : float\n estimate of the error variance ?\n arcoefs : ndarray\n estimate of the autoregressive coefficients for a model including nlags\n pacf : ndarray\n partial autocorrelation function\n sigma : ndarray\n entire sigma array from intermediate result, last value is sigma_v\n phi : ndarray\n entire phi array from intermediate result, last column contains\n autoregressive coefficients for AR(nlags)\n\n Notes\n -----\n This function returns currently all results, but maybe we drop sigma and\n phi from the returns.\n\n If this function is called with the time series (isacov=False), then the\n sample autocovariance function is calculated with the default options\n (biased, no fft).\n \"\"\"\n s = np.asarray(s)\n order = nlags\n\n if isacov:\n sxx_m = s\n else:\n sxx_m = acovf(s, fft=False)[:order + 1] # TODO: not tested\n\n phi = np.zeros((order + 1, order + 1), 'd')\n sig = np.zeros(order + 1)\n # initial points for the recursion\n phi[1, 1] = sxx_m[1] / sxx_m[0]\n sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]\n for k in range(2, order + 1):\n phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k - 1],\n sxx_m[1:k][::-1])) / sig[k - 1]\n for j in range(1, k):\n phi[j, k] = phi[j, k - 1] - phi[k, k] * phi[k - j, k - 1]\n sig[k] = sig[k - 1] * (1 - phi[k, k]**2)\n\n sigma_v = sig[-1]\n arcoefs = phi[1:, -1]\n pacf_ = np.diag(phi).copy()\n pacf_[0] = 1.\n return sigma_v, arcoefs, pacf_, sig, phi # return everything\n\n\n# TODO: belongs in autocov?\n# GH#5042 upstream\ndef innovations_algo(acov, nobs=None, rtol=None):\n \"\"\"\n Innovations algorithm to convert autocovariances to MA parameters\n\n Parameters\n ----------\n acov : array-like\n Array containing autocovariances including lag 0\n nobs : int, optional\n Number of periods to run the algorithm. If not provided, nobs is\n equal to the length of acovf\n rtol : float, optional\n Tolerance used to check for convergence. Default value is 0 which will\n never prematurely end the algorithm. Checks after 10 iterations and\n stops if sigma2[i] - sigma2[i - 10] < rtol * sigma2[0]. When the\n stopping condition is met, the remaining values in theta and sigma2\n are forward filled using the value of the final iteration.\n\n Returns\n -------\n theta : ndarray\n Innovation coefficients of MA representation. Array is (nobs, q) where\n q is the largest index of a non-zero autocovariance. theta\n corresponds to the first q columns of the coefficient matrix in the\n common description of the innovation algorithm.\n sigma2 : ndarray\n The prediction error variance (nobs,).\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.macrodata.load_pandas()\n >>> rgdpg = data.data['realgdp'].pct_change().dropna()\n >>> acov = sm.tsa.acovf(rgdpg)\n >>> nobs = activity.shape[0]\n >>> theta, sigma2 = innovations_algo(acov[:4], nobs=nobs)\n\n See also\n --------\n innovations_filter\n\n References\n ----------\n Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series and\n forecasting. 
Springer.\n \"\"\"\n acov = np.squeeze(np.asarray(acov))\n if acov.ndim != 1:\n raise ValueError('acov must be 1-d or squeezable to 1-d.')\n\n rtol = 0.0 if rtol is None else rtol\n if not isinstance(rtol, float):\n raise ValueError('rtol must be a non-negative float or None.')\n\n n = acov.shape[0] if nobs is None else int(nobs)\n if n != nobs or nobs < 1:\n raise ValueError('nobs must be a positive integer')\n\n max_lag = int(np.max(np.argwhere(acov != 0)))\n v = np.zeros(n + 1)\n v[0] = acov[0]\n\n # Retain only the relevant columns of theta\n theta = np.zeros((n + 1, max_lag + 1))\n for i in range(1, n):\n for k in range(max(i - max_lag, 0), i):\n sub = 0\n for j in range(max(i - max_lag, 0), k):\n sub += theta[k, k - j] * theta[i, i - j] * v[j]\n theta[i, i - k] = 1. / v[k] * (acov[i - k] - sub)\n v[i] = acov[0]\n for j in range(max(i - max_lag, 0), i):\n v[i] -= theta[i, i - j] ** 2 * v[j]\n\n # Break if v has converged\n if i >= 10:\n if v[i - 10] - v[i] < v[0] * rtol:\n # Forward fill all remaining values\n v[i + 1:] = v[i]\n theta[i + 1:] = theta[i]\n break\n\n theta = theta[:-1, 1:]\n v = v[:-1]\n return theta, v\n\n\n# TODO: belongs in autocov?\n# GH#5042 upstream\ndef innovations_filter(endog, theta):\n \"\"\"\n Filter observations using the innovations algorithm\n\n Parameters\n ----------\n endog : array-like\n The time series to filter (nobs,). Should be demeaned if not mean 0.\n theta : ndarray\n Innovation coefficients of MA representation. Array must be (nobs, q)\n where q order of the MA.\n\n Returns\n -------\n resid : ndarray\n Array of filtered innovations\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.macrodata.load_pandas()\n >>> rgdpg = data.data['realgdp'].pct_change().dropna()\n >>> acov = sm.tsa.acovf(rgdpg)\n >>> nobs = activity.shape[0]\n >>> theta, sigma2 = innovations_algo(acov[:4], nobs=nobs)\n >>> resid = innovations_filter(rgdpg, theta)\n\n See also\n --------\n innovations_algo\n\n References\n ----------\n Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series and\n forecasting. 
Springer.\n \"\"\"\n orig_endog = endog\n endog = np.squeeze(np.asarray(endog))\n if endog.ndim != 1:\n raise ValueError('endog must be 1-d or squeezable to 1-d.')\n\n nobs = endog.shape[0]\n n_theta, k = theta.shape\n if nobs != n_theta:\n raise ValueError('theta must be (nobs, q) where q is the moder order')\n\n is_pandas = isinstance(orig_endog, (pd.DataFrame, pd.Series))\n if is_pandas:\n if len(orig_endog.index) != nobs:\n raise ValueError('If endog is a Series or DataFrame, the index '\n 'must correspond to the number of time series '\n 'observations.')\n\n u = np.empty(nobs)\n u[0] = endog[0]\n for i in range(1, nobs):\n if i < k:\n hat = (theta[i, :i] * u[:i][::-1]).sum()\n else:\n hat = (theta[i] * u[i - k:i][::-1]).sum()\n u[i] = endog[i] + hat\n\n if is_pandas:\n u = pd.Series(u, index=orig_endog.index.copy())\n return u\n\n\ndef grangercausalitytests(x, maxlag, addconst=True, verbose=True):\n \"\"\"four tests for granger non causality of 2 timeseries\n\n all four tests give similar results\n `params_ftest` and `ssr_ftest` are equivalent based on F test which is\n identical to lmtest:grangertest in R\n\n Parameters\n ----------\n x : array, 2d\n data for test whether the time series in the second column Granger\n causes the time series in the first column\n maxlag : integer\n the Granger causality test results are calculated for all lags up to\n maxlag\n verbose : bool\n print results if true\n\n Returns\n -------\n results : dictionary\n all test results, dictionary keys are the number of lags. For each\n lag the values are a tuple, with the first element a dictionary with\n teststatistic, pvalues, degrees of freedom, the second element are\n the OLS estimation results for the restricted model, the unrestricted\n model and the restriction (contrast) matrix for the parameter f_test.\n\n Notes\n -----\n TODO: convert to class and attach results properly\n\n The Null hypothesis for grangercausalitytests is that the time series in\n the second column, x2, does NOT Granger cause the time series in the first\n column, x1. Grange causality means that past values of x2 have a\n statistically significant effect on the current value of x1, taking past\n values of x1 into account as regressors. We reject the null hypothesis\n that x2 does not Granger cause x1 if the pvalues are below a desired size\n of the test.\n\n The null hypothesis for all four test is that the coefficients\n corresponding to past values of the second time series are zero.\n\n 'params_ftest', 'ssr_ftest' are based on F distribution\n\n 'ssr_chi2test', 'lrtest' are based on chi-square distribution\n\n References\n ----------\n http://en.wikipedia.org/wiki/Granger_causality\n Greene: Econometric Analysis\n \"\"\"\n if verbose: # pragma: no cover\n raise NotImplementedError(\"Option `verbose` from upstream is \"\n \"not supported\")\n x = np.asarray(x)\n\n if x.shape[0] <= 3 * maxlag + int(addconst):\n raise ValueError(\"Insufficient observations. 
Maximum allowable \"\n \"lag is {0}\"\n .format(int((x.shape[0] - int(addconst)) / 3) - 1))\n\n resli = {}\n\n for mlg in range(1, maxlag + 1):\n result = {}\n mxlg = mlg\n\n # create lagmat of both time series\n dta = lagmat2ds(x, mxlg, trim='both', dropex=1)\n\n if addconst:\n dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)\n dtajoint = add_constant(dta[:, 1:], prepend=False)\n else:\n # TODO: Whats intended here?\n raise NotImplementedError\n # dtaown = dta[:, 1:mxlg]\n # dtajoint = dta[:, 1:]\n\n # Run ols on both models without and with lags of second variable\n res2down = OLS(dta[:, 0], dtaown).fit()\n res2djoint = OLS(dta[:, 0], dtajoint).fit()\n\n # for ssr based tests see:\n # http://support.sas.com/rnd/app/examples/ets/granger/index.htm\n # the other tests are made-up\n\n # Granger Causality test using ssr (F statistic)\n fgc1 = ((res2down.ssr - res2djoint.ssr) /\n res2djoint.ssr / mxlg * res2djoint.df_resid)\n\n result['ssr_ftest'] = (fgc1,\n stats.f.sf(fgc1, mxlg, res2djoint.df_resid),\n res2djoint.df_resid, mxlg)\n\n # Granger Causality test using ssr (ch2 statistic)\n fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr\n result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)\n\n # likelihood ratio test pvalue:\n lr = -2 * (res2down.llf - res2djoint.llf)\n result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)\n\n # F test that all lag coefficients of exog are zero\n rconstr = np.column_stack((np.zeros((mxlg, mxlg)),\n np.eye(mxlg, mxlg),\n np.zeros((mxlg, 1))))\n ftres = res2djoint.f_test(rconstr)\n result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],\n np.squeeze(ftres.pvalue)[()],\n ftres.df_denom, ftres.df_num)\n\n resli[mxlg] = (result, [res2down, res2djoint, rconstr])\n\n return resli\n\n\ndef _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):\n raise NotImplementedError(\"_safe_arma_fit not ported from \"\n \"upstream\") # pragma: no cover\n\n\ndef arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',\n model_kw={}, fit_kw={}): # pragma: no cover\n raise NotImplementedError(\"arma_order_select_ic not ported from upstream, \"\n \"as it is only used in tests\")\n\n\ndef has_missing(data): # pragma: no cover\n raise NotImplementedError(\"has_missing not ported from upstream; \"\n \"use `np.isnan(data).any()` instead.\")\n",
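The levinson_durbin function above converts an autocovariance sequence into AR coefficients and partial autocorrelations with a simple O(nlags**2) recursion, avoiding an explicit solve of the full Yule-Walker system. A minimal, self-contained sketch of that behavior, assuming the levinson_durbin defined in the module above is in scope; the simulated AR(2) process and its coefficients are illustrative only:

import numpy as np

rng = np.random.RandomState(12345)

# Simulate an AR(2) process: x[t] = 0.6*x[t-1] - 0.3*x[t-2] + e[t]
n = 5000
e = rng.standard_normal(n)
x = np.zeros(n)
for t in range(2, n):
    x[t] = 0.6 * x[t - 1] - 0.3 * x[t - 2] + e[t]

# Biased sample autocovariances for lags 0..2, matching the default
# (biased, no fft) options the docstring above says acovf would use.
xd = x - x.mean()
acov = np.array([np.dot(xd[:n - k], xd[k:]) / n for k in range(3)])

sigma_v, arcoefs, pacf_, sig, phi = levinson_durbin(acov, nlags=2, isacov=True)
print(arcoefs)  # approximately [0.6, -0.3]: the AR coefficients are recovered
print(sigma_v)  # approximately 1.0: the innovation variance

Passing isacov=True keeps the sketch free of the acovf import; with isacov=False the function would compute the same biased autocovariances internally before running the recursion.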
"\"\"\"\nTest Results for discrete models from Stata\n\"\"\"\nimport os\nimport numpy as np\n\n# Discrete Model Tests\n# Note that there is a slight refactor of the classes, so that one dataset\n# might be used for more than one model\n\ncur_dir = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Namespace(object):\n pass\n\n\nclass Anes(object):\n \"\"\"Results are from Stata 11 (checked vs R nnet package).\"\"\"\n def mnlogit_basezero():\n obj = Namespace()\n obj.nobs = 944\n params = [-.01153598, .29771435, -.024945, .08249144, .00519655,\n -.37340167, -.08875065, .39166864, -.02289784, .18104276,\n .04787398, -2.2509132, -.1059667, .57345051, -.01485121,\n -.00715242, .05757516, -3.6655835, -.0915567, 1.2787718,\n -.00868135, .19982796, .08449838, -7.6138431, -.0932846,\n 1.3469616, -.01790407, .21693885, .08095841, -7.0604782,\n -.14088069, 2.0700801, -.00943265, .3219257, .10889408,\n -12.105751]\n obj.params = np.reshape(params, (6, -1), order='F')\n bse = [.0342823657, .093626795, .0065248584, .0735865799,\n .0176336937, .6298376313, .0391615553, .1082386919,\n .0079144618, .0852893563, .0222809297, .7631899491,\n .0570382292, .1585481337, .0113313133, .1262913234,\n .0336142088, 1.156541492, .0437902764, .1288965854,\n .0084187486, .0941250559, .0261963632, .9575809602,\n .0393516553, .1171860107, .0076110152, .0850070091,\n .0229760791, .8443638283, .042138047, .1434089089,\n .0081338625, .0910979921, .025300888, 1.059954821]\n obj.bse = np.reshape(bse, (6, -1), order='F')\n obj.linpred = np.loadtxt(os.path.join(cur_dir, 'yhat_mnlogit.csv'))\n obj.phat = np.loadtxt(os.path.join(cur_dir, 'phat_mnlogit.csv'))\n obj.cov_params = None\n obj.llf = -1461.922747312\n obj.llnull = -1750.34670999\n obj.llr = 576.8479253554\n obj.llr_pvalue = 1.8223179e-102\n obj.prsquared = .1647810465387\n obj.df_model = 30\n obj.df_resid = 944 - 36\n obj.J = 7\n obj.K = 6\n obj.aic = 2995.84549462\n obj.bic = 3170.45003661\n z = [-.3364988051, 3.179798597, -3.823070772, 1.121012042,\n .2946945327, -.5928538661, -2.266269864, 3.618564069,\n -2.893164162, 2.122688754, 2.148652536, -2.949348555,\n -1.857818873, 3.616885888, -1.310634214, -.0566342868,\n 1.712822091, -3.169435381, -2.090799808, 9.920912816,\n -1.031191864, 2.123004903, 3.225576554, -7.951122047,\n -2.370538224, 11.49421878, -2.352389066, 2.552011323,\n 3.523595639, -8.361890935, -3.34331327, 14.43480847,\n -1.159676452, 3.533839715, 4.303962885, -11.42100649]\n obj.z = np.reshape(z, (6, -1), order='F')\n pvalues = [\n 0.7364947525, 0.0014737744, 0.0001317999, 0.2622827367,\n 0.7682272401, 0.5532789548, 0.0234348654, 0.0002962422,\n 0.0038138191, 0.0337799420, 0.0316619538, 0.0031844460,\n 0.0631947400, 0.0002981687, 0.1899813744, 0.9548365214,\n 0.0867452747, 0.0015273542, 0.0365460134, 3.37654e-23,\n 0.3024508550, 0.0337534410, 0.0012571921, 1.84830e-15,\n 0.0177622072, 1.41051e-30, 0.0186532528, 0.0107103038,\n 0.0004257334, 6.17209e-17, 0.0008278439, 3.12513e-47,\n 0.2461805610, 0.0004095694, 0.0000167770, 3.28408e-30]\n obj.pvalues = np.reshape(pvalues, (6, -1), order='F')\n conf_int = [\n [[-0.0787282, 0.0556562],\n [0.1142092, 0.4812195],\n [-0.0377335, -0.0121565],\n [-0.0617356, 0.2267185],\n [-0.0293649, 0.0397580],\n [-1.6078610, 0.8610574]],\n [[-0.1655059, -0.0119954],\n [0.1795247, 0.6038126],\n [-0.0384099, -0.0073858],\n [0.0138787, 0.3482068],\n [0.0042042, 0.0915438],\n [-3.7467380, -0.7550884]],\n [[-0.2177596, 0.0058262],\n [0.2627019, 0.8841991],\n [-0.0370602, 0.0073578],\n [-0.2546789, 0.2403740],\n [-0.0083075, 
0.1234578],\n [-5.9323630, -1.3988040]],\n [[-0.1773841, -0.0057293],\n [1.0261390, 1.5314040],\n [-0.0251818, 0.0078191],\n [0.0153462, 0.3843097],\n [0.0331544, 0.1358423],\n [-9.4906670, -5.7370190]],\n [[-0.1704124, -0.0161568],\n [1.1172810, 1.5766420],\n [-0.0328214, -0.0029868],\n [0.0503282, 0.3835495],\n [0.0359261, 0.1259907],\n [-8.7154010, -5.4055560]],\n [[-0.2234697, -0.0582916],\n [1.7890040, 2.3511560],\n [-0.0253747, 0.0065094],\n [0.1433769, 0.5004745],\n [0.0593053, 0.1584829],\n [-14.1832200, -10.0282800]]]\n obj.conf_int = np.asarray(conf_int)\n\n # margins, dydx(*) predict(outcome(#))\n obj.margeff_dydx_overall = np.array([\n [0.00868085993550, -0.09779854015456, 0.00272556969847,\n -0.01992376579372, -0.00603133322764],\n [0.00699386733148, -0.05022430802614, -0.00211003909752,\n -0.00536980000265, -0.00554366741814],\n [-0.00391040848820, -0.02824717135857, -0.00100551299310,\n 0.00664337806861, 0.00097987356999],\n [-0.00182580888015, -0.00573744730031, -0.00004249256428,\n -0.00546669558488, 0.00054101121854],\n [-0.00098558129923, 0.01985550937033, 0.00047972250012,\n 0.00172605778905, 0.00211291403209],\n [-0.00153469551647, 0.03755346502013, -0.00068531143399,\n 0.00472471794347, 0.00254733486106],\n [-0.00741820702809, 0.12459834487569, 0.00063806819375,\n 0.01766610701188, 0.00539385283759]]).T\n obj.margeff_dydx_overall_se = np.array([\n [.0038581061, .0080471125, .0007068488, .0082318967, .0020261706],\n [.003904378, .0073600286, .000756431, .0084381578, .0020482238],\n [.003137126, .0056813182, .0006601377, .0068932588, .0018481806],\n [.0019427783, .0031904763, .0003865411, .004361789, .0011523221],\n [.0029863227, .0054076092, .0005886612, .0064426365, .0018886818],\n [.0035806552, .0069497362, .000722511, .0078287717, .0022352393],\n [.0033641608, .008376629, .0006774697, .0073505286, .0021660086]]\n ).T\n\n obj.margeff_dydx_mean = np.array([\n [0.01149887431225, -0.13784207091973, 0.00273313385873,\n -0.02542974260540, -0.00855346837482],\n [0.01114846831102, -0.09864273512889, -0.00222435063712,\n -0.01214617126321, -0.00903581444579],\n [-0.00381702868421, -0.05132297961269, -0.00116763216994,\n 0.00624203027060, 0.00021912081810],\n [-0.00233455327258, -0.00928554037343, -0.00000206561214,\n -0.00775415690571, 0.00060004460394],\n [-0.00352579921274, 0.06412187169362, 0.00073938948643,\n 0.00747778063206, 0.00459965010365],\n [-0.00574308219449, 0.11126535089794, -0.00057337915464,\n 0.01467424346725, 0.00641760846097],\n [-0.00722687818452, 0.12170608820238, 0.00049490419675,\n 0.01693601418978, 0.00575285798725]]).T\n obj.margeff_dydx_mean_se = np.array([\n [.0043729758, .0110343353, .0008149907, .0092551389, .0023752071],\n [.004875051, .0124746358, .0009613152, .0105665812, .0026524426],\n [.0040718954, .0103613938, .0008554615, .0089931297, .0024374625],\n [.0026430804, .0070845916, .0005364369, .0057654258, .0015988838],\n [.0037798151, .0103849291, .0007393481, .0082021938, .0023489261],\n [.0045654631, .0130329403, .0009128134, .0100053262, .0028048602],\n [.0027682389, .0113292677, .0005325113, .0061289353, .0017330763]]\n ).T\n\n obj.margeff_dydx_dummy_overall = np.array([\n [0.00549149574321, -0.05348235321783, 0.00298963549049,\n -0.01479461677951, -0.00332167981255, -0.26502967041815],\n [0.00345677928276, -0.00950322030929, -0.00189456107189,\n 0.00033893662061, -0.00314690167350, -0.21040878091828],\n [-0.00645089013284, 0.00401746940204, -0.00083948249351,\n 0.01114202556889, 0.00277069841472, -0.15967397659686],\n 
[-0.00215436802341, -0.00366545199370, -0.00000002297812,\n -0.00457368049644, 0.00065303026027, -0.00094772782001],\n [0.00058038428936, -0.00369080100124, 0.00035948233235,\n -0.00018863693013, 0.00079351293461, 0.12640653743480],\n [0.00217597030999, -0.01279456622853, -0.00091882392767,\n 0.00001651192759, -0.00037998290789, 0.27175070356670],\n [-0.00309932483642, 0.07911868907484, 0.00030378521102,\n 0.00805941631677, 0.00263129901425, 0.23790291475181]]).T\n obj.margeff_dydx_dummy_overall_se = np.array([\n [.0037314453, .0094102332, .000688838, .0079744554, .0019365971,\n .0243914836],\n [.0038215262, .0095938828, .0007410885, .008259353, .0019984087,\n .0317628806],\n [.0031045718, .00785814, .0006504353, .0067892866, .0018060332,\n 0.0262803561],\n [.0019756086, .0051031194, .0003862449, .0043621673, .0011796953,\n .0219999601],\n [.0029714074, .0081732018, .0005715192, .0064742872, .0019130195,\n .0331694192],\n [.0034443743, .0097296187, .0006774867, .0075996454, .0021993881,\n .038600835],\n [.0032003518, .0098741227, .0006335772, .0070902078, .0021003227,\n .0255727127]]).T\n\n obj.margeff_eydx_dummy_overall = np.array([\n [.03939188, -.65758371, .01750922, -.12131806, -.03613241,\n -3.2132513],\n [.02752366, -.383165, -.00830021, -.03652935, -.03286046,\n -1.8741853],\n [-.05006681, -.2719659, -.00626481, .06525323, .01012554,\n -2.0058029],\n [-.05239558, -.22549142, .00025015, -.13104416, .01114517,\n -.27052009],\n [-.00296374, .25627809, .00140513, .03358712, .02296041,\n 1.3302701],\n [.00328283, .2800168, -.0083912, .04332782, .01575863,\n 1.8441023],\n [-.03257068, .98346111, -.00122118, .10847807, .0406456,\n 2.9119099]]).T\n\n obj.margeff_eydx_dummy_overall_se = np.array([\n [.0272085605, .0777760394, .0052427952, .0584011446, .0148618012,\n .5796921383],\n [.0262290023, .0724479385, .005174736, .0567743614, .0144447083,\n .3015738731],\n [.0321415498, .0895589422, .0067480662, .0701460193, .0190451865,\n .3904138447],\n [.0511305319, .1420904068, .0102342163, .1129912244, .0308618233,\n .3693799595],\n [.0340186217, .0991711703, .0065812158, .0737441012, .0212966336,\n .2346982385],\n [.0289250212, .0840662279, .0056743561, .0631772185, .0177278895,\n .2089516714],\n [.0318251305, .1085637405, .0062400589, .0699123044, .0201045606,\n .3727166284]]).T\n\n # taken from gretl\n obj.resid = np.loadtxt(os.path.join(cur_dir, 'mnlogit_resid.csv'),\n delimiter=\",\")\n return obj\n mnlogit_basezero = mnlogit_basezero()\n\n\nclass DiscreteL1(object):\n \"\"\"\n Special results for L1 models\n Uses the Spector data and a script to generate the baseline results\n \"\"\"\n\n def logit():\n \"\"\"\n Results generated with:\n data = sm.datasets.spector.load(as_pandas=False)\n data.exog = sm.add_constant(data.exog, prepend=True)\n alpha = 3 * np.array([0, 1, 1, 1])\n res2 = sm.Logit(data.endog, data.exog).fit_regularized(\n method=\"l1\", alpha=alpha, disp=0, trim_mode='size',\n size_trim_tol=1e-5, acc=1e-10, maxiter=1000)\n \"\"\"\n obj = Namespace()\n obj.params = [-4.10271595, 0., 0.15493781, 0.]\n obj.conf_int = [\n [-9.15205122, 0.94661932],\n [np.nan, np.nan],\n [-0.06539482, 0.37527044],\n [np.nan, np.nan]]\n obj.bse = [2.5762388, np.nan, 0.11241668, np.nan]\n obj.nnz_params = 2\n obj.aic = 42.091439368583671\n obj.bic = 45.022911174183122\n obj.cov_params = [\n [6.63700638, np.nan, -0.28636261, np.nan],\n [np.nan, np.nan, np.nan, np.nan],\n [-0.28636261, np.nan, 0.01263751, np.nan],\n [np.nan, np.nan, np.nan, np.nan]]\n return obj\n logit = logit()\n\n def sweep():\n 
\"\"\"\n Results generated with\n params = np.zeros((3, 4))\n alphas = np.array(\n [[0.1, 0.1, 0.1, 0.1],\n [0.4, 0.4, 0.5, 0.5], [0.5, 0.5, 1, 1]])\n model = sm.Logit(data.endog, data.exog)\n for i in range(3):\n alpha = alphas[i, :]\n res2 = model.fit_regularized(method=\"l1\", alpha=alpha,\n disp=0, acc=1e-10,\n maxiter=1000, trim_mode='off')\n params[i, :] = res2.params\n print params\n \"\"\"\n obj = Namespace()\n obj.params = [\n [-10.37593611, 2.27080968, 0.06670638, 2.05723691],\n [-5.32670811, 1.18216019, 0.01402395, 1.45178712],\n [-3.92630318, 0.90126958, -0., 1.09498178]]\n return obj\n sweep = sweep()\n\n def probit():\n \"\"\"\n Results generated with\n data = sm.datasets.spector.load(as_pandas=False)\n data.exog = sm.add_constant(data.exog, prepend=True)\n alpha = np.array([0.1, 0.2, 0.3, 10])\n res2 = sm.Probit(data.endog, data.exog).fit_regularized(\n method=\"l1\", alpha=alpha, disp=0, trim_mode='auto',\n auto_trim_tol=0.02, acc=1e-10, maxiter=1000)\n \"\"\"\n obj = Namespace()\n nan = np.nan\n obj.params = [-5.40476992, 1.25018458, 0.04744558, 0.]\n obj.conf_int = [\n [-9.44077951, -1.36876033],\n [0.03716721, 2.46320194],\n [-0.09727571, 0.19216687],\n [np.nan, np.nan]]\n obj.bse = [2.05922641, 0.61889778, 0.07383875, np.nan]\n obj.nnz_params = 3\n obj.aic = 38.399773877542927\n obj.bic = 42.796981585942106\n obj.cov_params = [\n [4.24041339, -0.83432592, -0.06827915, nan],\n [-0.83432592, 0.38303447, -0.01700249, nan],\n [-0.06827915, -0.01700249, 0.00545216, nan],\n [nan, nan, nan, nan]]\n return obj\n probit = probit()\n\n def mnlogit():\n \"\"\"\n Results generated with\n anes_data = sm.datasets.anes96.load(as_pandas=False)\n anes_exog = anes_data.exog\n anes_exog = sm.add_constant(anes_exog, prepend=False)\n mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)\n\n alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))\n alpha[-1, :] = 0\n mlogit_l1_res = mlogit_mod.fit_regularized(\n method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,\n acc=1e-10)\n \"\"\"\n obj = Namespace()\n obj.params = [\n [0.00100163, -0.05864195, -0.06147822, -0.04769671, -0.05222987,\n -0.09522432],\n [0., 0.03186139, 0.12048999, 0.83211915, 0.92330292, 1.5680646],\n [-0.0218185, -0.01988066, -0.00808564, -0.00487463, -0.01400173,\n -0.00562079],\n [0., 0.03306875, 0., 0.02362861, 0.05486435, 0.14656966],\n [0., 0.04448213, 0.03252651, 0.07661761, 0.07265266, 0.0967758],\n [0.90993803, -0.50081247, -2.08285102, -5.26132955, -4.86783179,\n -9.31537963]]\n obj.conf_int = [\n [[-0.0646223, 0.06662556],\n [np.nan, np.nan],\n [-0.03405931, -0.00957768],\n [np.nan, np.nan],\n [np.nan, np.nan],\n [0.26697895, 1.55289711]],\n\n [[-0.1337913, 0.01650741],\n [-0.14477255, 0.20849532],\n [-0.03500303, -0.00475829],\n [-0.11406121, 0.18019871],\n [0.00479741, 0.08416684],\n [-1.84626136, 0.84463642]],\n\n [[-0.17237962, 0.04942317],\n [-0.15146029, 0.39244026],\n [-0.02947379, 0.01330252],\n [np.nan, np.nan],\n [-0.02501483, 0.09006785],\n [-3.90379391, -0.26190812]],\n\n [[-0.12938296, 0.03398954],\n [0.62612955, 1.03810876],\n [-0.02046322, 0.01071395],\n [-0.13738534, 0.18464256],\n [0.03017236, 0.12306286],\n [-6.91227465, -3.61038444]],\n\n [[-0.12469773, 0.02023799],\n [0.742564, 1.10404183],\n [-0.02791975, -0.00008371],\n [-0.08491561, 0.19464431],\n [0.0332926, 0.11201273],\n [-6.29331126, -3.44235233]],\n\n [[-0.17165567, -0.01879296],\n [1.33994079, 1.79618841],\n [-0.02027503, 0.00903345],\n [-0.00267819, 0.29581751],\n [0.05343135, 0.14012026],\n [-11.10419107, 
-7.52656819]]]\n\n obj.bse = [\n [0.03348221, 0.03834221, 0.05658338, 0.04167742, 0.03697408,\n 0.03899631],\n [np.nan, 0.09012101, 0.13875269, 0.10509867, 0.09221543,\n 0.11639184],\n [0.00624543, 0.00771564, 0.01091253, 0.00795351, 0.00710116,\n 0.00747679],\n [np.nan, 0.07506769, np.nan, 0.08215148, 0.07131762,\n 0.07614826],\n [np.nan, 0.02024768, 0.02935837, 0.02369699, 0.02008204,\n 0.02211492],\n [0.32804638, 0.68646613, 0.92906957, 0.84233441, 0.72729881,\n 0.91267567]]\n\n obj.nnz_params = 32\n obj.aic = 3019.4391360294126\n obj.bic = 3174.6431733460686\n return obj\n mnlogit = mnlogit()\n\n\nclass Spector(object):\n \"\"\"\n Results are from Stata 11\n \"\"\"\n nobs = 32\n\n def logit():\n obj = Namespace()\n obj.nobs = 32\n obj.params = [2.82611297201, .0951576702557, 2.37868772835,\n -13.0213483201]\n obj.cov_params = [\n [1.59502033639, -.036920566629, .427615725153, -4.57347950298],\n [-.036920566629, .0200375937069, .0149126464275, -.346255757562],\n [.427615725153, .0149126464275, 1.13329715236, -2.35916128427],\n [-4.57347950298, -.346255757562, -2.35916128427, 24.3179625937]]\n obj.bse = [1.26294114526, .141554207662, 1.06456430165, 4.93132462871]\n\n obj.resid_pearson = [-.1652382, -.2515266, -.4800059, -.1630655,\n .8687437, -.1900454, -.165002, -.2331563,\n -.3535812, .6647838, -.1583799, -.4843181,\n -.689527, 2.043449, -.7516119, -.1764176,\n -.2380445, -.2003426, -1.199277, .7164842,\n -.255713, .3242821, -.5646816, -2.400189,\n .4392082, 1.038473, .75747, -.6659256,\n .4336657, .2404583, -1.060033, 2.829577]\n\n obj.resid_dev = [\n -.2321102, -.3502712, -.6439626, -.2290982,\n 1.060478, -.2663844, -.2317827, -.3253788, -.4853875,\n .8555557, -.2225972, -.6491808, -.8819993, 1.813269,\n -.9463985, -.247583, -.3320177, -.2805444, -1.335131,\n .9103027, -.3559217, .4471892, -.744005, -1.955074,\n .5939538, 1.209638, .952332, -.8567857, .5870719, .335292,\n -1.227311, 2.096639]\n\n # from gretl\n obj.resid_generalized = [\n -0.026578, -0.059501, -0.187260,\n -0.025902, 0.430107, -0.034858, -0.026504, -0.051559,\n -0.111127, 0.306489, -0.024470, -0.189997, -0.322240,\n 0.806789, -0.360990, -0.030184, -0.053626, -0.038588,\n -0.589872, 0.339214, -0.061376, 0.095153, -0.241772,\n -0.852091, 0.161709, 0.518867, 0.364579, -0.307219,\n 0.158296, 0.054660, -0.529117, 0.888969]\n\n obj.phat = np.array([\n .02657799236476,\n .05950126051903,\n .18725991249084,\n .02590163610876,\n .56989300251007,\n .03485824912786,\n .02650404907763,\n .05155897513032,\n .11112663894892,\n .69351142644882,\n .02447037212551,\n .18999740481377,\n .32223951816559,\n .1932111531496,\n .36098992824554,\n .03018374741077,\n .05362640321255,\n .03858831897378,\n .58987241983414,\n .66078591346741,\n .06137581542134,\n .90484726428986,\n .24177247285843,\n .85209089517593,\n .8382905125618,\n .48113295435905,\n .63542068004608,\n .30721867084503,\n .84170418977737,\n .94534027576447,\n .52911710739136,\n .1110308393836])\n obj.linpred = np.array([\n -3.6007342338562,\n -2.7604126930237,\n -1.4679137468338,\n -3.6272060871124,\n .28141465783119,\n -3.3209850788116,\n -3.6035962104797,\n -2.9120934009552,\n -2.0792844295502,\n .81658720970154,\n -3.6855175495148,\n -1.4500269889832,\n -.74349880218506,\n -1.429278254509,\n -.57107019424438,\n -3.4698030948639,\n -2.8705959320068,\n -3.2154531478882,\n .36343798041344,\n .66679841279984,\n -2.7273993492126,\n 2.2522828578949,\n -1.1429864168167,\n 1.7510952949524,\n 1.6455633640289,\n -.07550399750471,\n .55554306507111,\n -.81315463781357,\n 
1.6709630489349,\n 2.8504176139832,\n .11660042405128,\n -2.0802545547485])\n obj.llf = -12.8896334653335\n obj.llnull = -20.5917296966173\n obj.df_model = 3\n obj.df_resid = 32 - 4 # TODO: is this right? not reported in stata\n obj.llr = 15.4041924625676\n obj.prsquared = .374038332124624\n obj.llr_pvalue = .00150187761112892\n obj.aic = 33.779266930667\n obj.bic = 39.642210541866\n obj.z = [2.237723415, 0.6722348408, 2.234423721, -2.640537645]\n obj.conf_int = [[.3507938, 5.301432],\n [-.1822835, .3725988],\n [.29218, 4.465195],\n [-22.68657, -3.35613]]\n obj.pvalues = [.0252390974, .5014342039, .0254552063, .0082774596]\n\n # taken from margins command\n obj.margeff_nodummy_dydx = [.36258084688424, .01220841099085,\n .30517768382304]\n obj.margeff_nodummy_dydx_se = [.1094412, .0177942, .0923796]\n obj.margeff_nodummy_dydxmean = [.53385885781692, .01797548988961,\n .44933926079386]\n obj.margeff_nodummy_dydxmean_se = [.237038, .0262369, .1967626]\n obj.margeff_nodummy_dydxmedian = [.25009492465091, .00842091261329,\n .2105003352955]\n obj.margeff_nodummy_dydxmedian_se = [.1546708, .0134314, .0928183]\n obj.margeff_nodummy_dydxzero = [6.252993785e-06, 2.105437138e-07,\n 5.263030788e-06]\n obj.margeff_nodummy_dydxzero_se = [.0000288, 9.24e-07, .000025]\n obj.margeff_nodummy_dyex = [1.1774000792198, .27896245178384,\n .16960002159996]\n obj.margeff_nodummy_dyex_se = [.3616481, .4090679, .0635583]\n obj.margeff_nodummy_dyexmean = [1.6641381583512, .39433730945339,\n .19658592659731]\n obj.margeff_nodummy_dyexmean_se = [.7388917, .5755722, .0860836]\n # NOTE: PSI at median should be a NaN or 'omitted'\n obj.margeff_nodummy_dyexmedian = [.76654095836557, .18947053379898, 0]\n obj.margeff_nodummy_dyexmedian_se = [.4740659, .302207, 0]\n # NOTE: all should be NaN\n obj.margeff_nodummy_dyexzero = [0, 0, 0]\n obj.margeff_nodummy_dyexzero_se = [0, 0, 0]\n\n obj.margeff_nodummy_eydx = [1.8546366266779, .06244722072812,\n 1.5610138123033]\n obj.margeff_nodummy_eydx_se = [.847903, .0930901, .7146715]\n obj.margeff_nodummy_eydxmean = [2.1116143062702, .0710998816585,\n 1.7773072368626]\n obj.margeff_nodummy_eydxmean_se = [1.076109, .1081501, .9120842]\n obj.margeff_nodummy_eydxmedian = [2.5488082240624, .0858205793373,\n 2.1452853812126]\n obj.margeff_nodummy_eydxmedian_se = [1.255377, .1283771, 1.106872]\n obj.margeff_nodummy_eydxzero = [2.8261067189993, .0951574597115,\n 2.3786824653103]\n obj.margeff_nodummy_eydxzero_se = [1.262961, .1415544, 1.064574]\n obj.margeff_nodummy_eyex = [5.4747106798973, 1.3173389907576,\n .44600395466634]\n obj.margeff_nodummy_eyex_se = [2.44682, 1.943525, .1567618]\n obj.margeff_nodummy_eyexmean = [6.5822977203268, 1.5597536538833,\n .77757191612739]\n obj.margeff_nodummy_eyexmean_se = [3.354433, 2.372543, .3990368]\n obj.margeff_nodummy_eyexmedian = [7.8120973525952, 1.9309630350892, 0]\n obj.margeff_nodummy_eyexmedian_se = [3.847731951, 2.888485089, 0]\n\n obj.margeff_nodummy_eyexzero = [0, 0, 0]\n obj.margeff_nodummy_eyexzero_se = [0, 0, 0]\n\n # for below GPA = 2.0, psi = 1\n obj.margeff_nodummy_atexog1 = [.1456333017086, .00490359933927,\n .12257689308426]\n obj.margeff_nodummy_atexog1_se = [.145633, .0111226, .1777101]\n # for below GPA at mean, tuce = 21, psi = 0\n obj.margeff_nodummy_atexog2 = [.25105129214546, .00845311433473,\n .2113052923675]\n obj.margeff_nodummy_atexog2_se = [.1735778, .012017, .0971515]\n\n # must get this from older margeff or i.psi then margins\n obj.margeff_dummy_dydx = [.36258084688424, .01220841099085,\n .35751515254729]\n 
obj.margeff_dummy_dydx_se = [.1094412, .0177942, .1420034]\n obj.margeff_dummy_dydxmean = [.53385885781692, .01797548988961,\n .4564984096959]\n obj.margeff_dummy_dydxmean_se = [.237038, .0262369, .1810537]\n # obj.margeff_dummy_dydxmedian\n # from margeff\n obj.margeff_dummy_count_dydx_median = [0.250110487483923,\n 0.008426867847905,\n 0.441897738279663]\n obj.margeff_dummy_count_dydx_median_se = [.1546736661, .0134551951,\n .1792363708]\n\n # estimate with i.psi for the below then use margins\n obj.margeff_dummy_eydx = [1.8546366266779, .06244722072812,\n 1.5549034398832]\n obj.margeff_dummy_eydx_se = [.847903, .0930901, .7283702]\n # ie\n # margins, eydx(*) at((mean) _all)\n obj.margeff_dummy_eydxmean = [2.1116143062702, .0710998816585,\n 1.6631775707188]\n obj.margeff_dummy_eydxmean_se = [1.076109, .1081501, .801205]\n\n # Factor variables not allowed in below\n # test raises\n # obj.margeff_dummy_dydxzero\n # obj.margeff_dummy_eydxmedian\n # obj.margeff_dummy_eydxzero\n # obj.margeff_dummy_dyex\n # obj.margeff_dummy_dyexmean\n # elf.margeff_dummy_dyexmedian\n # obj.margeff_dummy_dyexzero\n # obj.margeff_dummy_eyex\n # obj.margeff_count_dummy_dydx_median\n # obj.margeff_count_dummy_dydx_median_se\n\n # NOTE: need old version of margeff for nodisc but at option is broken\n # stata command is margeff, count nodisc\n # this can be replicated with the new results by margeff\n # and then using margins for the last value\n obj.margeff_count_dydx = [.3625767598018, .0122068569914, .3051777]\n obj.margeff_count_dydx_se = [.1094379569, .0177869773, .0923796]\n\n # middle value taken from margeff rest from margins\n obj.margeff_count_dydxmean = [.5338588, 0.01797186545386, .4493393]\n obj.margeff_count_dydxmean_se = [.237038, .0262211, .1967626]\n\n # with new version of margeff this is just a call to\n # margeff\n # mat list e(margeff_b), nonames format(%17.16g)\n obj.margeff_count_dummy_dydxoverall = [.362576759801767,\n .012206856991439,\n .357515163621704]\n # AFAICT, an easy way to get se is\n # mata\n # V = st_matrix(\"e(margeff_V)\")\n # se = diagonal(cholesky(diag(V)))\n # last SE taken from margins with i.psi, don't know how they\n # don't know why margeff is different, but trust official results\n obj.margeff_count_dummy_dydxoverall_se = [.1094379569, .0177869773,\n .1420034]\n # .1574340751]\n\n # from new margeff\n obj.margeff_count_dummy_dydxmean = [0.533849340033768,\n 0.017971865453858,\n 0.456498405282412]\n obj.margeff_count_dummy_dydxmean_se = [.2370202503, .0262210796,\n .1810536852]\n\n # for below GPA = 2.0, psi = 1\n obj.margeff_dummy_atexog1 = [.1456333017086, .00490359933927,\n .0494715429937]\n obj.margeff_dummy_atexog1_se = [.145633, .0111226, .0731368]\n # for below GPA at mean, tuce = 21, psi = 0\n obj.margeff_dummy_atexog2 = [.25105129214546, .00845311433473,\n .44265645632553]\n obj.margeff_dummy_atexog2_se = [.1735778, .012017, .1811925]\n # The test for the prediction table was taken from Gretl\n # Gretl Output matched the Stata output here for params and SE\n obj.pred_table = np.array([[18, 3], [3, 8]])\n return obj\n logit = logit()\n\n def probit():\n obj = Namespace()\n obj.nobs = 32\n obj.params = [1.62581025407, .051728948442, 1.42633236818,\n -7.45232041607]\n obj.cov_params = [\n [.481472955383, -.01891350017, .105439226234, -1.1696681354],\n [-.01891350017, .00703757594, .002471864882, -.101172838897],\n [.105439226234, .002471864882, .354070126802, -.594791776765],\n [-1.1696681354, -.101172838897, -.594791776765, 6.46416639958]]\n obj.bse = 
[.693882522754, .083890261293, .595037920474, 2.54247249731]\n obj.llf = -12.8188033249334\n obj.llnull = -20.5917296966173\n obj.df_model = 3\n obj.df_resid = 32 - 4\n obj.llr = 15.5458527433678\n obj.prsquared = .377478069409622\n obj.llr_pvalue = .00140489496775855\n obj.aic = 33.637606649867\n obj.bic = 39.500550261066\n obj.z = [2.343062695, .6166263836, 2.397044489, -2.931131182]\n obj.conf_int = [[.2658255, 2.985795],\n [-.1126929, .2161508],\n [.2600795, 2.592585],\n [-12.43547, -2.469166]]\n obj.pvalues = [.0191261688, .537481188, .0165279168, .0033773013]\n obj.phat = [.0181707, .0530805, .1899263, .0185707, .5545748,\n .0272331, .0185033, .0445714, .1088081, .6631207,\n .0161024, .1935566, .3233282, .1951826, .3563406,\n .0219654, .0456943, .0308513, .5934023, .6571863,\n .0619288, .9045388, .2731908, .8474501, .8341947,\n .488726, .6424073, .3286732, .8400168, .9522446,\n .5399595, .123544]\n obj.linpred = np.array([\n -2.0930860042572,\n -1.615691781044,\n -.87816804647446,\n -2.0842070579529,\n .13722851872444,\n -1.9231110811234,\n -2.0856919288635,\n -1.6999372243881,\n -1.2328916788101,\n .42099541425705,\n -2.1418602466583,\n -.86486464738846,\n -.45841211080551,\n -.85895526409149,\n -.36825761198997,\n -2.0147502422333,\n -1.6881184577942,\n -1.8684275150299,\n .23630557954311,\n .40479621291161,\n -1.538782119751,\n 1.3078554868698,\n -.60319095849991,\n 1.025558590889,\n .97087496519089,\n -.02826354466379,\n .36490100622177,\n -.44357979297638,\n .99452745914459,\n 1.6670187711716,\n .10033150017262,\n -1.1574513912201])\n obj.resid_dev = [-.191509, -.3302762, -.6490455, -.1936247, 1.085867,\n -.2349926, -.1932698, -.3019776, -.4799906, .9064196,\n -.1801855, -.6559291, -.8838201, 1.807661, -.9387071,\n -.2107617, -.3058469, -.2503485, -1.341589, .9162835,\n -.3575735, .447951, -.7988633, -1.939208, .6021435,\n 1.196623, .9407793, -.8927477, .59048, .3128364,\n -1.246147, 2.045071]\n # Stata doesn't have it, but I think it's just oversight\n obj.resid_pearson = None\n # generalized residuals from gretl\n obj.resid_generalized = [-0.045452, -0.114220, -0.334908,\n -0.046321, 0.712624, -0.064538,\n -0.046175, -0.098447, -0.209349,\n 0.550593, -0.040906, -0.340339,\n -0.530763, 1.413373, -0.579170,\n -0.053593, -0.100556, -0.071855,\n -0.954156, 0.559294, -0.130167,\n 0.187523, -0.457597, -1.545643,\n 0.298511, 0.815964, 0.581013,\n -0.538579, 0.289631, 0.104405,\n -0.862836, 1.652638]\n obj.pred_table = np.array([[18, 3], [3, 8]])\n return obj\n probit = probit()\n\n\nclass RandHIE(object):\n \"\"\"\n Results obtained from Stata 11\n \"\"\"\n nobs = 20190\n\n def poisson():\n obj = Namespace()\n obj.nobs = 20190\n obj.params = [\n -.052535114675, -.247086797633, .035290201794,\n -.03457750643, .271713973711, .033941474461, -.012635035534,\n .054056326828, .206115121809, .700352877227]\n obj.cov_params = None\n obj.bse = [.00288398915279, .01061725196728, .00182833684966,\n .00161284852954, .01223913844387, .00056476496963,\n .00925061122826, .01530987068312, .02627928267502,\n .01116266712362]\n predict = np.loadtxt(os.path.join(cur_dir, 'yhat_poisson.csv'),\n delimiter=\",\")\n obj.phat = predict[:, 0]\n obj.linpred = predict[:, 1]\n obj.llf = -62419.588535018\n obj.llnull = -66647.181687959\n obj.df_model = 9\n obj.df_resid = obj.nobs - obj.df_model - 1\n obj.llr = 8455.186305881856\n obj.prsquared = .0634324369893758\n obj.llr_pvalue = 0\n obj.aic = 124859.17707\n obj.bic = 124938.306497\n obj.z = [-18.21612769, -23.27219872, 19.30180524, -21.43878101,\n 
22.20041672, 60.09840604, -1.36585953, 3.53081538,\n 7.84325525, 62.74063980]\n obj.conf_int = [[-.0581876, -.0468826],\n [-0.2678962, -0.2262774],\n [0.0317067, 0.0388737],\n [-0.0377386, -0.0314164],\n [0.2477257, 0.2957022],\n [0.0328346, 0.0350484],\n [-0.0307659, 0.0054958],\n [0.0240495, 0.0840631],\n [0.1546087, 0.2576216],\n [0.6784745, 0.7222313]]\n obj.pvalues = [3.84415e-74, 8.4800e-120, 5.18652e-83, 5.8116e-102,\n 3.4028e-109, 0, .1719830562, .0004142808,\n 4.39014e-15, 0]\n\n # from stata\n # use margins and put i. in front of dummies\n obj.margeff_dummy_overall = [-0.15027280560599, -0.66568074771099,\n 0.10094500919706, -0.09890639687842,\n 0.77721770295360, 0.09708707452600,\n -0.03608195237609, 0.15804581481115,\n 0.65104087597053]\n obj.margeff_dummy_overall_se = [.008273103, .0269856266,\n .0052466639, .0046317555, .0351582169,\n .0016652181, .0263736472, .0457480115,\n .0913901155]\n\n # just use margins\n obj.margeff_nodummy_overall = [-0.15027280560599, -0.70677348928158,\n 0.10094500919705, -0.09890639687842,\n 0.77721770295359, 0.09708707452600,\n -0.03614158359367, 0.15462412033340,\n 0.58957704430148]\n obj.margeff_nodummy_overall_se = [.008273103, .0305119343,\n .0052466639, .0046317555,\n .0351582168, .0016652181,\n .0264611158, .0437974779,\n .0752099666]\n # taken from gretl\n obj.resid = np.loadtxt(os.path.join(cur_dir, 'poisson_resid.csv'),\n delimiter=\",\")\n return obj\n poisson = poisson()\n\n def negativebinomial_nb2_bfgs():\n # R 2.15.1 MASS 7.3-22 glm.nb()\n obj = Namespace()\n obj.nobs = 20190\n obj.params = [\n -0.0579469537244314,\n -0.267787718814838, 0.0412060770911646, -0.0381376804392121,\n 0.268915772213171, 0.0381637446219235, -0.0441338846217674,\n 0.0172521803400544, 0.177960787443151, 0.663556087183864,\n # lnalpha from stata\n 1.292953339909746]\n # alpha and stderr from stata\n obj.lnalpha_std_err = .0143932\n obj.lnalpha = 0.256929012449\n obj.bse = [\n 0.00607085853920512, 0.0226125368090765,\n 0.00405542008282773, 0.00344455937127785, 0.0298855063286547,\n 0.00142421904710063, 0.0199374393307107, 0.0358416931939136,\n 0.0741013728607101, 0.0250354082637892,\n # from stata\n .0186098]\n obj.z = [\n -9.54510030998327, -11.8424447940467,\n 10.1607419822296, -11.071860382846, 8.99820030672628,\n 26.7962605187844, -2.21361850384595, 0.481343898758222,\n 2.40158556546135, 26.5047040652267]\n obj.pvalues = [\n 1.35975947860026e-21,\n 2.35486776488278e-32, 2.96808970292151e-24,\n 1.71796558863781e-28, 2.2944789508802e-19,\n 3.57231639404726e-158, 0.0268550333379416, 0.630272102021494,\n 0.0163241908407114, 8.55476622951356e-155]\n obj.linpred = [\n 0.892904166867786, 0.892904166867786, 0.892904166867786,\n 0.892904166867786, 0.892904166867786, 0.937038051489553,\n 0.937038051489553, 0.937038051489553, 0.937038051489553,\n 0.937038051489553]\n # obj.aic = 86789.3241530713 # This is what R reports\n obj.aic = 86789.32415307125484 # from Stata\n obj.df_resid = 20180\n obj.df_model = 9\n # R conf_int: 1.96 * bse, not profile likelihood via R's confint()\n obj.conf_int = [\n # from Stata\n [-.0698826, -.0460113],\n [-.3122654, -.2233101],\n [.0330781, .049334],\n [-.0448006, -.0314748],\n [.2102246, .3276069],\n [.0352959, .0410316],\n [-.0834356, -.0048321],\n [-.0535908, .0880951],\n [.0324115, .3235101],\n [.6150055, .7121067],\n # from Stata\n [1.256989, 1.329947]]\n obj.bic = 86876.36652289562335 # stata\n obj.llnull = -44199.27443563430279 # stata\n obj.llr = 1631.224718197351 # stata\n obj.llf = -43383.66207653563 # stata\n obj.df_model = 
9.0\n obj.llr_pvalue = 0.0\n return obj\n negativebinomial_nb2_bfgs = negativebinomial_nb2_bfgs()\n\n def negativebinomial_nb1_bfgs():\n # Unpublished implementation intended for R's COUNT package. Sent by\n # J.Hilbe (of Cambridge UP NBin book) and Andrew Robinson to Vincent\n # Arel-Bundock on 2012-12-06.\n obj = Namespace()\n obj.nobs = 20190\n # obj.params = [-0.065309744899923, -0.296016207412261,\n # 0.0411724098925173, -0.0320460533573259, 0.19083354227553,\n # 0.0318566232844115, -0.0331972813313092, -0.0484691550721231,\n # 0.111971860837541, 0.757560143822609,\n # 3.73086958562569]\n # from Stata\n obj.params = [-.065317260803762961, -.296023807893893376,\n .041187021258044826, -.032028789543547605,\n .19065933246421754, .031871625115758778,\n -.033250849053302826, -.04850769174426571,\n .111813637465757343, .757277086555503409,\n 3.731151380800305]\n # lnalpha and lnalpha_std_err are from stata\n obj.lnalpha = 1.316716867203\n obj.lnalpha_std_err = .0168876692\n obj.bse = [\n 0.00536019929563678,\n 0.0196998350459769, 0.00335779098766272, 0.00301145915122889,\n 0.0237984097096245, 0.00107360844112751, 0.0167174614755359,\n 0.0298037989274781, 0.0546838603596457, 0.0214703279904911,\n 0.0630011409376052]\n obj.z = [-12.1842008660173, -15.0263292419148,\n 12.2617548393554, -10.6413707601675, 8.0187518663633,\n 29.6724784046551, -1.98578482623631, -1.62627439508848,\n 2.04762173155154, 35.2840508145997,\n\n # From R, this is alpha/bse(alpha)\n 59.2190796881069\n\n # taken from Stata even though they don't report it\n # lnalpha/bse(lnalpha)\n # 77.968995\n ]\n\n obj.conf_int = [\n [-0.075815736, -0.0548037543],\n [-0.334627884, -0.2574045307],\n [0.034591140, 0.0477536802],\n [-0.037948513, -0.0261435934],\n [0.144188659, 0.2374784253],\n [0.029752351, 0.0339608958],\n [-0.065963506, -0.0004310568],\n [-0.106884601, 0.0099462908],\n [0.004791495, 0.2191522271],\n [3.607387349, 3.8543518219],\n [0.715478301, 0.7996419867]]\n # from Stata\n obj.llf = -43278.75612911823\n obj.llnull = -44199.2744356343\n obj.llr = 1841.036613032149\n obj.aic = 86579.51225823645655\n obj.bic = 86666.55462806082505\n obj.llr_pvalue = 0.0\n obj.df_model = 9.0\n obj.df_resid = 20180.0\n # Smoke tests TODO: check against other stats package\n obj.pvalues = [\n 3.65557865e-034, 5.24431864e-051,\n 1.42921171e-034, 2.09797259e-026, 1.15949461e-015,\n 1.56785415e-193, 4.71746349e-002, 1.04731854e-001,\n 4.07534831e-002, 1.95504975e-272, 0.00000000e+000]\n obj.conf_int = [[-.0758236, -.054811],\n [-.3346363, -.2574113],\n [.0346053, .0477687],\n [-.0379314, -.0261261],\n [.1440119, .2373067],\n [.0297667, .0339766],\n [-.0660178, -.0004839],\n [-.1069241, .0099087],\n [.0046266, .2190007],\n [.7151889, .7993652],\n # from stata for alpha no lnalpha\n [3.609675, 3.856716]]\n # [1.28360034e+00, 1.34979803e+00]]\n obj.linpred = [\n 0.8487497, 0.8487497, 0.8487497, 0.8487497,\n 0.8487497, 0.88201746, 0.88201746, 0.88201746, 0.88201746,\n 0.88201746]\n return obj\n negativebinomial_nb1_bfgs = negativebinomial_nb1_bfgs()\n\n def negativebinomial_geometric_bfgs():\n # Smoke tests TODO: Cross check with other stats package\n obj = Namespace()\n obj.nobs = 20190\n obj.params = [-0.05768894, -0.26646696, 0.04088528, -0.03795503,\n 0.26885821, 0.03802523, -0.04308456, 0.01931675,\n 0.18051684, 0.66469896]\n obj.bse = [0.00553867, 0.02061988, 0.00375937, 0.0030924,\n 0.02701658, 0.00132201, 0.01821646, 0.03271784, 0.06666231,\n 0.02250053]\n obj.pvalues = [2.10310916e-025, 3.34666368e-038, 1.50697768e-027,\n 
1.25468406e-034, 2.48155744e-023, 6.18745348e-182,\n 1.80230194e-002, 5.54919603e-001, 6.77044178e-003,\n 8.44913440e-192]\n obj.z = [-10.41567024, -12.92281571, 10.8755779, -12.27364916,\n 9.95160202, 28.76323587, -2.36514487, 0.59040434,\n 2.70792943, 29.54148082]\n obj.aic = 87101.159433012392 # old value 87101.160011780419\n obj.bic = 87180.288860125467 # old value 87180.289438893495\n obj.df_model = 9.0\n obj.df_resid = 20180.0\n obj.llf = -43540.58000589021\n obj.llnull = -44586.650971362695 # old value -44199.27443567125\n obj.llr = 2092.1425097129977 # old value 1317.3888595620811\n obj.llr_pvalue = 0 # old value 5.4288002863296022e-278\n obj.linpred = [\n 0.89348994, 0.89348994, 0.89348994,\n 0.89348994, 0.89348994, 0.9365745, 0.9365745, 0.9365745,\n 0.9365745, 0.9365745]\n obj.conf_int = [[-0.06854453, -0.04683335],\n [-0.30688118, -0.22605273],\n [0.03351706, 0.04825351],\n [-0.04401602, -0.03189404],\n [0.21590669, 0.32180972],\n [0.03543415, 0.04061632],\n [-0.07878816, -0.00738096],\n [-0.04480903, 0.08344253],\n [0.04986111, 0.31117258],\n [0.62059873, 0.70879919]]\n return obj\n negativebinomial_geometric_bfgs = negativebinomial_geometric_bfgs()\n\n def generalizedpoisson_gp2():\n # Stata gnpoisson function\n obj = Namespace()\n obj.nobs = 20190\n obj.llf = -43326.42720093228\n obj.params = [-0.0604495342, -0.277717228, 0.0438136144,\n -0.0395811744, 0.273044906, 0.0399108677, -0.0552626543,\n -0.001227569488, 0.151980519, 0.651125316, 0.448085318]\n obj.lnalpha_std_err = 0.0125607\n obj.lnalpha = -0.8027716\n obj.bse = [0.00634704, 0.02381906, 0.00443871, 0.00355094,\n 0.0334247, 0.00166303, 0.02102142, 0.0390845,\n 0.087821, 0.02626823, 0.00562825]\n obj.df_model = 9\n obj.aic = 86674.854401865\n obj.conf_int = [[-0.07288951, -0.04800956],\n [-0.32440173, -0.23103272],\n [0.03511389, 0.05251333],\n [-0.04654088, -0.03262147],\n [0.20753371, 0.33855610],\n [0.03665139, 0.04317034],\n [-0.09646387, -0.01406144],\n [-0.07783191, 0.07537652],\n [-0.02014548, 0.32410651],\n [0.59964053, 0.70261011],\n [0.43718883, 0.45925338]]\n obj.bic = 86761.896771689\n obj.wald_pvalue = 4.8795019354e-254\n obj.wald_statistic = 1206.46339591254\n return obj\n generalizedpoisson_gp2 = generalizedpoisson_gp2()\n\n def zero_inflated_poisson_logit():\n obj = Namespace()\n obj.nobs = 20190\n obj.params = [.1033783, -1.045983, -.0821979, .0085692,\n -.0267957, 1.482363]\n obj.llf = -57005.72199826186\n obj.bse = [0.0079912, 0.02235510, .0107145, 0.0018697,\n 0.0014121, 0.0085915]\n obj.conf_int = [[0.0877159, 0.1190408],\n [-1.089798, -1.002167],\n [-0.1031979, -0.061198],\n [0.0049045, 0.0122338],\n [-0.0295635, -0.024028],\n [1.465524, 1.499202]]\n obj.aic = 114023.444\n obj.bic = 114070.9\n return obj\n zero_inflated_poisson_logit = zero_inflated_poisson_logit()\n\n def zero_inflated_poisson_probit():\n obj = Namespace()\n obj.nobs = 20190\n obj.params = [.0622534, -.6429324, -.0821788, .0085673,\n -.0267952, 1.482369]\n obj.llf = -57006.05\n obj.bse = [.0048228, .0132516, .0107142, .0018697,\n .0014121, .0085913]\n obj.conf_int = [[0.0528009, .0717058],\n [-0.6689051, -.6169597],\n [-0.1031783, -.0611793],\n [0.0049027, .0122319],\n [-0.0295629, -.0240275],\n [1.46553, 1.499208]]\n obj.aic = 114024.1\n obj.bic = 114071.6\n return obj\n zero_inflated_poisson_probit = zero_inflated_poisson_probit()\n\n def zero_inflated_poisson_offset():\n obj = Namespace()\n obj.nobs = 20190\n obj.params = [.1052014, -1.082434, -.0922822, .0115868,\n -.0283842, 1.347514]\n obj.llf = -58207.67\n obj.bse = 
[.0081836, .0230043, .0107788, .0018687, .0014162, .0086309]\n obj.conf_int = [[.0891619, .1212409],\n [-1.127522, -1.037347],\n [-.1134082, -.0711561],\n [.0079242, .0152494],\n [-.0311599, -.0256085],\n [1.330598, 1.36443]]\n obj.aic = 116427.3\n obj.bic = 116474.8\n return obj\n zero_inflated_poisson_offset = zero_inflated_poisson_offset()\n\n def zero_inflated_generalized_poisson():\n obj = Namespace()\n obj.nobs = 20190\n obj.params = [3.57337, -17.95797, -0.21380, 0.03847,\n -0.05348, 1.15666, 1.36468]\n obj.llf = -43630.6\n obj.bse = [1.66109, 7.62052, 0.02066, 0.00339,\n 0.00289, 0.01680, 0.01606]\n obj.aic = 87275\n return obj\n zero_inflated_generalized_poisson = zero_inflated_generalized_poisson()\n\n def zero_inflated_negative_binomial():\n obj = Namespace()\n obj.nobs = 20190\n obj.params = [1.883859, -10.280888, -0.204769,\n 1.137985, 1.344457]\n obj.llf = -44077.91\n obj.bse = [0.3653, 1.6694, 0.02178, 0.01163, 0.0217496]\n obj.aic = 88165.81\n return obj\n zero_inflated_negative_binomial = zero_inflated_negative_binomial()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nUnit tests for fit_constrained\nTests for Poisson and Binomial are in discrete\n\n\nCreated on Sun Jan 7 09:21:39 2018\n\nAuthor: Josef Perktold\n\"\"\"\nimport warnings\n\nimport pytest\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_equal\n\nfrom sm2.tools.tools import add_constant\nfrom sm2.tools.sm_exceptions import ValueWarning\n\nfrom sm2.regression.linear_model import OLS, WLS\nfrom sm2.genmod.generalized_linear_model import GLM\nfrom sm2.genmod.families import family\n\n\[email protected]_vetted\nclass ConstrainedCompareMixin(object):\n model_cls = GLM\n fit_kwargs = {}\n idx_c = [1]\n idx_uc = [0, 2, 3, 4]\n\n @classmethod\n def setup_class(cls):\n nobs, k_exog = 100, 5\n np.random.seed(987125)\n x = np.random.randn(nobs, k_exog - 1)\n x = add_constant(x)\n\n y_true = x.sum(1) / 2\n y = y_true + 2 * np.random.randn(nobs)\n cls.endog = y\n cls.exog = x\n cls.idx_p_uc = np.array(cls.idx_uc)\n cls.exogc = xc = x[:, cls.idx_uc]\n mod_ols_c = OLS(y - 0.5 * x[:, 1], xc)\n mod_ols_c.exog_names[:] = ['const', 'x2', 'x3', 'x4']\n\n cls.mod2 = mod_ols_c\n cls.res2 = cls.mod2.fit(**cls.fit_kwargs)\n\n cls.init()\n\n def test_params(self):\n assert_allclose(self.res1.params[self.idx_p_uc],\n self.res2.params,\n rtol=1e-10)\n # Note: atol=1e-15 added by upstream GH#4603, doesnt seem needed\n\n def test_se(self):\n res1 = self.res1\n res2 = self.res2\n\n assert res1.df_resid == res2.df_resid\n assert_allclose(res1.scale,\n res2.scale,\n rtol=1e-10)\n assert_allclose(res1.bse[self.idx_p_uc],\n res2.bse,\n rtol=1e-10)\n assert_allclose(res1.cov_params()[self.idx_p_uc[:, None],\n self.idx_p_uc],\n res2.cov_params(),\n rtol=5e-9)\n # Note: rtol lowered from1e-10 to 5e-9 by GH#4620\n # (for TestGLMBinomialCountConstrainedHC I think)\n\n def test_resid(self):\n assert_allclose(self.res1.resid_response,\n self.res2.resid,\n rtol=1e-10)\n\n\[email protected]_vetted\nclass TestGLMGaussianOffset(ConstrainedCompareMixin):\n @classmethod\n def init(cls):\n mod = cls.model_cls(cls.endog, cls.exogc,\n offset=0.5 * cls.exog[:, cls.idx_c].squeeze())\n mod.exog_names[:] = ['const', 'x2', 'x3', 'x4']\n cls.res1 = mod.fit(**cls.fit_kwargs)\n cls.idx_p_uc = np.arange(cls.exogc.shape[1])\n\n\[email protected]_vetted\nclass TestGLMGaussianConstrained(ConstrainedCompareMixin):\n @classmethod\n def init(cls):\n mod = cls.model_cls(cls.endog, cls.exog)\n mod.exog_names[:] = ['const', 'x1', 'x2', 'x3', 'x4']\n cls.res1 = mod.fit_constrained('x1=0.5', **cls.fit_kwargs)\n\n\[email protected]_vetted\nclass TestGLMGaussianOffsetHC(ConstrainedCompareMixin):\n fit_kwargs = {\"cov_type\": \"HC0\"}\n\n @classmethod\n def init(cls):\n mod = cls.model_cls(cls.endog, cls.exogc,\n offset=0.5 * cls.exog[:, cls.idx_c].squeeze())\n mod.exog_names[:] = ['const', 'x2', 'x3', 'x4']\n cls.res1 = mod.fit(**cls.fit_kwargs)\n cls.idx_p_uc = np.arange(cls.exogc.shape[1])\n\n\[email protected]_vetted\nclass TestGLMGaussianConstrainedHC(ConstrainedCompareMixin):\n fit_kwargs = {\"cov_type\": \"HC0\"}\n\n @classmethod\n def init(cls):\n mod = cls.model_cls(cls.endog, cls.exog)\n mod.exog_names[:] = ['const', 'x1', 'x2', 'x3', 'x4']\n cls.res1 = mod.fit_constrained('x1=0.5', **cls.fit_kwargs)\n\n\[email protected]_vetted\nclass ConstrainedCompareWtdMixin(ConstrainedCompareMixin):\n @classmethod\n def setup_class(cls):\n nobs, k_exog = 100, 5\n np.random.seed(987125)\n x = np.random.randn(nobs, k_exog - 1)\n x = add_constant(x)\n cls.aweights = np.random.randint(1, 10, nobs)\n\n y_true = 
x.sum(1) / 2\n y = y_true + 2 * np.random.randn(nobs)\n cls.endog = y\n cls.exog = x\n cls.idx_p_uc = np.array(cls.idx_uc)\n cls.exogc = xc = x[:, cls.idx_uc]\n mod_ols_c = WLS(y - 0.5 * x[:, 1], xc, weights=cls.aweights)\n mod_ols_c.exog_names[:] = ['const', 'x2', 'x3', 'x4']\n cls.mod2 = mod_ols_c\n cls.res2 = cls.mod2.fit(**cls.fit_kwargs)\n\n cls.init()\n\n\[email protected]_vetted\nclass TestGLMWtdGaussianOffset(ConstrainedCompareWtdMixin):\n @classmethod\n def init(cls):\n mod = cls.model_cls(cls.endog, cls.exogc,\n offset=0.5 * cls.exog[:, cls.idx_c].squeeze(),\n var_weights=cls.aweights)\n mod.exog_names[:] = ['const', 'x2', 'x3', 'x4']\n cls.res1 = mod.fit(**cls.fit_kwargs)\n cls.idx_p_uc = np.arange(cls.exogc.shape[1])\n\n\[email protected]_vetted\nclass TestGLMWtdGaussianConstrained(ConstrainedCompareWtdMixin):\n @classmethod\n def init(cls):\n mod = cls.model_cls(cls.endog, cls.exog, var_weights=cls.aweights)\n mod.exog_names[:] = ['const', 'x1', 'x2', 'x3', 'x4']\n cls.res1 = mod.fit_constrained('x1=0.5', **cls.fit_kwargs)\n\n\[email protected]_vetted\nclass TestGLMWtdGaussianOffsetHC(ConstrainedCompareWtdMixin):\n fit_kwargs = {\"cov_type\": \"HC0\"}\n\n @classmethod\n def init(cls):\n mod = cls.model_cls(cls.endog, cls.exogc,\n offset=0.5 * cls.exog[:, cls.idx_c].squeeze(),\n var_weights=cls.aweights)\n mod.exog_names[:] = ['const', 'x2', 'x3', 'x4']\n cls.res1 = mod.fit(**cls.fit_kwargs)\n cls.idx_p_uc = np.arange(cls.exogc.shape[1])\n\n\[email protected]_vetted\nclass TestGLMWtdGaussianConstrainedHC(ConstrainedCompareWtdMixin):\n fit_kwargs = {\"cov_type\": \"HC0\"}\n\n @classmethod\n def init(cls):\n mod = cls.model_cls(cls.endog, cls.exog, var_weights=cls.aweights)\n mod.exog_names[:] = ['const', 'x1', 'x2', 'x3', 'x4']\n cls.res1 = mod.fit_constrained('x1=0.5', **cls.fit_kwargs)\n\n\[email protected]_vetted\nclass TestGLMBinomialCountConstrained(ConstrainedCompareMixin):\n\n @classmethod\n def setup_class(cls):\n from sm2.datasets.star98 import load\n data = load(as_pandas=False)\n exog = add_constant(data.exog, prepend=True)\n offset = np.ones(len(data.endog))\n exog_keep = exog[:, :-5]\n cls.mod2 = GLM(data.endog, exog_keep, family=family.Binomial(),\n offset=offset)\n\n cls.mod1 = GLM(data.endog, exog, family=family.Binomial(),\n offset=offset)\n cls.init()\n\n @classmethod\n def init(cls):\n cls.res2 = cls.mod2.fit()\n k = cls.mod1.exog.shape[1]\n cls.idx_p_uc = np.arange(k - 5)\n constraints = np.eye(k)[-5:]\n cls.res1 = cls.mod1.fit_constrained(constraints)\n\n def test_resid(self):\n # need to override because res2 does not have resid\n res1 = self.res1\n res2 = self.res2\n assert_allclose(res1.resid_response, res2.resid_response, rtol=1e-8)\n\n def test_glm_attr(self):\n for attr in ['llf', 'null_deviance', 'aic', 'bic', 'df_resid',\n 'df_model', 'pearson_chi2', 'scale']:\n assert_allclose(getattr(self.res1, attr),\n getattr(self.res2, attr), rtol=1e-10)\n\n def test_wald(self):\n res1 = self.res1\n res2 = self.res2\n k1 = len(res1.params)\n k2 = len(res2.params)\n\n use_f = False\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', ValueWarning)\n wt2 = res2.wald_test(np.eye(k2)[1:], use_f=use_f)\n wt1 = res1.wald_test(np.eye(k1)[1:], use_f=use_f)\n assert_allclose(wt2.pvalue, wt1.pvalue, atol=1e-20) # pvalue = 0\n assert_allclose(wt2.statistic, wt1.statistic, rtol=1e-8)\n assert_equal(wt2.df_denom, wt1.df_denom)\n\n use_f = True\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', ValueWarning)\n wt2 = 
res2.wald_test(np.eye(k2)[1:], use_f=use_f)\n wt1 = res1.wald_test(np.eye(k1)[1:], use_f=use_f)\n assert_allclose(wt2.pvalue, wt1.pvalue, rtol=1) # pvalue = 8e-273\n assert_allclose(wt2.statistic, wt1.statistic, rtol=1e-8)\n assert_equal(wt2.df_denom, wt1.df_denom)\n assert_equal(wt2.df_num, wt1.df_num)\n assert_equal(wt2.summary()[-30:], wt1.summary()[-30:])\n\n # smoke # TODO: separate this out\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', ValueWarning)\n # RuntimeWarnings because of truedivide and scipy distributions\n warnings.simplefilter('ignore', RuntimeWarning)\n self.res1.summary()\n # self.res1.summary2()\n\n\[email protected]_vetted\nclass TestGLMBinomialCountConstrainedHC(TestGLMBinomialCountConstrained):\n @classmethod\n def init(cls):\n cls.res2 = cls.mod2.fit(cov_type='HC0')\n k = cls.mod1.exog.shape[1]\n cls.idx_p_uc = np.arange(k - 5)\n constraints = np.eye(k)[-5:]\n cls.res1 = cls.mod1.fit_constrained(constraints, cov_type='HC0')\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 30 16:22:29 2014\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\"\"\"\nfrom __future__ import division\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nimport pandas as pd\nimport patsy\nimport pytest\n\nfrom sm2.discrete.discrete_model import Poisson\nfrom sm2.base._constraints import fit_constrained, fit_constrained_wrap\n\nfrom sm2.tools.tools import add_constant\nfrom sm2 import datasets\nfrom sm2.api import GLM, families\n\nspector_data = datasets.spector.load(as_pandas=False)\nspector_data.exog = add_constant(spector_data.exog, prepend=False)\n\nfrom .results import results_poisson_constrained as results\nfrom .results import results_glm_logit_constrained as reslogit\n\nDEBUG = False\n\n# TODO: This used to be here in string form and loaded with pd.read_csv.\n# Is there an original source/reference for it?\ndata = pd.DataFrame([[1, 1, 32, 52407],\n [2, 1, 104, 43248],\n [3, 1, 206, 28612],\n [4, 1, 186, 12663],\n [5, 1, 102, 5317],\n [1, 0, 2, 18790],\n [2, 0, 12, 10673],\n [3, 0, 28, 5710],\n [4, 0, 28, 2585],\n [5, 0, 31, 1462]],\n columns=['agecat', 'smokes', 'deaths', 'pyears'])\ndata = data.astype(int) # explicit cast is needed for appveyor patsy tests\ndata['logpyears'] = np.log(data['pyears'])\n\n\nclass CheckPoissonConstrainedMixin(object):\n\n def test_basic(self):\n res1 = self.res1\n res2 = self.res2\n assert_allclose(res1[0], res2.params[self.idx], rtol=1e-6)\n # see below Stata has nan, we have zero\n bse1 = np.sqrt(np.diag(res1[1]))\n mask = (bse1 == 0) & np.isnan(res2.bse[self.idx])\n assert_allclose(bse1[~mask],\n res2.bse[self.idx][~mask],\n rtol=1e-6)\n\n # TODO: Split this into reasonably-scoped tests\n def test_basic_method(self):\n if not hasattr(self, 'res1m'):\n raise pytest.skip(\"not available yet\")\n res1 = (self.res1m if not hasattr(self.res1m, '_results')\n else self.res1m._results)\n res2 = self.res2\n assert_allclose(res1.params,\n res2.params[self.idx],\n rtol=1e-6)\n\n # when a parameter is fixed, the Stata has bse=nan, we have bse=0\n mask = (res1.bse == 0) & np.isnan(res2.bse[self.idx])\n assert_allclose(res1.bse[~mask],\n res2.bse[self.idx][~mask],\n rtol=1e-6)\n\n tvalues = res2.params_table[self.idx, 2]\n # when a parameter is fixed, the Stata has tvalue=nan,\n # we have tvalue=inf\n mask = np.isinf(res1.tvalues) & np.isnan(tvalues)\n assert_allclose(res1.tvalues[~mask],\n tvalues[~mask],\n rtol=1e-6)\n pvalues = res2.params_table[self.idx, 3]\n # note most pvalues are very small\n # examples so far agree at 8 or more decimal, but rtol is stricter\n # see above\n mask = (res1.pvalues == 0) & np.isnan(pvalues)\n assert_allclose(res1.pvalues[~mask],\n pvalues[~mask],\n rtol=5e-5)\n\n ci_low = res2.params_table[self.idx, 4]\n ci_upp = res2.params_table[self.idx, 5]\n ci = np.column_stack((ci_low, ci_upp))\n # note most pvalues are very small\n # examples so far agree at 8 or more decimal, but rtol is stricter\n # see above: nan versus value\n assert_allclose(res1.conf_int()[~np.isnan(ci)],\n ci[~np.isnan(ci)],\n rtol=5e-5)\n\n # other\n assert_allclose(res1.llf, res2.ll, rtol=1e-6)\n assert res1.df_model == res2.df_m\n # Stata doesn't have df_resid\n df_r = res2.N - res2.df_m - 1\n assert res1.df_resid == df_r\n\n # TODO: test needs informative name\n def test_other(self):\n # some results may not be valid or available for all models\n if not hasattr(self, 'res1m'):\n raise pytest.skip(\"not available yet\")\n\n res1 = self.res1m\n res2 = self.res2\n\n if 
hasattr(res2, 'll_0'):\n assert_allclose(res1.llnull, res2.ll_0, rtol=1e-6)\n else:\n if DEBUG:\n warnings.warn('test: ll_0 not available, llnull=%6.4F' %\n res1.llnull)\n\n\nclass TestPoissonConstrained1a(CheckPoissonConstrainedMixin):\n res2 = results.results_noexposure_constraint\n idx = [7, 3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical\n model_cls = Poisson\n\n @classmethod\n def setup_class(cls):\n # example without offset\n formula = 'deaths ~ logpyears + smokes + C(agecat)'\n mod = cls.model_cls.from_formula(formula, data=data)\n # get start_params, example fails to converge on one py TravisCI\n k_vars = len(mod.exog_names)\n start_params = np.zeros(k_vars)\n start_params[0] = np.log(mod.endog.mean())\n # if we need it, this is desired params\n # p = np.array([-3.93478643, 1.37276214, 2.33077032, 2.71338891,\n # 2.71338891, 0.57966535, 0.97254074])\n\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n start_params=start_params,\n fit_kwds={'method': 'bfgs',\n 'disp': 0})\n # TODO: Newton fails\n\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr, start_params=start_params,\n method='bfgs', disp=0)\n\n @pytest.mark.smoke\n def test_summary(self):\n # trailing text in summary, assumes it's the first extra string\n # NOTE: see comment about convergence in llnull for self.res1m\n summ = self.res1m.summary()\n assert 'linear equality constraints' in summ.extra_txt\n\n @pytest.mark.skip(reason=\"summary2 not ported from upstream\")\n @pytest.mark.smoke\n def test_summary2(self):\n # trailing text in summary, assumes it's the first extra string\n # NOTE: see comment about convergence in llnull for self.res1m\n summ = self.res1m.summary2()\n assert 'linear equality constraints' in summ.extra_txt[0]\n\n\nclass TestPoissonConstrained1b(CheckPoissonConstrainedMixin):\n res2 = results.results_exposure_constraint\n model_cls = Poisson\n\n @classmethod\n def setup_class(cls):\n cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = cls.model_cls.from_formula(formula, data=data,\n #offset=np.log(data['pyears'].values)\n exposure=data['pyears'].values)\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'method': 'newton',\n 'disp': 0})\n cls.constraints = lc\n # TODO: bfgs fails\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr, method='newton',\n disp=0)\n\n\nclass TestPoissonConstrained1c(CheckPoissonConstrainedMixin):\n res2 = results.results_exposure_constraint\n model_cls = Poisson\n\n @classmethod\n def setup_class(cls):\n cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = cls.model_cls.from_formula(formula, data=data,\n offset=np.log(data['pyears'].values))\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'method': 'newton',\n 'disp': 0})\n cls.constraints = lc\n # TODO: bfgs fails\n\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr, method='newton', disp=0)\n\n\nclass 
TestPoissonNoConstrained(CheckPoissonConstrainedMixin):\n res2 = results.results_exposure_noconstraint\n idx = [6, 2, 3, 4, 5, 0] # 1 is dropped baseline for categorical\n model_cls = Poisson\n\n @classmethod\n def setup_class(cls):\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = cls.model_cls.from_formula(formula, data=data,\n #exposure=data['pyears'].values)\n offset=np.log(data['pyears'].values))\n res1 = mod.fit(disp=0)._results\n # res1 is duplicate check, so we can follow the same pattern\n cls.res1 = (res1.params, res1.cov_params())\n cls.res1m = res1\n\n\nclass TestPoissonConstrained2a(CheckPoissonConstrainedMixin):\n res2 = results.results_noexposure_constraint2\n idx = [7, 3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical\n model_cls = Poisson\n\n @classmethod\n def setup_class(cls):\n # example without offset\n formula = 'deaths ~ logpyears + smokes + C(agecat)'\n mod = cls.model_cls.from_formula(formula, data=data)\n\n # get start_params, example fails to converge on one py TravisCI\n k_vars = len(mod.exog_names)\n start_params = np.zeros(k_vars)\n start_params[0] = np.log(mod.endog.mean())\n # if we need it, this is desired params\n # p = np.array([-9.43762015, 1.52762442, 2.74155711, 3.58730007,\n # 4.08730007, 1.15987869, 0.12111539])\n\n constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n start_params=start_params,\n fit_kwds={'method': 'bfgs', 'disp': 0})\n # TODO: Newton fails\n\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr, start_params=start_params,\n method='bfgs', disp=0)\n\n\nclass TestPoissonConstrained2b(CheckPoissonConstrainedMixin):\n res2 = results.results_exposure_constraint2\n model_cls = Poisson\n\n @classmethod\n def setup_class(cls):\n cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = cls.model_cls.from_formula(formula, data=data,\n #offset=np.log(data['pyears'].values),\n exposure=data['pyears'].values)\n constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'method': 'newton',\n 'disp': 0})\n cls.constraints = lc\n # TODO: bfgs fails to converge. 
overflow somewhere?\n\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr, method='bfgs', disp=0,\n start_params=cls.res1[0])\n\n\nclass TestPoissonConstrained2c(CheckPoissonConstrainedMixin):\n res2 = results.results_exposure_constraint2\n model_cls = Poisson\n\n @classmethod\n def setup_class(cls):\n cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = cls.model_cls.from_formula(formula, data=data,\n offset=np.log(data['pyears'].values))\n\n constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'method': 'newton',\n 'disp': 0})\n cls.constraints = lc\n # TODO: bfgs fails\n\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr,\n method='bfgs', disp=0,\n start_params=cls.res1[0])\n\n\[email protected]_vetted\nclass TestGLMPoissonConstrained1a(CheckPoissonConstrainedMixin):\n res2 = results.results_noexposure_constraint\n idx = [7, 3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical\n model_cls = GLM\n\n @classmethod\n def setup_class(cls):\n # example without offset\n formula = 'deaths ~ logpyears + smokes + C(agecat)'\n mod = cls.model_cls.from_formula(formula, data=data,\n family=families.Poisson())\n\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'atol': 1e-10})\n cls.constraints = lc\n cls.res1m = mod.fit_constrained(constr, atol=1e-10)\n\n\[email protected]_vetted\nclass TestGLMPoissonConstrained1b(CheckPoissonConstrainedMixin):\n res2 = results.results_exposure_constraint\n idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n model_cls = GLM\n\n @classmethod\n def setup_class(cls):\n # example with offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = cls.model_cls.from_formula(formula, data=data,\n family=families.Poisson(),\n offset=np.log(data['pyears'].values))\n\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'atol': 1e-10})\n cls.constraints = lc\n cls.res1m = mod.fit_constrained(constr, atol=1e-10)._results\n\n def test_compare_glm_poisson(self):\n res1 = self.res1m\n res2 = self.res2\n\n formula = 'deaths ~ smokes + C(agecat)'\n mod = Poisson.from_formula(formula, data=data,\n #offset=np.log(data['pyears'].values),\n exposure=data['pyears'].values)\n\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n res2 = mod.fit_constrained(constr, start_params=self.res1m.params,\n method='newton', warn_convergence=False,\n disp=0)\n\n # we get high precision because we use the params as start_params\n\n # basic, just as check that we have the same model\n assert_allclose(res1.params, res2.params, rtol=1e-12)\n assert_allclose(res1.bse, res2.bse, rtol=1e-11)\n\n # check predict, fitted, ...\n\n predicted = res1.predict()\n assert_allclose(predicted, res2.predict(), rtol=1e-10)\n assert_allclose(res1.mu, predicted, rtol=1e-10)\n assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)\n assert_allclose(res2.predict(linear=True), res2.predict(linear=True),\n rtol=1e-10)\n\n\[email protected]_vetted\nclass CheckGLMConstrainedMixin(CheckPoissonConstrainedMixin):\n # add tests for some GLM specific attributes\n\n def 
test_glm(self):\n res2 = self.res2 # reference results\n res1 = self.res1m\n\n #assert_allclose(res1.aic, res2.aic, rtol=1e-10) # far away\n # Stata aic in ereturn and in estat ic are very different\n # we have the same as estat ic\n # see issue GH#1733\n assert_allclose(res1.aic, res2.infocrit[4], rtol=1e-10)\n\n assert_allclose(res1.bic, res2.bic, rtol=1e-10)\n # bic is deviance based\n # TODO: Does this comment refer to the line above, or the now-deleted\n # line below that compares res1.bic to res2.infocrit[5]?\n\n assert_allclose(res1.deviance, res2.deviance, rtol=1e-10)\n # TODO: which chi2 are these\n #assert_allclose(res1.pearson_chi2, res2.chi2, rtol=1e-10)\n\n\[email protected]_vetted\nclass TestGLMLogitConstrained1(CheckGLMConstrainedMixin):\n idx = slice(None)\n # params sequence same as Stata, but Stata reports param = nan\n # and we have param = value = 0\n model_cls = GLM\n\n @classmethod\n def setup_class(cls):\n cls.res2 = reslogit.results_constraint1\n mod1 = cls.model_cls(spector_data.endog, spector_data.exog,\n family=families.Binomial())\n\n constr = 'x1 = 2.8'\n cls.res1m = mod1.fit_constrained(constr)\n\n R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants\n cls.res1 = fit_constrained(mod1, R, q)\n\n\[email protected]_vetted\nclass TestGLMLogitConstrained2(CheckGLMConstrainedMixin):\n idx = slice(None) # params sequence same as Stata\n model_cls = GLM\n res2 = reslogit.results_constraint2\n\n @classmethod\n def setup_class(cls):\n mod1 = cls.model_cls(spector_data.endog, spector_data.exog,\n family=families.Binomial())\n\n constr = 'x1 - x3 = 0'\n cls.res1m = mod1.fit_constrained(constr, atol=1e-10)\n\n R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants\n cls.res1 = fit_constrained(mod1, R, q, fit_kwds={'atol': 1e-10})\n cls.constraints_rq = (R, q)\n\n def test_predict(self):\n # results only available for this case\n res2 = self.res2 # reference results\n res1 = self.res1m\n\n predicted = res1.predict()\n assert_allclose(predicted, res2.predict_mu, atol=1e-7)\n assert_allclose(res1.mu, predicted, rtol=1e-10)\n assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)\n\n @pytest.mark.smoke\n def test_summary(self):\n # trailing text in summary, assumes it's the first extra string\n summ = self.res1m.summary()\n assert 'linear equality constraints' in summ.extra_txt\n\n @pytest.mark.skip(reason=\"summary2 not ported from upstream\")\n @pytest.mark.smoke\n def test_summary2(self):\n summ = self.res1m.summary2()\n assert 'linear equality constraints' in summ.extra_txt[0]\n\n def test_fit_constrained_wrap(self):\n # minimal test\n res2 = self.res2 # reference results\n\n res_wrap = fit_constrained_wrap(self.res1m.model, self.constraints_rq)\n assert_allclose(res_wrap.params, res2.params, rtol=1e-6)\n assert_allclose(res_wrap.params, res2.params, rtol=1e-6)\n\n\[email protected]_vetted\nclass TestGLMLogitConstrained2HC(CheckGLMConstrainedMixin):\n idx = slice(None) # params sequence same as Stata\n model_cls = GLM\n res2 = reslogit.results_constraint2_robust\n\n @classmethod\n def setup_class(cls):\n mod1 = cls.model_cls(spector_data.endog, spector_data.exog,\n family=families.Binomial())\n\n # not used to match Stata for HC\n # nobs, k_params = mod1.exog.shape\n # k_params -= 1 # one constraint\n cov_type = 'HC0'\n cov_kwds = {'scaling_factor': 32 / 31.}\n # looks like nobs / (nobs - 1) and not (nobs - 1.) 
/ (nobs - k_params)}\n constr = 'x1 - x3 = 0'\n cls.res1m = mod1.fit_constrained(constr, cov_type=cov_type,\n cov_kwds=cov_kwds, atol=1e-10)\n\n R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants\n cls.res1 = fit_constrained(mod1, R, q, fit_kwds={'atol': 1e-10,\n 'cov_type': cov_type,\n 'cov_kwds': cov_kwds})\n cls.constraints_rq = (R, q)\n",
"\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_equal\n\nfrom sm2.stats import contrast\n\n\[email protected](reason=\"Contrast not ported from upstream\")\[email protected]_vetted\nclass TestContrast(object):\n @classmethod\n def setup_class(cls):\n np.random.seed(54321)\n cls.X = np.random.standard_normal((40, 10))\n\n def test_contrast1(self):\n term = np.column_stack((self.X[:, 0], self.X[:, 2]))\n c = contrast.Contrast(term, self.X)\n test_contrast = [[1] + [0] * 9, [0] * 2 + [1] + [0] * 7]\n assert_almost_equal(test_contrast, c.contrast_matrix)\n\n def test_contrast2(self):\n zero = np.zeros((40,))\n term = np.column_stack((zero, self.X[:, 2]))\n c = contrast.Contrast(term, self.X)\n test_contrast = [0] * 2 + [1] + [0] * 7\n assert_almost_equal(test_contrast, c.contrast_matrix)\n\n def test_contrast3(self):\n P = np.dot(self.X, np.linalg.pinv(self.X))\n resid = np.identity(40) - P\n noise = np.dot(resid, np.random.standard_normal((40, 5)))\n term = np.column_stack((noise, self.X[:, 2]))\n c = contrast.Contrast(term, self.X)\n assert_equal(c.contrast_matrix.shape, (10,))\n # TODO: this should actually test the value of the contrast,\n # not only its dimension\n\n def test_estimable(self):\n X2 = np.column_stack((self.X, self.X[:, 5]))\n c = contrast.Contrast(self.X[:, 5], X2)\n # TODO: I don't think this should be estimable? isestimable correct?\n # TODO: do something with c?\n",
"\"\"\"\nGenerate data sets for testing OLS.fit_regularized\n\nAfter running this script, rerun lasso_r_results.R in R to rebuild the\nresults file \"glmnet_r_results.py\".\n\nCurrently only tests OLS. Our implementation covers GLS, but it's not\nclear if glmnet does.\n\"\"\"\nimport os\n\nimport numpy as np\n\nn = 300\np = 5\n\nnp.random.seed(83423)\n\nexog = np.random.normal(size=(n, p))\nparams = (-1.)**np.arange(p)\nparams[::3] = 0\nexpval = np.dot(exog, params)\nendog = expval + np.random.normal(size=n)\ndata = np.concatenate((endog[:, None], exog), axis=1)\ndata = np.around(100 * data)\n\nhere = os.path.dirname(__file__)\npath = os.path.join(here, \"results\", \"lasso_data.csv\")\nnp.savetxt(path, data, fmt=\"%.0f\", delimiter=\",\")\n# used in test_regression\n"
] | [
[
"numpy.dot",
"numpy.random.seed",
"numpy.linspace",
"numpy.linalg.inv",
"numpy.eye",
"numpy.testing.assert_almost_equal",
"numpy.random.normal",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.column_stack"
],
[
"numpy.asarray",
"numpy.array"
],
[
"numpy.diag",
"scipy.stats.norm.ppf",
"numpy.dot",
"numpy.sqrt",
"numpy.fft.fft",
"numpy.asarray",
"scipy.stats.chi2.sf",
"numpy.arange",
"numpy.eye",
"numpy.squeeze",
"numpy.argwhere",
"numpy.mean",
"numpy.zeros",
"scipy.stats.f.sf",
"numpy.empty"
],
[
"numpy.reshape",
"numpy.array",
"numpy.asarray"
],
[
"numpy.testing.assert_equal",
"numpy.random.seed",
"numpy.arange",
"numpy.eye",
"numpy.random.randn",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.diag",
"numpy.log",
"numpy.isnan",
"pandas.DataFrame",
"numpy.column_stack",
"numpy.zeros",
"numpy.isinf",
"numpy.testing.assert_allclose"
],
[
"numpy.testing.assert_equal",
"numpy.random.seed",
"numpy.random.standard_normal",
"numpy.testing.assert_almost_equal",
"numpy.linalg.pinv",
"numpy.identity",
"numpy.column_stack",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.random.seed",
"numpy.around",
"numpy.arange",
"numpy.concatenate",
"numpy.random.normal",
"numpy.savetxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
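The record above bundles several sm2 test modules (sm2 is a statsmodels fork) that exercise the `fit_constrained` API for Poisson and GLM models against Stata reference numbers. As a hedged illustration of the interface those tests target — assuming the statsmodels API that sm2 mirrors, and using simulated data with made-up column names `x1`, `x2`, `y` rather than the tests' fixtures — a minimal sketch:

import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.RandomState(0)
df = pd.DataFrame({"x1": rng.randn(200), "x2": rng.randn(200)})
# simulated Poisson counts; coefficients here are arbitrary
df["y"] = rng.poisson(np.exp(0.3 * df["x1"] + 0.3 * df["x2"]))

# GLM with a linear equality constraint, the pattern the tests above check;
# the constraint string is parsed against the design's column names
mod = sm.GLM.from_formula("y ~ x1 + x2", data=df,
                          family=sm.families.Poisson())
res = mod.fit_constrained("x1 - x2 = 0")
print(res.params)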
wguanicedew/qiskit-terra | [
"f8e4fcb53e328b8b17762fc8df0a8d0a44da8d9a"
] | [
"qiskit/extensions/standard/u1.py"
] | [
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nDiagonal single qubit gate.\n\"\"\"\nimport numpy\nfrom qiskit.circuit import ControlledGate\nfrom qiskit.circuit import Gate\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit import QuantumRegister\n\n\n# pylint: disable=cyclic-import\nclass U1Gate(Gate):\n \"\"\"Diagonal single-qubit gate.\"\"\"\n\n def __init__(self, theta, label=None):\n \"\"\"Create new diagonal single-qubit gate.\"\"\"\n super().__init__(\"u1\", 1, [theta], label=label)\n\n def _define(self):\n from qiskit.extensions.standard.u3 import U3Gate\n definition = []\n q = QuantumRegister(1, \"q\")\n rule = [\n (U3Gate(0, 0, self.params[0]), [q[0]], [])\n ]\n for inst in rule:\n definition.append(inst)\n self.definition = definition\n\n def control(self, num_ctrl_qubits=1, label=None):\n \"\"\"Controlled version of this gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n if num_ctrl_qubits == 1:\n return Cu1Gate(*self.params)\n return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label)\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n return U1Gate(-self.params[0])\n\n def to_matrix(self):\n \"\"\"Return a Numpy.array for the U3 gate.\"\"\"\n lam = self.params[0]\n lam = float(lam)\n return numpy.array([[1, 0], [0, numpy.exp(1j * lam)]], dtype=complex)\n\n\ndef u1(self, theta, q): # pylint: disable=invalid-name\n \"\"\"Apply u1 with angle theta to q.\"\"\"\n return self.append(U1Gate(theta), [q], [])\n\n\nQuantumCircuit.u1 = u1\n\n\nclass Cu1Gate(ControlledGate):\n \"\"\"controlled-u1 gate.\"\"\"\n\n def __init__(self, theta):\n \"\"\"Create new cu1 gate.\"\"\"\n super().__init__(\"cu1\", 2, [theta], num_ctrl_qubits=1)\n self.base_gate = U1Gate(theta)\n\n def _define(self):\n \"\"\"\n gate cu1(lambda) a,b\n { u1(lambda/2) a; cx a,b;\n u1(-lambda/2) b; cx a,b;\n u1(lambda/2) b;\n }\n \"\"\"\n from qiskit.extensions.standard.x import CnotGate\n definition = []\n q = QuantumRegister(2, \"q\")\n rule = [\n (U1Gate(self.params[0] / 2), [q[0]], []),\n (CnotGate(), [q[0], q[1]], []),\n (U1Gate(-self.params[0] / 2), [q[1]], []),\n (CnotGate(), [q[0], q[1]], []),\n (U1Gate(self.params[0] / 2), [q[1]], [])\n ]\n for inst in rule:\n definition.append(inst)\n self.definition = definition\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n return Cu1Gate(-self.params[0])\n\n\ndef cu1(self, theta, ctl, tgt):\n \"\"\"Apply cu1 from ctl to tgt with angle theta.\"\"\"\n return self.append(Cu1Gate(theta), [ctl, tgt], [])\n\n\nQuantumCircuit.cu1 = cu1\n"
] | [
[
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
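The u1.py file in the qiskit-terra record above monkey-patches `u1` and `cu1` onto QuantumCircuit. A minimal usage sketch, assuming a terra version contemporary with this snapshot (these circuit methods were later deprecated in favor of `p`/`cp`):

from math import pi
from qiskit import QuantumCircuit

qc = QuantumCircuit(2)
qc.u1(pi / 4, 0)      # diagonal phase rotation on qubit 0
qc.cu1(pi / 2, 0, 1)  # controlled-u1, control qubit 0, target qubit 1
print(qc.draw())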
ptigwe/treex | [
"c46687376ccc50c8fea6cb8617e22e4b4dd1924a"
] | [
"treex/metrics/metrics.py"
] | [
"import typing as tp\n\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport treeo as to\n\nfrom treex import types, utils\nfrom treex.metrics.metric import Metric\n\n\nclass Metrics(Metric):\n metrics: tp.Dict[str, Metric]\n\n def __init__(\n self,\n metrics: tp.Any,\n on: tp.Optional[types.IndexLike] = None,\n name: tp.Optional[str] = None,\n dtype: tp.Optional[jnp.dtype] = None,\n ):\n super().__init__(on=on, name=name, dtype=dtype)\n\n names: tp.Set[str] = set()\n\n def get_name(path, metric):\n name = utils._get_name(metric)\n return f\"{path}/{name}\" if path else name\n\n self.metrics = {\n utils._unique_name(names, get_name(path, metric)): metric\n for path, metric in utils._flatten_names(metrics)\n }\n\n def update(self, **kwargs) -> None:\n for name, metric in self.metrics.items():\n arg_names = utils._function_argument_names(metric.update)\n\n if arg_names is None:\n metric_kwargs = kwargs\n else:\n metric_kwargs = {arg: kwargs[arg] for arg in arg_names if arg in kwargs}\n\n metric.update(**metric_kwargs)\n\n def compute(self) -> tp.Dict[str, jnp.ndarray]:\n outputs = {}\n names = set()\n\n for name, metric in self.metrics.items():\n\n value = metric.compute()\n\n for path, value in utils._flatten_names(value):\n name = f\"{name}/{path}\" if path else name\n name = utils._unique_name(names, name)\n\n outputs[name] = value\n\n return outputs\n\n def __call__(self, **kwargs) -> tp.Dict[str, jnp.ndarray]:\n return super().__call__(**kwargs)\n\n\nclass AuxMetrics(Metric):\n totals: tp.Dict[str, jnp.ndarray] = types.MetricState.node()\n counts: tp.Dict[str, jnp.ndarray] = types.MetricState.node()\n\n def __init__(\n self,\n aux_metrics: tp.Any,\n on: tp.Optional[types.IndexLike] = None,\n name: tp.Optional[str] = None,\n dtype: tp.Optional[jnp.dtype] = None,\n ):\n super().__init__(on=on, name=name, dtype=dtype)\n logs = self.as_logs(aux_metrics)\n self.totals = {name: jnp.array(0.0, dtype=jnp.float32) for name in logs}\n self.counts = {name: jnp.array(0, dtype=jnp.uint32) for name in logs}\n\n def update(self, aux_metrics: tp.Any) -> None:\n logs = self.as_logs(aux_metrics)\n\n self.totals = {\n name: (self.totals[name] + logs[name]).astype(self.totals[name].dtype)\n for name in self.totals\n }\n self.counts = {\n name: (self.counts[name] + np.prod(logs[name].shape)).astype(\n self.counts[name].dtype\n )\n for name in self.counts\n }\n\n def compute(self) -> tp.Dict[str, jnp.ndarray]:\n return {name: self.totals[name] / self.counts[name] for name in self.totals}\n\n def __call__(self, aux_metrics: tp.Any) -> tp.Dict[str, jnp.ndarray]:\n return super().__call__(aux_metrics=aux_metrics)\n\n @staticmethod\n def metric_name(field_info: to.FieldInfo) -> str:\n return (\n field_info.value.name\n if isinstance(field_info.value, types.Named)\n else field_info.name\n if field_info.name is not None\n else \"aux_metric\"\n )\n\n def as_logs(self, tree: tp.Any) -> tp.Dict[str, jnp.ndarray]:\n\n names: tp.Set[str] = set()\n\n with to.add_field_info():\n fields_info: tp.List[to.FieldInfo] = jax.tree_flatten(\n tree,\n is_leaf=lambda x: isinstance(x, types.Named)\n and not isinstance(x.value, to.Nothing),\n )[0]\n\n # pretend Named values are leaves\n for i, x in enumerate(fields_info):\n if isinstance(x, types.Named):\n field_info = x.value\n field_info.value = types.Named(x.name, field_info.value)\n fields_info[i] = field_info\n\n metrics = {\n self.metric_name(field_info): field_info.value.value\n if isinstance(field_info.value, types.Named)\n else field_info.value\n for field_info 
in fields_info\n }\n metrics = {\n utils._unique_name(names, name): value for name, value in metrics.items()\n }\n\n return metrics\n"
] | [
[
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
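The `Metrics.update` method in the treex record above routes keyword arguments to each child metric by inspecting that metric's own `update` signature, so callers can pass one flat kwargs dict. The following self-contained sketch (plain Python, not the treex API itself; the class and argument names are invented for illustration) shows the routing idea:

import inspect

class Accuracy:
    def update(self, preds, target):
        print("accuracy sees:", preds, target)

class AverageLoss:
    def update(self, loss):
        print("loss sees:", loss)

def update_all(metrics, **kwargs):
    # Pass each metric only the kwargs its own update() declares,
    # mirroring what utils._function_argument_names enables above.
    for m in metrics:
        accepted = inspect.signature(m.update).parameters
        m.update(**{k: v for k, v in kwargs.items() if k in accepted})

update_all([Accuracy(), AverageLoss()], preds=1, target=1, loss=0.25)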
danhartfiction/LedFx | [
"eaf40ab180ef7e8f4f769193b35b3ffd5fe2a340"
] | [
"ledfx/effects/drano(Reactive).py"
] | [
"from ledfx.effects.audio import AudioReactiveEffect, FREQUENCY_RANGES\nfrom ledfx.effects.colorrainbow import ColorRainbowEffect \nimport voluptuous as vol\nimport numpy as np\nimport time\nimport statistics\nimport requests\nimport threading\n\nclass DranoBeatAudioEffect(AudioReactiveEffect, ColorRainbowEffect):\n\n NAME = \"DranoBeat\"\n CONFIG_SCHEMA = vol.Schema({\n vol.Optional('frequency_range', description='Frequency range for the beat detection', default = 'bass'): vol.In(list(FREQUENCY_RANGES.keys())),\n })\n\n def config_updated(self, config):\n self._frequency_range = np.linspace(\n FREQUENCY_RANGES[self.config['frequency_range']].min,\n FREQUENCY_RANGES[self.config['frequency_range']].max,\n 20)\n\n def updateThread(self):\n self.getBeat()\n if not hasattr(self, 'beatThreadStart'):\n print(\"afa\")\n self.beatThreadStart = True\n threading.Timer(self.next_beat - time.time(), self.beatThread).start()\n self.i = 0\n threading.Timer(2, self.updateThread).start()\n\n def beatThread(self):\n self.i += 1\n print(\"BEAT {}!\".format(self.i))\n self.pixels = self.apply_rainbow(True)\n now = time.time()\n if self.next_beat - 60/self.bpm < now:\n self.next_beat += 60/self.bpm\n print(\"next in {}\".format(self.next_beat - now))\n threading.Timer(self.next_beat - now, self.beatThread).start()\n self.faderThreadStart = True\n threading.Timer(.1, self.fader).start()\n \n def fader(self):\n# print(\"fading\")\n self.pixels = np.zeros(shape=(self.pixel_count, 3))\n\n def getBeat(self):\n r = requests.get(\"http://127.0.0.1:5000/\")\n data = r.text.split(':')\n self.next_beat = float(data[0])\n self.bpm = float(data[1])\n# self.next_beat = time.time() + 1\n# self.bpm = 60\n\n def audio_data_updated(self, data):\n if not hasattr(self, 'colormap'):\n self.colormap = np.zeros(shape=(self.pixel_count, 3)) \n self.updateThread()\n"
] | [
[
"numpy.zeros",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
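The DranoBeat effect in the LedFx record above schedules LED flashes by advancing a server-reported beat timestamp in steps of 60/bpm whenever it lags less than one period ahead of the current time. A self-contained sketch of that arithmetic (illustration only; the bpm and timestamps are made up):

import time

bpm = 120.0
next_beat = time.time()      # pretend the beat server reported "now"
now = time.time() + 1.7      # some later wall-clock moment
# Same test the effect applies after each flash, repeated until
# next_beat lies at least one beat period in the future:
while next_beat - 60.0 / bpm < now:
    next_beat += 60.0 / bpm
print("seconds until next beat: %.3f" % (next_beat - now))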
vanessagraber/bilby | [
"80ee2d123a913d881f2a790b04e2939c46584d27"
] | [
"bilby/core/prior.py"
] | [
"from __future__ import division\n\nimport os\nfrom collections import OrderedDict\nfrom future.utils import iteritems\n\nimport numpy as np\nimport scipy.stats\nfrom scipy.integrate import cumtrapz\nfrom scipy.interpolate import interp1d\nfrom scipy.special import erf, erfinv\n\n# Keep import bilby statement, it is necessary for some eval() statements\nimport bilby # noqa\nfrom .utils import logger, infer_args_from_method, check_directory_exists_and_if_not_mkdir\n\n\nclass PriorDict(OrderedDict):\n def __init__(self, dictionary=None, filename=None):\n \"\"\" A set of priors\n\n Parameters\n ----------\n dictionary: dict, None\n If given, a dictionary to generate the prior set.\n filename: str, None\n If given, a file containing the prior to generate the prior set.\n \"\"\"\n OrderedDict.__init__(self)\n if isinstance(dictionary, dict):\n self.from_dictionary(dictionary)\n elif type(dictionary) is str:\n logger.debug('Argument \"dictionary\" is a string.' +\n ' Assuming it is intended as a file name.')\n self.from_file(dictionary)\n elif type(filename) is str:\n self.from_file(filename)\n elif dictionary is not None:\n raise ValueError(\"PriorDict input dictionary not understood\")\n\n self.convert_floats_to_delta_functions()\n\n def to_file(self, outdir, label):\n \"\"\" Write the prior distribution to file.\n\n Parameters\n ----------\n outdir: str\n output directory name\n label: str\n Output file naming scheme\n \"\"\"\n\n check_directory_exists_and_if_not_mkdir(outdir)\n prior_file = os.path.join(outdir, \"{}.prior\".format(label))\n logger.debug(\"Writing priors to {}\".format(prior_file))\n with open(prior_file, \"w\") as outfile:\n for key in self.keys():\n outfile.write(\n \"{} = {}\\n\".format(key, self[key]))\n\n def from_file(self, filename):\n \"\"\" Reads in a prior from a file specification\n\n Parameters\n ----------\n filename: str\n Name of the file to be read in\n \"\"\"\n\n prior = {}\n with open(filename, 'r') as f:\n for line in f:\n if line[0] == '#':\n continue\n elements = line.split('=')\n key = elements[0].replace(' ', '')\n val = '='.join(elements[1:])\n prior[key] = eval(val)\n self.update(prior)\n\n def from_dictionary(self, dictionary):\n for key, val in iteritems(dictionary):\n if isinstance(val, str):\n try:\n prior = eval(val)\n if isinstance(prior, (Prior, float, int, str)):\n val = prior\n except (NameError, SyntaxError, TypeError):\n logger.debug(\n \"Failed to load dictionary value {} correctlty\"\n .format(key))\n pass\n self[key] = val\n\n def convert_floats_to_delta_functions(self):\n \"\"\" Convert all float parameters to delta functions \"\"\"\n for key in self:\n if isinstance(self[key], Prior):\n continue\n elif isinstance(self[key], float) or isinstance(self[key], int):\n self[key] = DeltaFunction(self[key])\n logger.debug(\n \"{} converted to delta function prior.\".format(key))\n else:\n logger.debug(\n \"{} cannot be converted to delta function prior.\"\n .format(key))\n\n def fill_priors(self, likelihood, default_priors_file=None):\n \"\"\"\n Fill dictionary of priors based on required parameters of likelihood\n\n Any floats in prior will be converted to delta function prior. 
Any\n required, non-specified parameters will use the default.\n\n Note: if `likelihood` has `non_standard_sampling_parameter_keys`, then\n this will set-up default priors for those as well.\n\n Parameters\n ----------\n likelihood: bilby.likelihood.GravitationalWaveTransient instance\n Used to infer the set of parameters to fill the prior with\n default_priors_file: str, optional\n If given, a file containing the default priors.\n\n\n Returns\n -------\n prior: dict\n The filled prior dictionary\n\n \"\"\"\n\n self.convert_floats_to_delta_functions()\n\n missing_keys = set(likelihood.parameters) - set(self.keys())\n\n for missing_key in missing_keys:\n if not self.test_redundancy(missing_key):\n default_prior = create_default_prior(missing_key, default_priors_file)\n if default_prior is None:\n set_val = likelihood.parameters[missing_key]\n logger.warning(\n \"Parameter {} has no default prior and is set to {}, this\"\n \" will not be sampled and may cause an error.\"\n .format(missing_key, set_val))\n else:\n self[missing_key] = default_prior\n\n for key in self:\n self.test_redundancy(key)\n\n def sample(self, size=None):\n \"\"\"Draw samples from the prior set\n\n Parameters\n ----------\n size: int or tuple of ints, optional\n See numpy.random.uniform docs\n\n Returns\n -------\n dict: Dictionary of the samples\n \"\"\"\n return self.sample_subset(keys=self.keys(), size=size)\n\n def sample_subset(self, keys=iter([]), size=None):\n \"\"\"Draw samples from the prior set for parameters which are not a DeltaFunction\n\n Parameters\n ----------\n keys: list\n List of prior keys to draw samples from\n size: int or tuple of ints, optional\n See numpy.random.uniform docs\n\n Returns\n -------\n dict: Dictionary of the drawn samples\n \"\"\"\n self.convert_floats_to_delta_functions()\n samples = dict()\n for key in keys:\n if isinstance(self[key], Prior):\n samples[key] = self[key].sample(size=size)\n else:\n logger.debug('{} not a known prior.'.format(key))\n return samples\n\n def prob(self, sample, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n sample: dict\n Dictionary of the samples of which we want to have the probability of\n kwargs:\n The keyword arguments are passed directly to `np.product`\n\n Returns\n -------\n float: Joint probability of all individual sample probabilities\n\n \"\"\"\n return np.product([self[key].prob(sample[key]) for key in sample], **kwargs)\n\n def ln_prob(self, sample):\n \"\"\"\n\n Parameters\n ----------\n sample: dict\n Dictionary of the samples of which we want to have the log probability of\n\n Returns\n -------\n float: Joint log probability of all the individual sample probabilities\n\n \"\"\"\n return np.sum([self[key].ln_prob(sample[key]) for key in sample])\n\n def rescale(self, keys, theta):\n \"\"\"Rescale samples from unit cube to prior\n\n Parameters\n ----------\n keys: list\n List of prior keys to be rescaled\n theta: list\n List of randomly drawn values on a unit cube associated with the prior keys\n\n Returns\n -------\n list: List of floats containing the rescaled sample\n \"\"\"\n return [self[key].rescale(sample) for key, sample in zip(keys, theta)]\n\n def test_redundancy(self, key):\n \"\"\"Empty redundancy test, should be overwritten in subclasses\"\"\"\n return False\n\n\nclass PriorSet(PriorDict):\n\n def __init__(self, dictionary=None, filename=None):\n \"\"\" DEPRECATED: USE PriorDict INSTEAD\"\"\"\n logger.warning(\"The name 'PriorSet' is deprecated use 'PriorDict' instead\")\n super(PriorSet, self).__init__(dictionary, 
filename)\n\n\ndef create_default_prior(name, default_priors_file=None):\n \"\"\"Make a default prior for a parameter with a known name.\n\n Parameters\n ----------\n name: str\n Parameter name\n default_priors_file: str, optional\n If given, a file containing the default priors.\n\n Return\n ------\n prior: Prior\n Default prior distribution for that parameter, if unknown None is\n returned.\n \"\"\"\n\n if default_priors_file is None:\n logger.debug(\n \"No prior file given.\")\n prior = None\n else:\n default_priors = PriorDict(filename=default_priors_file)\n if name in default_priors.keys():\n prior = default_priors[name]\n else:\n logger.debug(\n \"No default prior found for variable {}.\".format(name))\n prior = None\n return prior\n\n\nclass Prior(object):\n _default_latex_labels = dict()\n\n def __init__(self, name=None, latex_label=None, unit=None, minimum=-np.inf,\n maximum=np.inf):\n \"\"\" Implements a Prior object\n\n Parameters\n ----------\n name: str, optional\n Name associated with prior.\n latex_label: str, optional\n Latex label associated with prior, used for plotting.\n unit: str, optional\n If given, a Latex string describing the units of the parameter.\n minimum: float, optional\n Minimum of the domain, default=-np.inf\n maximum: float, optional\n Maximum of the domain, default=np.inf\n\n \"\"\"\n self.name = name\n self.latex_label = latex_label\n self.unit = unit\n self.minimum = minimum\n self.maximum = maximum\n\n def __call__(self):\n \"\"\"Overrides the __call__ special method. Calls the sample method.\n\n Returns\n -------\n float: The return value of the sample method.\n \"\"\"\n return self.sample()\n\n def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if sorted(self.__dict__.keys()) != sorted(other.__dict__.keys()):\n return False\n for key in self.__dict__:\n if type(self.__dict__[key]) is np.ndarray:\n if not np.array_equal(self.__dict__[key], other.__dict__[key]):\n return False\n else:\n if not self.__dict__[key] == other.__dict__[key]:\n return False\n return True\n\n def sample(self, size=None):\n \"\"\"Draw a sample from the prior\n\n Parameters\n ----------\n size: int or tuple of ints, optional\n See numpy.random.uniform docs\n\n Returns\n -------\n float: A random number between 0 and 1, rescaled to match the distribution of this Prior\n\n \"\"\"\n return self.rescale(np.random.uniform(0, 1, size))\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the prior.\n\n This should be overwritten by each subclass.\n\n Parameters\n ----------\n val: float\n A random number between 0 and 1\n\n Returns\n -------\n None\n\n \"\"\"\n return None\n\n def prob(self, val):\n \"\"\"Return the prior probability of val, this should be overwritten\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n np.nan\n\n \"\"\"\n return np.nan\n\n def ln_prob(self, val):\n \"\"\"Return the prior ln probability of val, this should be overwritten\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n np.nan\n\n \"\"\"\n return np.log(self.prob(val))\n\n def is_in_prior_range(self, val):\n \"\"\"Returns True if val is in the prior boundaries, zero otherwise\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n np.nan\n\n \"\"\"\n return (val >= self.minimum) & (val <= self.maximum)\n\n @staticmethod\n def test_valid_for_rescaling(val):\n \"\"\"Test if 0 < val < 1\n\n Parameters\n ----------\n val: float\n\n Raises\n -------\n ValueError: If val is not between 0 and 1\n \"\"\"\n 
val = np.atleast_1d(val)\n tests = (val < 0) + (val > 1)\n if np.any(tests):\n raise ValueError(\"Number to be rescaled should be in [0, 1]\")\n\n def __repr__(self):\n \"\"\"Overrides the special method __repr__.\n\n Returns a representation of this instance that resembles how it is instantiated.\n Works correctly for all child classes\n\n Returns\n -------\n str: A string representation of this instance\n\n \"\"\"\n subclass_args = infer_args_from_method(self.__init__)\n prior_name = self.__class__.__name__\n\n property_names = [p for p in dir(self.__class__) if isinstance(getattr(self.__class__, p), property)]\n dict_with_properties = self.__dict__.copy()\n for key in property_names:\n dict_with_properties[key] = getattr(self, key)\n args = ', '.join(['{}={}'.format(key, repr(dict_with_properties[key])) for key in subclass_args])\n return \"{}({})\".format(prior_name, args)\n\n @property\n def is_fixed(self):\n \"\"\"\n Returns True if the prior is fixed and should not be used in the sampler. Does this by checking if this instance\n is an instance of DeltaFunction.\n\n\n Returns\n -------\n bool: Whether it's fixed or not!\n\n \"\"\"\n return isinstance(self, DeltaFunction)\n\n @property\n def latex_label(self):\n \"\"\"Latex label that can be used for plots.\n\n Draws from a set of default labels if no label is given\n\n Returns\n -------\n str: A latex representation for this prior\n\n \"\"\"\n return self.__latex_label\n\n @latex_label.setter\n def latex_label(self, latex_label=None):\n if latex_label is None:\n self.__latex_label = self.__default_latex_label\n else:\n self.__latex_label = latex_label\n\n @property\n def unit(self):\n return self.__unit\n\n @unit.setter\n def unit(self, unit):\n self.__unit = unit\n\n @property\n def latex_label_with_unit(self):\n \"\"\" If a unit is specifed, returns a string of the latex label and unit \"\"\"\n if self.unit is not None:\n return \"{} [{}]\".format(self.latex_label, self.unit)\n else:\n return self.latex_label\n\n @property\n def minimum(self):\n return self.__minimum\n\n @minimum.setter\n def minimum(self, minimum):\n self.__minimum = minimum\n\n @property\n def maximum(self):\n return self.__maximum\n\n @maximum.setter\n def maximum(self, maximum):\n self.__maximum = maximum\n\n @property\n def __default_latex_label(self):\n if self.name in self._default_latex_labels.keys():\n label = self._default_latex_labels[self.name]\n else:\n label = self.name\n return label\n\n\nclass DeltaFunction(Prior):\n\n def __init__(self, peak, name=None, latex_label=None, unit=None):\n \"\"\"Dirac delta function prior, this always returns peak.\n\n Parameters\n ----------\n peak: float\n Peak value of the delta function\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=peak, maximum=peak)\n self.peak = peak\n\n def rescale(self, val):\n \"\"\"Rescale everything to the peak with the correct shape.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Rescaled probability, equivalent to peak\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return self.peak * val ** 0\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: np.inf if val = peak, 0 otherwise\n\n \"\"\"\n at_peak = (val == self.peak)\n return np.nan_to_num(np.multiply(at_peak, np.inf))\n\n\nclass PowerLaw(Prior):\n\n def __init__(self, alpha, minimum, 
maximum, name=None, latex_label=None,\n unit=None):\n \"\"\"Power law with bounds and alpha, spectral index\n\n Parameters\n ----------\n alpha: float\n Power law exponent parameter\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label,\n minimum=minimum, maximum=maximum, unit=unit)\n self.alpha = alpha\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the power-law prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n\n Parameters\n ----------\n val: float\n Uniform probability\n\n Returns\n -------\n float: Rescaled probability\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n if self.alpha == -1:\n return self.minimum * np.exp(val * np.log(self.maximum / self.minimum))\n else:\n return (self.minimum ** (1 + self.alpha) + val *\n (self.maximum ** (1 + self.alpha) - self.minimum ** (1 + self.alpha))) ** (1. / (1 + self.alpha))\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n if self.alpha == -1:\n return np.nan_to_num(1 / val / np.log(self.maximum / self.minimum)) * self.is_in_prior_range(val)\n else:\n return np.nan_to_num(val ** self.alpha * (1 + self.alpha) /\n (self.maximum ** (1 + self.alpha) -\n self.minimum ** (1 + self.alpha))) * self.is_in_prior_range(val)\n\n def ln_prob(self, val):\n \"\"\"Return the logarithmic prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float:\n\n \"\"\"\n if self.alpha == -1:\n normalising = 1. / np.log(self.maximum / self.minimum)\n else:\n normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha) -\n self.minimum ** (1 + self.alpha))\n\n return (self.alpha * np.log(val) + np.log(normalising)) + np.log(1. 
* self.is_in_prior_range(val))\n\n\nclass Uniform(Prior):\n\n def __init__(self, minimum, maximum, name=None, latex_label=None,\n unit=None):\n \"\"\"Uniform prior with bounds\n\n Parameters\n ----------\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label,\n minimum=minimum, maximum=maximum, unit=unit)\n\n def rescale(self, val):\n Prior.test_valid_for_rescaling(val)\n return self.minimum + val * (self.maximum - self.minimum)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return scipy.stats.uniform.pdf(val, loc=self.minimum,\n scale=self.maximum - self.minimum)\n\n def ln_prob(self, val):\n \"\"\"Return the log prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: log probability of val\n \"\"\"\n return scipy.stats.uniform.logpdf(val, loc=self.minimum,\n scale=self.maximum - self.minimum)\n\n\nclass LogUniform(PowerLaw):\n\n def __init__(self, minimum, maximum, name=None, latex_label=None,\n unit=None):\n \"\"\"Log-Uniform prior with bounds\n\n Parameters\n ----------\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n PowerLaw.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum, alpha=-1)\n if self.minimum <= 0:\n logger.warning('You specified a uniform-in-log prior with minimum={}'.format(self.minimum))\n\n\nclass Cosine(Prior):\n\n def __init__(self, name=None, latex_label=None, unit=None,\n minimum=-np.pi / 2, maximum=np.pi / 2):\n \"\"\"Cosine prior with bounds\n\n Parameters\n ----------\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to a uniform in cosine prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return np.arcsin(-1 + val * 2)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val. Defined over [-pi/2, pi/2].\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return np.cos(val) / 2 * self.is_in_prior_range(val)\n\n\nclass Sine(Prior):\n\n def __init__(self, name=None, latex_label=None, unit=None, minimum=0,\n maximum=np.pi):\n \"\"\"Sine prior with bounds\n\n Parameters\n ----------\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to a uniform in sine prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return np.arccos(1 - val * 2)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val. 
Defined over [0, pi].\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return np.sin(val) / 2 * self.is_in_prior_range(val)\n\n\nclass Gaussian(Prior):\n\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):\n \"\"\"Gaussian prior with mean mu and width sigma\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)\n self.mu = mu\n self.sigma = sigma\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Gaussian prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return self.mu + erfinv(2 * val - 1) * 2 ** 0.5 * self.sigma\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (2 * np.pi) ** 0.5 / self.sigma\n\n def ln_prob(self, val):\n return -0.5 * ((self.mu - val) ** 2 / self.sigma ** 2 + np.log(2 * np.pi * self.sigma ** 2))\n\n\nclass Normal(Gaussian):\n\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):\n \"\"\"A synonym for the Gaussian distribution.\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma: float\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Gaussian.__init__(self, mu=mu, sigma=sigma, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass TruncatedGaussian(Prior):\n\n def __init__(self, mu, sigma, minimum, maximum, name=None,\n latex_label=None, unit=None):\n \"\"\"Truncated Gaussian prior with mean mu and width sigma\n\n https://en.wikipedia.org/wiki/Truncated_normal_distribution\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum)\n self.mu = mu\n self.sigma = sigma\n\n @property\n def normalisation(self):\n \"\"\" Calculates the proper normalisation of the truncated Gaussian\n\n Returns\n -------\n float: Proper normalisation of the truncated Gaussian\n \"\"\"\n return (erf((self.maximum - self.mu) / 2 ** 0.5 / self.sigma) - erf(\n (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) / 2\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate truncated Gaussian prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return erfinv(2 * val * self.normalisation + erf(\n (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) * 2 ** 0.5 * self.sigma + self.mu\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (\n 2 * np.pi) ** 0.5 / self.sigma / self.normalisation * self.is_in_prior_range(val)\n\n\nclass TruncatedNormal(TruncatedGaussian):\n\n def __init__(self, mu, sigma, minimum, maximum, name=None,\n latex_label=None, unit=None):\n \"\"\"A synonym for the TruncatedGaussian distribution.\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n TruncatedGaussian.__init__(self, mu=mu, sigma=sigma, minimum=minimum,\n maximum=maximum, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass HalfGaussian(TruncatedGaussian):\n def __init__(self, sigma, name=None, latex_label=None, unit=None):\n \"\"\"A Gaussian with its mode at zero, and truncated to only be positive.\n\n Parameters\n ----------\n sigma: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n TruncatedGaussian.__init__(self, 0., sigma, minimum=0., maximum=np.inf,\n name=name, latex_label=latex_label,\n unit=unit)\n\n\nclass HalfNormal(HalfGaussian):\n def __init__(self, sigma, name=None, latex_label=None, unit=None):\n \"\"\"A synonym for the HalfGaussian distribution.\n\n Parameters\n ----------\n sigma: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n HalfGaussian.__init__(self, sigma=sigma, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass LogNormal(Prior):\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):\n \"\"\"Log-normal prior with mean mu and width sigma\n\n https://en.wikipedia.org/wiki/Log-normal_distribution\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,\n unit=unit)\n\n if sigma <= 0.:\n raise ValueError(\"For the LogGaussian prior the standard deviation must be positive\")\n\n self.mu = mu\n self.sigma = sigma\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate LogNormal prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return scipy.stats.lognorm.ppf(val, self.sigma, scale=np.exp(self.mu))\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n\n return scipy.stats.lognorm.pdf(val, self.sigma, scale=np.exp(self.mu))\n\n def ln_prob(self, val):\n return scipy.stats.lognorm.logpdf(val, self.sigma, scale=np.exp(self.mu))\n\n\nclass LogGaussian(LogNormal):\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):\n \"\"\"Synonym of LogNormal prior\n\n https://en.wikipedia.org/wiki/Log-normal_distribution\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n LogNormal.__init__(self, mu=mu, sigma=sigma, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass Exponential(Prior):\n def __init__(self, mu, name=None, latex_label=None, unit=None):\n \"\"\"Exponential prior with mean mu\n\n Parameters\n ----------\n mu: float\n Mean of the Exponential prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,\n unit=unit)\n self.mu = mu\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Exponential prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return scipy.stats.expon.ppf(val, scale=self.mu)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n\n return scipy.stats.expon.pdf(val, scale=self.mu)\n\n def ln_prob(self, val):\n return scipy.stats.expon.logpdf(val, scale=self.mu)\n\n\nclass StudentT(Prior):\n def __init__(self, df, mu=0., scale=1., name=None, latex_label=None,\n unit=None):\n \"\"\"Student's t-distribution prior with number of degrees of freedom df,\n mean mu and scale\n\n https://en.wikipedia.org/wiki/Student%27s_t-distribution#Generalized_Student's_t-distribution\n\n Parameters\n ----------\n df: float\n Number of degrees of freedom for distribution\n mu: float\n Mean of the Student's t-prior\n scale:\n Width of the Student's t-prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)\n\n if df <= 0. or scale <= 0.:\n raise ValueError(\"For the StudentT prior the number of degrees of freedom and scale must be positive\")\n\n self.df = df\n self.mu = mu\n self.scale = scale\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Student's t-prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.t.ppf(val, self.df, loc=self.mu, scale=self.scale)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return scipy.stats.t.pdf(val, self.df, loc=self.mu, scale=self.scale)\n\n def ln_prob(self, val):\n return scipy.stats.t.logpdf(val, self.df, loc=self.mu, scale=self.scale)\n\n\nclass Beta(Prior):\n def __init__(self, alpha, beta, minimum=0, maximum=1, name=None,\n latex_label=None, unit=None):\n \"\"\"Beta distribution\n\n https://en.wikipedia.org/wiki/Beta_distribution\n\n This wraps around\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html\n\n Parameters\n ----------\n alpha: float\n first shape parameter\n beta: float\n second shape parameter\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n Prior.__init__(self, minimum=minimum, maximum=maximum, name=name,\n latex_label=latex_label, unit=unit)\n\n if alpha <= 0. or beta <= 0.:\n raise ValueError(\"alpha and beta must both be positive values\")\n\n self.alpha = alpha\n self.beta = beta\n self._loc = minimum\n self._scale = maximum - minimum\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Beta prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.beta.ppf(\n val, self.alpha, self.beta, loc=self._loc, scale=self._scale)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n\n spdf = scipy.stats.beta.pdf(\n val, self.alpha, self.beta, loc=self._loc, scale=self._scale)\n if np.all(np.isfinite(spdf)):\n return spdf\n\n # deal with the fact that if alpha or beta are < 1 you get infinities at 0 and 1\n if isinstance(val, np.ndarray):\n pdf = np.zeros(len(val))\n pdf[np.isfinite(spdf)] = spdf[np.isfinite(spdf)]\n return pdf\n else:\n return 0.\n\n def ln_prob(self, val):\n spdf = scipy.stats.beta.logpdf(\n val, self.alpha, self.beta, loc=self._loc, scale=self._scale)\n if np.all(np.isfinite(spdf)):\n return spdf\n\n if isinstance(val, np.ndarray):\n pdf = -np.inf * np.ones(len(val))\n pdf[np.isfinite(spdf)] = spdf[np.isfinite(spdf)]\n return pdf\n else:\n return -np.inf\n\n\nclass Logistic(Prior):\n def __init__(self, mu, scale, name=None, latex_label=None, unit=None):\n \"\"\"Logistic distribution\n\n https://en.wikipedia.org/wiki/Logistic_distribution\n\n Parameters\n ----------\n mu: float\n Mean of the distribution\n scale: float\n Width of the distribution\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)\n\n if scale <= 0.:\n raise ValueError(\"For the Logistic prior the scale must be positive\")\n\n self.mu = mu\n self.scale = scale\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Logistic prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.logistic.ppf(val, loc=self.mu, scale=self.scale)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return scipy.stats.logistic.pdf(val, loc=self.mu, scale=self.scale)\n\n def ln_prob(self, val):\n return scipy.stats.logistic.logpdf(val, loc=self.mu, scale=self.scale)\n\n\nclass Cauchy(Prior):\n def __init__(self, alpha, beta, name=None, latex_label=None, unit=None):\n \"\"\"Cauchy distribution\n\n https://en.wikipedia.org/wiki/Cauchy_distribution\n\n Parameters\n ----------\n alpha: float\n Location parameter\n beta: float\n Scale parameter\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)\n\n if beta <= 0.:\n raise ValueError(\"For the Cauchy prior the scale must be positive\")\n\n self.alpha = alpha\n self.beta = beta\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Cauchy prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.cauchy.ppf(val, loc=self.alpha, scale=self.beta)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return scipy.stats.cauchy.pdf(val, loc=self.alpha, scale=self.beta)\n\n def ln_prob(self, val):\n return scipy.stats.cauchy.logpdf(val, loc=self.alpha, scale=self.beta)\n\n\nclass Lorentzian(Cauchy):\n def __init__(self, alpha, beta, name=None, latex_label=None, unit=None):\n \"\"\"Synonym for the Cauchy distribution\n\n https://en.wikipedia.org/wiki/Cauchy_distribution\n\n Parameters\n ----------\n alpha: float\n Location parameter\n beta: float\n Scale parameter\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Cauchy.__init__(self, alpha=alpha, beta=beta, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass Gamma(Prior):\n def __init__(self, k, theta=1., name=None, latex_label=None, unit=None):\n \"\"\"Gamma distribution\n\n https://en.wikipedia.org/wiki/Gamma_distribution\n\n Parameters\n ----------\n k: float\n The shape parameter\n theta: float\n The scale parameter\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,\n unit=unit)\n\n if k <= 0 or theta <= 0:\n raise ValueError(\"For the Gamma prior the shape and scale must be positive\")\n\n self.k = k\n self.theta = theta\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Gamma prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.gamma.ppf(val, self.k, loc=0., scale=self.theta)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n\n return scipy.stats.gamma.pdf(val, self.k, loc=0., scale=self.theta)\n\n def ln_prob(self, val):\n return scipy.stats.gamma.logpdf(val, self.k, loc=0., scale=self.theta)\n\n\nclass ChiSquared(Gamma):\n def __init__(self, nu, name=None, latex_label=None, unit=None):\n \"\"\"Chi-squared distribution\n\n https://en.wikipedia.org/wiki/Chi-squared_distribution\n\n Parameters\n ----------\n nu: int\n Number of degrees of freedom\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n\n if nu <= 0 or not isinstance(nu, int):\n raise ValueError(\"For the ChiSquared prior the number of degrees of freedom must be a positive integer\")\n\n Gamma.__init__(self, name=name, k=nu / 2., theta=2.,\n latex_label=latex_label, unit=unit)\n\n @property\n def nu(self):\n return int(self.k * 2)\n\n @nu.setter\n def nu(self, nu):\n self.k = nu / 2.\n\n\nclass Interped(Prior):\n\n def __init__(self, xx, yy, minimum=np.nan, maximum=np.nan, name=None,\n latex_label=None, unit=None):\n \"\"\"Creates an interpolated prior function from arrays of xx and yy=p(xx)\n\n Parameters\n ----------\n xx: array_like\n x values for the to be interpolated prior function\n yy: array_like\n p(xx) values for the to be interpolated prior function\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n Attributes\n -------\n probability_density: scipy.interpolate.interp1d\n Interpolated prior probability distribution\n cumulative_distribution: scipy.interpolate.interp1d\n Interpolated cumulative prior probability distribution\n inverse_cumulative_distribution: scipy.interpolate.interp1d\n Inverted cumulative prior probability distribution\n YY: array_like\n Cumulative prior probability distribution\n\n \"\"\"\n self.xx = xx\n self.yy = yy\n self.__all_interpolated = interp1d(x=xx, y=yy, bounds_error=False, fill_value=0)\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=np.nanmax(np.array((min(xx), minimum))),\n maximum=np.nanmin(np.array((max(xx), maximum))))\n self.__initialize_attributes()\n\n def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if np.array_equal(self.xx, other.xx) and np.array_equal(self.yy, other.yy):\n return True\n return False\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return self.probability_density(val)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the prior.\n\n This maps to the inverse CDF. 
This is done using interpolation.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n rescaled = self.inverse_cumulative_distribution(val)\n if rescaled.shape == ():\n rescaled = float(rescaled)\n return rescaled\n\n @property\n def minimum(self):\n \"\"\"Return minimum of the prior distribution.\n\n Updates the prior distribution if minimum is set to a different value.\n\n Returns\n -------\n float: Minimum of the prior distribution\n\n \"\"\"\n return self.__minimum\n\n @minimum.setter\n def minimum(self, minimum):\n self.__minimum = minimum\n if '_Interped__maximum' in self.__dict__ and self.__maximum < np.inf:\n self.__update_instance()\n\n @property\n def maximum(self):\n \"\"\"Return maximum of the prior distribution.\n\n Updates the prior distribution if maximum is set to a different value.\n\n Returns\n -------\n float: Maximum of the prior distribution\n\n \"\"\"\n return self.__maximum\n\n @maximum.setter\n def maximum(self, maximum):\n self.__maximum = maximum\n if '_Interped__minimum' in self.__dict__ and self.__minimum < np.inf:\n self.__update_instance()\n\n def __update_instance(self):\n self.xx = np.linspace(self.minimum, self.maximum, len(self.xx))\n self.yy = self.__all_interpolated(self.xx)\n self.__initialize_attributes()\n\n def __initialize_attributes(self):\n if np.trapz(self.yy, self.xx) != 1:\n logger.debug('Supplied PDF for {} is not normalised, normalising.'.format(self.name))\n self.yy /= np.trapz(self.yy, self.xx)\n self.YY = cumtrapz(self.yy, self.xx, initial=0)\n # Need last element of cumulative distribution to be exactly one.\n self.YY[-1] = 1\n self.probability_density = interp1d(x=self.xx, y=self.yy, bounds_error=False, fill_value=0)\n self.cumulative_distribution = interp1d(x=self.xx, y=self.YY, bounds_error=False, fill_value=0)\n self.inverse_cumulative_distribution = interp1d(x=self.YY, y=self.xx, bounds_error=True)\n\n\nclass FromFile(Interped):\n\n def __init__(self, file_name, minimum=None, maximum=None, name=None,\n latex_label=None, unit=None):\n \"\"\"Creates an interpolated prior function from arrays of xx and yy=p(xx) extracted from a file\n\n Parameters\n ----------\n file_name: str\n Name of the file containing the xx and yy arrays\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n Attributes\n -------\n all_interpolated: scipy.interpolate.interp1d\n Interpolated prior function\n\n \"\"\"\n try:\n self.id = file_name\n xx, yy = np.genfromtxt(self.id).T\n Interped.__init__(self, xx=xx, yy=yy, minimum=minimum,\n maximum=maximum, name=name,\n latex_label=latex_label, unit=unit)\n except IOError:\n logger.warning(\"Can't load {}.\".format(self.id))\n logger.warning(\"Format should be:\")\n logger.warning(r\"x\\tp(x)\")\n"
] | [
[
"scipy.special.erfinv",
"numpy.nan_to_num",
"numpy.any",
"numpy.exp",
"scipy.integrate.cumtrapz",
"numpy.trapz",
"numpy.arcsin",
"numpy.sin",
"numpy.atleast_1d",
"scipy.interpolate.interp1d",
"scipy.special.erf",
"numpy.log",
"numpy.multiply",
"numpy.arccos",
"numpy.genfromtxt",
"numpy.isfinite",
"numpy.array_equal",
"numpy.cos",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
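All of the rescale methods in the bilby prior module above follow one recipe: draw v ~ Uniform(0, 1) and push it through the prior's inverse CDF (inverse-transform sampling). A standalone sketch of that recipe for the Gaussian case, reusing the same erfinv closed form as Gaussian.rescale; the function name and example values here are illustrative, not part of bilby:

import numpy as np
from scipy.special import erfinv

def gaussian_rescale(val, mu, sigma):
    # Inverse CDF of N(mu, sigma): same closed form as Gaussian.rescale above.
    val = np.atleast_1d(val)
    if np.any((val < 0) | (val > 1)):
        raise ValueError("Number to be rescaled should be in [0, 1]")
    return mu + erfinv(2 * val - 1) * 2 ** 0.5 * sigma

# Inverse-transform sampling: uniform draws become Gaussian draws.
samples = gaussian_rescale(np.random.uniform(0, 1, 10000), mu=0.0, sigma=2.0)
print(samples.mean(), samples.std())  # approximately 0.0 and 2.0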
van-truong/comptox_ai | [
"393b05c617822e30f54c967ef07ec53ba4b09688",
"393b05c617822e30f54c967ef07ec53ba4b09688"
] | [
"comptox_ai/ml/nn.py",
"comptox_ai/build/scripts/make_maccs_fingerprints.py"
] | [
"\"\"\"\nBase class and utilities for defining neural networks to be used on ComptoxAI\ndata.\n\nWe stick to PyTorch for implementing all neural networks, due to its speed,\nexpressiveness, and readability. For more documentation on PyTorch, check out \n`PyTorch Documentation<https://pytorch.org/docs/stable/index.html>`_. Several\nof the models we have reimplemented for ComptoxAI were previously only\nimplemented in Tensorflow or another deep learning library. Users are strongly\nencouraged to submit pull requests or create a new issue on GitHub if they\ndiscover any errors made in the translation process!\n\"\"\"\n\nfrom comptox_ai.db.graph_db import Graph, GraphDB\nimport shutil\nimport os\n\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.metrics import roc_auc_score\n\nfrom torch_geometric.utils import negative_sampling\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import GCNConv\n# from torch_geometric.utils import train_test_split_edges\nfrom comptox_ai.ml.train_test_split_edges import train_test_split_edges\n\n\nclass NeuralNetwork(object):\n def __init__(self, **kwargs):\n arg_opts = {\n 'name',\n 'lr',\n 'num_epochs',\n 'logging',\n 'verbose'\n }\n for kwarg in kwargs.keys():\n assert kwarg in arg_opts, 'Invalid argument: {}'.format(kwarg)\n\n self.verbose = kwargs.get('verbose', False)\n self.data = None\n self.model = None\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n ## link prediction model - code doesn't work from here on down, will prob need to delete!\n ## - erica\n class Net(torch.nn.Module):\n def __init__(self, name, in_channels, out_channels):\n super(Net, self).__init__()\n if name == 'link-prediction':\n self.conv1 = GCNConv(in_channels, 128)\n self.conv2 = GCNConv(128, out_channels)\n\n def encode(self, x, edge_index):\n x = self.conv1(x, edge_index)\n x = x.relu()\n return self.conv2(x, edge_index)\n\n def decode(self, z, pos_edge_index, neg_edge_index):\n edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)\n return (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)\n\n def decode_all(self, z):\n prob_adj = z @ z.t()\n return (prob_adj > 0).nonzero(as_tuple=False).t()\n\n def load_data(self, graph_name, node_types):\n db = GraphDB()\n\n db.drop_all_existing_graphs() # alt: drop graph if exists rather than dropping all graphs \n db.build_graph_native_projection(\n graph_name=graph_name,\n node_proj=node_types,\n relationship_proj=\"'*'\"\n )\n\n dir_abspath = os.path.join(os.getcwd(), 'comptox_ai/db/exports', f\"{graph_name}\")\n try:\n shutil.rmtree(dir_abspath)\n except OSError as e:\n print(\"Error: %s : %s\" % (dir_abspath, e.strerror))\n\n db.export_graph(graph_name)\n data = db.to_pytorch(graph_name, node_types)\n\n ## debugging\n print(f\"data: {data}\")\n print(f\"data.x:\\n\\t{data.x}\")\n print(f\"data.edge_index:\\n\\t{data.edge_index}\")\n\n ## train test split data\n data = train_test_split_edges(data)\n self.data = data.to(self.device)\n\n self.model = self.Net('link-prediction', self.data.num_features, 64).to(self.device)\n\n\n def get_link_labels(pos_edge_index, neg_edge_index):\n num_links = pos_edge_index.size(1) + neg_edge_index.size(1)\n link_labels = torch.zeros(num_links, dtype=torch.float, device=pos_edge_index.device)\n link_labels[:pos_edge_index.size(1)] = 1.\n return link_labels\n\n\n def train(data):\n model.train()\n\n neg_edge_index = negative_sampling(\n edge_index=data.train_pos_edge_index, num_nodes=data.num_nodes,\n 
num_neg_samples=data.train_pos_edge_index.size(1))\n\n optimizer.zero_grad()\n z = model.encode(data.x, data.train_pos_edge_index)\n link_logits = model.decode(z, data.train_pos_edge_index, neg_edge_index)\n link_labels = get_link_labels(data.train_pos_edge_index, neg_edge_index)\n loss = F.binary_cross_entropy_with_logits(link_logits, link_labels)\n loss.backward()\n optimizer.step()\n\n return loss\n\n\n @torch.no_grad()\n def test(data):\n model.eval()\n\n z = model.encode(data.x, data.train_pos_edge_index)\n\n results = []\n for prefix in ['val', 'test']:\n pos_edge_index = data[f'{prefix}_pos_edge_index']\n neg_edge_index = data[f'{prefix}_neg_edge_index']\n link_logits = model.decode(z, pos_edge_index, neg_edge_index)\n link_probs = link_logits.sigmoid()\n link_labels = get_link_labels(pos_edge_index, neg_edge_index)\n results.append(roc_auc_score(link_labels.cpu(), link_probs.cpu()))\n return results\n\n\n best_val_auc = test_auc = 0\n for epoch in range(1, 101):\n loss = train(data)\n val_auc, tmp_test_auc = test(data)\n if val_auc > best_val_auc:\n best_val_auc = val_auc\n test_auc = tmp_test_auc\n print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Val: {val_auc:.4f}, '\n f'Test: {test_auc:.4f}')\n\n z = model.encode(data.x, data.train_pos_edge_index)\n final_edge_index = model.decode_all(z)\n\n\n\n\n ",
"\"\"\"\nmake_maccs_fingerprints.py\n\nThis is a standalone script to extract SMILES strings from the DSSTOX MS ready\nstructure data files, and export them to a TSV for subsequent conversion into\nMACCS fingerprints.\n\"\"\"\n\nimport pandas as pd\nimport glob\nfrom tqdm import tqdm\nimport ipdb\n\nprint(\"Importing and merging Mass Spec-ready structure files from EPA - this may take a while.\")\n\nfiles = glob.glob(\"D:/Data/epa/DSSTOX_MS_Ready_Chemical_Structures/*.xlsx\")\n\nall_smiles = []\nfor f in tqdm(files):\n df = pd.read_excel(f)\n all_smiles += list(df[['DSSTox_Substance_ID', 'SMILES']].itertuples(index=False, name=None))\n\ndf = pd.DataFrame(all_smiles, columns=['DSSTox_Substance_ID', 'SMILES'], index=None)\n\ndf.to_csv(\"D:/Data/epa/DSSTOX_MS_Ready_Chemical_Structures/dsstox_smiles_for_maccs.tsv\", sep=\"\\t\")\n\n\n"
] | [
[
"torch.cat",
"torch.zeros",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.no_grad",
"torch.cuda.is_available"
],
[
"pandas.read_excel",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
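In the comptox_ai link-prediction record above, Net.decode scores a candidate edge (u, v) as the dot product of the two node embeddings, and get_link_labels builds the matching 0/1 target vector for the binary cross-entropy loss. A minimal standalone restatement of those two pieces with toy tensors; the shapes and values here are illustrative:

import torch

def decode(z, pos_edge_index, neg_edge_index):
    # Score each edge as <z_u, z_v>; higher logits mean "edge more likely".
    edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
    return (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)

def get_link_labels(pos_edge_index, neg_edge_index):
    # 1 for every positive edge, 0 for every sampled negative edge.
    num_links = pos_edge_index.size(1) + neg_edge_index.size(1)
    labels = torch.zeros(num_links)
    labels[:pos_edge_index.size(1)] = 1.
    return labels

z = torch.randn(5, 8)                 # 5 nodes, 8-dim embeddings
pos = torch.tensor([[0, 1], [1, 2]])  # positive edges 0-1 and 1-2
neg = torch.tensor([[3], [4]])        # one sampled non-edge, 3-4
logits = decode(z, pos, neg)
loss = torch.nn.functional.binary_cross_entropy_with_logits(
    logits, get_link_labels(pos, neg))
print(loss)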
imsandydk/Convoying_project_s22 | [
"0b60a23fe148839f243a8a96acae9ee0fd8b9a81"
] | [
"convoy.py"
] | [
"import carla\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport sys\nfrom srunner.tools.route_manipulation import interpolate_trajectory\n\n#Returns only the waypoints in one lane\ndef single_lane(waypoint_list, lane):\n waypoints = []\n for i in range(len(waypoint_list) - 1):\n if waypoint_list[i].lane_id == lane:\n waypoints.append(waypoint_list[i])\n return waypoints\n \n#Returns only the waypoints that are not along the straights\ndef get_curvy_waypoints(waypoints):\n curvy_waypoints = []\n for i in range(len(waypoints) - 1):\n x1 = waypoints[i].transform.location.x\n y1 = waypoints[i].transform.location.y\n x2 = waypoints[i+1].transform.location.x\n y2 = waypoints[i+1].transform.location.y\n if (abs(x1 - x2) > 1) and (abs(y1 - y2) > 1):\n print(\"x1: \" + str(x1) + \" x2: \" + str(x2))\n print(abs(x1 - x2))\n print(\"y1: \" + str(y1) + \" y2: \" + str(y2))\n print(abs(y1 - y2))\n curvy_waypoints.append(waypoints[i])\n \n #To make the path reconnect to the starting location\n curvy_waypoints.append(curvy_waypoints[0])\n\n return curvy_waypoints\n\ndef control_pure_pursuit(vehicle_tr, waypoint_tr, max_steer, wheelbase):\n # TODO: convert vehicle transform to rear axle transform\n wp_loc_rel = relative_location(vehicle_tr, waypoint_tr.location) + carla.Vector3D(wheelbase, 0, 0)\n wp_ar = [wp_loc_rel.x, wp_loc_rel.y]\n d2 = wp_ar[0]**2 + wp_ar[1]**2\n steer_rad = math.atan(2 * wheelbase * wp_loc_rel.y / d2)\n steer_deg = math.degrees(steer_rad)\n steer_deg = np.clip(steer_deg, -max_steer, max_steer)\n return steer_deg / max_steer\n\ndef relative_location(frame, location):\n origin = frame.location\n forward = frame.get_forward_vector()\n right = frame.get_right_vector()\n up = frame.get_up_vector()\n disp = location - origin\n x = np.dot([disp.x, disp.y, disp.z], [forward.x, forward.y, forward.z])\n y = np.dot([disp.x, disp.y, disp.z], [right.x, right.y, right.z])\n z = np.dot([disp.x, disp.y, disp.z], [up.x, up.y, up.z])\n return carla.Vector3D(x, y, z)\n\ndef get_next_waypoint(world, vehicle, waypoints):\n vehicle_location = vehicle.get_location()\n min_distance = 1000\n next_waypoint = None\n\n for waypoint in waypoints:\n waypoint_location = waypoint.transform.location\n\n #Only check waypoints that are in the front of the vehicle (if x is negative, then the waypoint is to the rear)\n #TODO: Check if this applies for all maps\n if (waypoint_location - vehicle_location).x > 0:\n\n #Find the waypoint closest to the vehicle, but once vehicle is close to upcoming waypoint, search for next one\n if vehicle_location.distance(waypoint_location) < min_distance and vehicle_location.distance(waypoint_location) > 5:\n min_distance = vehicle_location.distance(waypoint_location)\n next_waypoint = waypoint\n\n return next_waypoint\n\ndef main():\n\n ##Modifiable Variables\n targetLane = -3\n\n client = carla.Client('127.0.0.1', 2000)\n client.set_timeout(10.0)\n\n # Read the opendrive file to a string\n xodr_path = \"speedway.xodr\"\n #xodr_path = \"Crossing8Course.xodr\"\n od_file = open(xodr_path)\n data = od_file.read()\n\n # Load the opendrive map\n vertex_distance = 2.0 # in meters\n max_road_length = 50.0 # in meters\n wall_height = 1.0 # in meters\n extra_width = 0.6 # in meters\n world = client.generate_opendrive_world(\n data, carla.OpendriveGenerationParameters(\n vertex_distance=vertex_distance,\n max_road_length=max_road_length,\n wall_height=wall_height,\n additional_width=extra_width,\n smooth_junctions=True,\n enable_mesh_visibility=True))\n\n spectator = 
world.get_spectator()\n\n map = world.get_map()\n waypoint_list = map.generate_waypoints(40)\n\n print(\"Length: \" + str(len(waypoint_list)))\n \n #Take only the waypoints from the targetLane\n waypoints = single_lane(waypoint_list, targetLane)\n\n #Remove all unnecessary waypoints along the straights\n curvy_waypoints = get_curvy_waypoints(waypoints)\n\n #Save graph of plotted points as bezier.png\n x = [p.transform.location.x for p in curvy_waypoints]\n y = [p.transform.location.y for p in curvy_waypoints]\n plt.plot(x, y, marker = 'o')\n plt.savefig(\"bezier.png\")\n\n #Set spawning location as initial waypoint\n waypoint = curvy_waypoints[0]\n blueprint = world.get_blueprint_library().filter('vehicle.*model3*')[0]\n location = waypoint.transform.location + carla.Vector3D(0, 0, 1.5)\n rotation = waypoint.transform.rotation\n vehicle = world.spawn_actor(blueprint, carla.Transform(location, rotation))\n print(\"SPAWNED!\")\n \n #Vehicle properties setup\n physics_control = vehicle.get_physics_control()\n max_steer = physics_control.wheels[0].max_steer_angle\n rear_axle_center = (physics_control.wheels[2].position + physics_control.wheels[3].position)/200\n offset = rear_axle_center - vehicle.get_location()\n wheelbase = np.linalg.norm([offset.x, offset.y, offset.z])\n vehicle.set_simulate_physics(True)\n\n #Add spectator camera to get the view to move with the car \n camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')\n camera_transform = carla.Transform(carla.Location(x=-10,z=10), carla.Rotation(-45,0,0))\n camera = world.spawn_actor(camera_bp, camera_transform, attach_to=vehicle)\n\n ##INSERT MODIFYING WAYPOINTS HERE\n\n while True:\n\n #Update the camera view\n spectator.set_transform(camera.get_transform())\n\n #Get next waypoint\n waypoint = get_next_waypoint(world, vehicle, curvy_waypoints)\n world.debug.draw_point(waypoint.transform.location, life_time=5)\n\n #Control vehicle's throttle and steering\n throttle = 0.85\n vehicle_transform = vehicle.get_transform()\n vehicle_location = vehicle_transform.location\n steer = control_pure_pursuit(vehicle_transform, waypoint.transform, max_steer, wheelbase)\n control = carla.VehicleControl(throttle=throttle, steer=steer)\n vehicle.apply_control(control)\n\nif __name__ == \"__main__\":\n sys.exit(main())"
] | [
[
"numpy.dot",
"numpy.clip",
"numpy.linalg.norm",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
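control_pure_pursuit in the convoy record above reduces to the classic pure-pursuit steering law: with the look-ahead target at (x, y) in the rear-axle frame, steer = atan(2 * L * y / d^2), where L is the wheelbase and d the look-ahead distance, clipped to the wheel's limit and normalised to [-1, 1]. A dependency-free sketch of just that law; the example numbers are illustrative, not CARLA output:

import math

def pure_pursuit_steer_norm(x, y, wheelbase, max_steer_deg):
    # (x, y): look-ahead target in the rear-axle frame, metres.
    d2 = x ** 2 + y ** 2                       # squared look-ahead distance
    steer_deg = math.degrees(math.atan(2 * wheelbase * y / d2))
    # Clamp to the vehicle's steering limit and normalise to [-1, 1].
    steer_deg = max(-max_steer_deg, min(max_steer_deg, steer_deg))
    return steer_deg / max_steer_deg

# Target 10 m ahead and 2 m to the right, on a 2.9 m wheelbase car with a
# 70-degree steering limit:
print(pure_pursuit_steer_norm(10.0, 2.0, 2.9, 70.0))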
santhtadi/rest_api_in_django | [
"eae65bce23494e1950b9bd8dfdf6dbab71f1f922"
] | [
"restApi/views.py"
] | [
"from django.http import HttpResponse\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nimport numpy as np\nfrom PIL import Image\nimport io\n\n\n# Create your views here.\ndef index(request):\n return HttpResponse(\"hi there\")\n\n\nclass SendImage(APIView):\n @staticmethod\n def check_validity(req):\n ret = True\n message = \"\"\n keys = [w for w in req.keys()]\n if \"image\" not in keys:\n ret = False\n message += \"image is not appended, \" \\\n \"try appending the image in header files with key 'image', please refer to \" \\\n \"https://github.com/santhtadi/rest_api_in_django \" \\\n \"for more details ; \"\n return ret, message\n\n # post is responsible for receiving files\n # develop get, put and delete according to your need\n def post(self, request):\n # print the data in request to dashboard\n print(request.data)\n # convert the request data to a dictionary object in python\n req = dict(request.data)\n # check if all the required files are appended or not\n valid, error_message = self.check_validity(req)\n if not valid:\n return Response({\"message\": error_message}, status=status.HTTP_400_BAD_REQUEST)\n # read the image as bytes\n by = req['image'][0].read()\n # convert bytes as image using pillow library\n img = Image.open(io.BytesIO(by)).convert('RGB')\n # create an array using numpy\n image_in_rgb_format = np.array(img)\n # change RGB to BGR format for using with opencv library\n image_in_opencv_format = image_in_rgb_format[:, :, ::-1].copy()\n # returning size of image as output\n return Response({\"image_size\": image_in_opencv_format.shape}, status=status.HTTP_200_OK)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
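The SendImage.post handler in the record above does its image conversion in three steps: decode the uploaded bytes with Pillow, force RGB, then reverse the channel axis for OpenCV's BGR order. That conversion stands alone; a small sketch with a synthetic image (the helper name is illustrative):

import io
import numpy as np
from PIL import Image

def bytes_to_bgr(image_bytes):
    img = Image.open(io.BytesIO(image_bytes)).convert('RGB')  # decode to RGB
    rgb = np.array(img)              # (H, W, 3) uint8, RGB channel order
    return rgb[:, :, ::-1].copy()    # reverse last axis -> BGR for OpenCV

# Round-trip example: encode a tiny red image to bytes, then convert back.
buf = io.BytesIO()
Image.new('RGB', (4, 2), (255, 0, 0)).save(buf, format='PNG')
bgr = bytes_to_bgr(buf.getvalue())
print(bgr.shape, bgr[0, 0])          # (2, 4, 3) [  0   0 255]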
UtkarshK10/NLP-Spam-Filter | [
"795ea74897bd32cd3ce345e78f8c5d772a6da350"
] | [
"spam_filter.py"
] | [
"import sys\nimport nltk\nimport sklearn\n\n\nimport pandas as pd\nimport numpy as np\n\ndf= pd.read_table('SMSSpamCollection',header= None, encoding='utf-8')\n\n\nclasses = df[0]\nprint(classes.value_counts())\n\n#Preprocess the data\n\n\"\"\"\n0= ham\n1=spam\nfor this we use label encoder\n\"\"\"\nfrom sklearn.preprocessing import LabelEncoder\n\nencoder=LabelEncoder()\nY=encoder.fit_transform(classes)\n\n\n#store the sms data\ntext_messages = df[1]\n\n\n\n\n#replace email addresses with emailaddr\nprocessed= text_messages.str.replace(r'^.+@[^\\.].*\\.[a-z]{2,}$','emailaddr')\n\n#replace urls with webaddress\nprocessed= processed.str.replace(r'^http\\://[a-zA-Z0-9\\-\\.]+\\.[a-zA-Z]{2,3}(/\\S*)?$','webaddress')\n\n#replace money symbols with 'moneysymb'\nprocessed=processed.str.replace(r'£|\\$','moneysymb')\n\n#replace 10 digit number with 'phonenumber'\nprocessed= processed.str.replace(r'^\\(?[\\d]{3}\\)?[\\s-]?[\\d]{3}[\\s-]?[\\d]{4}$','phonenumber')\n\n#replace normal numbers with 'numbr' \nprocessed=processed.str.replace(r'\\d+(\\.\\d+)?','numbr')\n\n\n\n#remove punctuation\n\nprocessed=processed.str.replace(r'[^\\w\\d\\s]','')\nprocessed=processed.str.replace(r'\\s+',' ')\nprocessed=processed.str.lower()\n\n\n# remove stop words\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nstop_words=set(stopwords.words('english'))\nprocessed=processed.apply(lambda x : ' '.join(term for term in x.split() if term not in stop_words ))\n\n# Stemming - like,likes,liked ~like\nps=nltk.PorterStemmer()\nprocessed=processed.apply(lambda x : ' '.join(ps.stem(term) for term in x.split()))\n\n\n\n#Tokenizing\nnltk.download('punkt')\nfrom nltk.tokenize import word_tokenize\n\nall_words=[]\n\nfor message in processed:\n words=word_tokenize(message)\n for w in words:\n all_words.append(w)\n \nall_words= nltk.FreqDist(all_words)\n\n\n#print the total number of words and 15 most common words\n'''\nprint('Number of words:{}'.format(len(all_words)))\nprint('Most Common Words:{}'.format(all_words.most_common(15)))\n'''\n\n#using the 1500 most common words as features\nword_features=list(all_words.keys())[:1500]\n\n\n#defining find a feature function\ndef find_features(message):\n words=word_tokenize(message)\n features={}\n for word in word_features:\n features[word]=(word in words)\n return features\n\n#example\nfeatures = find_features(processed[0])\nfor key,value in features.items():\n if value == True:\n print(key)\n \n# zipper method for appending i/p - o/p\ndef zipper(x, y):\n\tsize = len(x) if len(x) < len(y) else len(y)\n\tretList = []\n\tfor i in range(size):\n\t\tretList.append((x[i], y[i]))\n\treturn retList \n\n \n#find features for all these messages\nmessages = zipper(processed,Y)\n\n#define a seed for reproducibility\nseed=1\nnp.random.seed(seed)\nnp.random.shuffle(messages)\nfeaturesets=[(find_features(text),label) for (text,label) in messages]\n\n#split training and testing data using sklearn\nfrom sklearn import model_selection\ntraining,testing = model_selection.train_test_split(featuresets,test_size=0.25,random_state=seed)\n'''\nprint('Training: {}'.format(len(training)))\nprint('Testing: {}'.format(len(testing)))\n'''\n\n#Scikitlearn classifiers with nltk\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import SVC\nfrom 
sklearn.metrics import classification_report,accuracy_score,confusion_matrix\n\n\n#Define models to train and comparing best model on its accuracy\nnames=['K Nearest Neighbors','Decision Tree','Random Forest','Logistic Regression','SGD Classifier','Naive Bayes','SVM Linear']\nclassifiers=[\n KNeighborsClassifier(),\n DecisionTreeClassifier(),\n RandomForestClassifier(),\n LogisticRegression(),\n SGDClassifier(max_iter=100),\n MultinomialNB(),\n SVC(kernel='linear')\n \n ]\n\nmodels = zipper(names,classifiers)\n\n#Wrap models in nltk and find their accuracy then select best method\nfrom nltk.classify.scikitlearn import SklearnClassifier\n\nfor name,model in models:\n nltk_model=SklearnClassifier(model)\n nltk_model.train(training)\n accuracy=nltk.classify.accuracy(nltk_model,testing)*100\n print('{}: Accuracy: {}'.format(name,accuracy))\n \n#ensemble method -- Voting Classifier for better accuracy\n \nfrom sklearn.ensemble import VotingClassifier\n\nnames=['K Nearest Neighbors','Decision Tree','Random Forest','Logistic Regression','SGD Classifier','Naive Bayes','SVM Linear']\nclassifiers=[\n KNeighborsClassifier(),\n DecisionTreeClassifier(),\n RandomForestClassifier(),\n LogisticRegression(),\n SGDClassifier(max_iter=100),\n MultinomialNB(),\n SVC(kernel='linear')\n \n ]\n \nmodels = zipper(names,classifiers)\n# n_jobs=-1 means all algo can run in parallel\nnltk_ensemble= SklearnClassifier(VotingClassifier(estimators=models,voting='hard',n_jobs= -1))\nnltk_ensemble.train(training)\naccuracy=nltk.classify.accuracy(nltk_ensemble,testing)*100\nprint('Ensemble Method Accuracy: {}'.format(accuracy))\n\n#make class label predictions\ntxt_features,labels=zip(*testing)\nprediction = nltk_ensemble.classify_many(txt_features)\n\n#print a confusion matrix and a classification report\nprint(classification_report(labels,prediction))\npd.DataFrame(\n confusion_matrix(labels,prediction),\n index=[['actual','actual'],['ham','spam']],\n columns=[['predicted','predicted'],['ham','spam']]\n )\n"
] | [
[
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.model_selection.train_test_split",
"numpy.random.shuffle",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.VotingClassifier",
"pandas.read_table",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.confusion_matrix",
"sklearn.svm.SVC",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.classification_report",
"sklearn.linear_model.SGDClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
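A note on the record above: the shuffle step is only reproducible because `np.random.seed(seed)` is called as a function. The common slip `np.random.seed = seed` rebinds the function to an integer and silently leaves the generator unseeded. A minimal sketch of the intended shuffle-and-split pattern, using a hypothetical toy list of `(text, label)` pairs:

```python
import numpy as np
from sklearn.model_selection import train_test_split

seed = 1
np.random.seed(seed)  # call the function; assigning to it would disable seeding

# Hypothetical stand-in for the (message, label) pairs built by zipper()
data = [('message {}'.format(i), i % 2) for i in range(8)]

np.random.shuffle(data)  # in-place shuffle, now deterministic across runs
train, test = train_test_split(data, test_size=0.25, random_state=seed)
print(len(train), len(test))  # 6 2
```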
jld23/saspy | [
"47adeb5b9e298e6b9ec017f850245e318f2faa57"
] | [
"saspy/sasiocom.py"
] | [
"#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport datetime\nimport csv\nimport io\nimport numbers\nimport os\nimport shlex\nimport sys\nimport warnings\n\nimport logging\nlogger = logging.getLogger('saspy')\n\ntry:\n from win32com.client import dynamic\nexcept ImportError:\n pass\n\ntry:\n import pandas as pd\nexcept ImportError:\n pass\n\n\nclass SASConfigCOM(object):\n \"\"\"\n This object is not intended to be used directly. Instantiate a SASSession\n object instead.\n \"\"\"\n NO_OVERRIDE = ['kernel', 'sb']\n\n def __init__(self, **kwargs):\n self._kernel = kwargs.get('kernel')\n\n session = kwargs['sb']\n sascfg = session.sascfg.SAScfg\n name = session.sascfg.name\n cfg = getattr(sascfg, name)\n opts = getattr(sascfg, 'SAS_config_options', {})\n outs = getattr(sascfg, 'SAS_output_options', {})\n\n self.host = cfg.get('iomhost')\n self.port = cfg.get('iomport')\n self.user = cfg.get('omruser')\n self.pw = cfg.get('omrpw')\n self.authkey = cfg.get('authkey')\n self.class_id = cfg.get('class_id', '440196d4-90f0-11d0-9f41-00a024bb830c')\n self.provider = cfg.get('provider')\n self.encoding = cfg.get('encoding', '')\n\n self.output = outs.get('output', 'html5')\n\n self.verbose = opts.get('verbose', True)\n self.verbose = kwargs.get('verbose', self.verbose)\n\n self._lock = opts.get('lock_down', True)\n self._prompt = session.sascfg._prompt\n\n if self.authkey is not None:\n self._set_authinfo()\n\n for key, value in filter(lambda x: x[0] not in self.NO_OVERRIDE, kwargs.items()):\n self._try_override(key, value)\n\n def _set_authinfo(self):\n \"\"\"\n Attempt to set the session user's credentials based on provided\n key to read from ~/.authinfo file. See .authinfo documentation\n here: https://documentation.sas.com/api/docsets/authinfo/9.4/content/authinfo.pdf.\n\n This method supports a subset of the .authinfo spec, in accordance with\n other IO access methods. This method will only parse `user` and `password`\n arguments, but does support spaces in values if the value is quoted. 
Use\n        python's `shlex` library to parse these values.\n        \"\"\"\n        if os.name == 'nt':\n            authfile = os.path.expanduser(os.path.join('~', '_authinfo'))\n        else:\n            authfile = os.path.expanduser(os.path.join('~', '.authinfo'))\n\n        try:\n            with open(authfile, 'r') as f:\n                # Take first matching line found\n                parsed = (shlex.split(x, posix=False) for x in f.readlines())\n                authline = next(filter(lambda x: x[0] == self.authkey, parsed), None)\n\n        except OSError:\n            logger.error('Error trying to read {}'.format(authfile))\n            authline = None\n\n        if authline is None:\n            logger.error('Key {} not found in authinfo file: {}'.format(self.authkey, authfile))\n        elif len(authline) < 5:\n            logger.error('Incomplete authinfo credentials in {}; key: {}'.format(authfile, self.authkey))\n        else:\n            # Override user/pw if previously set\n            # `authline` is in the following format:\n            #     AUTHKEY username USERNAME password PASSWORD\n            self.user = authline[2]\n            self.pw = authline[4]\n\n    def _try_override(self, attr, value):\n        \"\"\"\n        Attempt to override a configuration file option if `self._lock` is\n        False. Otherwise, warn the user.\n        :param attr: Configuration attribute.\n        :param value: Configuration value.\n        \"\"\"\n        if self._lock is False:\n            setattr(self, attr, value)\n        else:\n            err = \"Param '{}' was ignored due to configuration restriction\".format(attr)\n            logger.warning(err)\n\n\nclass SASSessionCOM(object):\n    \"\"\"\n    Initiate a connection to a SAS server and provide access for Windows\n    clients without the Java dependency. Utilizes available COM objects for\n    client communication with the IOM interface.\n    It may be possible to communicate with local SAS instances as well,\n    although this functionality is untested. A slight change may be\n    required to the `_startsas` method to support local instances.\n    \"\"\"\n    SAS_APP = 'SASApp'\n    HTML_RESULT_FILE = 'saspy_results.html'\n\n    # SASObjectManager.Protocols Enum values\n    PROTOCOL_COM = 0\n    PROTOCOL_IOM = 2\n\n    # SAS Date/Time/Datetime formats\n    FMT_DEFAULT_DATE_NAME = 'E8601DA'\n    FMT_DEFAULT_DATE_LENGTH = 10\n    FMT_DEFAULT_DATE_PRECISION = 0\n    FMT_DEFAULT_TIME_NAME = 'E8601TM'\n    FMT_DEFAULT_TIME_LENGTH = 15\n    FMT_DEFAULT_TIME_PRECISION = 6\n    FMT_DEFAULT_DATETIME_NAME = 'E8601DT'\n    FMT_DEFAULT_DATETIME_LENGTH = 26\n    FMT_DEFAULT_DATETIME_PRECISION = 6\n\n    # Pandas data types\n    PD_NUM_TYPE = ('i', 'u', 'f', 'c')\n    PD_STR_TYPE = ('S', 'U', 'V')\n    PD_DT_TYPE = ('M')\n    PD_BOOL_TYPE = ('b')\n\n    # ADODB RecordSet CursorTypeEnum values\n    CURSOR_UNSPECIFIED = -1\n    CURSOR_FORWARD = 0\n    CURSOR_KEYSET = 1\n    CURSOR_DYNAMIC = 2\n    CURSOR_STATIC = 3\n\n    # ADODB RecordSet LockTypeEnum values\n    LOCK_UNSPECIFIED = -1\n    LOCK_READONLY = 1\n    LOCK_PESSIMISTIC = 2\n    LOCK_OPTIMISTIC = 3\n    LOCK_BATCH_OPTIMISTIC = 4\n\n    # ADODB RecordSet CommandTypeEnum values\n    CMD_UNSPECIFIED = -1\n    CMD_TEXT = 1\n    CMD_TABLE = 2\n    CMD_STORED_PROC = 4\n    CMD_UNKNOWN = 8\n    CMD_FILE = 256\n    CMD_TABLE_DIRECT = 512\n\n    # ADODB Connection SchemaEnum values\n    SCHEMA_COLUMNS = 4\n    SCHEMA_TABLES = 20\n\n    # ADODB ObjectStateEnum values\n    STATE_CLOSED = 0\n    STATE_OPEN = 1\n\n    # FileService StreamOpenMode values\n    STREAM_READ = 1\n    STREAM_WRITE = 2\n\n    def __init__(self, **kwargs):\n        self._log = ''\n        self.sascfg = SASConfigCOM(**kwargs)\n        self._sb = kwargs.get('sb')\n\n        self.pid = self._startsas()\n\n    def __del__(self):\n        if self.adodb.State == self.STATE_OPEN:\n            self._endsas()\n\n    def _startsas(self) -> str:\n        \"\"\"\n        Create a workspace and open a connection with SAS.\n        :return [str]:\n        \"\"\"\n        if getattr(self, 
'workspace', None) is not None:\n # Do not create a new connection\n return self.workspace.UniqueIdentifier\n\n factory = dynamic.Dispatch('SASObjectManager.ObjectFactoryMulti2')\n server = dynamic.Dispatch('SASObjectManager.ServerDef')\n\n self.keeper = dynamic.Dispatch('SASObjectManager.ObjectKeeper')\n self.adodb = dynamic.Dispatch('ADODB.Connection')\n\n if self.sascfg.host is None:\n # Create a local connection.\n server.MachineDNSName = '127.0.0.1'\n server.Port = 0\n server.Protocol = self.PROTOCOL_COM\n\n user = None\n password = None\n else:\n # Create a remote connection. The following are required:\n # 1. host\n # 2. port\n # 3. class_id\n server.MachineDNSName = self.sascfg.host\n server.Port = self.sascfg.port\n server.Protocol = self.PROTOCOL_IOM\n server.ClassIdentifier = self.sascfg.class_id\n\n if self.sascfg.user is not None:\n user = self.sascfg.user\n else:\n user = self.sascfg._prompt('Username: ')\n\n if self.sascfg.pw is not None:\n password = self.sascfg.pw\n else:\n password = self.sascfg._prompt('Password: ', pw=True)\n\n self.workspace = factory.CreateObjectByServer(self.SAS_APP, True,\n server, user, password)\n\n self.keeper.AddObject(1, 'WorkspaceObject', self.workspace)\n self.adodb.Open('Provider={}; Data Source=iom-id://{}'.format(\n self.sascfg.provider, self.workspace.UniqueIdentifier))\n\n ll = self.submit(\"options svgtitle='svgtitle'; options validvarname=any validmemname=extend pagesize=max nosyntaxcheck; ods graphics on;\", \"text\")\n if self.sascfg.verbose:\n logger.info(\"SAS Connection established. Workspace UniqueIdentifier is \"+str(self.workspace.UniqueIdentifier)+\"\\n\")\n\n return self.workspace.UniqueIdentifier\n\n def _endsas(self):\n \"\"\"\n Close a connection with SAS.\n \"\"\"\n self.adodb.Close()\n self.keeper.RemoveObject(self.workspace)\n self.workspace.Close()\n if self.sascfg.verbose:\n logger.info(\"SAS Connection terminated. Workspace UniqueIdentifierid was \"+str(self.pid))\n\n def _getlst(self, buf: int=2048) -> str:\n \"\"\"\n Flush listing.\n :option buf [int]: Download buffer. Default 2048.\n :return [str]:\n \"\"\"\n flushed = self.workspace.LanguageService.FlushList(buf)\n result = flushed\n while flushed:\n flushed = self.workspace.LanguageService.FlushList(buf)\n result += flushed\n\n return result\n\n def _getlog(self, buf: int=2048) -> str:\n \"\"\"\n Flush log.\n :option buf [int]: Download buffer. Default 2048.\n :return [str]:\n \"\"\"\n flushed = self.workspace.LanguageService.FlushLog(buf)\n result = flushed\n while flushed:\n flushed = self.workspace.LanguageService.FlushLog(buf)\n result += flushed\n\n # Store flush result in running log\n self._log += result\n\n if result.count('ERROR:') > 0:\n warnings.warn(\"Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem\")\n self._sb.check_error_log = True\n\n return result\n\n def _getfile(self, fname: str, buf: int=2048, decode: bool=False) -> str:\n \"\"\"\n Use object file service to download a file from the provider.\n :param fname [str]: Filename.\n :option buf [int]: Download buffer. Default 2048.\n :option decode [bool]: Decode the byte stream.\n :return [str]:\n \"\"\"\n fobj = self.workspace.FileService.AssignFileref('outfile', 'DISK', fname, '', '')\n\n # Use binary stream to support text and image transfers. 
The binary\n # stream interface does not require a max line length, which allows\n # support of arbitrarily wide tables.\n stream = fobj[0].OpenBinaryStream(self.STREAM_READ)\n flushed = stream.Read(buf)\n result = bytes(flushed)\n while flushed:\n flushed = stream.Read(buf)\n result += bytes(flushed)\n\n stream.Close()\n self.workspace.FileService.DeassignFileref(fobj[0].FilerefName)\n\n if decode is True:\n result = result.decode(self.sascfg.encoding, errors='replace')\n\n return result\n\n def _gethtmlfn(self) -> str:\n \"\"\"\n Return the path of the output HTML file. This is the combination of\n the `workpath` attribute and `HTML_RESULT_FILE` constant.\n :return [str]:\n \"\"\"\n return self._sb.workpath + self.HTML_RESULT_FILE\n\n def _reset(self):\n \"\"\"\n Reset the LanguageService interface to its initial state with respect\n to token scanning. Use it to release the LanguageService from an error\n state associated with the execution of invalid syntax or incomplete\n program source. This primarily occurs when a statement is submitted\n without a trailing semicolon.\n \"\"\"\n self.workspace.LanguageService.Reset()\n\n def _tablepath(self, table: str, libref: str=None) -> str:\n \"\"\"\n Define a sas dataset path based on a table name and optional libref\n name. Will return a two-level or one-level path string based on the\n provided arguments. One-level names are of this form: `table`, while\n two-level names are of this form: `libref.table`. If libref is not\n defined, SAS will implicitly define the library to WORK or USER. The\n USER library needs to have been defined previously in SAS, otherwise\n WORK is the default option. If the `libref` parameter is any value\n that evaluates to `False`, the one-level path is returned.\n :param table [str]: SAS data set name.\n :option libref [str]: Optional library name.\n :return [str]:\n \"\"\"\n if not libref:\n path = \"'{}'n\".format(table.strip())\n else:\n path = \"{}.'{}'n\".format(libref, table.strip())\n\n return path\n\n def _schema(self, table: str, libref: str=None) -> dict:\n \"\"\"\n Request a table schema for a given `libref.table`.\n :param table [str]: Table name\n :option libref [str]: Library name.\n :return [dict]:\n \"\"\"\n #tablepath = self._tablepath(table, libref=libref)\n if not libref:\n tablepath = table\n else:\n tablepath = \"{}.{}\".format(libref, table)\n\n criteria = [None, None, tablepath]\n\n schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria)\n schema.MoveFirst()\n\n metadata = {}\n while not schema.EOF:\n col_info = {x.Name: x.Value for x in schema.Fields}\n if col_info['FORMAT_NAME'] in self._sb.sas_date_fmts:\n col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(days=x) if x else x\n elif col_info['FORMAT_NAME'] in self._sb.sas_datetime_fmts:\n col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(seconds=x) if x else x\n # elif FIXME TIME FORMATS\n else:\n col_info['CONVERT'] = lambda x: x\n\n metadata[col_info['COLUMN_NAME']] = col_info\n schema.MoveNext()\n\n schema.Close()\n\n return metadata\n\n def _prompt(self, key: str, hide: bool=False) -> tuple:\n \"\"\"\n Ask the user for input about a given key.\n :param key [str]: Key name.\n :option hide [bool]: Hide user keyboard input.\n :return [tuple]:\n \"\"\"\n input_ok = False\n while input_ok is False:\n val = self.sascfg._prompt('Enter value for macro variable {} '.format(key), pw=hide)\n\n if val is None:\n raise RuntimeError(\"No value for prompted macro variable provided.\")\n\n if val:\n 
input_ok = True\n else:\n print('Input not valid.')\n\n return (key, val)\n\n def _asubmit(self, code: str, results: str='html'):\n \"\"\"\n Submit any SAS code. Does not return a result.\n :param code [str]: SAS statements to execute.\n \"\"\"\n # Support html ods\n if results.lower() == 'html':\n ods_open = \"\"\"\n ods listing close;\n ods {} (id=saspy_internal) options(bitmap_mode='inline')\n file=\"{}\"\n device=svg\n style={};\n ods graphics on / outputfmt=png;\n \"\"\".format(self.sascfg.output, self._gethtmlfn(), self._sb.HTML_Style)\n\n ods_close = \"\"\"\n ods {} (id=saspy_internal) close;\n ods listing;\n \"\"\".format(self.sascfg.output)\n else:\n ods_open = ''\n ods_close = ''\n\n # Submit program\n full_code = ods_open + code + ods_close\n self.workspace.LanguageService.Submit(full_code)\n\n def submit(self, code: str, results: str='html', prompt: dict=None, **kwargs) -> dict:\n \"\"\"\n Submit any SAS code. Returns log and listing as dictionary with keys\n LOG and LST.\n :param code [str]: SAS statements to execute.\n :option results [str]: Result format. Options: HTML, TEXT. Default HTML.\n :option prompt [dict]: Create macro variables from prompted keys.\n \"\"\"\n RESET = \"\"\";*';*\";*/;quit;run;\"\"\"\n prompt = prompt if prompt is not None else {}\n printto = kwargs.pop('undo', False)\n\n macro_declare = ''\n for key, value in prompt.items():\n macro_declare += '%let {} = {};\\n'.format(*self._prompt(key, value))\n\n # Submit program\n self._asubmit(RESET + macro_declare + code + RESET, results)\n\n # Retrieve listing and log\n log = self._getlog()\n if results.lower() == 'html':\n # Make the following replacements in HTML listing:\n # 1. Swap \\x0c for \\n\n # 2. Change body class selector\n # 3. Increase font size\n listing = self._getfile(self._gethtmlfn(), decode=True) \\\n .replace(chr(12), chr(10)) \\\n .replace('<body class=\"c body\">', '<body class=\"l body\">') \\\n .replace('font-size: x-small;', 'font-size: normal;')\n else:\n listing = self._getlst()\n\n # Invalid syntax will put the interface in to an error state. Reset\n # the LanguageService to prevent further errors.\n # FIXME: In the future, may only want to reset on ERROR. 
However, this\n # operation seems pretty lightweight, so calling `_reset()` on all\n # submits is not a burden.\n self._reset()\n\n if printto:\n self._asubmit(\"\\nproc printto;run;\\n\", 'text')\n log += self._getlog()\n\n self._sb._lastlog = log\n return {'LOG': log, 'LST': listing}\n\n def saslog(self) -> str:\n \"\"\"\n Return the full SAS log.\n :return [str]:\n \"\"\"\n return self._log\n\n def exist(self, table: str, libref: str=None) -> bool:\n \"\"\"\n Determine if a `libref.table` exists.\n :param table [str]: Table name\n :option libref [str]: Library name.\n :return [bool]:\n \"\"\"\n #tablepath = self._tablepath(table, libref=libref)\n #criteria = [None, None, tablepath]\n\n #schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria)\n #exists = not schema.BOF\n\n #schema.Close()\n\n #return exists\n\n code = 'data _null_; e = exist(\"'\n if len(libref):\n code += libref+\".\"\n code += \"'\"+table.strip()+\"'n\"+'\"'+\");\\n\"\n code += 'v = exist(\"'\n if len(libref):\n code += libref+\".\"\n code += \"'\"+table.strip()+\"'n\"+'\"'+\", 'VIEW');\\n if e or v then e = 1;\\n\"\n code += \"te='TABLE_EXISTS='; put te e;run;\\n\"\n\n ll = self.submit(code, \"text\")\n\n l2 = ll['LOG'].rpartition(\"TABLE_EXISTS= \")\n l2 = l2[2].partition(\"\\n\")\n exists = int(l2[0])\n\n return bool(exists)\n\n\n def read_sasdata(self, table: str, libref: str=None, dsopts: dict=None) -> tuple:\n \"\"\"\n Read any SAS dataset and return as a tuple of header, rows\n :param table [str]: Table name\n :option libref [str]: Library name.\n :option dsopts [dict]: Dataset options.\n :return [tuple]:\n \"\"\"\n TARGET = '_saspy_sd2df'\n EXPORT = \"\"\"\n data {tgt};\n set {tbl} {dopt};\n run;\n \"\"\"\n\n dsopts = self._sb._dsopts(dsopts) if dsopts is not None else ''\n tablepath = self._tablepath(table, libref=libref)\n recordset = dynamic.Dispatch('ADODB.RecordSet')\n\n # Create an intermediate dataset with `dsopts` applied\n export = EXPORT.format(tgt=TARGET, tbl=tablepath, dopt=dsopts)\n self.workspace.LanguageService.Submit(export)\n meta = self._schema(TARGET)\n\n # Connect RecordSet object to ADODB connection with params:\n # Cursor: Forward Only\n # Lock: Read Only\n # Command: Table Direct\n recordset.Open(TARGET, self.adodb, self.CURSOR_FORWARD,\n self.LOCK_READONLY, self.CMD_TABLE_DIRECT)\n recordset.MoveFirst()\n\n header = [x.Name for x in recordset.Fields]\n rows = []\n while not recordset.EOF:\n rows.append([meta[x.Name]['CONVERT'](x.Value) for x in recordset.Fields])\n recordset.MoveNext()\n\n recordset.Close()\n\n return (header, rows, meta)\n\n def read_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=False, opts: dict=None):\n \"\"\"\n Submit an import job to the SAS workspace.\n :param filepath [str]: File URI.\n :param table [str]: Table name.\n :option libref [str]: Library name.\n :option nosob [bool]: Return the SAS code instead of executing it.\n :option opts [dict]: SAS PROC IMPORT options.\n \"\"\"\n opts = opts if opts is not None else {}\n filepath = 'url ' + filepath if filepath.lower().startswith('http') else filepath\n tablepath = self._tablepath(table, libref=libref)\n\n proc_code = \"\"\"\n filename csv_file \"{}\";\n proc import datafile=csv_file out={} dbms=csv replace;\n {}\n run;\n \"\"\".format(filepath.replace('\"', '\"\"'), tablepath, self._sb._impopts(opts))\n\n if nosub is True:\n return proc_code\n else:\n return self.submit(proc_code, 'text')\n\n def write_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=True, dsopts: 
dict=None, opts: dict=None):\n        \"\"\"\n        Submit an export job to the SAS workspace.\n        :param filepath [str]: File URI.\n        :param table [str]: Table name.\n        :option libref [str]: Library name.\n        :option nosub [bool]: Return the SAS code instead of executing it.\n        :option opts [dict]: SAS PROC EXPORT options.\n        :option dsopts [dict]: SAS dataset options.\n        \"\"\"\n        opts = opts if opts is not None else {}\n        dsopts = dsopts if dsopts is not None else {}\n        tablepath = self._tablepath(table, libref=libref)\n\n        proc_code = \"\"\"\n            filename csv_file \"{}\";\n            proc export data={} {} outfile=csv_file dbms=csv replace;\n                {}\n            run;\n        \"\"\".format(filepath.replace('\"', '\"\"'), tablepath, self._sb._dsopts(dsopts), self._sb._expopts(opts))\n\n        if nosub is True:\n            return proc_code\n        else:\n            return self.submit(proc_code, 'text')['LOG']\n\n    def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a',\n                          libref: str =\"\", keep_outer_quotes: bool=False,\n                          embedded_newlines: bool=True,\n                          LF: str = '\\x01', CR: str = '\\x02',\n                          colsep: str = '\\x03', colrep: str = ' ',\n                          datetimes: dict={}, outfmts: dict={}, labels: dict={},\n                          outdsopts: dict={}, encode_errors = None, char_lengths = None,\n                          **kwargs):\n        \"\"\"\n        Create a SAS dataset from a pandas data frame.\n        :param df [pd.DataFrame]: Pandas data frame containing data to write.\n        :param table [str]: Table name.\n        :option libref [str]: Library name. Default work.\n\n        None of these options are used by this access method; they are needed for other access methods\n        keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.\n        embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them imported into the SAS data set\n        LF - if embedded_newlines=True, the character to use for LF when transferring the data; defaults to '\\x01'\n        CR - if embedded_newlines=True, the character to use for CR when transferring the data; defaults to '\\x02'\n        colsep - the column separator character used for streaming the delimited data to SAS; defaults to '\\x03'\n        colrep - the char to convert to for any embedded colsep, LF, CR chars in the data; defaults to ' '\n        datetimes - not implemented yet in this access method\n        outfmts - not implemented yet in this access method\n        labels - not implemented yet in this access method\n        outdsopts - not implemented yet in this access method\n        encode_errors - not implemented yet in this access method\n        char_lengths - not implemented yet in this access method\n        \"\"\"\n        DATETIME_NAME = 'DATETIME26.6'\n        DATETIME_FMT = '%Y-%m-%dT%H:%M:%S.%f'\n\n        if self.sascfg.verbose:\n            if keep_outer_quotes != False:\n                logger.warning(\"'keep_outer_quotes=' is not used with this access method. option ignored.\")\n            if embedded_newlines != True:\n                logger.warning(\"'embedded_newlines=' is not used with this access method. option ignored.\")\n            if LF != '\\x01' or CR != '\\x02' or colsep != '\\x03':\n                logger.warning(\"'LF=, CR= and colsep=' are not used with this access method. option(s) ignored.\")\n            if datetimes != {}:\n                logger.warning(\"'datetimes=' is not used with this access method. option ignored.\")\n            if outfmts != {}:\n                logger.warning(\"'outfmts=' is not used with this access method. option ignored.\")\n            if labels != {}:\n                logger.warning(\"'labels=' is not used with this access method. option ignored.\")\n            if outdsopts != {}:\n                logger.warning(\"'outdsopts=' is not used with this access method. 
option ignored.\")\n if encode_errors:\n logger.warning(\"'encode_errors=' is not used with this access method. option ignored.\")\n if char_lengths:\n logger.warning(\"'char_lengths=' is not used with this access method. option ignored.\")\n\n tablepath = self._tablepath(table, libref=libref)\n\n if type(df.index) != pd.RangeIndex:\n warnings.warn(\"Note that Indexes are not transferred over as columns. Only actual coulmns are transferred\")\n\n columns = []\n formats = {}\n for i, name in enumerate(df.columns):\n if df[name].dtypes.kind in self.PD_NUM_TYPE:\n # Numeric type\n definition = \"'{}'n num\".format(name)\n formats[name] = lambda x: str(x) if pd.isnull(x) is False else 'NULL'\n elif df[name].dtypes.kind in self.PD_STR_TYPE:\n # Character type\n # NOTE: If a character string contains a single `'`, replace\n # it with `''`. This is the SAS equivalent to `\\'`.\n length = df[name].map(len).max()\n definition = \"'{}'n char({})\".format(name, length)\n formats[name] = lambda x: \"'{}'\".format(x.replace(\"'\", \"''\")) if pd.isnull(x) is False else 'NULL'\n elif df[name].dtypes.kind in self.PD_DT_TYPE:\n # Datetime type\n definition = \"'{}'n num informat={} format={}\".format(name, DATETIME_NAME, DATETIME_NAME)\n formats[name] = lambda x: \"'{:{}}'DT\".format(x, DATETIME_FMT) if pd.isnull(x) is False else 'NULL'\n else:\n # Default to character type\n # NOTE: If a character string contains a single `'`, replace\n # it with `''`. This is the SAS equivalent to `\\'`.\n length = df[name].map(str).map(len).max()\n definition = \"'{}'n char({})\".format(name, length)\n formats[name] = lambda x: \"'{}'\".format(x.replace(\"'\", \"''\")) if pd.isnull(x) is False else 'NULL'\n\n columns.append(definition)\n\n sql_values = []\n for index, row in df.iterrows():\n vals = []\n for i, col in enumerate(row):\n func = formats[df.columns[i]]\n vals.append(func(col))\n\n sql_values.append('values({})'.format(', '.join(vals)))\n\n sql_create = 'create table {} ({});'.format(tablepath, ', '.join(columns))\n sql_insert = 'insert into {} {};'.format(tablepath, '\\n'.join(sql_values))\n\n self.adodb.Execute(sql_create)\n self.adodb.Execute(sql_insert)\n return None\n\n def sasdata2dataframe(self, table: str, libref: str=None, dsopts: dict=None, method: str='', **kwargs) -> 'pd.DataFrame':\n \"\"\"\n Create a pandas data frame from a SAS dataset.\n :param table [str]: Table name.\n :option libref [str]: Library name.\n :option dsopts [dict]: Dataset options.\n :option method [str]: Download method.\n :option tempkeep [bool]: Download the csv file if using the csv method.\n :option tempfile [str]: File path for the saved output file.\n :return [pd.DataFrame]:\n \"\"\"\n # strip off unused by this access method options from kwargs\n # so they can't be passes to panda later\n rowsep = kwargs.pop('rowsep', ' ')\n colsep = kwargs.pop('colsep', ' ')\n rowrep = kwargs.pop('rowrep', ' ')\n colrep = kwargs.pop('colrep', ' ')\n\n if method.upper() == 'DISK':\n logger.error(\"This access method doesn't support the DISK method. Try CSV or MEMORY\")\n return None\n\n if method.upper() == 'CSV':\n df = self.sasdata2dataframeCSV(table, libref, dsopts=dsopts, **kwargs)\n else:\n my_fmts = kwargs.pop('my_fmts', False)\n k_dts = kwargs.pop('dtype', None)\n if self.sascfg.verbose:\n if my_fmts != False:\n logger.warning(\"'my_fmts=' is not supported in this access method. option ignored.\")\n if k_dts is not None:\n logger.warning(\"'dtype=' is only used with the CSV version of this method. 
option ignored.\")\n\n header, rows, meta = self.read_sasdata(table, libref, dsopts=dsopts)\n df = pd.DataFrame.from_records(rows, columns=header, **kwargs)\n\n for col in meta.keys():\n if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts:\n df[col] = pd.to_datetime(df[col], errors='coerce')\n elif meta[col]['DATA_TYPE'] == 5:\n df[col] = pd.to_numeric(df[col], errors='coerce')\n\n return df\n\n def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict = None,\n tempfile: str=None, tempkeep: bool=False, **kwargs) -> 'pd.DataFrame':\n \"\"\"\n Create a pandas data frame from a SAS dataset.\n :param table [str]: Table name.\n :option libref [str]: Library name.\n :option dsopts [dict]: Dataset options.\n :option opts [dict]: dictionary containing any of the following Proc Export options(delimiter, putnames)\n :option tempkeep [bool]: Download the csv file if using the csv method.\n :option tempfile [str]: File path for the saved output file.\n :return [pd.DataFrame]:\n \"\"\"\n FORMAT_STRING = '{column} {format}{length}.{precision}'\n EXPORT = \"\"\"\n data _saspy_sd2df;\n format {fmt};\n set {tbl};\n run;\n\n proc export data=_saspy_sd2df {dopt}\n outfile=\"{out}\"\n dbms=csv replace;\n {exopts}\n run;\n \"\"\"\n k_dts = kwargs.get('dtype', None)\n my_fmts = kwargs.pop('my_fmts', False)\n if self.sascfg.verbose:\n if my_fmts != False:\n logger.warning(\"'my_fmts=' is not supported in this access method. option ignored.\")\n\n sas_csv = '{}saspy_sd2df.csv'.format(self._sb.workpath)\n dopts = self._sb._dsopts(dsopts) if dsopts is not None else ''\n tablepath = self._tablepath(table, libref=libref)\n\n expopts = self._sb._expopts(kwargs.pop('opts', {}))\n\n # Convert any date format to one pandas can understand (ISO-8601).\n # Save a reference of the column name in a list so pandas can parse\n # the column during construction.\n datecols = []\n fmtlist = []\n meta = self._schema(table, libref)\n for name, col in meta.items():\n if col['FORMAT_NAME'] in self._sb.sas_date_fmts:\n datecols.append(name)\n col_format = self.FMT_DEFAULT_DATE_NAME\n col_length = self.FMT_DEFAULT_DATE_LENGTH\n col_precis = self.FMT_DEFAULT_DATE_PRECISION\n elif col['FORMAT_NAME'] in self._sb.sas_datetime_fmts:\n datecols.append(name)\n col_format = self.FMT_DEFAULT_DATETIME_NAME\n col_length = self.FMT_DEFAULT_DATETIME_LENGTH\n col_precis = self.FMT_DEFAULT_DATETIME_PRECISION\n # elif FIXME TIME FORMATS\n else:\n col_format = col['FORMAT_NAME']\n col_length = col['FORMAT_LENGTH']\n col_precis = col['FORMAT_DECIMAL']\n\n if col['FORMAT_NAME']:\n full_format = FORMAT_STRING.format(\n column=col['COLUMN_NAME'],\n format=col_format,\n length=col_length,\n precision=col_precis)\n\n fmtlist.append(full_format)\n\n export = EXPORT.format(fmt=' '.join(fmtlist),\n tbl=tablepath,\n dopt=dopts,\n exopts=expopts,\n out=sas_csv)\n\n # Use `LanguageService.Submit` instead of `submit` for a slight\n # performance bump. 
We don't need the log or listing here so skip\n # the wrapper function.\n self.workspace.LanguageService.Submit(export)\n\n outstring = self._getfile(sas_csv, decode=True)\n\n # Write temp file if requested by user\n if kwargs.get('tempkeep') is True and kwargs.get('tempfile') is not None:\n with open(kwargs['tempfile'], 'w') as f:\n f.write(outstring)\n\n df = pd.read_csv(io.StringIO(outstring), parse_dates=datecols, **kwargs)\n\n if k_dts is None: # don't override these if user provided their own dtypes\n for col in meta.keys():\n if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts:\n df[col] = pd.to_datetime(df[col], errors='coerce')\n\n return df\n\n def upload(self, local: str, remote: str, overwrite: bool=True, permission: str='', **kwargs):\n \"\"\"\n Upload a file to the SAS server.\n :param local [str]: Local filename.\n :param remote [str]: Local filename.\n :option overwrite [bool]: Overwrite the file if it exists.\n :option permission [str]: See SAS filename statement documentation.\n \"\"\"\n perms = \"PERMISSION='{}'\".format(permission) if permission else ''\n valid = self._sb.file_info(remote, quiet=True)\n\n if valid == {}:\n # Parameter `remote` references a directory. Default to using the\n # filename in `local` path.\n remote_file = remote + self._sb.hostsep + os.path.basename(local)\n elif valid is not None and overwrite is False:\n # Parameter `remote` references a file that exists but we cannot\n # overwrite it.\n # TODO: Raise exception here instead of returning dict\n return {'Success': False,\n 'LOG': 'File {} exists and overwrite was set to False. Upload was stopped.'.format(remote)}\n else:\n remote_file = remote\n\n with open(local, 'rb') as f:\n fobj = self.workspace.FileService.AssignFileref('infile', 'DISK', remote_file, perms, '')\n stream = fobj[0].OpenBinaryStream(self.STREAM_WRITE)\n\n stream.Write(f.read())\n stream.Close()\n self.workspace.FileService.DeassignFileref(fobj[0].FilerefName)\n\n return {'Success': True,\n 'LOG': 'File successfully written using FileService.'}\n\n def download(self, local: str, remote: str, overwrite: bool=True, **kwargs):\n \"\"\"\n Download a file from the SAS server.\n :param local [str]: Local filename.\n :param remote [str]: Local filename.\n :option overwrite [bool]: Overwrite the file if it exists.\n \"\"\"\n valid = self._sb.file_info(remote, quiet=True)\n\n if valid is None:\n # Parameter `remote` references an invalid file path.\n # TODO: Raise exception here instead of returning dict\n return {'Success': False,\n 'LOG': 'File {} does not exist.'.format(remote)}\n elif valid == {}:\n # Parameter `remote` references a directory.\n # TODO: Raise exception here instead of returning dict\n return {'Success': False,\n 'LOG': 'File {} is a directory.'.format(remote)}\n\n if os.path.isdir(local) is True:\n # Parameter `local` references a directory. Default to using the\n # filename in `remote` path.\n local_file = os.path.join(local, remote.rpartition(self._sb.hostsep)[2])\n else:\n local_file = local\n\n with open(local_file, 'wb') as f:\n f.write(self._getfile(remote))\n\n return {'Success': True,\n 'LOG': 'File successfully read using FileService.'}\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.to_datetime",
"pandas.to_numeric",
"pandas.isnull"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
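For context on the saspy record above: `SASConfigCOM._set_authinfo` resolves credentials by scanning an authinfo file for the first line whose leading token matches the configured key, splitting each line with `shlex` so that quoted values may contain spaces. A standalone sketch of that lookup logic, assuming the documented `AUTHKEY username USER password PW` line layout (the function name and return shape here are illustrative, not part of saspy's API):

```python
import os
import shlex

def lookup_authinfo(authkey, authfile=None):
    # Default to ~/_authinfo on Windows and ~/.authinfo elsewhere,
    # mirroring the platform check in _set_authinfo.
    if authfile is None:
        name = '_authinfo' if os.name == 'nt' else '.authinfo'
        authfile = os.path.expanduser(os.path.join('~', name))

    with open(authfile, 'r') as f:
        # posix=False keeps surrounding quotes intact, as in the original
        parsed = (shlex.split(line, posix=False) for line in f if line.strip())
        authline = next((p for p in parsed if p and p[0] == authkey), None)

    # Expected layout: AUTHKEY username USERNAME password PASSWORD
    if authline is None or len(authline) < 5:
        return None
    return authline[2], authline[4]
```

Unlike the original method, this sketch lets `OSError` propagate to the caller rather than logging it, which keeps the example self-contained.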
verult-prowtest/Cirq | [
"653bf210f04635f6d8fde80d37cb25edbab6eb31"
] | [
"cirq/ops/eigen_gate.py"
] | [
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport fractions\nfrom typing import Tuple, Union, List, Optional, cast, TypeVar, NamedTuple, \\\n Iterable\n\nimport abc\n\nimport numpy as np\nimport sympy\n\nfrom cirq import value, protocols\nfrom cirq._compat import gcd\nfrom cirq.ops import raw_types\nfrom cirq.type_workarounds import NotImplementedType\n\n\nTSelf = TypeVar('TSelf', bound='EigenGate')\n\n\nEigenComponent = NamedTuple(\n 'EigenComponent',\n [\n # The θ in λ = exp(i π θ) where λ is a unique eigenvalue. The exponent\n # factor is used, instead of just a raw unit complex number, because it\n # disambiguates several cases. For example, when λ=-1 you can set θ to\n # -1 instead of +1 resulting in square root operations returning -i\n # instead of +i.\n ('eigenvalue_exponent_factor', float),\n\n # The projection matrix onto the eigenspace of the eigenvalue. Must\n # equal Σ_k |λ_k⟩⟨λ_k| where the |λ_k⟩ vectors form an orthonormal\n # basis for the eigenspace.\n ('eigenspace_projector', np.ndarray),\n ]\n)\n\n\[email protected]_equality(distinct_child_types=True, approximate=True)\nclass EigenGate(raw_types.Gate):\n \"\"\"A gate with a known eigendecomposition.\n\n EigenGate is particularly useful when one wishes for different parts of\n the same eigenspace to be extrapolated differently. For example, if a gate\n has a 2-dimensional eigenspace with eigenvalue -1, but one wishes for the\n square root of the gate to split this eigenspace into a part with\n eigenvalue i and a part with eigenvalue -i, then EigenGate allows this\n functionality to be unambiguously specified via the _eigen_components\n method.\n \"\"\"\n\n def __init__(self, *, # Forces keyword args.\n exponent: Union[sympy.Basic, float] = 1.0,\n global_shift: float = 0.0) -> None:\n \"\"\"Initializes the parameters used to compute the gate's matrix.\n\n The eigenvalue of each eigenspace of a gate is computed by\n\n 1. Starting with an angle in half turns as returned by the gate's\n ``_eigen_components`` method:\n\n θ\n\n 2. Shifting the angle by `global_shift`:\n\n θ + s\n\n 3. Scaling the angle by `exponent`:\n\n (θ + s) * e\n\n 4. Converting from half turns to a complex number on the unit circle:\n\n exp(i * pi * (θ + s) * e)\n\n Args:\n exponent: The t in gate**t. Determines how much the eigenvalues of\n the gate are scaled by. For example, eigenvectors phased by -1\n when `gate**1` is applied will gain a relative phase of\n e^{i pi exponent} when `gate**exponent` is applied (relative to\n eigenvectors unaffected by `gate**1`).\n global_shift: Offsets the eigenvalues of the gate at exponent=1.\n In effect, this controls a global phase factor on the gate's\n unitary matrix. 
The factor is:\n\n exp(i * pi * global_shift * exponent)\n\n For example, `cirq.X**t` uses a `global_shift` of 0 but\n `cirq.Rx(t)` uses a `global_shift` of -0.5, which is why\n `cirq.unitary(cirq.Rx(pi))` equals -iX instead of X.\n \"\"\"\n self._exponent = exponent\n self._global_shift = global_shift\n self._canonical_exponent_cached = None\n\n @property\n def exponent(self) -> Union[sympy.Basic, float]:\n return self._exponent\n\n # virtual method\n def _with_exponent(self: TSelf,\n exponent: Union[sympy.Basic, float]) -> TSelf:\n \"\"\"Return the same kind of gate, but with a different exponent.\n\n Child classes should override this method if they have an __init__\n method with a differing signature.\n \"\"\"\n # pylint: disable=unexpected-keyword-arg\n if self._global_shift == 0:\n return type(self)(exponent=exponent)\n return type(self)(\n exponent=exponent,\n global_shift=self._global_shift)\n # pylint: enable=unexpected-keyword-arg\n\n def _diagram_exponent(self,\n args: protocols.CircuitDiagramInfoArgs,\n *,\n ignore_global_phase: bool = True):\n \"\"\"The exponent to use in circuit diagrams.\n\n Basically, this just canonicalizes the exponent in a way that is\n insensitive to global phase. Only relative phases affect the \"true\"\n exponent period, and since we omit global phase detail in diagrams this\n is the appropriate canonicalization to use. To use the absolute period\n instead of the relative period (e.g. for when printing Rx(rads) style\n symbols, where rads=pi and rads=-pi are equivalent but should produce\n different text) set 'ignore_global_phase' to False.\n\n Note that the exponent is canonicalized into the range\n (-period/2, period/2]\n and that this canonicalization happens after rounding, so that e.g.\n X^-0.999999 shows as X instead of X^-1 when using a digit precision of\n 3.\n\n Args:\n args: The diagram args being used to produce the diagram.\n ignore_global_phase: Determines whether the global phase of the\n operation is considered when computing the period of the\n exponent.\n\n Returns:\n A rounded canonicalized exponent.\n \"\"\"\n if not isinstance(self._exponent, (int, float)):\n return self._exponent\n result = float(self._exponent)\n\n if ignore_global_phase:\n # Compute global-phase-independent period of the gate.\n shifts = list(self._eigen_shifts())\n relative_shifts = {e - shifts[0] for e in shifts[1:]}\n relative_periods = [abs(2/e) for e in relative_shifts if e != 0]\n diagram_period = _approximate_common_period(relative_periods)\n else:\n # Use normal period of the gate.\n diagram_period = self._period()\n if diagram_period is None:\n return result\n\n # Canonicalize the rounded exponent into (-period/2, period/2].\n if args.precision is not None:\n result = np.around(result, args.precision)\n h = diagram_period / 2\n if not (-h < result <= h):\n result = h - result\n result %= diagram_period\n result = h - result\n\n return result\n\n # virtual method\n def _eigen_shifts(self) -> List[float]:\n \"\"\"Describes the eigenvalues of the gate's matrix.\n\n By default, this just extracts the shifts by calling\n self._eigen_components(). However, because that method generates\n matrices it may be extremely expensive.\n\n Returns:\n A list of floats. Each float in the list corresponds to one of the\n eigenvalues of the gate's matrix, before accounting for any global\n shift. 
Each float is the θ in λ = exp(i π θ) (where λ is the\n eigenvalue).\n \"\"\"\n return [e[0] for e in self._eigen_components()]\n\n @abc.abstractmethod\n def _eigen_components(self) -> List[Union[EigenComponent,\n Tuple[float, np.ndarray]]]:\n \"\"\"Describes the eigendecomposition of the gate's matrix.\n\n Returns:\n A list of EigenComponent tuples. Each tuple in the list\n corresponds to one of the eigenspaces of the gate's matrix. Each\n tuple has two elements. The first element of a tuple is the θ in\n λ = exp(i π θ) (where λ is the eigenvalue of the eigenspace). The\n second element is a projection matrix onto the eigenspace.\n\n Examples:\n The Pauli Z gate's eigencomponents are:\n\n [\n (0, np.array([[1, 0],\n [0, 0]])),\n (1, np.array([[0, 0],\n [0, 1]])),\n ]\n\n Valid eigencomponents for Rz(π) = -iZ are:\n\n [\n (-0.5, np.array([[1, 0],\n [0, 0]])),\n (+0.5, np.array([[0, 0],\n [0, 1]])),\n ]\n\n But in principle you could also use this:\n\n [\n (+1.5, np.array([[1, 0],\n [0, 0]])),\n (-0.5, np.array([[0, 0],\n [0, 1]])),\n ]\n\n The choice between -0.5 and +1.5 does not affect the gate's\n matrix, but it does affect the matrix of powers of the gates\n (because (x+2)*s != x*s (mod 2) when s is a real number).\n\n The Pauli X gate's eigencomponents are:\n\n [\n (0, np.array([[0.5, 0.5],\n [0.5, 0.5]])),\n (1, np.array([[+0.5, -0.5],\n [-0.5, +0.5]])),\n ]\n \"\"\"\n\n def _period(self) -> Optional[float]:\n \"\"\"Determines how the exponent parameter is canonicalized when equating.\n\n Returns:\n None if the exponent should not be canonicalized. Otherwise a float\n indicating the period of the exponent. If the period is p, then a\n given exponent will be shifted by p until it is in the range\n (-p/2, p/2] during initialization.\n \"\"\"\n exponents = {e + self._global_shift for e in self._eigen_shifts()}\n real_periods = [abs(2/e) for e in exponents if e != 0]\n return _approximate_common_period(real_periods)\n\n def __pow__(self: TSelf, exponent: Union[float, sympy.Symbol]) -> TSelf:\n new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n return NotImplemented\n return self._with_exponent(exponent=new_exponent)\n\n @property\n def _canonical_exponent(self):\n if self._canonical_exponent_cached is None:\n period = self._period()\n if not period or protocols.is_parameterized(self._exponent):\n self._canonical_exponent_cached = self._exponent\n else:\n self._canonical_exponent_cached = self._exponent % period\n return self._canonical_exponent_cached\n\n def _value_equality_values_(self):\n return self._canonical_exponent, self._global_shift\n\n def _value_equality_approximate_values_(self):\n period = self._period()\n if not period or protocols.is_parameterized(self._exponent):\n exponent = self._exponent\n else:\n exponent = value.PeriodicValue(self._exponent, period)\n return exponent, self._global_shift\n\n def _trace_distance_bound_(self):\n if protocols.is_parameterized(self._exponent):\n return 1\n\n angles = [half_turns for half_turns, _ in self._eigen_components()]\n min_angle = min(angles)\n max_angle = max(angles)\n return abs((max_angle - min_angle) * self._exponent * 3.5)\n\n def _has_unitary_(self) -> bool:\n return not self._is_parameterized_()\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n if self._is_parameterized_():\n return NotImplemented\n e = cast(float, self._exponent)\n return np.sum([\n component * 1j**(\n 2 * e * (half_turns + self._global_shift))\n for half_turns, component in 
self._eigen_components()\n ], axis=0)\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self._exponent)\n\n def _resolve_parameters_(self: TSelf, param_resolver) -> TSelf:\n return self._with_exponent(\n exponent=param_resolver.value_of(self._exponent))\n\n\ndef _lcm(vals: Iterable[int]) -> int:\n t = 1\n for r in vals:\n t = t * r // gcd(t, r)\n return t\n\n\ndef _approximate_common_period(periods: List[float],\n approx_denom: int = 60,\n reject_atol: float = 1e-8) -> Optional[float]:\n \"\"\"Finds a value that is nearly an integer multiple of multiple periods.\n\n The returned value should be the smallest non-negative number with this\n property. If `approx_denom` is too small the computation can fail to satisfy\n the `reject_atol` criteria and return `None`. This is actually desirable\n behavior, since otherwise the code would e.g. return a nonsense value when\n asked to compute the common period of `np.e` and `np.pi`.\n\n Args:\n periods: The result must be an approximate integer multiple of each of\n these.\n approx_denom: Determines how the floating point values are rounded into\n rational values (so that integer methods such as lcm can be used).\n Each floating point value f_k will be rounded to a rational number\n of the form n_k / approx_denom. If you want to recognize rational\n periods of the form i/d then d should divide `approx_denom`.\n reject_atol: If the computed approximate common period is at least this\n far from an integer multiple of any of the given periods, then it\n is discarded and `None` is returned instead.\n\n Returns:\n The approximate common period, or else `None` if the given\n `approx_denom` wasn't sufficient to approximate the common period to\n within the given `reject_atol`.\n \"\"\"\n if not periods:\n return None\n if any(e == 0 for e in periods):\n return None\n if len(periods) == 1:\n return abs(periods[0])\n approx_rational_periods = [\n fractions.Fraction(int(np.round(abs(p) * approx_denom)), approx_denom)\n for p in periods\n ]\n common = float(_common_rational_period(approx_rational_periods))\n\n for p in periods:\n if p != 0 and abs(p * np.round(common / p) - common) > reject_atol:\n return None\n\n return common\n\n\ndef _common_rational_period(rational_periods: List[fractions.Fraction]\n ) -> fractions.Fraction:\n \"\"\"Finds the least common integer multiple of some fractions.\n\n The solution is the smallest positive integer c such that there\n exists integers n_k satisfying p_k * n_k = c for all k.\n \"\"\"\n assert rational_periods, \"no well-defined solution for an empty list\"\n common_denom = _lcm(p.denominator for p in rational_periods)\n int_periods = [p.numerator * common_denom // p.denominator\n for p in rational_periods]\n int_common_period = _lcm(int_periods)\n return fractions.Fraction(int_common_period, common_denom)\n"
] | [
[
"numpy.round",
"numpy.around"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
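The `EigenGate._unitary_` method in the Cirq record above builds the matrix of `gate**t` as a phase-weighted sum of eigenspace projectors; since `1j**(2*x) == exp(i*pi*x)`, the phase applied to each component is `exp(i*pi*(theta + s)*t)`. A small NumPy check of that formula, using the Pauli X eigencomponents listed in the `_eigen_components` docstring (with global shift `s = 0`):

```python
import numpy as np

# (theta, projector) pairs for Pauli X, taken from the docstring above
components = [
    (0, np.array([[0.5, 0.5],
                  [0.5, 0.5]])),
    (1, np.array([[0.5, -0.5],
                  [-0.5, 0.5]])),
]

def eigen_power(components, exponent, global_shift=0.0):
    # U**t = sum_k exp(i*pi*(theta_k + s)*t) * P_k
    return sum(np.exp(1j * np.pi * (theta + global_shift) * exponent) * proj
               for theta, proj in components)

assert np.allclose(eigen_power(components, 1), [[0, 1], [1, 0]])  # X itself
assert np.allclose(eigen_power(components, 0.5),                  # sqrt(X)
                   np.array([[1 + 1j, 1 - 1j],
                             [1 - 1j, 1 + 1j]]) / 2)
```

Choosing `theta = 1` rather than `theta = -1` for the second component is exactly the disambiguation the `EigenComponent` comment describes: it makes the square root return the `+i` branch on that eigenspace instead of `-i`.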
ocjosen/tensorflow | [
"ada0605591911094c142d39cbd87294ed2716e8b"
] | [
"tensorflow/python/feature_column/feature_column.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"This API defines FeatureColumn abstraction.\n\nFeatureColumns provide a high level abstraction for ingesting and representing\nfeatures. FeatureColumns are also the primary way of encoding features for\ncanned `tf.estimator.Estimator`s.\n\nWhen using FeatureColumns with `Estimators`, the type of feature column you\nshould choose depends on (1) the feature type and (2) the model type.\n\n1. Feature type:\n\n * Continuous features can be represented by `numeric_column`.\n * Categorical features can be represented by any `categorical_column_with_*`\n column:\n - `categorical_column_with_vocabulary_list`\n - `categorical_column_with_vocabulary_file`\n - `categorical_column_with_hash_bucket`\n - `categorical_column_with_identity`\n - `weighted_categorical_column`\n\n2. Model type:\n\n * Deep neural network models (`DNNClassifier`, `DNNRegressor`).\n\n Continuous features can be directly fed into deep neural network models.\n\n age_column = numeric_column(\"age\")\n\n To feed sparse features into DNN models, wrap the column with\n `embedding_column` or `indicator_column`. `indicator_column` is recommended\n for features with only a few possible values. For features with many\n possible values, to reduce the size of your model, `embedding_column` is\n recommended.\n\n embedded_dept_column = embedding_column(\n categorical_column_with_vocabulary_list(\n \"department\", [\"math\", \"philosophy\", ...]), dimension=10)\n\n * Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).\n\n Sparse features can be fed directly into linear models. 
They behave like an\n indicator column but with an efficient implementation.\n\n dept_column = categorical_column_with_vocabulary_list(\"department\",\n [\"math\", \"philosophy\", \"english\"])\n\n It is recommended that continuous features be bucketized before being\n fed into linear models.\n\n bucketized_age_column = bucketized_column(\n source_column=age_column,\n boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])\n\n Sparse features can be crossed (also known as conjuncted or combined) in\n order to form non-linearities, and then fed into linear models.\n\n cross_dept_age_column = crossed_column(\n columns=[\"department\", bucketized_age_column],\n hash_bucket_size=1000)\n\nExample of building canned `Estimator`s using FeatureColumns:\n\n ```python\n # Define features and transformations\n deep_feature_columns = [age_column, embedded_dept_column]\n wide_feature_columns = [dept_column, bucketized_age_column,\n cross_dept_age_column]\n\n # Build deep model\n estimator = DNNClassifier(\n feature_columns=deep_feature_columns,\n hidden_units=[500, 250, 50])\n estimator.train(...)\n\n # Or build a wide model\n estimator = LinearClassifier(\n feature_columns=wide_feature_columns)\n estimator.train(...)\n\n # Or build a wide and deep model!\n estimator = DNNLinearCombinedClassifier(\n linear_feature_columns=wide_feature_columns,\n dnn_feature_columns=deep_feature_columns,\n dnn_hidden_units=[500, 250, 50])\n estimator.train(...)\n ```\n\n\nFeatureColumns can also be transformed into a generic input layer for\ncustom models using `input_layer`.\n\nExample of building model using FeatureColumns, this can be used in a\n`model_fn` which is given to the {tf.estimator.Estimator}:\n\n ```python\n # Building model via layers\n\n deep_feature_columns = [age_column, embedded_dept_column]\n columns_to_tensor = parse_feature_columns_from_examples(\n serialized=my_data,\n feature_columns=deep_feature_columns)\n first_layer = input_layer(\n features=columns_to_tensor,\n feature_columns=deep_feature_columns)\n second_layer = fully_connected(first_layer, ...)\n ```\n\nNOTE: Functions prefixed with \"_\" indicate experimental or private parts of\nthe API subject to change, and should not be relied upon!\n\nNOTE: The new feature columns are being developed in feature_column_v2.py and\nare a somewhat duplicate of the code here. 
Please make sure to update logic\nin both places.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport math\n\nimport numpy as np\nimport six\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.feature_column import utils as fc_utils\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras.engine import training\nfrom tensorflow.python.layers import base\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import template\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import checkpoint_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _internal_input_layer(features,\n feature_columns,\n weight_collections=None,\n trainable=True,\n cols_to_vars=None,\n scope=None,\n cols_to_output_tensors=None,\n from_template=False):\n \"\"\"See input_layer. `scope` is a name or variable scope to use.\"\"\"\n\n feature_columns = _normalize_feature_columns(feature_columns)\n for column in feature_columns:\n if not isinstance(column, _DenseColumn):\n raise ValueError(\n 'Items of feature_columns must be a _DenseColumn. '\n 'You can wrap a categorical column with an '\n 'embedding_column or indicator_column. 
Given: {}'.format(column))\n weight_collections = list(weight_collections or [])\n if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:\n weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)\n if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:\n weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)\n\n def _get_logits(): # pylint: disable=missing-docstring\n builder = _LazyBuilder(features)\n output_tensors = []\n ordered_columns = []\n for column in sorted(feature_columns, key=lambda x: x.name):\n ordered_columns.append(column)\n with variable_scope.variable_scope(\n None, default_name=column._var_scope_name): # pylint: disable=protected-access\n tensor = column._get_dense_tensor( # pylint: disable=protected-access\n builder,\n weight_collections=weight_collections,\n trainable=trainable)\n num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access\n batch_size = array_ops.shape(tensor)[0]\n output_tensor = array_ops.reshape(\n tensor, shape=(batch_size, num_elements))\n output_tensors.append(output_tensor)\n if cols_to_vars is not None:\n # Retrieve any variables created (some _DenseColumn's don't create\n # variables, in which case an empty list is returned).\n cols_to_vars[column] = ops.get_collection(\n ops.GraphKeys.GLOBAL_VARIABLES,\n scope=variable_scope.get_variable_scope().name)\n if cols_to_output_tensors is not None:\n cols_to_output_tensors[column] = output_tensor\n _verify_static_batch_size_equality(output_tensors, ordered_columns)\n return array_ops.concat(output_tensors, 1)\n\n # If we're constructing from the `make_template`, that by default adds a\n # variable scope with the name of the layer. In that case, we dont want to\n # add another `variable_scope` as that would break checkpoints.\n if from_template:\n return _get_logits()\n else:\n with variable_scope.variable_scope(\n scope, default_name='input_layer', values=features.values()):\n return _get_logits()\n\n\n@tf_export(v1=['feature_column.input_layer'])\ndef input_layer(features,\n feature_columns,\n weight_collections=None,\n trainable=True,\n cols_to_vars=None,\n cols_to_output_tensors=None):\n \"\"\"Returns a dense `Tensor` as input layer based on given `feature_columns`.\n\n Generally a single example in training data is described with FeatureColumns.\n At the first layer of the model, this column oriented data should be converted\n to a single `Tensor`.\n\n Example:\n\n ```python\n price = numeric_column('price')\n keywords_embedded = embedding_column(\n categorical_column_with_hash_bucket(\"keywords\", 10K), dimensions=16)\n columns = [price, keywords_embedded, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n for units in [128, 64, 32]:\n dense_tensor = tf.compat.v1.layers.dense(dense_tensor, units, tf.nn.relu)\n prediction = tf.compat.v1.layers.dense(dense_tensor, 1)\n ```\n\n Args:\n features: A mapping from key to tensors. `_FeatureColumn`s look up via these\n keys. For example `numeric_column('price')` will look at 'price' key in\n this dict. Values can be a `SparseTensor` or a `Tensor` depends on\n corresponding `_FeatureColumn`.\n feature_columns: An iterable containing the FeatureColumns to use as inputs\n to your model. All items should be instances of classes derived from\n `_DenseColumn` such as `numeric_column`, `embedding_column`,\n `bucketized_column`, `indicator_column`. 
If you have categorical features,\n you can wrap them with an `embedding_column` or `indicator_column`.\n weight_collections: A list of collection names to which the Variable will be\n added. Note that variables will also be added to collections\n `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n cols_to_vars: If not `None`, must be a dictionary that will be filled with a\n mapping from `_FeatureColumn` to list of `Variable`s. For example, after\n the call, we might have cols_to_vars =\n {_EmbeddingColumn(\n categorical_column=_HashedCategoricalColumn(\n key='sparse_feature', hash_bucket_size=5, dtype=tf.string),\n dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10),\n <tf.Variable 'some_variable:1' shape=(5, 10)]}\n If a column creates no variables, its value will be an empty list.\n cols_to_output_tensors: If not `None`, must be a dictionary that will be\n filled with a mapping from '_FeatureColumn' to the associated\n output `Tensor`s.\n\n Returns:\n A `Tensor` which represents input layer of a model. Its shape\n is (batch_size, first_layer_dimension) and its dtype is `float32`.\n first_layer_dimension is determined based on given `feature_columns`.\n\n Raises:\n ValueError: if an item in `feature_columns` is not a `_DenseColumn`.\n \"\"\"\n return _internal_input_layer(\n features,\n feature_columns,\n weight_collections=weight_collections,\n trainable=trainable,\n cols_to_vars=cols_to_vars,\n cols_to_output_tensors=cols_to_output_tensors)\n\n\n# TODO(akshayka): InputLayer should be a subclass of Layer, and it\n# should implement the logic in input_layer using Layer's build-and-call\n# paradigm; input_layer should create an instance of InputLayer and\n# return the result of invoking its apply method, just as functional layers do.\nclass InputLayer(object):\n \"\"\"An object-oriented version of `input_layer` that reuses variables.\"\"\"\n\n def __init__(self,\n feature_columns,\n weight_collections=None,\n trainable=True,\n cols_to_vars=None,\n name='feature_column_input_layer',\n create_scope_now=True):\n \"\"\"See `input_layer`.\"\"\"\n\n self._feature_columns = feature_columns\n self._weight_collections = weight_collections\n self._trainable = trainable\n self._cols_to_vars = cols_to_vars\n self._name = name\n self._input_layer_template = template.make_template(\n self._name, _internal_input_layer, create_scope_now_=create_scope_now)\n self._scope = self._input_layer_template.variable_scope\n\n def __call__(self, features):\n return self._input_layer_template(\n features=features,\n feature_columns=self._feature_columns,\n weight_collections=self._weight_collections,\n trainable=self._trainable,\n cols_to_vars=None,\n from_template=True)\n\n @property\n def name(self):\n return self._name\n\n @property\n def non_trainable_variables(self):\n return self._input_layer_template.non_trainable_variables\n\n @property\n def non_trainable_weights(self):\n return self._input_layer_template.non_trainable_weights\n\n @property\n def trainable_variables(self):\n return self._input_layer_template.trainable_variables\n\n @property\n def trainable_weights(self):\n return self._input_layer_template.trainable_weights\n\n @property\n def variables(self):\n return self._input_layer_template.variables\n\n @property\n def weights(self):\n return self._input_layer_template.weights\n\n\n@tf_export(v1=['feature_column.linear_model'])\ndef 
linear_model(features,\n feature_columns,\n units=1,\n sparse_combiner='sum',\n weight_collections=None,\n trainable=True,\n cols_to_vars=None):\n \"\"\"Returns a linear prediction `Tensor` based on given `feature_columns`.\n\n This function generates a weighted sum based on output dimension `units`.\n Weighted sum refers to logits in classification problems. It refers to the\n prediction itself for linear regression problems.\n\n Note on supported columns: `linear_model` treats categorical columns as\n `indicator_column`s. To be specific, assume the input `SparseTensor` looks\n like:\n\n ```python\n shape = [2, 2]\n {\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n }\n ```\n `linear_model` assigns weights for the presence of \"a\", \"b\", \"c\" implicitly,\n just like `indicator_column`, while `input_layer` explicitly requires wrapping\n each of the categorical columns with an `embedding_column` or an\n `indicator_column`.\n\n Example of usage:\n\n ```python\n price = numeric_column('price')\n price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])\n keywords = categorical_column_with_hash_bucket(\"keywords\", 10K)\n keywords_price = crossed_column(['keywords', price_buckets], ...)\n columns = [price_buckets, keywords, keywords_price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n prediction = linear_model(features, columns)\n ```\n\n The `sparse_combiner` argument works as follows.\n For example, for two features represented as the categorical columns:\n\n ```python\n # Feature 1\n\n shape = [2, 2]\n {\n [0, 0]: \"a\"\n [0, 1]: \"b\"\n [1, 0]: \"c\"\n }\n\n # Feature 2\n\n shape = [2, 3]\n {\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n [1, 1]: \"f\"\n [1, 2]: \"f\"\n }\n ```\n\n with `sparse_combiner` as \"mean\", the linear model outputs are consequently:\n\n ```\n y_0 = 1.0 / 2.0 * ( w_a + w_b ) + w_d + b\n y_1 = w_c + 1.0 / 3.0 * ( w_e + 2.0 * w_f ) + b\n ```\n\n where `y_i` is the output, `b` is the bias, and `w_x` is the weight\n assigned to the presence of `x` in the input features.\n\n Args:\n features: A mapping from key to tensors. `_FeatureColumn`s look up via these\n keys. For example `numeric_column('price')` will look at 'price' key in\n this dict. Values are `Tensor` or `SparseTensor` depending on\n corresponding `_FeatureColumn`.\n feature_columns: An iterable containing the FeatureColumns to use as inputs\n to your model. All items should be instances of classes derived from\n `_FeatureColumn`s.\n units: An integer, dimensionality of the output space. Default value is 1.\n sparse_combiner: A string specifying how to reduce if a categorical column\n is multivalent. Except `numeric_column`, almost all columns passed to\n `linear_model` are considered as categorical columns. It combines each\n categorical column independently. Currently \"mean\", \"sqrtn\" and \"sum\" are\n supported, with \"sum\" the default for linear model. \"sqrtn\" often achieves\n good accuracy, in particular with bag-of-words columns.\n * \"sum\": do not normalize features in the column\n * \"mean\": do l1 normalization on features in the column\n * \"sqrtn\": do l2 normalization on features in the column\n weight_collections: A list of collection names to which the Variable will be\n added. 
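As a hedged usage sketch (assumes TF 1.x graph mode; the feature values are invented), `linear_model` can be called directly on in-memory tensors and its variables retrieved through `cols_to_vars`:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

colors = tf.feature_column.categorical_column_with_vocabulary_list(
    'colors', ['R', 'G', 'B'])
features = {'colors': tf.constant([['R', 'G'], ['B', 'B']])}
cols_to_vars = {}
prediction = tf.feature_column.linear_model(
    features, [colors], units=1, sparse_combiner='mean',
    cols_to_vars=cols_to_vars)
# prediction has shape (2, 1). cols_to_vars['bias'] maps to the bias
# variable and cols_to_vars[colors] to the (3, 1) weight matrix; both are
# zero-initialized, so the initial prediction is all zeros.
```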
Note that, variables will also be added to collections\n `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n cols_to_vars: If not `None`, must be a dictionary that will be filled with a\n mapping from `_FeatureColumn` to associated list of `Variable`s. For\n example, after the call, we might have cols_to_vars = {\n _NumericColumn(\n key='numeric_feature1', shape=(1,):\n [<tf.Variable 'linear_model/price2/weights:0' shape=(1, 1)>],\n 'bias': [<tf.Variable 'linear_model/bias_weights:0' shape=(1,)>],\n _NumericColumn(\n key='numeric_feature2', shape=(2,)):\n [<tf.Variable 'linear_model/price1/weights:0' shape=(2, 1)>]}\n If a column creates no variables, its value will be an empty list. Note\n that cols_to_vars will also contain a string key 'bias' that maps to a\n list of Variables.\n\n Returns:\n A `Tensor` which represents predictions/logits of a linear model. Its shape\n is (batch_size, units) and its dtype is `float32`.\n\n Raises:\n ValueError: if an item in `feature_columns` is neither a `_DenseColumn`\n nor `_CategoricalColumn`.\n \"\"\"\n with variable_scope.variable_scope(None, 'linear_model') as vs:\n model_name = _strip_leading_slashes(vs.name)\n linear_model_layer = _LinearModel(\n feature_columns=feature_columns,\n units=units,\n sparse_combiner=sparse_combiner,\n weight_collections=weight_collections,\n trainable=trainable,\n name=model_name)\n retval = linear_model_layer(features) # pylint: disable=not-callable\n if cols_to_vars is not None:\n cols_to_vars.update(linear_model_layer.cols_to_vars())\n return retval\n\n\ndef _add_to_collections(var, weight_collections):\n \"\"\"Adds a var to the list of weight_collections provided.\n\n Handles the case for partitioned and non-partitioned variables.\n\n Args:\n var: A variable or Partitioned Variable.\n weight_collections: List of collections to add variable to.\n \"\"\"\n for weight_collection in weight_collections:\n # The layer self.add_variable call already adds it to GLOBAL_VARIABLES.\n if weight_collection == ops.GraphKeys.GLOBAL_VARIABLES:\n continue\n # TODO(rohanj): Explore adding a _get_variable_list method on `Variable`\n # so that we don't have to do this check.\n if isinstance(var, variables.PartitionedVariable):\n for constituent_var in list(var):\n ops.add_to_collection(weight_collection, constituent_var)\n else:\n ops.add_to_collection(weight_collection, var)\n\n\nclass _FCLinearWrapper(base.Layer):\n \"\"\"Wraps a _FeatureColumn in a layer for use in a linear model.\n\n See `linear_model` above.\n \"\"\"\n\n def __init__(self,\n feature_column,\n units=1,\n sparse_combiner='sum',\n weight_collections=None,\n trainable=True,\n name=None,\n **kwargs):\n super(_FCLinearWrapper, self).__init__(\n trainable=trainable, name=name, **kwargs)\n self._feature_column = feature_column\n self._units = units\n self._sparse_combiner = sparse_combiner\n self._weight_collections = weight_collections\n\n def build(self, _):\n if isinstance(self._feature_column, _CategoricalColumn):\n weight = self.add_variable(\n name='weights',\n shape=(self._feature_column._num_buckets, self._units), # pylint: disable=protected-access\n initializer=init_ops.zeros_initializer(),\n trainable=self.trainable)\n else:\n num_elements = self._feature_column._variable_shape.num_elements() # pylint: disable=protected-access\n weight = self.add_variable(\n name='weights',\n shape=[num_elements, self._units],\n 
initializer=init_ops.zeros_initializer(),\n trainable=self.trainable)\n _add_to_collections(weight, self._weight_collections)\n self._weight_var = weight\n self.built = True\n\n def call(self, builder):\n weighted_sum = _create_weighted_sum(\n column=self._feature_column,\n builder=builder,\n units=self._units,\n sparse_combiner=self._sparse_combiner,\n weight_collections=self._weight_collections,\n trainable=self.trainable,\n weight_var=self._weight_var)\n return weighted_sum\n\n\nclass _BiasLayer(base.Layer):\n \"\"\"A layer for the bias term.\n \"\"\"\n\n def __init__(self,\n units=1,\n trainable=True,\n weight_collections=None,\n name=None,\n **kwargs):\n super(_BiasLayer, self).__init__(trainable=trainable, name=name, **kwargs)\n self._units = units\n self._weight_collections = weight_collections\n\n def build(self, _):\n self._bias_variable = self.add_variable(\n 'bias_weights',\n shape=[self._units],\n initializer=init_ops.zeros_initializer(),\n trainable=self.trainable)\n _add_to_collections(self._bias_variable, self._weight_collections)\n self.built = True\n\n def call(self, _):\n return self._bias_variable\n\n\ndef _get_expanded_variable_list(variable):\n if (isinstance(variable, variables.Variable) or\n resource_variable_ops.is_resource_variable(variable)):\n return [variable] # Single variable case.\n else: # Must be a PartitionedVariable, so convert into a list.\n return list(variable)\n\n\ndef _strip_leading_slashes(name):\n return name.rsplit('/', 1)[-1]\n\n\nclass _LinearModel(training.Model):\n \"\"\"Creates a linear model using feature columns.\n\n See `linear_model` for details.\n \"\"\"\n\n def __init__(self,\n feature_columns,\n units=1,\n sparse_combiner='sum',\n weight_collections=None,\n trainable=True,\n name=None,\n **kwargs):\n super(_LinearModel, self).__init__(name=name, **kwargs)\n self._feature_columns = _normalize_feature_columns(\n feature_columns)\n self._weight_collections = list(weight_collections or [])\n if ops.GraphKeys.GLOBAL_VARIABLES not in self._weight_collections:\n self._weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)\n if ops.GraphKeys.MODEL_VARIABLES not in self._weight_collections:\n self._weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)\n\n column_layers = {}\n for column in sorted(self._feature_columns, key=lambda x: x.name):\n with variable_scope.variable_scope(\n None, default_name=column._var_scope_name) as vs: # pylint: disable=protected-access\n # Having the fully expressed variable scope name ends up doubly\n # expressing the outer scope (scope with which this method was called)\n # in the name of the variable that would get created.\n column_name = _strip_leading_slashes(vs.name)\n column_layer = _FCLinearWrapper(column, units, sparse_combiner,\n self._weight_collections, trainable,\n column_name, **kwargs)\n column_layers[column_name] = column_layer\n self._column_layers = self._add_layers(column_layers)\n self._bias_layer = _BiasLayer(\n units=units,\n trainable=trainable,\n weight_collections=self._weight_collections,\n name='bias_layer',\n **kwargs)\n self._cols_to_vars = {}\n\n def cols_to_vars(self):\n \"\"\"Returns a dict mapping _FeatureColumns to variables.\n\n See `linear_model` for more information.\n This is not populated till `call` is called i.e. 
layer is built.\n \"\"\"\n return self._cols_to_vars\n\n def call(self, features):\n with variable_scope.variable_scope(self.name):\n for column in self._feature_columns:\n if not isinstance(column, (_DenseColumn, _CategoricalColumn)):\n raise ValueError(\n 'Items of feature_columns must be either a '\n '_DenseColumn or _CategoricalColumn. Given: {}'.format(column))\n weighted_sums = []\n ordered_columns = []\n builder = _LazyBuilder(features)\n for layer in sorted(self._column_layers.values(), key=lambda x: x.name):\n column = layer._feature_column # pylint: disable=protected-access\n ordered_columns.append(column)\n weighted_sum = layer(builder)\n weighted_sums.append(weighted_sum)\n self._cols_to_vars[column] = ops.get_collection(\n ops.GraphKeys.GLOBAL_VARIABLES, scope=layer.scope_name)\n\n _verify_static_batch_size_equality(weighted_sums, ordered_columns)\n predictions_no_bias = math_ops.add_n(\n weighted_sums, name='weighted_sum_no_bias')\n predictions = nn_ops.bias_add(\n predictions_no_bias,\n self._bias_layer( # pylint: disable=not-callable\n builder,\n scope=variable_scope.get_variable_scope()), # pylint: disable=not-callable\n name='weighted_sum')\n bias = self._bias_layer.variables[0]\n self._cols_to_vars['bias'] = _get_expanded_variable_list(bias)\n return predictions\n\n def _add_layers(self, layers):\n # \"Magic\" required for keras.Model classes to track all the variables in\n # a list of layers.Layer objects.\n # TODO(ashankar): Figure out API so user code doesn't have to do this.\n for name, layer in layers.items():\n setattr(self, 'layer-%s' % name, layer)\n return layers\n\n\ndef _transform_features(features, feature_columns):\n \"\"\"Returns transformed features based on features columns passed in.\n\n Please note that most probably you would not need to use this function. Please\n check `input_layer` and `linear_model` to see whether they will\n satisfy your use case or not.\n\n Example:\n\n ```python\n # Define features and transformations\n crosses_a_x_b = crossed_column(\n columns=[\"sparse_feature_a\", \"sparse_feature_b\"], hash_bucket_size=10000)\n price_buckets = bucketized_column(\n source_column=numeric_column(\"price\"), boundaries=[...])\n\n columns = [crosses_a_x_b, price_buckets]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n transformed = transform_features(features=features, feature_columns=columns)\n\n assertCountEqual(columns, transformed.keys())\n ```\n\n Args:\n features: A mapping from key to tensors. `_FeatureColumn`s look up via these\n keys. For example `numeric_column('price')` will look at 'price' key in\n this dict. 
Values can be a `SparseTensor` or a `Tensor` depends on\n corresponding `_FeatureColumn`.\n feature_columns: An iterable containing all the `_FeatureColumn`s.\n\n Returns:\n A `dict` mapping `_FeatureColumn` to `Tensor` and `SparseTensor` values.\n \"\"\"\n feature_columns = _normalize_feature_columns(feature_columns)\n outputs = {}\n with ops.name_scope(\n None, default_name='transform_features', values=features.values()):\n builder = _LazyBuilder(features)\n for column in sorted(feature_columns, key=lambda x: x.name):\n with ops.name_scope(None, default_name=column.name):\n outputs[column] = builder.get(column)\n return outputs\n\n\n@tf_export(v1=['feature_column.make_parse_example_spec'])\ndef make_parse_example_spec(feature_columns):\n \"\"\"Creates parsing spec dictionary from input feature_columns.\n\n The returned dictionary can be used as arg 'features' in\n `tf.io.parse_example`.\n\n Typical usage example:\n\n ```python\n # Define features and transformations\n feature_a = categorical_column_with_vocabulary_file(...)\n feature_b = numeric_column(...)\n feature_c_bucketized = bucketized_column(numeric_column(\"feature_c\"), ...)\n feature_a_x_feature_c = crossed_column(\n columns=[\"feature_a\", feature_c_bucketized], ...)\n\n feature_columns = set(\n [feature_b, feature_c_bucketized, feature_a_x_feature_c])\n features = tf.io.parse_example(\n serialized=serialized_examples,\n features=make_parse_example_spec(feature_columns))\n ```\n\n For the above example, make_parse_example_spec would return the dict:\n\n ```python\n {\n \"feature_a\": parsing_ops.VarLenFeature(tf.string),\n \"feature_b\": parsing_ops.FixedLenFeature([1], dtype=tf.float32),\n \"feature_c\": parsing_ops.FixedLenFeature([1], dtype=tf.float32)\n }\n ```\n\n Args:\n feature_columns: An iterable containing all feature columns. All items\n should be instances of classes derived from `_FeatureColumn`.\n\n Returns:\n A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`\n value.\n\n Raises:\n ValueError: If any of the given `feature_columns` is not a `_FeatureColumn`\n instance.\n \"\"\"\n result = {}\n for column in feature_columns:\n if not isinstance(column, _FeatureColumn):\n raise ValueError(\n 'All feature_columns must be _FeatureColumn instances. '\n 'Given: {}'.format(column))\n config = column._parse_example_spec # pylint: disable=protected-access\n for key, value in six.iteritems(config):\n if key in result and value != result[key]:\n raise ValueError(\n 'feature_columns contain different parse_spec for key '\n '{}. Given {} and {}'.format(key, value, result[key]))\n result.update(config)\n return result\n\n\ndef _embedding_column(categorical_column,\n dimension,\n combiner='mean',\n initializer=None,\n ckpt_to_load_from=None,\n tensor_name_in_ckpt=None,\n max_norm=None,\n trainable=True):\n \"\"\"`_DenseColumn` that converts from sparse, categorical input.\n\n Use this when your inputs are sparse, but you want to convert them to a dense\n representation (e.g., to feed to a DNN).\n\n Inputs must be a `_CategoricalColumn` created by any of the\n `categorical_column_*` function. 
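A short end-to-end sketch (illustrative; the placeholder and column names are assumptions) of feeding the generated spec to `tf.io.parse_example`:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

feature_b = tf.feature_column.numeric_column('feature_b')
feature_c_bucketized = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column('feature_c'), boundaries=[0., 10.])
columns = [feature_b, feature_c_bucketized]

spec = tf.feature_column.make_parse_example_spec(columns)
# spec == {'feature_b': FixedLenFeature([1], tf.float32),
#          'feature_c': FixedLenFeature([1], tf.float32)}

serialized = tf.placeholder(tf.string, shape=[None])  # batch of tf.Example
features = tf.io.parse_example(serialized, features=spec)
dense_tensor = tf.feature_column.input_layer(features, columns)
```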
Here is an example of using\n `embedding_column` with `DNNClassifier`:\n\n ```python\n video_id = categorical_column_with_identity(\n key='video_id', num_buckets=1000000, default_value=0)\n columns = [embedding_column(video_id, 9),...]\n\n estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)\n\n label_column = ...\n def input_fn():\n features = tf.io.parse_example(\n ..., features=make_parse_example_spec(columns + [label_column]))\n labels = features.pop(label_column.name)\n return features, labels\n\n estimator.train(input_fn=input_fn, steps=100)\n ```\n\n Here is an example using `embedding_column` with model_fn:\n\n ```python\n def model_fn(features, ...):\n video_id = categorical_column_with_identity(\n key='video_id', num_buckets=1000000, default_value=0)\n columns = [embedding_column(video_id, 9),...]\n dense_tensor = input_layer(features, columns)\n # Form DNN layers, calculate loss, and return EstimatorSpec.\n ...\n ```\n\n Args:\n categorical_column: A `_CategoricalColumn` created by a\n `categorical_column_with_*` function. This column produces the sparse IDs\n that are inputs to the embedding lookup.\n dimension: An integer specifying dimension of the embedding, must be > 0.\n combiner: A string specifying how to reduce if there are multiple entries\n in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with\n 'mean' the default. 'sqrtn' often achieves good accuracy, in particular\n with bag-of-words columns. Each of this can be thought as example level\n normalizations on the column. For more information, see\n `tf.embedding_lookup_sparse`.\n initializer: A variable initializer function to be used in embedding\n variable initialization. If not specified, defaults to\n `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and\n standard deviation `1/sqrt(dimension)`.\n ckpt_to_load_from: String representing checkpoint name/pattern from which to\n restore column weights. Required if `tensor_name_in_ckpt` is not `None`.\n tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from\n which to restore the column weights. Required if `ckpt_to_load_from` is\n not `None`.\n max_norm: If not `None`, embedding values are l2-normalized to this value.\n trainable: Whether or not the embedding is trainable. Default is True.\n\n Returns:\n `_DenseColumn` that converts from sparse input.\n\n Raises:\n ValueError: if `dimension` not > 0.\n ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`\n is specified.\n ValueError: if `initializer` is specified and is not callable.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if (dimension is None) or (dimension < 1):\n raise ValueError('Invalid dimension {}.'.format(dimension))\n if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):\n raise ValueError('Must specify both `ckpt_to_load_from` and '\n '`tensor_name_in_ckpt` or none of them.')\n\n if (initializer is not None) and (not callable(initializer)):\n raise ValueError('initializer must be callable if specified. 
'\n 'Embedding of column_name: {}'.format(\n categorical_column.name))\n if initializer is None:\n initializer = init_ops.truncated_normal_initializer(\n mean=0.0, stddev=1 / math.sqrt(dimension))\n\n embedding_shape = categorical_column._num_buckets, dimension # pylint: disable=protected-access\n\n def _creator(weight_collections, scope):\n embedding_column_layer = _EmbeddingColumnLayer(\n embedding_shape=embedding_shape,\n initializer=initializer,\n weight_collections=weight_collections,\n trainable=trainable,\n name='embedding_column_layer')\n return embedding_column_layer(None, scope=scope) # pylint: disable=not-callable\n\n return _EmbeddingColumn(\n categorical_column=categorical_column,\n dimension=dimension,\n combiner=combiner,\n layer_creator=_creator,\n ckpt_to_load_from=ckpt_to_load_from,\n tensor_name_in_ckpt=tensor_name_in_ckpt,\n max_norm=max_norm,\n trainable=trainable)\n\n\ndef _numeric_column(key,\n shape=(1,),\n default_value=None,\n dtype=dtypes.float32,\n normalizer_fn=None):\n \"\"\"Represents real valued or numerical features.\n\n Example:\n\n ```python\n price = numeric_column('price')\n columns = [price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n\n # or\n bucketized_price = bucketized_column(price, boundaries=[...])\n columns = [bucketized_price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n Args:\n key: A unique string identifying the input feature. It is used as the\n column name and the dictionary key for feature parsing configs, feature\n `Tensor` objects, and feature columns.\n shape: An iterable of integers specifies the shape of the `Tensor`. An\n integer can be given which means a single dimension `Tensor` with given\n width. The `Tensor` representing the column will have the shape of\n [batch_size] + `shape`.\n default_value: A single value compatible with `dtype` or an iterable of\n values compatible with `dtype` which the column takes on during\n `tf.Example` parsing if data is missing. A default value of `None` will\n cause `tf.io.parse_example` to fail if an example does not contain this\n column. If a single value is provided, the same value will be applied as\n the default value for every item. If an iterable of values is provided,\n the shape of the `default_value` should be equal to the given `shape`.\n dtype: defines the type of values. Default value is `tf.float32`. Must be a\n non-quantized, real integer or floating point type.\n normalizer_fn: If not `None`, a function that can be used to normalize the\n value of the tensor after `default_value` is applied for parsing.\n Normalizer function takes the input `Tensor` as its argument, and returns\n the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). 
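To make the `normalizer_fn` behavior concrete, a small sketch (invented statistics; assumes TF 1.x graph mode):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Standardize 'price' with precomputed (assumed) mean 3.0 and scale 4.2.
price = tf.feature_column.numeric_column(
    'price', normalizer_fn=lambda x: (x - 3.0) / 4.2)
features = {'price': tf.constant([[3.0], [7.2]])}
dense = tf.feature_column.input_layer(features, [price])
# After running the graph, dense evaluates to [[0.0], [1.0]].
```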
Please note that\n even though the most common use case of this function is normalization, it\n can be used for any kind of Tensorflow transformations.\n\n Returns:\n A `_NumericColumn`.\n\n Raises:\n TypeError: if any dimension in shape is not an int\n ValueError: if any dimension in shape is not a positive integer\n TypeError: if `default_value` is an iterable but not compatible with `shape`\n TypeError: if `default_value` is not compatible with `dtype`.\n ValueError: if `dtype` is not convertible to `tf.float32`.\n \"\"\"\n shape = _check_shape(shape, key)\n if not (dtype.is_integer or dtype.is_floating):\n raise ValueError('dtype must be convertible to float. '\n 'dtype: {}, key: {}'.format(dtype, key))\n default_value = fc_utils.check_default_value(\n shape, default_value, dtype, key)\n\n if normalizer_fn is not None and not callable(normalizer_fn):\n raise TypeError(\n 'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))\n\n fc_utils.assert_key_is_string(key)\n return _NumericColumn(\n key,\n shape=shape,\n default_value=default_value,\n dtype=dtype,\n normalizer_fn=normalizer_fn)\n\n\ndef _bucketized_column(source_column, boundaries):\n \"\"\"Represents discretized dense input.\n\n Buckets include the left boundary, and exclude the right boundary. Namely,\n `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,\n `[1., 2.)`, and `[2., +inf)`.\n\n For example, if the inputs are\n\n ```python\n boundaries = [0, 10, 100]\n input tensor = [[-5, 10000]\n [150, 10]\n [5, 100]]\n ```\n\n then the output will be\n\n ```python\n output = [[0, 3]\n [3, 2]\n [1, 3]]\n ```\n\n Example:\n\n ```python\n price = numeric_column('price')\n bucketized_price = bucketized_column(price, boundaries=[...])\n columns = [bucketized_price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n\n # or\n columns = [bucketized_price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n ```\n\n `bucketized_column` can also be crossed with another categorical column using\n `crossed_column`:\n\n ```python\n price = numeric_column('price')\n # bucketized_column converts numerical feature to a categorical one.\n bucketized_price = bucketized_column(price, boundaries=[...])\n # 'keywords' is a string feature.\n price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)\n columns = [price_x_keywords, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n Args:\n source_column: A one-dimensional dense column which is generated with\n `numeric_column`.\n boundaries: A sorted list or tuple of floats specifying the boundaries.\n\n Returns:\n A `_BucketizedColumn`.\n\n Raises:\n ValueError: If `source_column` is not a numeric column, or if it is not\n one-dimensional.\n ValueError: If `boundaries` is not a sorted list or tuple.\n \"\"\"\n if not isinstance(source_column, _NumericColumn):\n raise ValueError(\n 'source_column must be a column generated with numeric_column(). '\n 'Given: {}'.format(source_column))\n if len(source_column.shape) > 1:\n raise ValueError(\n 'source_column must be one-dimensional column. 
'\n 'Given: {}'.format(source_column))\n if (not boundaries or\n not (isinstance(boundaries, list) or isinstance(boundaries, tuple))):\n raise ValueError('boundaries must be a sorted list.')\n for i in range(len(boundaries) - 1):\n if boundaries[i] >= boundaries[i + 1]:\n raise ValueError('boundaries must be a sorted list.')\n return _BucketizedColumn(source_column, tuple(boundaries))\n\n\ndef _categorical_column_with_hash_bucket(key,\n hash_bucket_size,\n dtype=dtypes.string):\n \"\"\"Represents sparse feature where ids are set by hashing.\n\n Use this when your sparse features are in string or integer format, and you\n want to distribute your inputs into a finite number of buckets by hashing.\n output_id = Hash(input_feature_string) % bucket_size for string type input.\n For int type input, the value is converted to its string representation first\n and then hashed by the same formula.\n\n For input dictionary `features`, `features[key]` is either `Tensor` or\n `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int\n and `''` for string, which will be dropped by this feature column.\n\n Example:\n\n ```python\n keywords = categorical_column_with_hash_bucket(\"keywords\", 10K)\n columns = [keywords, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n\n # or\n keywords_embedded = embedding_column(keywords, 16)\n columns = [keywords_embedded, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n ```\n\n Args:\n key: A unique string identifying the input feature. It is used as the\n column name and the dictionary key for feature parsing configs, feature\n `Tensor` objects, and feature columns.\n hash_bucket_size: An int > 1. The number of buckets.\n dtype: The type of features. Only string and integer types are supported.\n\n Returns:\n A `_HashedCategoricalColumn`.\n\n Raises:\n ValueError: `hash_bucket_size` is not greater than 1.\n ValueError: `dtype` is neither string nor integer.\n \"\"\"\n if hash_bucket_size is None:\n raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))\n\n if hash_bucket_size < 1:\n raise ValueError('hash_bucket_size must be at least 1. '\n 'hash_bucket_size: {}, key: {}'.format(\n hash_bucket_size, key))\n\n fc_utils.assert_key_is_string(key)\n fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))\n\n return _HashedCategoricalColumn(key, hash_bucket_size, dtype)\n\n\ndef _categorical_column_with_vocabulary_file(key,\n vocabulary_file,\n vocabulary_size=None,\n num_oov_buckets=0,\n default_value=None,\n dtype=dtypes.string):\n \"\"\"A `_CategoricalColumn` with a vocabulary file.\n\n Use this when your inputs are in string or integer format, and you have a\n vocabulary file that maps each value to an integer ID. By default,\n out-of-vocabulary values are ignored. Use either (but not both) of\n `num_oov_buckets` and `default_value` to specify how to include\n out-of-vocabulary values.\n\n For input dictionary `features`, `features[key]` is either `Tensor` or\n `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int\n and `''` for string, which will be dropped by this feature column.\n\n Example with `num_oov_buckets`:\n File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state\n abbreviation. All inputs with values in that file are assigned an ID 0-49,\n corresponding to its line number. 
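A brief sketch (made-up feature name; assumes TF 1.x graph mode) of the integer-input path to hash bucketing, where values are stringified before hashing as described above:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

user_id = tf.feature_column.categorical_column_with_hash_bucket(
    'user_id', hash_bucket_size=100, dtype=tf.int64)
user_id_indicator = tf.feature_column.indicator_column(user_id)
features = {'user_id': tf.constant([[1234], [5678]], dtype=tf.int64)}
dense = tf.feature_column.input_layer(features, [user_id_indicator])
# dense is a (2, 100) multi-hot tensor; inputs of -1 would be dropped.
```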
All other values are hashed and assigned an\n ID 50-54.\n\n ```python\n states = categorical_column_with_vocabulary_file(\n key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,\n num_oov_buckets=5)\n columns = [states, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n Example with `default_value`:\n File '/us/states.txt' contains 51 lines - the first line is 'XX', and the\n other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'\n in input, and other values missing from the file, will be assigned ID 0. All\n others are assigned the corresponding line number 1-50.\n\n ```python\n states = categorical_column_with_vocabulary_file(\n key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,\n default_value=0)\n columns = [states, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction, _, _ = linear_model(features, columns)\n ```\n\n And to make an embedding with either:\n\n ```python\n columns = [embedding_column(states, 3),...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n ```\n\n Args:\n key: A unique string identifying the input feature. It is used as the\n column name and the dictionary key for feature parsing configs, feature\n `Tensor` objects, and feature columns.\n vocabulary_file: The vocabulary file name.\n vocabulary_size: Number of the elements in the vocabulary. This must be no\n greater than length of `vocabulary_file`, if less than length, later\n values are ignored. If None, it is set to the length of `vocabulary_file`.\n num_oov_buckets: Non-negative integer, the number of out-of-vocabulary\n buckets. All out-of-vocabulary inputs will be assigned IDs in the range\n `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of\n the input value. A positive `num_oov_buckets` can not be specified with\n `default_value`.\n default_value: The integer ID value to return for out-of-vocabulary feature\n values, defaults to `-1`. This can not be specified with a positive\n `num_oov_buckets`.\n dtype: The type of features. 
Only string and integer types are supported.\n\n Returns:\n A `_CategoricalColumn` with a vocabulary file.\n\n Raises:\n ValueError: `vocabulary_file` is missing or cannot be opened.\n ValueError: `vocabulary_size` is missing or < 1.\n ValueError: `num_oov_buckets` is a negative integer.\n ValueError: `num_oov_buckets` and `default_value` are both specified.\n ValueError: `dtype` is neither string nor integer.\n \"\"\"\n if not vocabulary_file:\n raise ValueError('Missing vocabulary_file in {}.'.format(key))\n\n if vocabulary_size is None:\n if not gfile.Exists(vocabulary_file):\n raise ValueError('vocabulary_file in {} does not exist.'.format(key))\n\n with gfile.GFile(vocabulary_file) as f:\n vocabulary_size = sum(1 for _ in f)\n logging.info(\n 'vocabulary_size = %d in %s is inferred from the number of elements '\n 'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)\n\n # `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.\n if vocabulary_size < 1:\n raise ValueError('Invalid vocabulary_size in {}.'.format(key))\n if num_oov_buckets:\n if default_value is not None:\n raise ValueError(\n 'Can\\'t specify both num_oov_buckets and default_value in {}.'.format(\n key))\n if num_oov_buckets < 0:\n raise ValueError('Invalid num_oov_buckets {} in {}.'.format(\n num_oov_buckets, key))\n fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))\n fc_utils.assert_key_is_string(key)\n return _VocabularyFileCategoricalColumn(\n key=key,\n vocabulary_file=vocabulary_file,\n vocabulary_size=vocabulary_size,\n num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,\n default_value=-1 if default_value is None else default_value,\n dtype=dtype)\n\n\ndef _categorical_column_with_vocabulary_list(key,\n vocabulary_list,\n dtype=None,\n default_value=-1,\n num_oov_buckets=0):\n \"\"\"A `_CategoricalColumn` with in-memory vocabulary.\n\n Use this when your inputs are in string or integer format, and you have an\n in-memory vocabulary mapping each value to an integer ID. By default,\n out-of-vocabulary values are ignored. Use either (but not both) of\n `num_oov_buckets` and `default_value` to specify how to include\n out-of-vocabulary values.\n\n For input dictionary `features`, `features[key]` is either `Tensor` or\n `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int\n and `''` for string, which will be dropped by this feature column.\n\n Example with `num_oov_buckets`:\n In the following example, each input in `vocabulary_list` is assigned an ID\n 0-3 corresponding to its index (e.g., input 'B' produces output 2). All other\n inputs are hashed and assigned an ID 4-5.\n\n ```python\n colors = categorical_column_with_vocabulary_list(\n key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),\n num_oov_buckets=2)\n columns = [colors, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction, _, _ = linear_model(features, columns)\n ```\n\n Example with `default_value`:\n In the following example, each input in `vocabulary_list` is assigned an ID\n 0-4 corresponding to its index (e.g., input 'B' produces output 3). 
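A sketch of the resulting ID layout with `num_oov_buckets` (illustrative values; assumes TF 1.x graph mode):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

colors = tf.feature_column.categorical_column_with_vocabulary_list(
    'colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2)
# In-vocabulary inputs map to ids 0-3 by list position ('B' -> 2); anything
# else is hashed into the out-of-vocabulary range [4, 6).
colors_indicator = tf.feature_column.indicator_column(colors)
features = {'colors': tf.constant([['B'], ['purple']])}
dense = tf.feature_column.input_layer(features, [colors_indicator])
# dense has shape (2, 4 + 2) = (2, 6).
```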
All other\n inputs are assigned `default_value` 0.\n\n\n ```python\n colors = categorical_column_with_vocabulary_list(\n key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)\n columns = [colors, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction, _, _ = linear_model(features, columns)\n ```\n\n And to make an embedding with either:\n\n ```python\n columns = [embedding_column(colors, 3),...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n ```\n\n Args:\n key: A unique string identifying the input feature. It is used as the\n column name and the dictionary key for feature parsing configs, feature\n `Tensor` objects, and feature columns.\n vocabulary_list: An ordered iterable defining the vocabulary. Each feature\n is mapped to the index of its value (if present) in `vocabulary_list`.\n Must be castable to `dtype`.\n dtype: The type of features. Only string and integer types are supported.\n If `None`, it will be inferred from `vocabulary_list`.\n default_value: The integer ID value to return for out-of-vocabulary feature\n values, defaults to `-1`. This can not be specified with a positive\n `num_oov_buckets`.\n num_oov_buckets: Non-negative integer, the number of out-of-vocabulary\n buckets. All out-of-vocabulary inputs will be assigned IDs in the range\n `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a\n hash of the input value. A positive `num_oov_buckets` can not be specified\n with `default_value`.\n\n Returns:\n A `_CategoricalColumn` with in-memory vocabulary.\n\n Raises:\n ValueError: if `vocabulary_list` is empty, or contains duplicate keys.\n ValueError: `num_oov_buckets` is a negative integer.\n ValueError: `num_oov_buckets` and `default_value` are both specified.\n ValueError: if `dtype` is not integer or string.\n \"\"\"\n if (vocabulary_list is None) or (len(vocabulary_list) < 1):\n raise ValueError(\n 'vocabulary_list {} must be non-empty, column_name: {}'.format(\n vocabulary_list, key))\n if len(set(vocabulary_list)) != len(vocabulary_list):\n raise ValueError(\n 'Duplicate keys in vocabulary_list {}, column_name: {}'.format(\n vocabulary_list, key))\n vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)\n if num_oov_buckets:\n if default_value != -1:\n raise ValueError(\n 'Can\\'t specify both num_oov_buckets and default_value in {}.'.format(\n key))\n if num_oov_buckets < 0:\n raise ValueError('Invalid num_oov_buckets {} in {}.'.format(\n num_oov_buckets, key))\n fc_utils.assert_string_or_int(\n vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))\n if dtype is None:\n dtype = vocabulary_dtype\n elif dtype.is_integer != vocabulary_dtype.is_integer:\n raise ValueError(\n 'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(\n dtype, vocabulary_dtype, key))\n fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))\n fc_utils.assert_key_is_string(key)\n\n return _VocabularyListCategoricalColumn(\n key=key, vocabulary_list=tuple(vocabulary_list), dtype=dtype,\n default_value=default_value, num_oov_buckets=num_oov_buckets)\n\n\ndef _categorical_column_with_identity(key, num_buckets, default_value=None):\n \"\"\"A `_CategoricalColumn` that returns identity values.\n\n Use this when your inputs are integers in the range `[0, num_buckets)`, and\n you want to use the input value itself as the categorical ID. 
Values outside\n this range will result in `default_value` if specified, otherwise it will\n fail.\n\n Typically, this is used for contiguous ranges of integer indexes, but\n it doesn't have to be. This might be inefficient, however, if many of IDs\n are unused. Consider `categorical_column_with_hash_bucket` in that case.\n\n For input dictionary `features`, `features[key]` is either `Tensor` or\n `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int\n and `''` for string, which will be dropped by this feature column.\n\n In the following examples, each input in the range `[0, 1000000)` is assigned\n the same value. All other inputs are assigned `default_value` 0. Note that a\n literal 0 in inputs will result in the same default ID.\n\n Linear model:\n\n ```python\n video_id = categorical_column_with_identity(\n key='video_id', num_buckets=1000000, default_value=0)\n columns = [video_id, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction, _, _ = linear_model(features, columns)\n ```\n\n Embedding for a DNN model:\n\n ```python\n columns = [embedding_column(video_id, 9),...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n ```\n\n Args:\n key: A unique string identifying the input feature. It is used as the\n column name and the dictionary key for feature parsing configs, feature\n `Tensor` objects, and feature columns.\n num_buckets: Range of inputs and outputs is `[0, num_buckets)`.\n default_value: If `None`, this column's graph operations will fail for\n out-of-range inputs. Otherwise, this value must be in the range\n `[0, num_buckets)`, and will replace inputs in that range.\n\n Returns:\n A `_CategoricalColumn` that returns identity values.\n\n Raises:\n ValueError: if `num_buckets` is less than one.\n ValueError: if `default_value` is not in range `[0, num_buckets)`.\n \"\"\"\n if num_buckets < 1:\n raise ValueError(\n 'num_buckets {} < 1, column_name {}'.format(num_buckets, key))\n if (default_value is not None) and (\n (default_value < 0) or (default_value >= num_buckets)):\n raise ValueError(\n 'default_value {} not in range [0, {}), column_name {}'.format(\n default_value, num_buckets, key))\n fc_utils.assert_key_is_string(key)\n return _IdentityCategoricalColumn(\n key=key, num_buckets=num_buckets, default_value=default_value)\n\n\ndef _indicator_column(categorical_column):\n \"\"\"Represents multi-hot representation of given categorical column.\n\n - For DNN model, `indicator_column` can be used to wrap any\n `categorical_column_*` (e.g., to feed to DNN). Consider to Use\n `embedding_column` if the number of buckets/unique(values) are large.\n\n - For Wide (aka linear) model, `indicator_column` is the internal\n representation for categorical column when passing categorical column\n directly (as any element in feature_columns) to `linear_model`. 
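A sketch of the `default_value` fallback (small bucket count for readability; assumes TF 1.x graph mode):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

video_id = tf.feature_column.categorical_column_with_identity(
    key='video_id', num_buckets=5, default_value=0)
video_indicator = tf.feature_column.indicator_column(video_id)
# Input 3 keeps id 3; the out-of-range input 7 falls back to default_value 0.
features = {'video_id': tf.constant([[3], [7]], dtype=tf.int64)}
dense = tf.feature_column.input_layer(features, [video_indicator])
# dense evaluates to [[0., 0., 0., 1., 0.], [1., 0., 0., 0., 0.]].
```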
See\n `linear_model` for details.\n\n ```python\n name = indicator_column(categorical_column_with_vocabulary_list(\n 'name', ['bob', 'george', 'wanda']))\n columns = [name, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n\n dense_tensor == [[1, 0, 0]] # If \"name\" bytes_list is [\"bob\"]\n dense_tensor == [[1, 0, 1]] # If \"name\" bytes_list is [\"bob\", \"wanda\"]\n dense_tensor == [[2, 0, 0]] # If \"name\" bytes_list is [\"bob\", \"bob\"]\n ```\n\n Args:\n categorical_column: A `_CategoricalColumn` which is created by\n `categorical_column_with_*` or `crossed_column` functions.\n\n Returns:\n An `_IndicatorColumn`.\n \"\"\"\n return _IndicatorColumn(categorical_column)\n\n\ndef _weighted_categorical_column(categorical_column,\n weight_feature_key,\n dtype=dtypes.float32):\n \"\"\"Applies weight values to a `_CategoricalColumn`.\n\n Use this when each of your sparse inputs has both an ID and a value. For\n example, if you're representing text documents as a collection of word\n frequencies, you can provide 2 parallel sparse input features ('terms' and\n 'frequencies' below).\n\n Example:\n\n Input `tf.Example` objects:\n\n ```proto\n [\n features {\n feature {\n key: \"terms\"\n value {bytes_list {value: \"very\" value: \"model\"}}\n }\n feature {\n key: \"frequencies\"\n value {float_list {value: 0.3 value: 0.1}}\n }\n },\n features {\n feature {\n key: \"terms\"\n value {bytes_list {value: \"when\" value: \"course\" value: \"human\"}}\n }\n feature {\n key: \"frequencies\"\n value {float_list {value: 0.4 value: 0.1 value: 0.2}}\n }\n }\n ]\n ```\n\n ```python\n categorical_column = categorical_column_with_hash_bucket(\n key='terms', hash_bucket_size=1000)\n weighted_column = weighted_categorical_column(\n categorical_column=categorical_column, weight_feature_key='frequencies')\n columns = [weighted_column, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n This assumes the input dictionary contains a `SparseTensor` for key\n 'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have\n the same indices and dense shape.\n\n Args:\n categorical_column: A `_CategoricalColumn` created by\n `categorical_column_with_*` functions.\n weight_feature_key: String key for weight values.\n dtype: Type of weights, such as `tf.float32`. Only float and integer weights\n are supported.\n\n Returns:\n A `_CategoricalColumn` composed of two sparse features: one represents id,\n the other represents weight (value) of the id feature in that example.\n\n Raises:\n ValueError: if `dtype` is not convertible to float.\n \"\"\"\n if (dtype is None) or not (dtype.is_integer or dtype.is_floating):\n raise ValueError('dtype {} is not convertible to float.'.format(dtype))\n return _WeightedCategoricalColumn(\n categorical_column=categorical_column,\n weight_feature_key=weight_feature_key,\n dtype=dtype)\n\n\ndef _crossed_column(keys, hash_bucket_size, hash_key=None):\n \"\"\"Returns a column for performing crosses of categorical features.\n\n Crossed features will be hashed according to `hash_bucket_size`. 
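A runnable counterpart to the weighted-column example above (a sketch with hand-built `SparseTensor`s; assumes TF 1.x graph mode):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

terms = tf.feature_column.categorical_column_with_vocabulary_list(
    'terms', ['very', 'model', 'when'])
weighted = tf.feature_column.weighted_categorical_column(
    categorical_column=terms, weight_feature_key='frequencies')
# 'terms' and 'frequencies' must be parallel: same indices and dense shape.
features = {
    'terms': tf.SparseTensor(indices=[[0, 0], [0, 1]],
                             values=['very', 'model'], dense_shape=[1, 2]),
    'frequencies': tf.SparseTensor(indices=[[0, 0], [0, 1]],
                                   values=[0.3, 0.1], dense_shape=[1, 2]),
}
prediction = tf.feature_column.linear_model(features, [weighted])
# prediction is 0.3 * w_very + 0.1 * w_model + bias for the single example.
```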
Conceptually,\n the transformation can be thought of as:\n Hash(cartesian product of features) % `hash_bucket_size`\n\n For example, if the input features are:\n\n * SparseTensor referred by first key:\n\n ```python\n shape = [2, 2]\n {\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n }\n ```\n\n * SparseTensor referred by second key:\n\n ```python\n shape = [2, 1]\n {\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n }\n ```\n\n then crossed feature will look like:\n\n ```python\n shape = [2, 2]\n {\n [0, 0]: Hash64(\"d\", Hash64(\"a\")) % hash_bucket_size\n [1, 0]: Hash64(\"e\", Hash64(\"b\")) % hash_bucket_size\n [1, 1]: Hash64(\"e\", Hash64(\"c\")) % hash_bucket_size\n }\n ```\n\n Here is an example to create a linear model with crosses of string features:\n\n ```python\n keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)\n columns = [keywords_x_doc_terms, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n You could also use vocabulary lookup before crossing:\n\n ```python\n keywords = categorical_column_with_vocabulary_file(\n 'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)\n keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)\n columns = [keywords_x_doc_terms, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n If an input feature is of numeric type, you can use\n `categorical_column_with_identity`, or `bucketized_column`, as in the example:\n\n ```python\n # vertical_id is an integer categorical feature.\n vertical_id = categorical_column_with_identity('vertical_id', 10K)\n price = numeric_column('price')\n # bucketized_column converts numerical feature to a categorical one.\n bucketized_price = bucketized_column(price, boundaries=[...])\n vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)\n columns = [vertical_id_x_price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n To use crossed column in DNN model, you need to add it in an embedding column\n as in this example:\n\n ```python\n vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)\n vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)\n dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])\n ```\n\n Args:\n keys: An iterable identifying the features to be crossed. Each element can\n be either:\n * string: Will use the corresponding feature which must be of string type.\n * `_CategoricalColumn`: Will use the transformed tensor produced by this\n column. Does not support hashed categorical column.\n hash_bucket_size: An int > 1. The number of buckets.\n hash_key: Specify the hash_key that will be used by the `FingerprintCat64`\n function to combine the crosses fingerprints on SparseCrossOp (optional).\n\n Returns:\n A `_CrossedColumn`.\n\n Raises:\n ValueError: If `len(keys) < 2`.\n ValueError: If any of the keys is neither a string nor `_CategoricalColumn`.\n ValueError: If any of the keys is `_HashedCategoricalColumn`.\n ValueError: If `hash_bucket_size < 1`.\n \"\"\"\n if not hash_bucket_size or hash_bucket_size < 1:\n raise ValueError('hash_bucket_size must be > 1. '\n 'hash_bucket_size: {}'.format(hash_bucket_size))\n if not keys or len(keys) < 2:\n raise ValueError(\n 'keys must be a list with length > 1. 
Given: {}'.format(keys))\n for key in keys:\n if (not isinstance(key, six.string_types) and\n not isinstance(key, _CategoricalColumn)):\n raise ValueError(\n 'Unsupported key type. All keys must be either string, or '\n 'categorical column except _HashedCategoricalColumn. '\n 'Given: {}'.format(key))\n if isinstance(key, _HashedCategoricalColumn):\n raise ValueError(\n 'categorical_column_with_hash_bucket is not supported for crossing. '\n 'Hashing before crossing will increase probability of collision. '\n 'Instead, use the feature name as a string. Given: {}'.format(key))\n return _CrossedColumn(\n keys=tuple(keys), hash_bucket_size=hash_bucket_size,\n hash_key=hash_key)\n\n\n# TODO(rohanj): Clearly define semantics of this layer.\nclass _EmbeddingColumnLayer(base.Layer):\n \"\"\"A layer that stores all the state required for a embedding column.\"\"\"\n\n def __init__(self,\n embedding_shape,\n initializer,\n weight_collections=None,\n trainable=True,\n name=None,\n **kwargs):\n \"\"\"Constructor.\n\n Args:\n embedding_shape: Shape of the embedding variable used for lookup.\n initializer: A variable initializer function to be used in embedding\n variable initialization.\n weight_collections: A list of collection names to which the Variable will\n be added. Note that, variables will also be added to collections\n `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: Name of the layer\n **kwargs: keyword named properties.\n \"\"\"\n super(_EmbeddingColumnLayer, self).__init__(\n trainable=trainable, name=name, **kwargs)\n self._embedding_shape = embedding_shape\n self._initializer = initializer\n self._weight_collections = weight_collections\n\n def set_weight_collections(self, weight_collections):\n \"\"\"Sets the weight collections for the layer.\n\n Args:\n weight_collections: A list of collection names to which the Variable will\n be added.\n \"\"\"\n self._weight_collections = weight_collections\n\n def build(self, _):\n self._embedding_weight_var = self.add_variable(\n name='embedding_weights',\n shape=self._embedding_shape,\n dtype=dtypes.float32,\n initializer=self._initializer,\n trainable=self.trainable)\n if self._weight_collections and not context.executing_eagerly():\n _add_to_collections(self._embedding_weight_var, self._weight_collections)\n self.built = True\n\n def call(self, _):\n return self._embedding_weight_var\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass _FeatureColumn(object):\n \"\"\"Represents a feature column abstraction.\n\n WARNING: Do not subclass this layer unless you know what you are doing:\n the API is subject to future changes.\n\n To distinguish the concept of a feature family and a specific binary feature\n within a family, we refer to a feature family like \"country\" as a feature\n column. Following is an example feature in a `tf.Example` format:\n {key: \"country\", value: [ \"US\" ]}\n In this example the value of feature is \"US\" and \"country\" refers to the\n column of the feature.\n\n This class is an abstract class. User should not create instances of this.\n \"\"\"\n\n @abc.abstractproperty\n def name(self):\n \"\"\"Returns string. 
Used for naming and for name_scope.\"\"\"\n pass\n\n def __lt__(self, other):\n \"\"\"Allows feature columns to be sorted in Python 3 as they are in Python 2.\n\n Feature columns need to occasionally be sortable, for example when used as\n keys in a features dictionary passed to a layer.\n\n In CPython, `__lt__` must be defined for all objects in the\n sequence being sorted. If any objects do not have an `__lt__` compatible\n with feature column objects (such as strings), then CPython will fall back\n to using the `__gt__` method below.\n https://docs.python.org/3/library/stdtypes.html#list.sort\n\n Args:\n other: The other object to compare to.\n\n Returns:\n True if the string representation of this object is lexicographically less\n than the string representation of `other`. For FeatureColumn objects,\n this looks like \"<__main__.FeatureColumn object at 0xa>\".\n \"\"\"\n return str(self) < str(other)\n\n def __gt__(self, other):\n \"\"\"Allows feature columns to be sorted in Python 3 as they are in Python 2.\n\n Feature columns need to occasionally be sortable, for example when used as\n keys in a features dictionary passed to a layer.\n\n `__gt__` is called when the \"other\" object being compared during the sort\n does not have `__lt__` defined.\n Example: http://gpaste/4803354716798976\n\n Args:\n other: The other object to compare to.\n\n Returns:\n True if the string representation of this object is lexicographically\n greater than the string representation of `other`. For FeatureColumn\n objects, this looks like \"<__main__.FeatureColumn object at 0xa>\".\n \"\"\"\n return str(self) > str(other)\n\n @property\n def _var_scope_name(self):\n \"\"\"Returns string. Used for variable_scope. Defaults to self.name.\"\"\"\n return self.name\n\n @abc.abstractmethod\n def _transform_feature(self, inputs):\n \"\"\"Returns intermediate representation (usually a `Tensor`).\n\n Uses `inputs` to create an intermediate representation (usually a `Tensor`)\n that other feature columns can use.\n\n Example usage of `inputs`:\n Let's say a Feature column depends on raw feature ('raw') and another\n `_FeatureColumn` (input_fc). To access corresponding `Tensor`s, inputs will\n be used as follows:\n\n ```python\n raw_tensor = inputs.get('raw')\n fc_tensor = inputs.get(input_fc)\n ```\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n\n Returns:\n Transformed feature `Tensor`.\n \"\"\"\n pass\n\n @abc.abstractproperty\n def _parse_example_spec(self):\n \"\"\"Returns a `tf.Example` parsing spec as dict.\n\n It is used for get_parsing_spec for `tf.io.parse_example`. Returned spec is\n a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other\n supported objects. Please check documentation of `tf.io.parse_example` for\n all supported spec objects.\n\n Let's say a Feature column depends on raw feature ('raw') and another\n `_FeatureColumn` (input_fc). One possible implementation of\n _parse_example_spec is as follows:\n\n ```python\n spec = {'raw': tf.io.FixedLenFeature(...)}\n spec.update(input_fc._parse_example_spec)\n return spec\n ```\n \"\"\"\n pass\n\n def _reset_config(self):\n \"\"\"Resets the configuration in the column.\n\n Some feature columns e.g. embedding or shared embedding columns might\n have some state that is needed to be reset sometimes. 
Use this method\n in that scenario.\n \"\"\"\n\n\nclass _DenseColumn(_FeatureColumn):\n \"\"\"Represents a column which can be represented as `Tensor`.\n\n WARNING: Do not subclass this layer unless you know what you are doing:\n the API is subject to future changes.\n\n Some examples of this type are: numeric_column, embedding_column,\n indicator_column.\n \"\"\"\n\n @abc.abstractproperty\n def _variable_shape(self):\n \"\"\"`TensorShape` of `_get_dense_tensor`, without batch dimension.\"\"\"\n pass\n\n @abc.abstractmethod\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n \"\"\"Returns a `Tensor`.\n\n The output of this function will be used by model-builder-functions. For\n example the pseudo code of `input_layer` will be like:\n\n ```python\n def input_layer(features, feature_columns, ...):\n outputs = [fc._get_dense_tensor(...) for fc in feature_columns]\n return tf.concat(outputs)\n ```\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n weight_collections: List of graph collections to which Variables (if any\n will be created) are added.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n\n Returns:\n `Tensor` of shape [batch_size] + `_variable_shape`.\n \"\"\"\n pass\n\n\ndef _create_weighted_sum(column,\n builder,\n units,\n sparse_combiner,\n weight_collections,\n trainable,\n weight_var=None):\n \"\"\"Creates a weighted sum for a dense/categorical column for linear_model.\"\"\"\n if isinstance(column, _CategoricalColumn):\n return _create_categorical_column_weighted_sum(\n column=column,\n builder=builder,\n units=units,\n sparse_combiner=sparse_combiner,\n weight_collections=weight_collections,\n trainable=trainable,\n weight_var=weight_var)\n else:\n return _create_dense_column_weighted_sum(\n column=column,\n builder=builder,\n units=units,\n weight_collections=weight_collections,\n trainable=trainable,\n weight_var=weight_var)\n\n\ndef _create_dense_column_weighted_sum(column,\n builder,\n units,\n weight_collections,\n trainable,\n weight_var=None):\n \"\"\"Create a weighted sum of a dense column for linear_model.\"\"\"\n tensor = column._get_dense_tensor( # pylint: disable=protected-access\n builder,\n weight_collections=weight_collections,\n trainable=trainable)\n num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access\n batch_size = array_ops.shape(tensor)[0]\n tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))\n if weight_var is not None:\n weight = weight_var\n else:\n weight = variable_scope.get_variable(\n name='weights',\n shape=[num_elements, units],\n initializer=init_ops.zeros_initializer(),\n trainable=trainable,\n collections=weight_collections)\n return math_ops.matmul(tensor, weight, name='weighted_sum')\n\n\nclass _CategoricalColumn(_FeatureColumn):\n \"\"\"Represents a categorical feature.\n\n WARNING: Do not subclass this layer unless you know what you are doing:\n the API is subject to future changes.\n\n A categorical feature typically handled with a `tf.SparseTensor` of IDs.\n \"\"\"\n\n IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name\n 'IdWeightPair', ['id_tensor', 'weight_tensor'])\n\n @abc.abstractproperty\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n pass\n\n @abc.abstractmethod\n def _get_sparse_tensors(self,\n inputs,\n weight_collections=None,\n trainable=None):\n \"\"\"Returns an IdWeightPair.\n\n `IdWeightPair` is a 
pair of `SparseTensor`s which represents ids and\n weights.\n\n `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`\n `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a\n `SparseTensor` of `float` or `None` to indicate all weights should be\n taken to be 1. If specified, `weight_tensor` must have exactly the same\n shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing\n output of a `VarLenFeature` which is a ragged matrix.\n\n Args:\n inputs: A `LazyBuilder` as a cache to get input tensors required to\n create `IdWeightPair`.\n weight_collections: List of graph collections to which variables (if any\n will be created) are added.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.compat.v1.get_variable`).\n \"\"\"\n pass\n\n\ndef _create_categorical_column_weighted_sum(column,\n builder,\n units,\n sparse_combiner,\n weight_collections,\n trainable,\n weight_var=None):\n # pylint: disable=g-doc-return-or-yield,g-doc-args\n \"\"\"Create a weighted sum of a categorical column for linear_model.\n\n Note to maintainer: As implementation details, the weighted sum is\n implemented via embedding_lookup_sparse toward efficiency. Mathematically,\n they are the same.\n\n To be specific, conceptually, categorical column can be treated as multi-hot\n vector. Say:\n\n ```python\n x = [0 0 1] # categorical column input\n w = [a b c] # weights\n ```\n The weighted sum is `c` in this case, which is same as `w[2]`.\n\n Another example is\n\n ```python\n x = [0 1 1] # categorical column input\n w = [a b c] # weights\n ```\n The weighted sum is `b + c` in this case, which is same as `w[2] + w[3]`.\n\n For both cases, we can implement weighted sum via embedding_lookup with\n sparse_combiner = \"sum\".\n \"\"\"\n\n sparse_tensors = column._get_sparse_tensors( # pylint: disable=protected-access\n builder,\n weight_collections=weight_collections,\n trainable=trainable)\n id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [\n array_ops.shape(sparse_tensors.id_tensor)[0], -1\n ])\n weight_tensor = sparse_tensors.weight_tensor\n if weight_tensor is not None:\n weight_tensor = sparse_ops.sparse_reshape(\n weight_tensor, [array_ops.shape(weight_tensor)[0], -1])\n\n if weight_var is not None:\n weight = weight_var\n else:\n weight = variable_scope.get_variable(\n name='weights',\n shape=(column._num_buckets, units), # pylint: disable=protected-access\n initializer=init_ops.zeros_initializer(),\n trainable=trainable,\n collections=weight_collections)\n return embedding_ops.safe_embedding_lookup_sparse(\n weight,\n id_tensor,\n sparse_weights=weight_tensor,\n combiner=sparse_combiner,\n name='weighted_sum')\n\n\nclass _SequenceDenseColumn(_FeatureColumn):\n \"\"\"Represents dense sequence data.\"\"\"\n\n TensorSequenceLengthPair = collections.namedtuple( # pylint: disable=invalid-name\n 'TensorSequenceLengthPair', ['dense_tensor', 'sequence_length'])\n\n @abc.abstractmethod\n def _get_sequence_dense_tensor(\n self, inputs, weight_collections=None, trainable=None):\n \"\"\"Returns a `TensorSequenceLengthPair`.\"\"\"\n pass\n\n\nclass _LazyBuilder(object):\n \"\"\"Handles caching of transformations while building the model.\n\n `_FeatureColumn` specifies how to digest an input column to the network. Some\n feature columns require data transformations. This class caches those\n transformations.\n\n Some features may be used in more than one place. 
For example, one can use a\n bucketized feature by itself and a cross with it. In that case we\n should create only one bucketization op instead of creating ops for each\n feature column separately. To handle re-use of transformed columns,\n `_LazyBuilder` caches all previously transformed columns.\n\n Example:\n We're trying to use the following `_FeatureColumn`s:\n\n ```python\n bucketized_age = fc.bucketized_column(fc.numeric_column(\"age\"), ...)\n keywords = fc.categorical_column_with_hash_buckets(\"keywords\", ...)\n age_X_keywords = fc.crossed_column([bucketized_age, \"keywords\"])\n ... = linear_model(features,\n [bucketized_age, keywords, age_X_keywords]\n ```\n\n If we transform each column independently, then we'll get duplication of\n bucketization (one for cross, one for bucketization itself).\n The `_LazyBuilder` eliminates this duplication.\n \"\"\"\n\n def __init__(self, features):\n \"\"\"Creates a `_LazyBuilder`.\n\n Args:\n features: A mapping from feature column to objects that are `Tensor` or\n `SparseTensor`, or can be converted to same via\n `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key\n signifies a base feature (not-transformed). A `_FeatureColumn` key\n means that this `Tensor` is the output of an existing `_FeatureColumn`\n which can be reused.\n \"\"\"\n self._features = features.copy()\n self._feature_tensors = {}\n\n def get(self, key):\n \"\"\"Returns a `Tensor` for the given key.\n\n A `str` key is used to access a base feature (not-transformed). When a\n `_FeatureColumn` is passed, the transformed feature is returned if it\n already exists, otherwise the given `_FeatureColumn` is asked to provide its\n transformed output, which is then cached.\n\n Args:\n key: a `str` or a `_FeatureColumn`.\n\n Returns:\n The transformed `Tensor` corresponding to the `key`.\n\n Raises:\n ValueError: if key is not found or a transformed `Tensor` cannot be\n computed.\n \"\"\"\n if key in self._feature_tensors:\n # FeatureColumn is already transformed or converted.\n return self._feature_tensors[key]\n\n if key in self._features:\n feature_tensor = self._get_raw_feature_as_tensor(key)\n self._feature_tensors[key] = feature_tensor\n return feature_tensor\n\n if isinstance(key, six.string_types):\n raise ValueError('Feature {} is not in features dictionary.'.format(key))\n\n if not isinstance(key, _FeatureColumn):\n raise TypeError('\"key\" must be either a \"str\" or \"_FeatureColumn\". '\n 'Provided: {}'.format(key))\n\n column = key\n logging.debug('Transforming feature_column %s.', column)\n transformed = column._transform_feature(self) # pylint: disable=protected-access\n if transformed is None:\n raise ValueError('Column {} is not supported.'.format(column.name))\n self._feature_tensors[column] = transformed\n return transformed\n\n def _get_raw_feature_as_tensor(self, key):\n \"\"\"Gets the raw_feature (keyed by `key`) as `tensor`.\n\n The raw feature is converted to (sparse) tensor and maybe expand dim.\n\n For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if\n the rank is 1. This supports dynamic rank also. 
For rank 0 raw feature, will\n error out as it is not supported.\n\n Args:\n key: A `str` key to access the raw feature.\n\n Returns:\n A `Tensor` or `SparseTensor`.\n\n Raises:\n ValueError: if the raw feature has rank 0.\n \"\"\"\n raw_feature = self._features[key]\n feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(\n raw_feature)\n\n def expand_dims(input_tensor):\n # Input_tensor must have rank 1.\n if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n return sparse_ops.sparse_reshape(\n input_tensor, [array_ops.shape(input_tensor)[0], 1])\n else:\n return array_ops.expand_dims(input_tensor, -1)\n\n rank = feature_tensor.get_shape().ndims\n if rank is not None:\n if rank == 0:\n raise ValueError(\n 'Feature (key: {}) cannot have rank 0. Give: {}'.format(\n key, feature_tensor))\n return feature_tensor if rank != 1 else expand_dims(feature_tensor)\n\n # Handle dynamic rank.\n with ops.control_dependencies([\n check_ops.assert_positive(\n array_ops.rank(feature_tensor),\n message='Feature (key: {}) cannot have rank 0. Given: {}'.format(\n key, feature_tensor))]):\n return control_flow_ops.cond(\n math_ops.equal(1, array_ops.rank(feature_tensor)),\n lambda: expand_dims(feature_tensor),\n lambda: feature_tensor)\n\n\n# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py\ndef _shape_offsets(shape):\n \"\"\"Returns moving offset for each dimension given shape.\"\"\"\n offsets = []\n for dim in reversed(shape):\n if offsets:\n offsets.append(dim * offsets[-1])\n else:\n offsets.append(dim)\n offsets.reverse()\n return offsets\n\n\n# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py\ndef _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):\n \"\"\"Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.\n\n If `input_tensor` is already a `SparseTensor`, just return it.\n\n Args:\n input_tensor: A string or integer `Tensor`.\n ignore_value: Entries in `dense_tensor` equal to this value will be\n absent from the resulting `SparseTensor`. 
If `None`, default value of\n `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).\n\n Returns:\n A `SparseTensor` with the same shape as `input_tensor`.\n\n Raises:\n ValueError: when `input_tensor`'s rank is `None`.\n \"\"\"\n input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(\n input_tensor)\n if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n return input_tensor\n with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):\n if ignore_value is None:\n if input_tensor.dtype == dtypes.string:\n # Exception due to TF strings are converted to numpy objects by default.\n ignore_value = ''\n elif input_tensor.dtype.is_integer:\n ignore_value = -1 # -1 has a special meaning of missing feature\n else:\n # NOTE: `as_numpy_dtype` is a property, so with the parentheses this is\n # constructing a new numpy object of the given type, which yields the\n # default value for that type.\n ignore_value = input_tensor.dtype.as_numpy_dtype()\n ignore_value = math_ops.cast(\n ignore_value, input_tensor.dtype, name='ignore_value')\n indices = array_ops.where(\n math_ops.not_equal(input_tensor, ignore_value), name='indices')\n return sparse_tensor_lib.SparseTensor(\n indices=indices,\n values=array_ops.gather_nd(input_tensor, indices, name='values'),\n dense_shape=array_ops.shape(\n input_tensor, out_type=dtypes.int64, name='dense_shape'))\n\n\ndef _normalize_feature_columns(feature_columns):\n \"\"\"Normalizes the `feature_columns` input.\n\n This method converts the `feature_columns` to list type as best as it can. In\n addition, verifies the type and other parts of feature_columns, required by\n downstream library.\n\n Args:\n feature_columns: The raw feature columns, usually passed by users.\n\n Returns:\n The normalized feature column list.\n\n Raises:\n ValueError: for any invalid inputs, such as empty, duplicated names, etc.\n \"\"\"\n if isinstance(feature_columns, _FeatureColumn):\n feature_columns = [feature_columns]\n\n if isinstance(feature_columns, collections.Iterator):\n feature_columns = list(feature_columns)\n\n if isinstance(feature_columns, dict):\n raise ValueError('Expected feature_columns to be iterable, found dict.')\n\n for column in feature_columns:\n if not isinstance(column, _FeatureColumn):\n raise ValueError('Items of feature_columns must be a _FeatureColumn. '\n 'Given (type {}): {}.'.format(type(column), column))\n if not feature_columns:\n raise ValueError('feature_columns must not be empty.')\n name_to_column = {}\n for column in feature_columns:\n if column.name in name_to_column:\n raise ValueError('Duplicate feature column name found for columns: {} '\n 'and {}. This usually means that these columns refer to '\n 'same base feature. 
Either one must be discarded or a '\n 'duplicated but renamed item must be inserted in '\n 'features dict.'.format(column,\n name_to_column[column.name]))\n name_to_column[column.name] = column\n\n return feature_columns\n\n\nclass _NumericColumn(_DenseColumn,\n collections.namedtuple('_NumericColumn', [\n 'key', 'shape', 'default_value', 'dtype',\n 'normalizer_fn'\n ])):\n \"\"\"see `numeric_column`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {\n self.key:\n parsing_ops.FixedLenFeature(self.shape, self.dtype,\n self.default_value)\n }\n\n def _transform_feature(self, inputs):\n input_tensor = inputs.get(self.key)\n if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n raise ValueError(\n 'The corresponding Tensor of numerical column must be a Tensor. '\n 'SparseTensor is not supported. key: {}'.format(self.key))\n if self.normalizer_fn is not None:\n input_tensor = self.normalizer_fn(input_tensor)\n return math_ops.cast(input_tensor, dtypes.float32)\n\n @property\n def _variable_shape(self):\n return tensor_shape.TensorShape(self.shape)\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n \"\"\"Returns dense `Tensor` representing numeric feature.\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n weight_collections: Unused `weight_collections` since no variables are\n created in this function.\n trainable: Unused `trainable` bool since no variables are created in\n this function.\n\n Returns:\n Dense `Tensor` created within `_transform_feature`.\n \"\"\"\n # Do nothing with weight_collections and trainable since no variables are\n # created in this function.\n del weight_collections\n del trainable\n # Feature has been already transformed. 
Return the intermediate\n # representation created by _transform_feature.\n return inputs.get(self)\n\n\nclass _BucketizedColumn(_DenseColumn, _CategoricalColumn,\n collections.namedtuple('_BucketizedColumn', [\n 'source_column', 'boundaries'])):\n \"\"\"See `bucketized_column`.\"\"\"\n\n @property\n def name(self):\n return '{}_bucketized'.format(self.source_column.name)\n\n @property\n def _parse_example_spec(self):\n return self.source_column._parse_example_spec # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n source_tensor = inputs.get(self.source_column)\n return math_ops._bucketize( # pylint: disable=protected-access\n source_tensor,\n boundaries=self.boundaries)\n\n @property\n def _variable_shape(self):\n return tensor_shape.TensorShape(\n tuple(self.source_column.shape) + (len(self.boundaries) + 1,))\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n del weight_collections\n del trainable\n input_tensor = inputs.get(self)\n return array_ops.one_hot(\n indices=math_ops.cast(input_tensor, dtypes.int64),\n depth=len(self.boundaries) + 1,\n on_value=1.,\n off_value=0.)\n\n @property\n def _num_buckets(self):\n # By construction, source_column is always one-dimensional.\n return (len(self.boundaries) + 1) * self.source_column.shape[0]\n\n def _get_sparse_tensors(self, inputs, weight_collections=None,\n trainable=None):\n \"\"\"Converts dense inputs to SparseTensor so downstream code can use it.\"\"\"\n input_tensor = inputs.get(self)\n batch_size = array_ops.shape(input_tensor)[0]\n # By construction, source_column is always one-dimensional.\n source_dimension = self.source_column.shape[0]\n\n i1 = array_ops.reshape(\n array_ops.tile(\n array_ops.expand_dims(math_ops.range(0, batch_size), 1),\n [1, source_dimension]),\n (-1,))\n i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])\n # Flatten the bucket indices and unique them across dimensions\n # E.g. 
2nd dimension indices will range from k to 2*k-1 with k buckets\n bucket_indices = (\n array_ops.reshape(input_tensor, (-1,)) +\n (len(self.boundaries) + 1) * i2)\n\n indices = math_ops.cast(\n array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)\n dense_shape = math_ops.cast(\n array_ops.stack([batch_size, source_dimension]), dtypes.int64)\n sparse_tensor = sparse_tensor_lib.SparseTensor(\n indices=indices,\n values=bucket_indices,\n dense_shape=dense_shape)\n return _CategoricalColumn.IdWeightPair(sparse_tensor, None)\n\n\nclass _EmbeddingColumn(\n _DenseColumn, _SequenceDenseColumn,\n collections.namedtuple(\n '_EmbeddingColumn',\n ('categorical_column', 'dimension', 'combiner', 'layer_creator',\n 'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):\n \"\"\"See `embedding_column`.\"\"\"\n\n @property\n def name(self):\n if not hasattr(self, '_name'):\n self._name = '{}_embedding'.format(self.categorical_column.name)\n return self._name\n\n @property\n def _parse_example_spec(self):\n return self.categorical_column._parse_example_spec # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n return inputs.get(self.categorical_column)\n\n @property\n def _variable_shape(self):\n if not hasattr(self, '_shape'):\n self._shape = tensor_shape.TensorShape([self.dimension])\n return self._shape\n\n def _get_dense_tensor_internal(self,\n inputs,\n weight_collections=None,\n trainable=None):\n \"\"\"Private method that follows the signature of _get_dense_tensor.\"\"\"\n # Get sparse IDs and weights.\n sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access\n inputs, weight_collections=weight_collections, trainable=trainable)\n sparse_ids = sparse_tensors.id_tensor\n sparse_weights = sparse_tensors.weight_tensor\n\n embedding_weights = self.layer_creator(\n weight_collections=weight_collections,\n scope=variable_scope.get_variable_scope())\n\n if self.ckpt_to_load_from is not None:\n to_restore = embedding_weights\n if isinstance(to_restore, variables.PartitionedVariable):\n to_restore = to_restore._get_variable_list() # pylint: disable=protected-access\n checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {\n self.tensor_name_in_ckpt: to_restore\n })\n\n # Return embedding lookup result.\n return embedding_ops.safe_embedding_lookup_sparse(\n embedding_weights=embedding_weights,\n sparse_ids=sparse_ids,\n sparse_weights=sparse_weights,\n combiner=self.combiner,\n name='%s_weights' % self.name,\n max_norm=self.max_norm)\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In embedding_column: {}. '\n 'categorical_column must not be of type _SequenceCategoricalColumn. '\n 'Suggested fix A: If you wish to use input_layer, use a '\n 'non-sequence categorical_column_with_*. '\n 'Suggested fix B: If you wish to create sequence input, use '\n 'sequence_input_layer instead of input_layer. '\n 'Given (type {}): {}'.format(\n self.name, type(self.categorical_column),\n self.categorical_column))\n return self._get_dense_tensor_internal(\n inputs=inputs,\n weight_collections=weight_collections,\n trainable=trainable)\n\n def _get_sequence_dense_tensor(\n self, inputs, weight_collections=None, trainable=None):\n if not isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In embedding_column: {}. 
'\n 'categorical_column must be of type _SequenceCategoricalColumn '\n 'to use sequence_input_layer. '\n 'Suggested fix: Use one of sequence_categorical_column_with_*. '\n 'Given (type {}): {}'.format(\n self.name, type(self.categorical_column),\n self.categorical_column))\n dense_tensor = self._get_dense_tensor_internal( # pylint: disable=protected-access\n inputs=inputs,\n weight_collections=weight_collections,\n trainable=trainable)\n\n sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n sequence_length = fc_utils.sequence_length_from_sparse_tensor(\n sparse_tensors.id_tensor)\n return _SequenceDenseColumn.TensorSequenceLengthPair(\n dense_tensor=dense_tensor, sequence_length=sequence_length)\n\n\ndef _get_graph_for_variable(var):\n if isinstance(var, variables.PartitionedVariable):\n return list(var)[0].graph\n else:\n return var.graph\n\n\nclass _SharedEmbeddingColumn(\n _DenseColumn, _SequenceDenseColumn,\n collections.namedtuple(\n '_SharedEmbeddingColumn',\n ('categorical_column', 'dimension', 'combiner', 'initializer',\n 'shared_embedding_collection_name', 'ckpt_to_load_from',\n 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):\n \"\"\"See `embedding_column`.\"\"\"\n\n @property\n def name(self):\n if not hasattr(self, '_name'):\n self._name = '{}_shared_embedding'.format(self.categorical_column.name)\n return self._name\n\n @property\n def _var_scope_name(self):\n return self.shared_embedding_collection_name\n\n @property\n def _parse_example_spec(self):\n return self.categorical_column._parse_example_spec # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n return inputs.get(self.categorical_column)\n\n @property\n def _variable_shape(self):\n if not hasattr(self, '_shape'):\n self._shape = tensor_shape.TensorShape([self.dimension])\n return self._shape\n\n def _get_dense_tensor_internal(self,\n inputs,\n weight_collections=None,\n trainable=None):\n \"\"\"Private method that follows the signature of _get_dense_tensor.\"\"\"\n # This method is called from a variable_scope with name _var_scope_name,\n # which is shared among all shared embeddings. Open a name_scope here, so\n # that the ops for different columns have distinct names.\n with ops.name_scope(None, default_name=self.name):\n # Get sparse IDs and weights.\n sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access\n inputs, weight_collections=weight_collections, trainable=trainable)\n sparse_ids = sparse_tensors.id_tensor\n sparse_weights = sparse_tensors.weight_tensor\n\n embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access\n shared_embedding_collection = ops.get_collection(\n self.shared_embedding_collection_name)\n if shared_embedding_collection:\n if len(shared_embedding_collection) > 1:\n raise ValueError(\n 'Collection {} can only contain one variable. '\n 'Suggested fix A: Choose a unique name for this collection. '\n 'Suggested fix B: Do not add any variables to this collection. '\n 'The feature_column library already adds a variable under the '\n 'hood.'.format(shared_embedding_collection))\n embedding_weights = shared_embedding_collection[0]\n if embedding_weights.get_shape() != embedding_shape:\n raise ValueError(\n 'Shared embedding collection {} contains variable {} of '\n 'unexpected shape {}. Expected shape is {}. '\n 'Suggested fix A: Choose a unique name for this collection. 
'\n 'Suggested fix B: Do not add any variables to this collection. '\n 'The feature_column library already adds a variable under the '\n 'hood.'.format(self.shared_embedding_collection_name,\n embedding_weights.name,\n embedding_weights.get_shape(), embedding_shape))\n else:\n embedding_weights = variable_scope.get_variable(\n name='embedding_weights',\n shape=embedding_shape,\n dtype=dtypes.float32,\n initializer=self.initializer,\n trainable=self.trainable and trainable,\n collections=weight_collections)\n ops.add_to_collection(self.shared_embedding_collection_name,\n embedding_weights)\n if self.ckpt_to_load_from is not None:\n to_restore = embedding_weights\n if isinstance(to_restore, variables.PartitionedVariable):\n to_restore = to_restore._get_variable_list() # pylint: disable=protected-access\n checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {\n self.tensor_name_in_ckpt: to_restore\n })\n\n # Return embedding lookup result.\n return embedding_ops.safe_embedding_lookup_sparse(\n embedding_weights=embedding_weights,\n sparse_ids=sparse_ids,\n sparse_weights=sparse_weights,\n combiner=self.combiner,\n name='%s_weights' % self.name,\n max_norm=self.max_norm)\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In embedding_column: {}. '\n 'categorical_column must not be of type _SequenceCategoricalColumn. '\n 'Suggested fix A: If you wish to use input_layer, use a '\n 'non-sequence categorical_column_with_*. '\n 'Suggested fix B: If you wish to create sequence input, use '\n 'sequence_input_layer instead of input_layer. '\n 'Given (type {}): {}'.format(self.name, type(self.categorical_column),\n self.categorical_column))\n return self._get_dense_tensor_internal(\n inputs=inputs,\n weight_collections=weight_collections,\n trainable=trainable)\n\n def _get_sequence_dense_tensor(self,\n inputs,\n weight_collections=None,\n trainable=None):\n if not isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In embedding_column: {}. '\n 'categorical_column must be of type _SequenceCategoricalColumn '\n 'to use sequence_input_layer. '\n 'Suggested fix: Use one of sequence_categorical_column_with_*. '\n 'Given (type {}): {}'.format(self.name, type(self.categorical_column),\n self.categorical_column))\n dense_tensor = self._get_dense_tensor_internal( # pylint: disable=protected-access\n inputs=inputs,\n weight_collections=weight_collections,\n trainable=trainable)\n sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n sequence_length = fc_utils.sequence_length_from_sparse_tensor(\n sparse_tensors.id_tensor)\n return _SequenceDenseColumn.TensorSequenceLengthPair(\n dense_tensor=dense_tensor, sequence_length=sequence_length)\n\n\ndef _check_shape(shape, key):\n \"\"\"Returns shape if it's valid, raises error otherwise.\"\"\"\n assert shape is not None\n if not nest.is_sequence(shape):\n shape = [shape]\n shape = tuple(shape)\n for dimension in shape:\n if not isinstance(dimension, six.integer_types):\n raise TypeError('shape dimensions must be integer. '\n 'shape: {}, key: {}'.format(shape, key))\n if dimension < 1:\n raise ValueError('shape dimensions must be greater than 0. 
'\n 'shape: {}, key: {}'.format(shape, key))\n return shape\n\n\nclass _HashedCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_HashedCategoricalColumn',\n ['key', 'hash_bucket_size', 'dtype'])):\n \"\"\"see `categorical_column_with_hash_bucket`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(self.dtype)}\n\n def _transform_feature(self, inputs):\n input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))\n if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n raise ValueError('SparseColumn input must be a SparseTensor.')\n\n fc_utils.assert_string_or_int(\n input_tensor.dtype,\n prefix='column_name: {} input_tensor'.format(self.key))\n\n if self.dtype.is_integer != input_tensor.dtype.is_integer:\n raise ValueError(\n 'Column dtype and SparseTensors dtype must be compatible. '\n 'key: {}, column dtype: {}, tensor dtype: {}'.format(\n self.key, self.dtype, input_tensor.dtype))\n\n if self.dtype == dtypes.string:\n sparse_values = input_tensor.values\n else:\n sparse_values = string_ops.as_string(input_tensor.values)\n\n sparse_id_values = string_ops.string_to_hash_bucket_fast(\n sparse_values, self.hash_bucket_size, name='lookup')\n return sparse_tensor_lib.SparseTensor(\n input_tensor.indices, sparse_id_values, input_tensor.dense_shape)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return self.hash_bucket_size\n\n def _get_sparse_tensors(self, inputs, weight_collections=None,\n trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\nclass _VocabularyFileCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_VocabularyFileCategoricalColumn', (\n 'key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'dtype',\n 'default_value'\n ))):\n \"\"\"See `categorical_column_with_vocabulary_file`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(self.dtype)}\n\n def _transform_feature(self, inputs):\n input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))\n\n if self.dtype.is_integer != input_tensor.dtype.is_integer:\n raise ValueError(\n 'Column dtype and SparseTensors dtype must be compatible. 
'\n 'key: {}, column dtype: {}, tensor dtype: {}'.format(\n self.key, self.dtype, input_tensor.dtype))\n\n fc_utils.assert_string_or_int(\n input_tensor.dtype,\n prefix='column_name: {} input_tensor'.format(self.key))\n\n key_dtype = self.dtype\n if input_tensor.dtype.is_integer:\n # `index_table_from_file` requires 64-bit integer keys.\n key_dtype = dtypes.int64\n input_tensor = math_ops.cast(input_tensor, dtypes.int64)\n\n return lookup_ops.index_table_from_file(\n vocabulary_file=self.vocabulary_file,\n num_oov_buckets=self.num_oov_buckets,\n vocab_size=self.vocabulary_size,\n default_value=self.default_value,\n key_dtype=key_dtype,\n name='{}_lookup'.format(self.key)).lookup(input_tensor)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return self.vocabulary_size + self.num_oov_buckets\n\n def _get_sparse_tensors(\n self, inputs, weight_collections=None, trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\nclass _VocabularyListCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_VocabularyListCategoricalColumn', (\n 'key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'\n ))):\n \"\"\"See `categorical_column_with_vocabulary_list`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(self.dtype)}\n\n def _transform_feature(self, inputs):\n input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))\n\n if self.dtype.is_integer != input_tensor.dtype.is_integer:\n raise ValueError(\n 'Column dtype and SparseTensors dtype must be compatible. '\n 'key: {}, column dtype: {}, tensor dtype: {}'.format(\n self.key, self.dtype, input_tensor.dtype))\n\n fc_utils.assert_string_or_int(\n input_tensor.dtype,\n prefix='column_name: {} input_tensor'.format(self.key))\n\n key_dtype = self.dtype\n if input_tensor.dtype.is_integer:\n # `index_table_from_tensor` requires 64-bit integer keys.\n key_dtype = dtypes.int64\n input_tensor = math_ops.cast(input_tensor, dtypes.int64)\n\n return lookup_ops.index_table_from_tensor(\n vocabulary_list=tuple(self.vocabulary_list),\n default_value=self.default_value,\n num_oov_buckets=self.num_oov_buckets,\n dtype=key_dtype,\n name='{}_lookup'.format(self.key)).lookup(input_tensor)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return len(self.vocabulary_list) + self.num_oov_buckets\n\n def _get_sparse_tensors(\n self, inputs, weight_collections=None, trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\nclass _IdentityCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_IdentityCategoricalColumn', (\n 'key', 'num_buckets', 'default_value'\n ))):\n\n \"\"\"See `categorical_column_with_identity`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}\n\n def _transform_feature(self, inputs):\n input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))\n\n if not input_tensor.dtype.is_integer:\n raise ValueError(\n 'Invalid input, not integer. 
key: {} dtype: {}'.format(\n self.key, input_tensor.dtype))\n\n values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')\n num_buckets = math_ops.cast(\n self.num_buckets, dtypes.int64, name='num_buckets')\n zero = math_ops.cast(0, dtypes.int64, name='zero')\n if self.default_value is None:\n # Fail if values are out-of-range.\n assert_less = check_ops.assert_less(\n values, num_buckets, data=(values, num_buckets),\n name='assert_less_than_num_buckets')\n assert_greater = check_ops.assert_greater_equal(\n values, zero, data=(values,),\n name='assert_greater_or_equal_0')\n with ops.control_dependencies((assert_less, assert_greater)):\n values = array_ops.identity(values)\n else:\n # Assign default for out-of-range values.\n values = array_ops.where(\n math_ops.logical_or(\n values < zero, values >= num_buckets, name='out_of_range'),\n array_ops.fill(\n dims=array_ops.shape(values),\n value=math_ops.cast(self.default_value, dtypes.int64),\n name='default_values'), values)\n\n return sparse_tensor_lib.SparseTensor(\n indices=input_tensor.indices,\n values=values,\n dense_shape=input_tensor.dense_shape)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return self.num_buckets\n\n def _get_sparse_tensors(\n self, inputs, weight_collections=None, trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\nclass _WeightedCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_WeightedCategoricalColumn', (\n 'categorical_column', 'weight_feature_key', 'dtype'\n ))):\n \"\"\"See `weighted_categorical_column`.\"\"\"\n\n @property\n def name(self):\n return '{}_weighted_by_{}'.format(\n self.categorical_column.name, self.weight_feature_key)\n\n @property\n def _parse_example_spec(self):\n config = self.categorical_column._parse_example_spec # pylint: disable=protected-access\n if self.weight_feature_key in config:\n raise ValueError('Parse config {} already exists for {}.'.format(\n config[self.weight_feature_key], self.weight_feature_key))\n config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)\n return config\n\n @property\n def _num_buckets(self):\n return self.categorical_column._num_buckets # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n weight_tensor = inputs.get(self.weight_feature_key)\n if weight_tensor is None:\n raise ValueError('Missing weights {}.'.format(self.weight_feature_key))\n weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(\n weight_tensor)\n if self.dtype != weight_tensor.dtype.base_dtype:\n raise ValueError('Bad dtype, expected {}, but got {}.'.format(\n self.dtype, weight_tensor.dtype))\n if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):\n # The weight tensor can be a regular Tensor. 
In this case, sparsify it.\n weight_tensor = _to_sparse_input_and_drop_ignore_values(\n weight_tensor, ignore_value=0.0)\n if not weight_tensor.dtype.is_floating:\n weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)\n return (inputs.get(self.categorical_column), weight_tensor)\n\n def _get_sparse_tensors(\n self, inputs, weight_collections=None, trainable=None):\n del weight_collections\n del trainable\n tensors = inputs.get(self)\n return _CategoricalColumn.IdWeightPair(tensors[0], tensors[1])\n\n\nclass _CrossedColumn(\n _CategoricalColumn,\n collections.namedtuple('_CrossedColumn',\n ['keys', 'hash_bucket_size', 'hash_key'])):\n \"\"\"See `crossed_column`.\"\"\"\n\n @property\n def name(self):\n feature_names = []\n for key in _collect_leaf_level_keys(self):\n if isinstance(key, _FeatureColumn):\n feature_names.append(key.name)\n else: # key must be a string\n feature_names.append(key)\n return '_X_'.join(sorted(feature_names))\n\n @property\n def _parse_example_spec(self):\n config = {}\n for key in self.keys:\n if isinstance(key, _FeatureColumn):\n config.update(key._parse_example_spec) # pylint: disable=protected-access\n else: # key must be a string\n config.update({key: parsing_ops.VarLenFeature(dtypes.string)})\n return config\n\n def _transform_feature(self, inputs):\n feature_tensors = []\n for key in _collect_leaf_level_keys(self):\n if isinstance(key, six.string_types):\n feature_tensors.append(inputs.get(key))\n elif isinstance(key, _CategoricalColumn):\n ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access\n if ids_and_weights.weight_tensor is not None:\n raise ValueError(\n 'crossed_column does not support weight_tensor, but the given '\n 'column populates weight_tensor. '\n 'Given column: {}'.format(key.name))\n feature_tensors.append(ids_and_weights.id_tensor)\n else:\n raise ValueError('Unsupported column type. 
Given: {}'.format(key))\n return sparse_ops.sparse_cross_hashed(\n inputs=feature_tensors,\n num_buckets=self.hash_bucket_size,\n hash_key=self.hash_key)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return self.hash_bucket_size\n\n def _get_sparse_tensors(self, inputs, weight_collections=None,\n trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\ndef _collect_leaf_level_keys(cross):\n \"\"\"Collects base keys by expanding all nested crosses.\n\n Args:\n cross: A `_CrossedColumn`.\n\n Returns:\n A list of strings or `_CategoricalColumn` instances.\n \"\"\"\n leaf_level_keys = []\n for k in cross.keys:\n if isinstance(k, _CrossedColumn):\n leaf_level_keys.extend(_collect_leaf_level_keys(k))\n else:\n leaf_level_keys.append(k)\n return leaf_level_keys\n\n\nclass _IndicatorColumn(_DenseColumn, _SequenceDenseColumn,\n collections.namedtuple('_IndicatorColumn',\n ['categorical_column'])):\n \"\"\"Represents a one-hot column for use in deep networks.\n\n Args:\n categorical_column: A `_CategoricalColumn` which is created by\n `categorical_column_with_*` function.\n \"\"\"\n\n @property\n def name(self):\n return '{}_indicator'.format(self.categorical_column.name)\n\n def _transform_feature(self, inputs):\n \"\"\"Returns dense `Tensor` representing feature.\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n\n Returns:\n Transformed feature `Tensor`.\n\n Raises:\n ValueError: if input rank is not known at graph building time.\n \"\"\"\n id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n id_tensor = id_weight_pair.id_tensor\n weight_tensor = id_weight_pair.weight_tensor\n\n # If the underlying column is weighted, return the input as a dense tensor.\n if weight_tensor is not None:\n weighted_column = sparse_ops.sparse_merge(\n sp_ids=id_tensor,\n sp_values=weight_tensor,\n vocab_size=int(self._variable_shape[-1]))\n # Remove (?, -1) index.\n weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],\n weighted_column.dense_shape)\n # Use scatter_nd to merge duplicated indices if existed,\n # instead of sparse_tensor_to_dense.\n return array_ops.scatter_nd(weighted_column.indices,\n weighted_column.values,\n weighted_column.dense_shape)\n\n dense_id_tensor = sparse_ops.sparse_tensor_to_dense(\n id_tensor, default_value=-1)\n\n # One hot must be float for tf.concat reasons since all other inputs to\n # input_layer are float32.\n one_hot_id_tensor = array_ops.one_hot(\n dense_id_tensor,\n depth=self._variable_shape[-1],\n on_value=1.0,\n off_value=0.0)\n\n # Reduce to get a multi-hot per example.\n return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])\n\n @property\n def _parse_example_spec(self):\n return self.categorical_column._parse_example_spec # pylint: disable=protected-access\n\n @property\n def _variable_shape(self):\n \"\"\"Returns a `TensorShape` representing the shape of the dense `Tensor`.\"\"\"\n return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n \"\"\"Returns dense `Tensor` representing feature.\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n weight_collections: Unused `weight_collections` since no variables are\n created in this function.\n trainable: Unused `trainable` bool since no variables are created in\n this function.\n\n Returns:\n Dense `Tensor` 
created within `_transform_feature`.\n\n Raises:\n ValueError: If `categorical_column` is a `_SequenceCategoricalColumn`.\n \"\"\"\n # Do nothing with weight_collections and trainable since no variables are\n # created in this function.\n del weight_collections\n del trainable\n if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In indicator_column: {}. '\n 'categorical_column must not be of type _SequenceCategoricalColumn. '\n 'Suggested fix A: If you wish to use input_layer, use a '\n 'non-sequence categorical_column_with_*. '\n 'Suggested fix B: If you wish to create sequence input, use '\n 'sequence_input_layer instead of input_layer. '\n 'Given (type {}): {}'.format(\n self.name, type(self.categorical_column),\n self.categorical_column))\n # Feature has been already transformed. Return the intermediate\n # representation created by _transform_feature.\n return inputs.get(self)\n\n def _get_sequence_dense_tensor(\n self, inputs, weight_collections=None, trainable=None):\n # Do nothing with weight_collections and trainable since no variables are\n # created in this function.\n del weight_collections\n del trainable\n if not isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In indicator_column: {}. '\n 'categorical_column must be of type _SequenceCategoricalColumn '\n 'to use sequence_input_layer. '\n 'Suggested fix: Use one of sequence_categorical_column_with_*. '\n 'Given (type {}): {}'.format(\n self.name, type(self.categorical_column),\n self.categorical_column))\n # Feature has been already transformed. Return the intermediate\n # representation created by _transform_feature.\n dense_tensor = inputs.get(self)\n sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n sequence_length = fc_utils.sequence_length_from_sparse_tensor(\n sparse_tensors.id_tensor)\n return _SequenceDenseColumn.TensorSequenceLengthPair(\n dense_tensor=dense_tensor, sequence_length=sequence_length)\n\n\ndef _verify_static_batch_size_equality(tensors, columns):\n \"\"\"Validates that the first dim (batch size) of all tensors are equal or None.\n\n Args:\n tensors: list of tensors to check.\n columns: list of feature columns matching tensors. Will be used for error\n messaging.\n\n Raises:\n ValueError: if one of the tensors has a variant batch size\n \"\"\"\n # bath_size is a tf.compat.v1.Dimension object.\n expected_batch_size = None\n for i in range(0, len(tensors)):\n if tensors[i].shape.dims[0].value is not None:\n if expected_batch_size is None:\n bath_size_column_index = i\n expected_batch_size = tensors[i].shape.dims[0]\n elif not expected_batch_size.is_compatible_with(tensors[i].shape.dims[0]):\n raise ValueError(\n 'Batch size (first dimension) of each feature must be same. 
'\n 'Batch size of columns ({}, {}): ({}, {})'.format(\n columns[bath_size_column_index].name, columns[i].name,\n expected_batch_size, tensors[i].shape.dims[0]))\n\n\nclass _SequenceCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple(\n '_SequenceCategoricalColumn', ['categorical_column'])):\n \"\"\"Represents sequences of categorical data.\"\"\"\n\n @property\n def name(self):\n return self.categorical_column.name\n\n @property\n def _parse_example_spec(self):\n return self.categorical_column._parse_example_spec # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n return self.categorical_column._transform_feature(inputs) # pylint: disable=protected-access\n\n @property\n def _num_buckets(self):\n return self.categorical_column._num_buckets # pylint: disable=protected-access\n\n def _get_sparse_tensors(self, inputs, weight_collections=None,\n trainable=None):\n sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n id_tensor = sparse_tensors.id_tensor\n weight_tensor = sparse_tensors.weight_tensor\n\n # Expands third dimension, if necessary so that embeddings are not\n # combined during embedding lookup. If the tensor is already 3D, leave\n # as-is.\n shape = array_ops.shape(id_tensor)\n # Compute the third dimension explicitly instead of setting it to -1, as\n # that doesn't work for dynamically shaped tensors with 0-length at runtime.\n # This happens for empty sequences.\n target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]\n id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)\n if weight_tensor is not None:\n weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)\n\n return _CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)\n"
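The escaped source above is TensorFlow's private feature-column implementation; the public entry points it backs are `tf.feature_column.*`, `input_layer`, and `linear_model`. Below is a minimal sketch of how `_DenseColumn` and `_CategoricalColumn` instances flow through those entry points. The feature names, vocabulary, and data are illustrative assumptions, not taken from the source:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Raw input features: a dense numeric column and a sparse categorical one
# (hypothetical names and values, chosen only for illustration).
features = {
    'age': tf.constant([[23.0], [41.0]]),
    'city': tf.SparseTensor(indices=[[0, 0], [1, 0]],
                            values=['tokyo', 'paris'],
                            dense_shape=[2, 1]),
}

# _NumericColumn / _BucketizedColumn / _VocabularyListCategoricalColumn /
# _EmbeddingColumn instances are created through the public constructors.
age = tf.feature_column.numeric_column('age')
age_buckets = tf.feature_column.bucketized_column(age, boundaries=[18, 25, 30, 40])
city = tf.feature_column.categorical_column_with_vocabulary_list(
    'city', vocabulary_list=['tokyo', 'paris', 'london'])
city_emb = tf.feature_column.embedding_column(city, dimension=4)

# input_layer concatenates the _get_dense_tensor output of each
# _DenseColumn, exactly as the pseudo code in the _DenseColumn docstring shows.
dense_input = tf.feature_column.input_layer(features, [age, age_buckets, city_emb])

# linear_model builds one weighted sum per column via _create_weighted_sum:
# dense columns use a matmul, categorical columns a sparse embedding lookup.
predictions = tf.feature_column.linear_model(features, [age_buckets, city])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.tables_initializer())  # needed for the vocabulary lookup table
    print(sess.run(dense_input).shape)  # (2, 10): 1 numeric + 5 one-hot + 4 embedding
    print(sess.run(predictions).shape)  # (2, 1)
```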
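`_create_categorical_column_weighted_sum` notes that the weighted sum of a categorical column is implemented as a sparse embedding lookup with `combiner='sum'`, which is mathematically identical to a multi-hot dot product. A small check of that equivalence using the public `tf.nn.embedding_lookup_sparse`; the weights and ids below are made-up values:

```python
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Weights for a 3-bucket categorical column with one output unit.
w = np.array([[0.5], [1.5], [2.5]], dtype=np.float32)  # shape (num_buckets, units)

# Multi-hot view: x = [0, 1, 1] selects buckets 1 and 2.
x = np.array([[0.0, 1.0, 1.0]], dtype=np.float32)
dense_sum = x.dot(w)  # [[1.5 + 2.5]] = [[4.0]]

# Sparse view of the same example: ids {1, 2} in a single row.
sp_ids = tf.SparseTensor(indices=[[0, 0], [0, 1]],
                         values=tf.constant([1, 2], dtype=tf.int64),
                         dense_shape=[1, 2])
lookup_sum = tf.nn.embedding_lookup_sparse(
    tf.constant(w), sp_ids, sp_weights=None, combiner='sum')

with tf.Session() as sess:
    print(dense_sum)             # [[4.]]
    print(sess.run(lookup_sum))  # [[4.]] -- identical, as the docstring claims
```

(With zero-based indexing, the docstring's second example sums `w[1] + w[2]`; the written `w[2] + w[3]` looks like an off-by-one slip.)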
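`_LazyBuilder`'s docstring describes transformation caching: a column shared by several consumers (a bucketized column used both directly and inside a cross, say) is transformed once and the result reused. A toy re-implementation of just that caching contract; this sketch is not the real class, and `CountingColumn` is a hypothetical stand-in for a `_FeatureColumn`:

```python
class LazyBuilderSketch(object):
    """Toy version of the caching contract described for _LazyBuilder.

    Raw features are served from the features dict; column objects are
    transformed at most once, so repeated consumers share one result.
    """

    def __init__(self, features):
        self._features = dict(features)
        self._cache = {}

    def get(self, key):
        if key in self._cache:
            return self._cache[key]
        if key in self._features:            # base (untransformed) feature
            self._cache[key] = self._features[key]
        else:                                # assume a column-like object
            self._cache[key] = key._transform_feature(self)
        return self._cache[key]


class CountingColumn(object):
    """Fake column that records how often it is transformed."""

    def __init__(self):
        self.calls = 0

    def _transform_feature(self, builder):
        self.calls += 1
        return builder.get('age')  # pretend transformation of a base feature


builder = LazyBuilderSketch({'age': [23, 41]})
col = CountingColumn()
builder.get(col)
builder.get(col)
print(col.calls)  # 1 -- the second get() is served from the cache
```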
] | [
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.feature_column.utils.assert_key_is_string",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.platform.tf_logging.debug",
"tensorflow.python.platform.gfile.GFile",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.python.ops.check_ops.assert_greater_equal",
"tensorflow.python.ops.array_ops.gather_nd",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.ops.string_ops.string_to_hash_bucket_fast",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.template.make_template",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.logical_or",
"tensorflow.python.training.checkpoint_utils.init_from_checkpoint",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.sparse_ops.sparse_reshape",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.sparse_tensor.convert_to_tensor_or_sparse_tensor",
"tensorflow.python.ops.embedding_ops.safe_embedding_lookup_sparse",
"tensorflow.python.ops.parsing_ops.FixedLenFeature",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.ops.math_ops._bucketize",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.ops.array_ops.one_hot",
"tensorflow.python.ops.array_ops.scatter_nd",
"numpy.array",
"tensorflow.python.ops.sparse_ops.sparse_slice",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.check_ops.assert_less",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.parsing_ops.VarLenFeature",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.ops.string_ops.as_string",
"tensorflow.python.ops.sparse_ops.sparse_cross_hashed",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.python.feature_column.utils.check_default_value",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.feature_column.utils.sequence_length_from_sparse_tensor",
"tensorflow.python.ops.math_ops.reduce_sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
JosephMeghanath/apptuit-py | [
"ae0d038931efca94435e3a5efe5e4a4ed6f1956e"
] | [
"tests/test_query.py"
] | [
"\"\"\"\nTests for the query API\n\"\"\"\n\nimport sys\n\ntry:\n from unittest.mock import Mock, patch\nexcept ImportError:\n from mock import Mock, patch\n\nfrom nose.tools import assert_is_not_none, assert_is_none, assert_equals, assert_true, assert_raises\nimport pandas as pd\nimport requests\nfrom apptuit import Apptuit, ApptuitException, apptuit_client\n\n\ndef get_mock_response():\n \"\"\"\n Returns a mock response for the get request\n \"\"\"\n with open('tests/response.json') as f:\n return f.readlines()[0]\n\n\ndef test_api_endpoint_param():\n \"\"\"\n Test the api_endpoint param of apptuit client\n \"\"\"\n _ = Apptuit(sanitize_mode=None, token=\"test_token\", api_endpoint=\"https://api.apptuit.ai/\")\n with assert_raises(ValueError):\n _ = Apptuit(sanitize_mode=None, token=\"test_token\", api_endpoint=None)\n with assert_raises(ValueError):\n _ = Apptuit(sanitize_mode=None, token=\"test_token\", api_endpoint=\"\")\n\n\ndef do_query(mock_get):\n \"\"\"\n Execute the query API and return the mock response\n \"\"\"\n mock_get.return_value.content = get_mock_response()\n mock_get.return_value.status_code = 200\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n return client.query(query, start, end)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_query(mock_get):\n \"\"\"\n Test a valid query and make sure results are returned\n \"\"\"\n resp = do_query(mock_get)\n assert_is_not_none(resp[0])\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_query_result_number_index(mock_get):\n \"\"\"\n Test that we can access the output by number based indexing from\n the query result\n \"\"\"\n resp = do_query(mock_get)\n df = resp[0].to_df()\n assert_is_not_none(df)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_query_result_string_index(mock_get):\n \"\"\"\n Test that we can access the output by the name of the metric from the\n query result\n \"\"\"\n resp = do_query(mock_get)\n df = resp[\"nyc.taxi.rides\"].to_df()\n assert_is_not_none(df)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_df_shape(mock_get):\n \"\"\"\n Verify the dataframe shape\n \"\"\"\n resp = do_query(mock_get)\n df = resp[0].to_df()\n assert_equals(df.shape, (432, 1))\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_number_of_series(mock_get):\n \"\"\"\n Verify the number of time series in the query result\n \"\"\"\n resp = do_query(mock_get)\n assert_equals(len(resp[0].series), 1)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_data(mock_get):\n \"\"\"\n Verify the data returned from the query\n \"\"\"\n expected_df = pd.read_csv('tests/nyc.taxi.rides.csv', index_col=0, header=0, parse_dates=True)\n resp = do_query(mock_get)\n df = resp[0].to_df()\n assert_true(df.equals(expected_df))\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_metadata(mock_get):\n \"\"\"\n Test that the metadata of the query results are as expected\n \"\"\"\n expected_series_name = \"nyc.taxi.rides\"\n expected_tags = {\"host\": \"localhost\"}\n resp = do_query(mock_get)\n series = resp[0].series[0]\n assert_equals(series.name.metric, expected_series_name)\n assert_equals(series.name.tags, expected_tags)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_multiple_retries(mock_get):\n \"\"\"\n Test that the query API attempts retries when an error is returned from\n the backend API. 
Since we patch the status code as 504 and create an HTTPError\n as a side effect of the get call, we cannot verify that the retries succeed.\n \"\"\"\n mock_get.return_value.content = get_mock_response()\n mock_get.return_value.status_code = 504\n mock_get.side_effect = requests.exceptions.HTTPError\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n with assert_raises(ApptuitException):\n client.query(query, start, end, retry_count=3)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_get_error(mock_get):\n \"\"\"\n Test that when the retry_count is 0 for the query API we get an exception\n \"\"\"\n mock_get.return_value.content = get_mock_response()\n mock_get.return_value.status_code = 504\n mock_get.side_effect = requests.exceptions.HTTPError()\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n with assert_raises(ApptuitException):\n client.query(query, start, end, retry_count=0)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_empty_dps(mock_get):\n \"\"\"\n Test that we get an exception if the dps array is empty in the JSON response\n \"\"\"\n mock_get.return_value.content = '{\"outputs\":[{\"id\":\"nyc:taxi:rides\",\"result\":[{ \\\n \"metric\":\"nyc.taxi.rides\",\"tags\":{\"host\":\"localhost\"}, \\\n \"aggregatedTags\":[],\"dps\":[]}]}], \\\n \"hints\":[],\"query\": {\"querytext\":\"fetch(\\'nyc.taxi.rides\\')\", \\\n \"startTime\":1406831400, \\\n \"startTimeHumanReadableSYS\":\"July 31, 2014 6:30:00 PM UTC\", \\\n \"startTimeHumanReadableIST\":\"August 1, 2014 12:00:00 AM IST\", \\\n \"endTime\":1407609000, \"endTimeHumanReadableSYS\":\"August 9,2014 6:30:00 PM UTC\", \\\n \"endTimeHumanReadableIST\":\"August 10, 2014 12:00:00 AM IST\", \\\n \"digest\":\"Mdt8e+HDjnGByMMJdEnTnNdUxKo=:60845\", \"optionsdigest\":\"\", \\\n \"options\":\"{}\"},\"query_stats\":{\"compactedRows\":217, \"processedRows\":217, \\\n \"dataPointsProcessed\":219, \"numSeries\":1, \"queryTimeMillis\":152, \\\n \"hbaseTimeMillis\":21},\"timing_diagnostics\": \\\n [{\"tag\":\"QUERY_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":152},{\"tag\":\"AST_BUILD_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":29}, \\\n {\"tag\":\"AST_JYTHON_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":29},{\"tag\":\"STATEMENT_VALIDATION_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_BUILDING_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0},{\"tag\":\"QUERY_OPTIMIZATION_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":106},{\"tag\":\"SCHEMA_SERVICE_FETCH_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":93}, \\\n {\"tag\":\"DATASOURCE_FETCH_RUN_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":32},{\"tag\":\"TSD_HBASE_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":21}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":52},{\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_TAGS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":51}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_DPS_TIME\", \"instanceCount\":2, \\\n 
\"totalElapsedTimeMillis\":0},{\"tag\":\"DATASOURCE_FETCH_DP_DECODE_CORE_PROCESSING_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_DS_WAIT_TIME\", \"instanceCount\":4, \\\n \"totalElapsedTimeMillis\":0},{\"tag\":\"DATASOURCE_FETCH_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":12}, \\\n {\"tag\":\"PLAN_EXECUTION_JPY_REMOVE_DF_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":17},{\"tag\":\"RESULT_DATA_MARSHALLING_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}]}'\n mock_get.return_value.status_code = 200\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n client.query(query, start, end)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_empty_output(mock_get):\n \"\"\"\n Test the case when the outputs array is empty in the response\n \"\"\"\n mock_get.return_value.content = '{\"outputs\":[],\"hints\":[],\"query\": \\\n {\"querytext\":\"fetch(\\'nyc.taxi.rides\\')\", \\\n \"startTime\":1406831400, \"startTimeHumanReadableSYS\":\"July 31, 2014 6:30:00 PM UTC\", \\\n \"startTimeHumanReadableIST\":\"August 1, 2014 12:00:00 AM IST\", \"endTime\":1407609000, \\\n \"endTimeHumanReadableSYS\":\"August 9, 2014 6:30:00 PM UTC\", \\\n \"endTimeHumanReadableIST\":\"August 10, 2014 12:00:00 AM IST\", \\\n \"digest\":\"Mdt8e+HDjnGByMMJdEnTnNdUxKo=:60845\", \"optionsdigest\":\"\", \\\n \"options\":\"{}\"},\"query_stats\":{\"compactedRows\":217, \"processedRows\":217, \\\n \"dataPointsProcessed\":219, \"numSeries\":1, \"queryTimeMillis\":152, \\\n \"hbaseTimeMillis\":21},\"timing_diagnostics\": \\\n [{\"tag\":\"QUERY_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":152},{\"tag\":\"AST_BUILD_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":29}, \\\n {\"tag\":\"AST_JYTHON_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":29},{\"tag\":\"STATEMENT_VALIDATION_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_BUILDING_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0},{\"tag\":\"QUERY_OPTIMIZATION_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":106},{\"tag\":\"SCHEMA_SERVICE_FETCH_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":93}, \\\n {\"tag\":\"DATASOURCE_FETCH_RUN_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":32},{\"tag\":\"TSD_HBASE_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":21}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":52},{\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_TAGS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":51}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_DPS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_CORE_PROCESSING_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_DS_WAIT_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":12},{\"tag\":\"PLAN_EXECUTION_JPY_REMOVE_DF_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":17}, \\\n 
{\"tag\":\"RESULT_DATA_MARSHALLING_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0}]}'\n mock_get.return_value.status_code = 200\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n resp = client.query(query, start, end)\n assert_is_none(resp)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_empty_results(mock_get):\n \"\"\"\n Test that when results array is empty in the response and we try to access the\n outputs in the results object we get a KeyError\n \"\"\"\n mock_get.return_value.content = '{\"outputs\":[{\"id\":\"nyc:taxi:rides\", \\\n \"result\":[]}],\"hints\":[],\"query\": \\\n {\"querytext\":\"fetch(\\'nyc.taxi.rides\\')\", \\\n \"startTime\":1406831400, \\\n \"startTimeHumanReadableSYS\":\"July 31, 2014 6:30:00 PM UTC\", \\\n \"startTimeHumanReadableIST\":\"August 1, 2014 12:00:00 AM IST\", \\\n \"endTime\":1407609000, \\\n \"endTimeHumanReadableSYS\":\"August 9, 2014 6:30:00 PM UTC\", \\\n \"endTimeHumanReadableIST\":\"August 10, 2014 12:00:00 AM IST\", \\\n \"digest\":\"Mdt8e+HDjnGByMMJdEnTnNdUxKo=:60845\", \\\n \"optionsdigest\":\"\", \"options\":\"{}\"}, \\\n \"query_stats\":{\"compactedRows\":217, \"processedRows\":217, \\\n \"dataPointsProcessed\":219, \"numSeries\":1, \"queryTimeMillis\":152, \\\n \"hbaseTimeMillis\":21}, \\\n \"timing_diagnostics\":[{\"tag\":\"QUERY_EXECUTION_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":152}, \\\n {\"tag\":\"AST_BUILD_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":29},{\"tag\":\"AST_JYTHON_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":29}, \\\n {\"tag\":\"STATEMENT_VALIDATION_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_BUILDING_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0},{\"tag\":\"QUERY_OPTIMIZATION_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":106},{\"tag\":\"SCHEMA_SERVICE_FETCH_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":93}, \\\n {\"tag\":\"DATASOURCE_FETCH_RUN_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":32},{\"tag\":\"TSD_HBASE_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":21}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":52}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_TAGS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":51}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_DPS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_CORE_PROCESSING_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_DS_WAIT_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":12}, \\\n {\"tag\":\"PLAN_EXECUTION_JPY_REMOVE_DF_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":17}, \\\n {\"tag\":\"RESULT_DATA_MARSHALLING_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0}]}'\n mock_get.return_value.status_code = 200\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n resp 
= client.query(query, start, end)\n with assert_raises(KeyError):\n _ = resp[0]\n\ndef test_timeseries_obj_creation():\n \"\"\"\n Negative test cases for TimeSeries object when either index or\n values is missing (not both at the same time)\n \"\"\"\n with assert_raises(ValueError):\n apptuit_client.TimeSeries('metric', {}, values=[3.14])\n\n with assert_raises(ValueError):\n apptuit_client.TimeSeries('metric', {}, index=[123456])\n\n with assert_raises(ValueError):\n apptuit_client.TimeSeries('metric', {}, index=[123455, 123456], values=[3.14])\n\n with assert_raises(ValueError):\n apptuit_client.TimeSeries(metric=None, tags=None)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_missing_pandas(mock_get):\n orig_modules = sys.modules.copy()\n orig_pandas = orig_modules['pandas']\n orig_modules['pandas'] = None\n with patch.dict(sys.modules, orig_modules):\n if orig_pandas:\n sys.modules['pandas'] = None\n resp = do_query(mock_get)\n with assert_raises(ImportError):\n resp[0].to_df()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
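Note on the record above: each retry test follows the same mock pattern, patching requests.get so that every call reports a 504 and raises. A minimal self-contained sketch of that pattern (hypothetical URL, no Apptuit dependency) is:

import requests
from unittest.mock import patch

@patch('requests.get')
def run_check(mock_get):
    # Force every GET to fail: status 504 plus an HTTPError on each call,
    # so a retrying client exhausts its attempts and must raise its own error.
    mock_get.return_value.status_code = 504
    mock_get.side_effect = requests.exceptions.HTTPError
    try:
        requests.get('https://api.example.com/query')
    except requests.exceptions.HTTPError:
        print('HTTPError raised as expected')

run_check()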
ktaebum/tf2-gnn | [
"3c763274f6586fb8d4cf3d74e3e0fd03b970b43f",
"4eb6bf367623fef28c937b632e3caa83288de206"
] | [
"tf2_gnn/test/layers/test_RGCN.py",
"tf2_gnn/layers/nodes_to_graph_representation.py"
] | [
"\"\"\"Tests for the RGCN message passing layer.\"\"\"\nimport tensorflow as tf\nimport pytest\n\nfrom tf2_gnn.layers.message_passing import MessagePassingInput, RGCN\n\n\nshape_test_data = [\n (tf.TensorShape(dims=(None, 3)), tuple(tf.TensorShape(dims=(None, 2)) for _ in range(3)), 5),\n (tf.TensorShape(dims=(None, 1)), tuple(tf.TensorShape(dims=(None, 2)) for _ in range(1)), 1),\n (tf.TensorShape(dims=(None, 7)), tuple(tf.TensorShape(dims=(None, 2)) for _ in range(14)), 7),\n]\n\n\[email protected](\"node_embedding_shape,adjacency_list_shapes,hidden_dim\", shape_test_data)\ndef test_rgcn_layer_has_expected_number_of_trainable_variables_when_not_using_source_and_target(\n node_embedding_shape, adjacency_list_shapes, hidden_dim\n):\n # Given:\n rgcn_params = RGCN.get_default_hyperparameters()\n rgcn_params[\"hidden_dim\"] = hidden_dim\n rgcn_params[\"use_target_state_as_input\"] = False\n rgcn_layer = RGCN(rgcn_params)\n\n # When:\n rgcn_layer.build(\n MessagePassingInput(\n node_embeddings=node_embedding_shape, adjacency_lists=adjacency_list_shapes\n )\n )\n trainable_vars = rgcn_layer.trainable_variables\n all_vars = rgcn_layer.variables\n\n # Then:\n assert len(trainable_vars) == len(adjacency_list_shapes) # One dense layer per layer type.\n assert len(all_vars) == len(trainable_vars) # There should be no un-trainable variables.\n\n for trainable_var in trainable_vars:\n assert tuple(trainable_var.shape.as_list()) == (node_embedding_shape[-1], hidden_dim)\n\n\[email protected](\"node_embedding_shape,adjacency_list_shapes,hidden_dim\", shape_test_data)\ndef test_rgcn_layer_has_expected_number_of_trainable_variables_when_using_source_and_target(\n node_embedding_shape, adjacency_list_shapes, hidden_dim\n):\n # Given:\n rgcn_params = RGCN.get_default_hyperparameters()\n rgcn_params[\"hidden_dim\"] = hidden_dim\n rgcn_params[\"use_target_state_as_input\"] = True\n rgcn_layer = RGCN(rgcn_params)\n\n # When:\n rgcn_layer.build(\n MessagePassingInput(\n node_embeddings=node_embedding_shape, adjacency_lists=adjacency_list_shapes\n )\n )\n trainable_vars = rgcn_layer.trainable_variables\n all_vars = rgcn_layer.variables\n\n # Then:\n assert len(trainable_vars) == len(adjacency_list_shapes) # One dense layer per layer type.\n assert len(all_vars) == len(trainable_vars) # There should be no un-trainable variables.\n for trainable_var in trainable_vars:\n assert tuple(trainable_var.shape.as_list()) == (2 * node_embedding_shape[-1], hidden_dim)\n",
"\"\"\"Graph representation aggregation layer.\"\"\"\nfrom abc import abstractmethod\nfrom typing import List, NamedTuple, Optional\n\nimport tensorflow as tf\nfrom dpu_utils.tf2utils import MLP, get_activation_function_by_name, unsorted_segment_softmax\n\n\nclass NodesToGraphRepresentationInput(NamedTuple):\n \"\"\"A named tuple to hold input to layers computing graph representations from nodes\n representations.\"\"\"\n\n node_embeddings: tf.Tensor\n node_to_graph_map: tf.Tensor\n num_graphs: tf.Tensor\n\n\nclass NodesToGraphRepresentation(tf.keras.layers.Layer):\n \"\"\"Abstract class to compute graph representations from node representations.\n\n Throughout we use the following abbreviations in shape descriptions:\n * V: number of nodes (across all graphs)\n * VD: node representation dimension\n * G: number of graphs\n * GD: graph representation dimension\n \"\"\"\n\n def __init__(self, graph_representation_size: int, **kwargs):\n super().__init__(**kwargs)\n self._graph_representation_size = graph_representation_size\n\n @abstractmethod\n def call(self, inputs: NodesToGraphRepresentationInput, training: bool = False):\n \"\"\"Call the layer.\n\n Args:\n inputs: A tuple containing two items:\n node_embeddings: float32 tensor of shape [V, VD], the representation of each\n node in all graphs.\n node_to_graph_map: int32 tensor of shape [V] with values in range [0, G-1],\n mapping each node to a graph ID.\n num_graphs: int32 scalar, specifying the number G of graphs.\n training: A bool that denotes whether we are in training mode.\n\n Returns:\n float32 tensor of shape [G, GD]\n \"\"\"\n pass\n\n\nclass WeightedSumGraphRepresentation(NodesToGraphRepresentation):\n \"\"\"Layer computing graph representations as weighted sum of node representations.\n The weights are either computed from the original node representations (\"self-attentional\")\n or by a softmax across the nodes of a graph.\n Supports splitting operation into parallely computed independent \"heads\" which can focus\n on different aspects.\n\n Throughout we use the following abbreviations in shape descriptions:\n * V: number of nodes (across all graphs)\n * VD: node representation dimension\n * G: number of graphs\n * GD: graph representation dimension\n * H: number of heads\n \"\"\"\n\n def __init__(\n self,\n graph_representation_size: int,\n num_heads: int,\n weighting_fun: str = \"softmax\", # One of {\"softmax\", \"sigmoid\"}\n scoring_mlp_layers: List[int] = [128],\n scoring_mlp_activation_fun: str = \"ReLU\",\n scoring_mlp_use_biases: bool = False,\n scoring_mlp_dropout_rate: float = 0.2,\n transformation_mlp_layers: List[int] = [128],\n transformation_mlp_activation_fun: str = \"ReLU\",\n transformation_mlp_use_biases: bool = False,\n transformation_mlp_dropout_rate: float = 0.2,\n transformation_mlp_result_lower_bound: Optional[float] = None,\n transformation_mlp_result_upper_bound: Optional[float] = None,\n **kwargs,\n ):\n \"\"\"\n Args:\n graph_representation_size: Size of the computed graph representation.\n num_heads: Number of independent heads to use to compute weights.\n weighting_fun: \"sigmoid\" ([0, 1] weights for each node computed from its\n representation), \"softmax\" ([0, 1] weights for each node computed\n from all nodes in same graph), \"average\" (weight is fixed to 1/num_nodes),\n or \"none\" (weight is fixed to 1).\n scoring_mlp_layers: MLP layer structure for computing raw scores turned into\n weights.\n scoring_mlp_activation_fun: MLP activcation function for computing raw scores\n turned into 
weights.\n scoring_mlp_dropout_rate: MLP inter-layer dropout rate for computing raw scores\n turned into weights.\n transformation_mlp_layers: MLP layer structure for computing graph representations.\n transformation_mlp_activation_fun: MLP activcation function for computing graph\n representations.\n transformation_mlp_dropout_rate: MLP inter-layer dropout rate for computing graph\n representations.\n transformation_mlp_result_lower_bound: Lower bound that results of the transformation\n MLP will be clipped to before being scaled and summed up.\n This is particularly useful to limit the magnitude of results when using \"sigmoid\"\n or \"none\" as weighting function.\n transformation_mlp_result_upper_bound: Upper bound that results of the transformation\n MLP will be clipped to before being scaled and summed up.\n \"\"\"\n super().__init__(graph_representation_size, **kwargs)\n assert (\n graph_representation_size % num_heads == 0\n ), f\"Number of heads {num_heads} needs to divide final representation size {graph_representation_size}!\"\n assert weighting_fun.lower() in {\n \"none\",\n \"average\",\n \"softmax\",\n \"sigmoid\",\n }, f\"Weighting function {weighting_fun} unknown, {{'softmax', 'sigmoid', 'none', 'average'}} supported.\"\n\n self._num_heads = num_heads\n self._weighting_fun = weighting_fun.lower()\n self._transformation_mlp_activation_fun = get_activation_function_by_name(\n transformation_mlp_activation_fun\n )\n self._transformation_mlp_result_lower_bound = transformation_mlp_result_lower_bound\n self._transformation_mlp_result_upper_bound = transformation_mlp_result_upper_bound\n\n # Build sub-layers:\n if self._weighting_fun not in (\"none\", \"average\"):\n self._scoring_mlp = MLP(\n out_size=self._num_heads,\n hidden_layers=scoring_mlp_layers,\n use_biases=scoring_mlp_use_biases,\n activation_fun=get_activation_function_by_name(\n scoring_mlp_activation_fun\n ),\n dropout_rate=scoring_mlp_dropout_rate,\n name=\"ScoringMLP\",\n )\n\n self._transformation_mlp = MLP(\n out_size=self._graph_representation_size,\n hidden_layers=transformation_mlp_layers,\n use_biases=transformation_mlp_use_biases,\n activation_fun=self._transformation_mlp_activation_fun,\n dropout_rate=transformation_mlp_dropout_rate,\n name=\"TransformationMLP\",\n )\n\n def build(self, input_shapes: NodesToGraphRepresentationInput):\n with tf.name_scope(\"WeightedSumGraphRepresentation\"):\n if self._weighting_fun not in (\"none\", \"average\"):\n self._scoring_mlp.build(\n tf.TensorShape((None, input_shapes.node_embeddings[-1]))\n )\n self._transformation_mlp.build(tf.TensorShape((None, input_shapes.node_embeddings[-1])))\n\n super().build(input_shapes)\n\n \"\"\"\n @tf.function(\n input_signature=(\n NodesToGraphRepresentationInput(\n node_embeddings=tf.TensorSpec(shape=tf.TensorShape((None, None)), dtype=tf.float32),\n node_to_graph_map=tf.TensorSpec(shape=tf.TensorShape((None,)), dtype=tf.int32),\n num_graphs=tf.TensorSpec(shape=(), dtype=tf.int32),\n ),\n tf.TensorSpec(shape=(), dtype=tf.bool),\n )\n )\n \"\"\"\n def call(self, inputs: NodesToGraphRepresentationInput, training: bool = False):\n # (1) compute weights for each node/head pair:\n if self._weighting_fun not in (\"none\", \"average\"):\n scores = self._scoring_mlp(inputs.node_embeddings, training=training) # Shape [V, H]\n if self._weighting_fun == \"sigmoid\":\n weights = tf.nn.sigmoid(scores) # Shape [V, H]\n elif self._weighting_fun == \"softmax\":\n weights_per_head = []\n for head_idx in range(self._num_heads):\n head_scores = 
scores[:, head_idx] # Shape [V]\n head_weights = unsorted_segment_softmax(\n logits=head_scores,\n segment_ids=inputs.node_to_graph_map,\n num_segments=inputs.num_graphs,\n ) # Shape [V]\n weights_per_head.append(tf.expand_dims(head_weights, -1))\n weights = tf.concat(weights_per_head, axis=1) # Shape [V, H]\n else:\n raise ValueError()\n\n # (2) compute representations for each node/head pair:\n node_reprs = self._transformation_mlp_activation_fun(\n self._transformation_mlp(inputs.node_embeddings, training=training)\n ) # Shape [V, GD]\n if self._transformation_mlp_result_lower_bound is not None:\n node_reprs = tf.maximum(node_reprs, self._transformation_mlp_result_lower_bound)\n if self._transformation_mlp_result_upper_bound is not None:\n node_reprs = tf.minimum(node_reprs, self._transformation_mlp_result_upper_bound)\n node_reprs = tf.reshape(\n node_reprs,\n shape=(-1, self._num_heads, self._graph_representation_size // self._num_heads),\n ) # Shape [V, H, GD//H]\n\n # (3) if necessary, weight representations and aggregate by graph:\n if self._weighting_fun == \"none\":\n node_reprs = tf.reshape(\n node_reprs, shape=(-1, self._graph_representation_size)\n ) # Shape [V, GD]\n graph_reprs = tf.math.segment_sum(\n data=node_reprs, segment_ids=inputs.node_to_graph_map\n ) # Shape [G, GD]\n elif self._weighting_fun == \"average\":\n node_reprs = tf.reshape(\n node_reprs, shape=(-1, self._graph_representation_size)\n ) # Shape [V, GD]\n graph_reprs = tf.math.segment_mean(\n data=node_reprs, segment_ids=inputs.node_to_graph_map\n ) # Shape [G, GD]\n else:\n weights = tf.expand_dims(weights, -1) # Shape [V, H, 1]\n weighted_node_reprs = weights * node_reprs # Shape [V, H, GD//H]\n\n weighted_node_reprs = tf.reshape(\n weighted_node_reprs, shape=(-1, self._graph_representation_size)\n ) # Shape [V, GD]\n graph_reprs = tf.math.segment_sum(\n data=weighted_node_reprs, segment_ids=inputs.node_to_graph_map\n ) # Shape [G, GD]\n\n return graph_reprs\n"
] | [
[
"tensorflow.TensorShape"
],
[
"tensorflow.TensorShape",
"tensorflow.nn.sigmoid",
"tensorflow.concat",
"tensorflow.maximum",
"tensorflow.minimum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.name_scope",
"tensorflow.math.segment_mean",
"tensorflow.math.segment_sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
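The WeightedSumGraphRepresentation layer in the record above reduces node embeddings to per-graph vectors with the segment ops listed in its apis field. A rough self-contained sketch of that pooling step (toy shapes, no tf2_gnn dependency):

import tensorflow as tf

# 5 nodes spread over 2 graphs; sum node embeddings per graph.
node_embeddings = tf.random.normal((5, 8))        # shape [V, GD]
node_to_graph_map = tf.constant([0, 0, 0, 1, 1])  # shape [V], sorted graph ids
graph_reprs = tf.math.segment_sum(node_embeddings, node_to_graph_map)
print(graph_reprs.shape)  # (2, 8)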
WeipengMO/flair | [
"e6c9990bcfdd1d2e585bab1f45b7f8dc68b21fbc"
] | [
"script/get_full_length_transcripts.py"
] | [
"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n'''\r\nAuthor : windz\r\nDate : 2020-04-15 15:26:26\r\nLastEditTime : 2021-10-09 16:43:49\r\nDescription : get full length transcripts from bam\r\n'''\r\n\r\n\r\nimport pysam\r\nimport pandas as pd\r\nimport click\r\n\r\n\r\[email protected]()\r\[email protected]('-i', '--infile', required=True)\r\[email protected]('--full_len', required=True)\r\[email protected]('--first_exon_path', required=True)\r\ndef main(infile, full_len, first_exon_path):\r\n #first_exon_path = '/public/home/mowp/test/nanopore_cdna/supplementary_data/representative_gene_model/representative_gene_first_exon.tsv'\r\n first_exon_df = pd.read_csv(first_exon_path, sep='\\t')\r\n first_exon_df.set_index(['gene_id'], inplace=True)\r\n first_exon_dict = first_exon_df.to_dict(orient='index')\r\n\r\n #infile = '/public/home/mowp/test/nanopore_cdna/aligned_data/fhh.tagged.mm2.sorted.bam'\r\n with pysam.AlignmentFile(infile, 'rb') as inbam:\r\n full_len_bam = pysam.AlignmentFile(full_len, 'wb', template=inbam)\r\n for read in inbam:\r\n read_gene_id = read.get_tag('gi')\r\n\r\n if read_gene_id in first_exon_dict:\r\n # 过滤与基因方向不一致的reads\r\n if first_exon_dict[read_gene_id]['strand'] == '+' and read.is_reverse:\r\n continue\r\n if first_exon_dict[read_gene_id]['strand'] == '-' and not read.is_reverse:\r\n continue\r\n\r\n if (first_exon_dict[read_gene_id]['strand'] == '+' and \r\n read.reference_start <= first_exon_dict[read_gene_id]['exon_end']):\r\n full_len_bam.write(read)\r\n elif (first_exon_dict[read_gene_id]['strand'] == '-' and\r\n read.reference_end >= first_exon_dict[read_gene_id]['exon_start']):\r\n full_len_bam.write(read)\r\n\r\n \r\n full_len_bam.close()\r\n pysam.index(full_len)\r\n\r\n \r\nif __name__ == \"__main__\":\r\n main()"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
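The script above builds its per-gene lookup by chaining pandas.read_csv, set_index and to_dict(orient='index'). The same pattern on a tiny in-memory TSV (gene id and coordinates are made up for illustration):

import io
import pandas as pd

tsv = io.StringIO('gene_id\tstrand\texon_start\texon_end\nAT1G01010\t+\t3631\t3913\n')
first_exon_df = pd.read_csv(tsv, sep='\t')
first_exon_dict = first_exon_df.set_index('gene_id').to_dict(orient='index')
print(first_exon_dict['AT1G01010']['strand'])  # prints '+'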
Moyzes-Campos/pykernels | [
"c8afdc79a15197ad3be2a0db0118f5e948577f49"
] | [
"tests/datasets.py"
] | [
"\"\"\"\nAccess layer for datasets used in tests\n\"\"\"\n\n__author__ = 'lejlot'\n\nimport numpy as np\n\ndef baseline_logic(operator):\n \"\"\" Creates 4-point dataset with given logical operator \"\"\"\n\n data = np.array([[1, 1], [0, 0], [1, 0], [0, 1]])\n labels = np.array([max(0, min(1, operator(*point))) for point in data])\n return data, labels\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
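For a quick sense of what baseline_logic returns, here is a usage sketch (operators taken from the standard library; the labels follow the truth table of the chosen operator):

import operator
import numpy as np

def baseline_logic(op):
    # Restated from the record above: a 4-point truth-table dataset.
    data = np.array([[1, 1], [0, 0], [1, 0], [0, 1]])
    labels = np.array([max(0, min(1, op(*point))) for point in data])
    return data, labels

X, y = baseline_logic(operator.and_)  # y == [1, 0, 0, 0]
X, y = baseline_logic(operator.xor)   # y == [0, 0, 1, 1]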
jeromekelleher/msprime_sim | [
"8ec1945290fcfd2889dbb2a677e21012162fbc89",
"8ec1945290fcfd2889dbb2a677e21012162fbc89"
] | [
"src/write.py",
"src/phenotypes.py"
] | [
"from __future__ import division\nimport msprime\nimport pandas as pd\nimport numpy as np\nimport os\n\ndef trees(out, tree_sequence, chr, m, n_pops, N, sim, vcf, sample_index):\n\t# DEV: Throw a warning if you try to do this and n_sims is high.\n\tvcf_name = out + \".vcf\"\n\twith open(vcf_name, \"w\") as vcf_file:\n\t\ttree_sequence.write_vcf(vcf_file, ploidy=2)\n\n\tprint(N)\n\t# DEV: update the function - no longer require N (double check this).\n\tN = int(tree_sequence.get_sample_size() / 2)\n\tprint(N)\n\t\n\t# Create altered IDs an Family IDs to replace the .fam file that we will create.\n\tfam_id = np.tile('msp', N)\n\tindex_old = [i for i in xrange(N)]\n\t# Have to change the '0' ID to something else, as plink doesn't like IIDs to be '0'.\n\tindex_old[0] = 'A'\n\t# Similarly for the new list of indices, plink doesn't like IIDs to be '0'.\n\tindex_new = [i for i in sample_index]\n\tmatches = [x for x in index_new if x == 0]\n\tif len(matches) == 1:\n\t\tindex_new[index_new.index(0)] = 'A'\n\n\t# Create a new table to define the re-indexing of the tree. \n\t# Writing to .vcf does not save the sample numbers, so we need to keep track of these and \n\t# replace them in the .fam file.\n\n\td={'old_fam':fam_id, 'old_within_fam':index_new, 'new_fam':fam_id, 'new_within_fam':index_old}\n\tdf=pd.DataFrame(d)\n\ttmp_index_tsv = out + '.index.tmp.tsv'\n\tdf.to_csv(tmp_index_tsv, sep='\\t', header=False, index=False)\n\n\tif vcf is False:\n\t\t# Note that the following line is OS dependent. OSX requires a gap after '-i'.\n\t\tos.system(\"sed -i.bak '1,/msp_0/ s/msp_0/msp_A/' \" + vcf_name)\n\t\t# Convert to Plink bed format - need to ensure that plink is in your path.\n\t\tbfile_out = out + \".chr\" + str(chr+1) + \".sim\" + str(sim+1)\n\t\tos.system(\"../plink/plink --vcf \" + vcf_name + \" --out \" + bfile_out + \" --make-bed\")\n\t\t# Now, fix the chromosome number and the names of the mutations.\n\t\tmut_names=np.core.defchararray.add('rs.' + str(chr+1) + \".\", np.arange(1,m+1).astype('str'))\n\t\tchr_vec=np.tile(chr+1, m)\n\t\td={'chr':chr_vec, 'rs': mut_names}\n\t\tdf=pd.DataFrame(d)\n\t\ttmp_tsv = out + '.tmp.tsv'\n\t\ttmp_bim = out + '.bim_tmp.tsv'\n\t\tdf.to_csv(tmp_tsv, sep='\\t', header=False, index=False)\n\t\tos.system('cut -f 3,4,5,6 ' + bfile_out + '.bim > ' + tmp_bim)\n\t\tos.system('paste ' + tmp_tsv + ' ' + tmp_bim + ' > ' + bfile_out + '.bim')\n\t\tos.system('rm ' + tmp_tsv + '; rm ' + tmp_bim)\n\n\t\t# Now remove the .vcf files.\n\t\tos.system('rm ' + vcf_name + '; rm ' + vcf_name + '.bak; rm ' + bfile_out + '.fam.bak')\n\n\t\tos.system('../plink/plink --update-ids ' + tmp_index_tsv + ' --bfile ' + bfile_out + ' --make-bed --out ' + bfile_out)\n\t\t# Rename 'A' to '0'.\n\t\tos.system(\"sed -i.bak 's/msp A/msp 0/' \" + bfile_out + '.fam')\n\t\t# os.system('rm ' + tmp_index_tsv)\n\t\t# Remove the .bak and temporary files\n\t\tos.system('rm ' + bfile_out + '*.bak')\n\t\tos.system('rm ' + bfile_out + '*~')\n\n\tpop_ann = np.empty(N)\n\n\tfor pops in xrange(n_pops):\n\t\tpop_leaves = tree_sequence.get_samples(population_id=pops)\n\t\tpop_ann[map(int, [x/2 for x in pop_leaves[0::2]])] = pops\n\n\tif chr==0:\n\t\tdf_pop=pd.DataFrame({'sample':sample_index, 'population':pop_ann.astype(int)})\n\t\tdf_pop.to_csv(out + \".sim\" + str(sim+1) + '.pop.tsv', sep='\\t', header=True, index=False)",
"from __future__ import division\nimport msprime\nimport numpy as np\nimport random\nimport tqdm\nimport scipy.stats as sp\nimport src.regressions as reg\nimport src.tools as tl\nimport src.tree_sequence as ts\nimport src.snpgetter as sg\nimport src.ldscores as ld\nimport src.printing as pr\nimport statsmodels.api as sm\nimport time, sys, traceback, argparse\nimport src.write as write\nimport pandas as pd\n\ndef case_control(y, prevalence, sample_prevalence, N):\n\t# Determine the liability threshold.\n\tp = prevalence\n\tT = sp.norm.ppf(1-p)\n\t\n\t# Index the cases.\n\tcases = [i for (i, x) in enumerate(y) if x >= T]\n\tmask = np.ones(len(y), dtype=bool)\n\tmask[cases] = False\n\tn_cases = len(cases)\n\n\tif sample_prevalence is None:\n\t\tn_controls = N - n_cases\n\telse:\n\t\tn_controls = int(((1-sample_prevalence) / sample_prevalence) * n_cases)\n\n\tcontrols = np.arange(N)\n\tif (N - n_cases) < n_controls:\n\t\tn_controls = N - n_cases\n\t\tlog.log('Warning: this condition should not hold - '\n\t\t\t'is sample prevalence close to population prevalence?')\n\t\tcontrols = controls[mask]\n\telse:\n\t\tcontrols = controls[mask][random.sample(range(N - n_cases), n_controls)]\n\n\tcontrols = sorted(controls)\n\n\treturn cases, controls, n_cases, n_controls, T\n\ndef get_phenotypes(args, N, n_pops, tree_sequence_list, m_total, log):\n\n\ty = np.zeros(N)\n\n\tif args.debug:\n\t\trandom.seed(1)\n\t\tnp.random.seed(1)\n\n\tif args.C_bool:\n\t\tC = np.random.binomial(1, args.C_bool_p, size=N)\n\t\tC = (C - np.mean(C)) / np.std(C)\n\t\tlog.log('Boolean covariate.')\n\t\tlog.log('Average kurtosis for these phenotypes: {K}.'.format(K=np.sum(C**4)/N))\n\telse:\n\t\tC = np.random.normal(loc=0, scale=1, size=N)\n\t\tC = (C - np.mean(C)) / np.std(C)\n\t\t# print the average kurtosis across the individuals\n\t\tlog.log('Normally distributed covariate. 
Kurtosis should be around 3.')\n\t\tlog.log('Average kurtosis for these phenotypes: {K}.'.format(K=np.sum(C**4)/N))\n\t\n\tif args.include_pop_strat is True and args.s2 > 0:\n\t\t# Get the means for the populations.\n\t\talpha = np.random.normal(loc=0, scale=np.sqrt(args.s2), size=n_pops)\n\t\tlog.log(alpha)\n\t\t# Add pop-strat additions to the phenotype vector, conditional on the population sampled from.\n\t\tfor pops in range(n_pops):\n\t\t\tpop_leaves = tree_sequence_list[0].get_samples(population_id=pops)\n\t\t\tlen(map(int, [x/2 for x in pop_leaves[0::2]]))\n\t\t\ty[map(int, [x/2 for x in pop_leaves[0::2]])] += alpha[pops]\n\n\tfor chr in range(args.n_chr):\n\t\tm_chr = int(tree_sequence_list[chr].get_num_mutations())\n\t\tlog.log('Picking causal variants and determining effect sizes in chromosome {chr}'.format(chr=chr+1))\n\t\t\n\t\tif (((1 + int(args.dominance) + int(args.gxe)) * args.p_causal) < 1) or args.same_causal_sites: # If the number of runs through the data is less than 1, run this speedup.\n\t\t\ttree_sequence_pheno_A, m_causal_A = ts.set_mutations_in_tree(tree_sequence_list[chr], args.p_causal)\n\t\t\tlog.log('Picked {m} additive causal variants out of {mc}'.format(m=m_causal_A, mc=m_chr))\n\n\t\t\tif args.same_causal_sites is False:\n\t\t\t\ttree_sequence_pheno_D, m_causal_D = ts.set_mutations_in_tree(tree_sequence_list[chr], args.p_causal)\n\t\t\t\tif args.h2_D > 0: log.log('Picked {m} dominance causal variants out of {mc}'.format(m=m_causal_D, mc=m_chr))\n\t\t\t\ttree_sequence_pheno_AC, m_causal_AC = ts.set_mutations_in_tree(tree_sequence_list[chr], args.p_causal)\n\t\t\t\tif args.h2_AC > 0: log.log('Picked {m} gxe causal variants out of {mc}'.format(m=m_causal_AC, mc=m_chr))\n\n\t\t\t\tif args.h2_A > 0:\n\t\t\t\t\tbeta_A = np.random.normal(loc=0, scale=np.sqrt(args.h2_A / (m_total * args.p_causal)), size=m_causal_A)\n\t\t\t\t\t# Get the phenotypes.\n\t\t\t\t\tk = 0\n\t\t\t\t\tlog.log('Determining phenotype data: additive.')\n\t\t\t\t\tfor variant in tl.progress(args.progress_bars, tree_sequence_pheno_A.variants(), total=m_causal_A): # Note, progress here refers you to tqdm which just creates a pretty progress bar.\n\t\t\t\t\t\tX_A = sg.nextSNP_add(variant)\n\t\t\t\t\t\t# Effect size on the phenotype.\n\t\t\t\t\t\ty += X_A * beta_A[k]\n\t\t\t\t\t\tk += 1\n\n\t\t\t\tif args.dominance and args.h2_D >0:\n\t\t\t\t\tbeta_D = np.random.normal(loc=0, scale=np.sqrt(args.h2_D / (m_total * args.p_causal)), size=m_causal_D)\n\t\t\t\t\tk = 0\n\t\t\t\t\tlog.log('Determining phenotype data: dominance.')\n\t\t\t\t\tfor variant in tl.progress(args.progress_bars, tree_sequence_pheno_D.variants(), total=m_causal_D): # Note, progress here refers you to tqdm which just creates a pretty progress bar.\n\t\t\t\t\t\tX_A, X_D = sg.nextSNP(variant)\n\t\t\t\t\t\t# Effect size on the phenotype.\n\t\t\t\t\t\ty += X_D * beta_D[k]\n\t\t\t\t\t\tk += 1\n\n\t\t\t\tif args.gxe and args.h2_AC > 0:\n\t\t\t\t\tbeta_AC = np.random.normal(loc=0, scale=np.sqrt(args.h2_AC / (m_total * args.p_causal)), size=m_causal_AC)\n\t\t\t\t\t# If examining interaction with a covariate, pick the values of the covariate, and normalise.\n\t\t\t\t\tk = 0\n\t\t\t\t\tlog.log('Determining phenotype data: gene x environment.')\n\t\t\t\t\tfor variant in tl.progress(args.progress_bars, tree_sequence_pheno_AC.variants(), total=m_causal_AC): # Note, progress here refers you to tqdm which just creates a pretty progress bar.\n\t\t\t\t\t\tX_A = sg.nextSNP_add(variant)\n\t\t\t\t\t\t# Effect size on the phenotype.\n\t\t\t\t\t\ty += C * 
X_A * beta_AC[k]\n\t\t\t\t\t\tk += 1\n\t\t\telse:\n\t\t\t\tbeta_A, beta_D, beta_AC = np.zeros(m_causal_A), np.zeros(m_causal_A), np.zeros(m_causal_A) \n\t\t\t\tif args.h2_A > 0:\n\t\t\t\t\tbeta_A = np.random.normal(loc=0, scale=np.sqrt(args.h2_A / (m_total * args.p_causal)), size=m_causal_A)\n\t\t\t\t\n\t\t\t\tif args.dominance and args.h2_D > 0:\n\t\t\t\t\tbeta_D = np.random.normal(loc=0, scale=np.sqrt(args.h2_D / (m_total * args.p_causal)), size=m_causal_A)\n\n\t\t\t\tif args.gxe and args.h2_AC > 0:\n\t\t\t\t\tbeta_AC = np.random.normal(loc=0, scale=np.sqrt(args.h2_AC / (m_total * args.p_causal)), size=m_causal_A)\n\n\t\t\t\tk = 0\n\t\t\t\tlog.log('Determining phenotype data')\n\n\t\t\t\t# Note that we use just one tree_sequence here, because the causal sites are the same in this portion of the code.\n\t\t\t\tfor variant in tl.progress(args.progress_bars, tree_sequence_pheno_A.variants(), total=m_causal_A): # Note, progress here refers you to tqdm which just creates a pretty progress bar.\n\t\t\t\t\tX_A, X_D = sg.nextSNP(variant)\n\t\t\t\t\t# Effect size on the phenotype.\n\t\t\t\t\ty += X_A * beta_A[k] + X_D * beta_D[k] + C * X_A * beta_AC[k]\n\t\t\t\t\tk += 1\n\n\t\telse:\n\t\t\tm_causal = int(m_chr * args.p_causal)\n\t\t\tbeta_A, beta_D, beta_AC = np.zeros(m_chr), np.zeros(m_chr), np.zeros(m_chr)\n\t\t\tbeta_A_causal_index = random.sample(range(m_chr), m_causal)\n\t\t\tlog.log('Picked {m} additive causal variants out of {mc}'.format(m=m_causal, mc=m_chr))\n\n\t\t\tif args.h2_A > 0:\n\t\t\t\tbeta_A[beta_A_causal_index] = np.random.normal(loc=0, scale=np.sqrt(args.h2_A / (m_total * args.p_causal)), size=m_causal)\n\t\t\t\n\t\t\tif args.dominance:\n\t\t\t\tbeta_D, beta_D_causal_index = np.zeros(m_chr), random.sample(range(m_chr), m_causal)\n\t\t\t\tlog.log('Picked {m} dominance causal variants out of {mc}'.format(m=m_causal, mc=m_chr))\n\t\t\t\tif args.h2_D > 0:\n\t\t\t\t\tbeta_D[beta_D_causal_index] = np.random.normal(loc=0, scale=np.sqrt(args.h2_D / (m_total * args.p_causal)), size=m_causal)\n\n\t\t\tif args.gxe:\n\t\t\t\tbeta_AC, beta_AC_causal_index = np.zeros(m_chr), random.sample(range(m_chr), m_causal)\n\t\t\t\tlog.log('Picked {m} gxe causal variants out of {mc}'.format(m=m_causal, mc=m_chr))\n\t\t\t\tif args.h2_AC > 0:\n\t\t\t\t\tbeta_AC[beta_AC_causal_index] = np.random.normal(loc=0, scale=np.sqrt(args.h2_AC / (m_total * args.p_causal)), size=m_causal)\n\t\t\t\n\t\t\t# Get the phenotypes.\n\t\t\tk = 0\n\t\t\tlog.log('Determining phenotype data.')\n\n\t\t\tfor variant in tl.progress(args.progress_bars, tree_sequence_list[chr].variants(), total=m_chr): # Note, progress here refers you to tqdm which just creates a pretty progress bar.\n\t\t\t\tX_A, X_D = sg.nextSNP(variant)\n\t\t\t\t# Effect size on the phenotype.\n\t\t\t\ty += X_A * beta_A[k] + X_D * beta_D[k] + X_A * C * beta_AC[k]\n\t\t\t\tk += 1\n\n\t# Add noise to the y.\n\ty += np.random.normal(loc=0, scale=np.sqrt(1-(args.h2_A+args.h2_D+args.h2_AC+args.s2)), size=N)\n\t# Finally, normalise.\n\ty = (y - np.mean(y)) / np.std(y)\n\n\treturn y, C\n\n# Here, want to create a chi sq function, and an LD score function.\n\ndef get_chisq(args, tree_sequence_list_geno, m_geno, m_geno_total, y, N, C, log):\n\t# Initialise the chi squared statistics.\n\tchisq_A, chisq_D, chisq_AC = np.zeros((m_geno_total,1)), np.zeros((m_geno_total,1)), np.zeros((m_geno_total,1))\n\n\tif args.case_control:\n\t\tlog.log(\"Running case-control simulation.\")\n\t\tif args.prevalence is None:\n\t\t\traise ValueError(\"prevalence must be set if running 
case-control analysis.\")\n\t\tcases, controls, n_cases, n_controls, T = case_control(y, args.prevalence, args.sample_prevalence, N)\n\t\tn = n_cases + n_controls\n\t\ty_cc = np.zeros(n)\n\t\ty_cc[:n_cases] = 1\n\t\tindex = cases + controls\n\t\tC_sim = C[index]\n\n\t\tif args.linear is False and args.ldsc is True:\n\t\t\tk = 0\n\t\t\tfor chr in range(args.n_chr):\n\t\t\t\tfor variant in tl.progress(args.progress_bars, tree_sequence_list_geno[chr].variants(), total=m_geno[chr]):\n\t\t\t\t\tX_A, X_D = sg.nextSNP(variant, index = index)\n\t\t\t\t\tchisq_A[k] = sm.Logit(y_cc, sm.add_constant(X_A)).fit(disp=0).llr\n\t\t\t\t\tchisq_D[k] = sm.Logit(y_cc, sm.add_constant(X_D)).fit(disp=0).llr\n\t\t\t\t\tchisq_AC[k] = sm.Logit(y_cc, sm.add_constant(C_sim * X_A)).fit(disp=0).llr\n\t\t\t\t\tk += 1\n\n\tif ( ((args.case_control is False) or (args.case_control is True and args.linear is True)) and args.ldsc is True ):\n\t\tif args.case_control:\n\t\t\tlog.log(\"Warning: running linear regression for case-control.\")\n\t\t\ty = (y_cc - np.mean(y_cc)) / np.std(y_cc)\n\t\t\tindex = cases + controls\n\t\t\tC_sim = C[index]\n\t\telse:\n\t\t\tindex = None\n\t\t\tC_sim = C\n\t\t\tn = N\n\n\t\t# Then use these ys to determine beta hats.\n\t\tk = 0\n\t\tfor chr in range(args.n_chr):\n\t\t\tlog.log('Determining chi-squared statistics in chromosome {chr}'.format(chr=chr+1))\n\t\t\tfor variant in tree_sequence_list_geno[chr].variants():\n\t\t\t\tX_A, X_D = sg.nextSNP(variant, index=index)\n\t\t\t\t# Then sum to get the effect size on the phenotype.\n\t\t\t\tchisq_A[k] = np.dot(y.reshape(1,n), X_A)**2 / n\n\t\t\t\tchisq_D[k] = np.dot(y.reshape(1,n), X_D)**2 / n\n\t\t\t\tchisq_AC[k] = np.dot(y.reshape(1,n), C_sim * X_A)**2 / n\n\t\t\t\tk += 1\n\n\tif args.write_pheno or args.write_trees:\n\t\tif args.case_control:\n\t\t\tsample_ID = index\n\t\t\ty = y_cc.astype(int)\n\n\t\t\tif args.write_trees:\n\t\t\t\ttree_index = [[2*x,2*x+1] for x in index]\n\t\t\t\ttree_index = [j for x in tree_index for j in x]\n\n\t\t\t\tfor chr in range(args.n_chr):\n\t\t\t\t\ttree_sequence_to_write = tree_sequence_list_geno[chr].simplify(tree_index)\n\t\t\t\t\twrite.trees(args.out, tree_sequence_to_write, chr, m_geno[chr], n_pops, N, sim, args.vcf, index)\n\n\t\telse:\n\t\t\tsample_ID = np.arange(N)\n\t\tdf_pheno=pd.DataFrame({'sample_ID':sample_ID, 'phenotype':y})\n\t\tdf_pheno.to_csv(args.out + \".sim\" + str(sim+1) + '.pheno.tsv', sep='\\t', header=True, index=False)\n\n\tif args.case_control:\n\t\treturn chisq_A, chisq_D, chisq_AC, n, C_sim, index, y_cc, n_cases, T\n\telse:\n\t\treturn chisq_A, chisq_D, chisq_AC, n, C_sim, index\n"
] | [
[
"numpy.arange",
"numpy.empty",
"numpy.tile",
"pandas.DataFrame"
],
[
"scipy.stats.norm.ppf",
"numpy.sqrt",
"numpy.random.seed",
"numpy.arange",
"pandas.DataFrame",
"numpy.random.normal",
"numpy.std",
"numpy.mean",
"numpy.random.binomial",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
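The case_control() function in the record above starts from a liability-threshold model: with population prevalence p, an individual is a case when its standard-normal liability exceeds T = norm.ppf(1 - p). A small numeric sketch of that step:

import numpy as np
import scipy.stats as sp

np.random.seed(0)
prevalence = 0.1
T = sp.norm.ppf(1 - prevalence)    # about 1.2816
y = np.random.normal(size=10_000)  # simulated liabilities
cases = np.where(y >= T)[0]
print(len(cases) / len(y))         # close to 0.1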
pierfra-ro/allesfitter | [
"a6a885aaeb3253fec0d924ef3b45e8b7c473b181",
"a6a885aaeb3253fec0d924ef3b45e8b7c473b181",
"a6a885aaeb3253fec0d924ef3b45e8b7c473b181"
] | [
"allesfitter/detection/transit_search.py",
"allesfitter/postprocessing/plot_viol.py",
"tutorials/05_transits_and_rvs/simulate_data.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 17:55:39 2020\n\n@author:\nDr. Maximilian N. Günther\nEuropean Space Agency (ESA)\nEuropean Space Research and Technology Centre (ESTEC)\nKeplerlaan 1, 2201 AZ Noordwijk, The Netherlands\nEmail: [email protected]\nGitHub: mnguenther\nTwitter: m_n_guenther\nWeb: www.mnguenther.com\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\n#::: modules\nimport os, sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport warnings\nfrom pprint import pprint\nfrom datetime import datetime\nfrom astropy import units as u\nfrom astropy import constants as c\nfrom astropy.stats import sigma_clip\nfrom astropy.timeseries import BoxLeastSquares as bls\nfrom ..exoworlds_rdx.lightcurves.index_transits import index_transits\n# import time as timer\nimport contextlib\n\n#::: specific modules\ntry:\n from wotan import flatten\nexcept ImportError:\n pass\n\ntry:\n from transitleastsquares import transitleastsquares as tls\n from transitleastsquares import transit_mask, catalog_info\nexcept ImportError:\n pass\n\n#::: my modules\ntry:\n from exoworlds.tess import tessio\nexcept:\n pass\nfrom ..exoworlds_rdx.lightcurves.lightcurve_tools import plot_phase_folded_lightcurve, rebin_err \nfrom ..time_series import clean, slide_clip\nfrom ..lightcurves import tessclean\nfrom ..inout import write_json, write_csv\nfrom ..plotting import fullplot, brokenplot, tessplot, monthplot\n\n\n#::: plotting settings\nimport seaborn as sns\nsns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)\nsns.set_style({\"xtick.direction\": \"in\",\"ytick.direction\": \"in\"})\nsns.set_context(rc={'lines.markeredgewidth': 1})\n\n \n\n###############################################################################\n#::: print to logfile\n###############################################################################\ndef logprint(*text, options=None):\n original = sys.stdout\n try:\n with open(os.path.join(options['outdir'],'logfile.log'), 'a' ) as f:\n sys.stdout = f\n print(*text)\n except OSError:\n pass #For unknown reasons, the combination of open() and os.path.join() does not work on some Windows versions\n sys.stdout = original\n \n \n \n###############################################################################\n#::: pretty-print to logfile\n###############################################################################\ndef logpprint(*text, options=None):\n original = sys.stdout\n try:\n with open(os.path.join(options['outdir'],'logfile.log'), 'a' ) as f:\n sys.stdout = f\n pprint(*text)\n except OSError:\n pass #For unknown reasons, the combination of open() and os.path.join() does not work on some Windows versions\n sys.stdout = original\n\n\n \n###############################################################################\n#::: apply a mask (if wished so)\n###############################################################################\ndef mask(time, flux, flux_err, period, duration, T0):\n intransit = transit_mask(time, period, duration, T0)\n time = time[~intransit]\n flux = flux[~intransit]\n if flux_err is not None:\n flux_err = flux_err[~intransit]\n time, flux, flux_err = clean(time, flux, flux_err)\n else:\n time, flux = clean(time, flux)\n return time, flux, flux_err\n \n\n\n###############################################################################\n#::: check for multiples of a value (e.g., of a 
period)\n###############################################################################\ndef is_multiple_of(a, b, tolerance=0.05):\n a = np.float(a)\n b = np.float(b) \n result = a % b\n return (abs(result/b) <= tolerance) or (abs((b-result)/b) <= tolerance)\n\n\n\n###############################################################################\n#::: BLS search on an input lightcurve\n###############################################################################\ndef bls_search(time, flux, flux_err=None):\n if flux_err is None: \n ind = np.where(~np.isnan(time*flux))\n time = np.array(time)[ind]\n flux = np.array(flux)[ind]\n else:\n ind = np.where(~np.isnan(time*flux*flux_err))\n time = np.array(time)[ind]\n flux = np.array(flux)[ind]\n flux_err = np.array(flux_err)[ind]\n print(time, flux)\n plt.figure()\n plt.plot(time, flux, 'b.')\n model = bls(time * u.day, flux, dy=flux_err)\n print(model)\n periodogram = model.autopower(0.05)\n plt.plot(periodogram.period, periodogram.power) \n # max_power = np.argmax(periodogram.power)\n # stats = model.compute_stats(periodogram.period[max_power],\n # periodogram.duration[max_power],\n # periodogram.transit_time[max_power])\n # print(stats)\n \n \n \n###############################################################################\n#::: get TLS kwargs from TICv8\n###############################################################################\ndef get_tls_kwargs_by_tic(tic_id, sigma=3, tls_kwargs=None, quiet=True):\n #mass comes first, radius comes second in the TLS source code for catalog_info()\n u, M_star, M_star_lerr, M_star_uerr, R_star, R_star_lerr, R_star_uerr = catalog_info(TIC_ID=int(tic_id))\n if not quiet:\n print('TICv8 info:')\n print('Quadratic limb darkening u_0, u_1', u[0], u[1])\n print('Stellar radius', R_star, '+', R_star_lerr, '-', R_star_uerr)\n print('Stellar mass', M_star, '+', M_star_lerr, '-', M_star_uerr)\n \n if tls_kwargs is None: tls_kwargs = {}\n tls_kwargs['R_star']=float(R_star)\n tls_kwargs['R_star_min']=R_star-sigma*R_star_lerr\n tls_kwargs['R_star_max']=R_star+sigma*R_star_uerr\n tls_kwargs['M_star']=float(M_star)\n tls_kwargs['M_star_min']=M_star-sigma*M_star_lerr\n tls_kwargs['M_star_max']=M_star+sigma*M_star_uerr\n tls_kwargs['u']=u\n \n if np.isnan(tls_kwargs['R_star']): \n tls_kwargs['R_star'] = 1.\n warnings.warn(\"tls_kwargs: R_star was undefined in TICv8. Filling it with R_star=1.\")\n if np.isnan(tls_kwargs['R_star_min']): \n tls_kwargs['R_star_min'] = 0.13\n warnings.warn(\"tls_kwargs: R_star_min was undefined in TICv8. Filling it with R_star_min=0.13\")\n if np.isnan(tls_kwargs['R_star_max']): \n tls_kwargs['R_star_max'] = 3.5\n warnings.warn(\"tls_kwargs: R_star_max was undefined in TICv8. Filling it with R_star_max=3.5\")\n if np.isnan(tls_kwargs['M_star']): \n tls_kwargs['M_star'] = 1.\n warnings.warn(\"tls_kwargs: M_star was undefined in TICv8. Filling it with M_star=1.\")\n if np.isnan(tls_kwargs['M_star_min']): \n tls_kwargs['M_star_min'] = 0.1\n warnings.warn(\"tls_kwargs: M_star_min was undefined in TICv8. Filling it with M_star_min=0.1\")\n if np.isnan(tls_kwargs['M_star_max']): \n tls_kwargs['M_star_max'] = 1.\n warnings.warn(\"tls_kwargs: M_star_max was undefined in TICv8. Filling it with M_star_max=0.1\")\n if np.isnan(tls_kwargs['u']).any(): \n tls_kwargs['u'] = [0.4804, 0.1867]\n warnings.warn(\"tls_kwargs: u was undefined in TICv8. 
Filling it with u=[0.4804, 0.1867]\")\n\n return tls_kwargs\n\n\n\n###############################################################################\n#::: write TLS reuslts as a dictionary to a json file\n###############################################################################\ndef write_tls_results(fname, results):\n '''\n Parameters\n ----------\n fname : str\n Name of the output json file.\n results : transitleastsuqares.results class\n The results returned form a TLS run.\n\n Returns\n -------\n None.\n\n Outputs\n -------\n A json file that contains a dictionary of the most important tls results.\n The json file can be read into Python again via allesfitter's read_dic.\n \n Explanation\n -----------\n The TLS results object contains the following keys, where \n 'short' indicates it's a float or short list (e.g., the found period or depth per transit) and \n 'long' indicates it's a humongous array (e.g., the whole light curve).\n We only want to save the 'short' parts to save space:\n SDE short\n SDE_raw short\n chi2_min short\n chi2red_min short\n period short\n period_uncertainty short\n T0 short\n duration short\n depth short\n depth_mean short\n depth_mean_even short\n depth_mean_odd short\n transit_depths short\n transit_depths_uncertainties short\n rp_rs short\n snr short\n snr_per_transit short\n snr_pink_per_transit short\n odd_even_mismatch short\n transit_times short\n per_transit_count short\n transit_count short\n distinct_transit_count short\n empty_transit_count short\n FAP short\n in_transit_count short\n after_transit_count short\n before_transit_count short\n periods long\n power long\n power_raw long\n SR long\n chi2 long\n chi2red long\n model_lightcurve_time long\n model_lightcurve_model long\n model_folded_phase long\n folded_y long\n folded_dy long\n folded_phase long\n model_folded_model long\n Also:\n correct_duration short\n model long (our self-made model(time) curve)\n '''\n dic = {}\n for key in ['SDE', 'SDE_raw', 'chi2_min', 'chi2red_min', 'period', 'period_uncertainty',\\\n 'T0', 'duration', 'depth', 'depth_mean', 'depth_mean_even', 'depth_mean_odd',\\\n 'transit_depths', 'transit_depths_uncertainties', 'rp_rs',\\\n 'snr', 'snr_per_transit', 'snr_pink_per_transit', 'odd_even_mismatch',\\\n 'transit_times', 'per_transit_count', 'transit_count', 'distinct_transit_count',\\\n 'empty_transit_count', 'FAP', 'in_transit_count', 'after_transit_count',\\\n 'before_transit_count'] + ['correct_duration']: \n if (type(results[key])!=np.ndarray): #if it's not an array, save it as is\n dic[key] = results[key]\n else: #if it's a short array, save as list (for json)\n dic[key] = results[key].tolist()\n write_json(fname, dic)\n \n \n \n###############################################################################\n#::: function to convert the results into a dictionary\n###############################################################################\ndef _to_dic(results):\n dic = {}\n for key in results: \n dic[key] = results[key]\n return dic\n\n\n\n###############################################################################\n#::: TLS search on an input lightcurve\n###############################################################################\ndef tls_search(time, flux, flux_err, plot=True, plot_type='brokenplot', **kwargs):\n '''\n Summary:\n -------\n This runs TLS on these data with the given infos\n \n Inputs:\n -------\n time : array of flaot\n time stamps of observations\n flux : array of flaot\n normalized flux\n flux_err : array of flaot\n error of 
normalized flux\n **kwargs : collection of keyword arguments\n All keyword arguments will be passed to TLS.\n Missing keywords will be replaced with default values:\n R_star : float\n radius of the star (e.g. median)\n default 1 R_sun (from TLS)\n R_star_min : float\n minimum radius of the star (e.g. 1st percentile)\n default 0.13 R_sun (from TLS)\n R_star_max : float\n maximum radius of the star (e.g. 99th percentile)\n default 3.5 R_sun (from TLS)\n M_star : float\n mass of the star (e.g. median)\n default 1. M_sun (from TLS)\n M_star_min : float\n minimum mass of the star (e.g. 1st percentile)\n default 0.1 M_sun (from TLS)\n M_star_max : float\n maximum mass of the star (e.g. 99th percentile)\n default 1. M_sun (from TLS) \n u : list\n quadratic limb darkening parameters\n default [0.4804, 0.1867]\n period_min : float\n the minimum period to be searched (in days)\n period_max : float\n the maximum period to be searched (in days)\n show_progress_bar : bool\n Show a progress bar for TLS\n default True\n SNR_threshold : float\n the SNR threshold at which to stop the TLS search\n default 5\n SDE_threshold : float\n the SDE threshold at which to stop the TLS search\n default -inf\n FAP_threshold : float\n the False Alarm Probability threshold at which to stop the TLS search\n default inf\n quiet : bool\n silence all TLS outprint\n default True\n \n Returns:\n -------\n results_all : list of dictionaries\n List of all dictionaries containing the TLS results \n (with dictionaries made from the transitleastsqaures.results class).\n fig_all : list of matplotlib.figure object, optional\n List of all summary figures. Only returned if plot is True.\n '''\n \n #::: seeed\n np.random.seed(42)\n \n \n #::: handle inputs\n time, flux, flux_err = clean(time, flux, flux_err)\n plot_bool = plot\n \n if 'show_progress_bar' not in kwargs: kwargs['show_progress_bar'] = True\n if 'SNR_threshold' not in kwargs: kwargs['SNR_threshold'] = 5.\n if 'SDE_threshold' not in kwargs: kwargs['SDE_threshold'] = -np.inf #don't trust SDE\n if 'FAP_threshold' not in kwargs: kwargs['FAP_threshold'] = np.inf #don't trust FAP \n if 'quiet' not in kwargs: kwargs['quiet'] = True\n if 'inj_period' not in kwargs: kwargs['inj_period'] = np.nan\n \n non_tls_keys = ['SNR_threshold','SDE_threshold','FAP_threshold','quiet','inj_period']\n tls_kwargs_original = {key: kwargs[key] for key in kwargs.keys() if key not in non_tls_keys} #for the original tls\n #the rest is filled automatically by TLS if it was not given\n print('tls_kwargs_original', tls_kwargs_original)\n \n #::: init\n SNR = 1e12\n SDE = 1e12\n FAP = 0\n FOUND_SIGNAL = False\n results_all = [] \n fig_lightcurve_all = [] \n fig_folded_all = [] \n \n \n #::: function to run it once\n def _run1(time, flux, flux_err):\n if kwargs['quiet']:\n with open(os.devnull, 'w') as devnull:\n with contextlib.redirect_stdout(devnull):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model = tls(time, flux, flux_err)\n results = model.power(**tls_kwargs_original)\n else:\n model = tls(time, flux, flux_err)\n results = model.power(**tls_kwargs_original)\n \n results = _to_dic(results)\n results['detection'] = (results['snr'] >= kwargs['SNR_threshold']) and (results['SDE'] >= kwargs['SDE_threshold']) and (results['FAP'] <= kwargs['FAP_threshold'])\n results['correct_duration'] = np.nan \n results['R_planet_'] = np.nan\n\n \n if results['detection']:\n #::: calculcate the correct_duration, as TLS sometimes returns unreasonable durations\n ind_tr_phase = np.where( 
results['model_folded_model'] < 1. )[0]\n results['correct_duration'] = results['period'] * (results['model_folded_phase'][ind_tr_phase[-1]] - results['model_folded_phase'][ind_tr_phase[0]])\n \n if 'R_star' in kwargs:\n results['R_planet'] = results['rp_rs'] * kwargs['R_star'] * 109.07637070600963 #from Rsun to Rearth\n \n return results\n \n \n #::: function to plot it once\n # def _plot1(time, flux, flux_err, results):\n # fig, axes = plt.subplots(1, 3, figsize=(20,5), tight_layout=True)\n \n # ax = axes[0]\n # ax.plot(results['folded_phase'], results['folded_y'], 'k.', color='silver', rasterized=True)\n # bintime, binflux, binflux_err, _ = rebin_err(results['folded_phase'], results['folded_y'], dt = 0.001*results['period'], ferr_type='medsig', ferr_style='sem')\n # ax.plot(bintime, binflux, 'b.', rasterized=True)\n # ax.plot(results['model_folded_phase'], results['model_folded_model'], 'r-', lw=3)\n \n # ax = axes[1]\n # ax.plot((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], 'k.', color='silver', rasterized=True)\n # bintime, binflux, binflux_err, _ = rebin_err((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], dt = 0.001*results['period']*24, ferr_type='medsig', ferr_style='sem')\n # ax.plot(bintime, binflux, 'bo', rasterized=True)\n # ax.plot((results['model_folded_phase']-0.5)*results['period']*24, results['model_folded_model'], 'r-', lw=3)\n # ax.set(xlim=[ -1.5*results['correct_duration']*24, +1.5*results['correct_duration']*24 ], xlabel='Time (h)', yticks=[])\n \n # ax = axes[2]\n # ax.text( .02, 0.95, 'P = ' + np.format_float_positional(results['period'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.85, 'Depth = ' + np.format_float_positional(1e3*(1.-results['depth']),4) + ' ppt', ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.75, 'Duration = ' + np.format_float_positional(24*results['correct_duration'],4) + ' h', ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.65, 'T_0 = ' + np.format_float_positional(results['T0'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.55, 'SNR = ' + np.format_float_positional(results['snr'],4), ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.45, 'SDE = ' + np.format_float_positional(results['SDE'],4), ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.35, 'FAP = ' + np.format_float_scientific(results['FAP'],4), ha='left', va='center', transform=ax.transAxes )\n # ax.set_axis_off()\n \n # return fig\n \n \n #::: search for transits in a loop\n while (SNR >= kwargs['SNR_threshold']) and (SDE >= kwargs['SDE_threshold']) and (FAP <= kwargs['FAP_threshold']) and (FOUND_SIGNAL==False):\n \n #::: run once \n results = _run1(time, flux, flux_err)\n \n #::: if a transit was detected, store the results, plot, and apply a mask for the next run\n if results['detection']:\n results_all.append(results)\n \n results['model'] = np.interp(time, results['model_lightcurve_time'], results['model_lightcurve_model'])\n \n if plot_bool:\n # fig = _plot1(time, flux, flux_err, results)\n fig_lightcurve = _tls_search_plot_lightcurve(time, flux, results, typ=plot_type)\n fig_folded = _tls_search_plot_folded(time, flux, results)\n fig_lightcurve_all.append(fig_lightcurve)\n fig_folded_all.append(fig_folded)\n \n time, flux, flux_err = mask(time, flux, flux_err, \n results['period'], \n np.max((1.5*results['correct_duration'])), \n results['T0'])\n\n #::: update values\n SNR = 
results['snr']\n SDE = results['SDE']\n FAP = results['FAP']\n if is_multiple_of(results['period'],kwargs['inj_period']): SNR = -np.inf #if run as part of an injection-recovery test, then abort if it matches the injected period\n \n \n #::: return\n if plot_bool:\n return results_all, fig_lightcurve_all, fig_folded_all\n else:\n return results_all\n\n\n\ndef _cut(time, model_lightcurve_time, model_lightcurve_flux):\n return np.interp(time, model_lightcurve_time, model_lightcurve_flux) \n\n\n\ndef _tls_search_plot_lightcurve(time, flux, results, typ='fullplot'):\n \"\"\"\n ...\n\n Parameters\n ----------\n time : TYPE\n DESCRIPTION.\n flux : TYPE\n DESCRIPTION.\n flux_err : TYPE\n DESCRIPTION.\n results : TYPE\n DESCRIPTION.\n typ : TYPE, optional\n 'fullplot', 'brokenplot', 'tessplot', 'monthplot'. The default is 'fullplot'.\n\n Returns\n -------\n None.\n \"\"\"\n \n if typ=='fullplot':\n axes = fullplot(time, flux)\n axes = fullplot(results['model_lightcurve_time'], results['model_lightcurve_model'], color='r', ls='-', marker='', lw=3, zorder=100, axes=axes)\n elif typ=='brokenplot':\n axes = brokenplot(time, flux)\n axes = brokenplot(results['model_lightcurve_time'], results['model_lightcurve_model'], color='r', ls='-', marker='', lw=3, zorder=100, axes=axes)\n elif typ=='tessplot':\n trend = _cut(time, results['model_lightcurve_time'], results['model_lightcurve_model'])\n axes = tessplot(time, flux, trend=trend)\n # axes = tessplot(results['model_lightcurve_time'], results['model_lightcurve_model'], color='r', ls='-', marker='', lw=3, zorder=100, axes=axes, shade=False)\n elif typ=='monthplot':\n axes = monthplot(time, flux)\n axes = monthplot(results['model_lightcurve_time'], results['model_lightcurve_model'], color='r', ls='-', marker='', lw=3, zorder=100, axes=axes)\n \n return plt.gcf()\n \n\n \ndef _tls_search_plot_folded(time, flux, results):\n \"\"\"\n ...\n\n Parameters\n ----------\n time : TYPE\n DESCRIPTION.\n flux : TYPE\n DESCRIPTION.\n results : TYPE\n DESCRIPTION.\n axes : TYPE, optional\n DESCRIPTION. 
The default is None.\n\n Returns\n -------\n axes : TYPE\n DESCRIPTION.\n \"\"\"\n \n fig, axes = plt.subplots(1, 3, figsize=(12,3), tight_layout=True)\n \n ax = axes[0]\n bintime, binflux, binflux_err, _ = rebin_err(results['folded_phase'], results['folded_y'], dt = 0.001*results['period'], ferr_type='medsig', ferr_style='sem')\n ax.plot(bintime, binflux, 'b.', rasterized=True)\n ax.plot(results['model_folded_phase'], results['model_folded_model'], 'r-', lw=3)\n ylim = ax.get_ylim()\n ax.plot(results['folded_phase'], results['folded_y'], 'k.', color='silver', rasterized=True, zorder=-1)\n ax.set_ylim(ylim)\n \n ax = axes[1]\n bintime, binflux, binflux_err, _ = rebin_err((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], dt = 0.001*results['period']*24, ferr_type='medsig', ferr_style='sem')\n ax.plot(bintime, binflux, 'bo', rasterized=True)\n ax.plot((results['model_folded_phase']-0.5)*results['period']*24, results['model_folded_model'], 'r-', lw=3)\n ax.set(xlim=[ -1.5*results['correct_duration']*24, +1.5*results['correct_duration']*24 ], xlabel='Time (h)', yticks=[])\n ylim = ax.get_ylim()\n ax.plot((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], 'k.', color='silver', rasterized=True, zorder=-1)\n ax.set_ylim(ylim)\n \n ax = axes[2]\n ax.text( .02, 0.95, 'P = ' + np.format_float_positional(results['period'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.85, 'Depth = ' + np.format_float_positional(1e3*(1.-results['depth']),4) + ' ppt', ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.75, 'Duration = ' + np.format_float_positional(24*results['correct_duration'],4) + ' h', ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.65, 'T_0 = ' + np.format_float_positional(results['T0'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.55, 'SNR = ' + np.format_float_positional(results['snr'],4), ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.45, 'SDE = ' + np.format_float_positional(results['SDE'],4), ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.35, 'FAP = ' + np.format_float_scientific(results['FAP'],4), ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.25, 'R_planet/R_star = ' + np.format_float_positional(results['rp_rs'],4), ha='left', va='center', transform=ax.transAxes )\n if ~np.isnan(results['R_planet']): \n ax.text( .02, 0.15, 'R_planet = ' + np.format_float_positional(results['R_planet'],4), ha='left', va='center', transform=ax.transAxes )\n ax.set_axis_off()\n \n return fig\n \n\n\ndef _tls_search_plot_individual(time, flux, flux_err, results):\n pass #TODO\n\n \n\n###############################################################################\n#::: Convenient wrapper for TESS tasks\n###############################################################################\n#TODO: work in progress\ndef tls_search_tess(time, flux, flux_err, \n wotan_kwargs=None,\n tls_kwargs=None,\n bad_regions=None,\n options=None):\n\n if options is None: options = {}\n if 'outdir' not in options: options['outdir'] = ''\n if wotan_kwargs is None: wotan_kwargs = {'flatten': {'method':'biweight', 'window_length':1}}\n \n #::: logprint\n with open( os.path.join(options['outdir'], 'logfile.log'), 'w' ) as f:\n f.write('TLS search, UTC ' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + '\\n')\n logprint('\\nWotan kwargs:', options=options)\n logpprint(wotan_kwargs, options=options)\n logprint('\\nTLS kwargs:', 
options=options)\n logpprint(tls_kwargs, options=options)\n logprint('\\nOptions:', options=options)\n logpprint(options, options=options)\n \n \n #::: cleaning\n flux_clean, fig1, fig2, fig3 = \\\n tessclean(time, flux, plot=True,\n method=wotan_kwargs['flatten']['method'],\n window_length=wotan_kwargs['flatten']['window_length'],\n bad_regions=bad_regions)\n \n write_csv(os.path.join(options['outdir'],'flux_clean.csv'), (time, flux_clean, flux_err), header='time,flux_clean,flux_err')\n \n with PdfPages( os.path.join(options['outdir'],'flux_clean.pdf') ) as pdf:\n pdf.savefig( fig1 )\n pdf.savefig( fig2 )\n pdf.savefig( fig3 )\n plt.close('all')\n \n \n #::: transit search\n results_all, fig_lightcurve_all, fig_folded_all = \\\n tls_search(time, flux_clean, flux_err, \n plot=True, plot_type='tessplot',\n **tls_kwargs)\n \n if len(results_all)>0:\n with open( os.path.join(options['outdir'],'tls_summary.txt'), 'w' ) as f:\n f.write('TLS found '+str(len(results_all))+' potential signal(s).')\n \n for i, results in enumerate(results_all):\n write_tls_results( os.path.join(options['outdir'],'tls_signal_'+str(i)+'.txt'), results )\n \n for i, (fig1, fig2) in enumerate(zip(fig_lightcurve_all, fig_folded_all)):\n with PdfPages( os.path.join(options['outdir'],'tls_signal_'+str(i)+'.pdf') ) as pdf:\n pdf.savefig( fig1 )\n pdf.savefig( fig2 )\n plt.close('all')\n \n else:\n with open( os.path.join(options['outdir'],'tls_summary.txt'), 'w' ) as f:\n f.write('TLS found no potential signal(s).')\n \n \n\n###############################################################################\n#::: TLS search on an input lightcurve\n###############################################################################\n# def tls_search_old(time, flux, flux_err,\n# known_transits=None,\n# tls_kwargs=None,\n# wotan_kwargs=None,\n# options=None):\n# '''\n# Summary:\n# -------\n# This runs TLS on these data with the given infos\n \n# Inputs:\n# -------\n# time : array of flaot\n# time stamps of observations\n# flux : array of flaot\n# normalized flux\n# flux_err : array of flaot\n# error of normalized flux\n \n# Optional Inputs:\n# ----------------\n# known_transits : None or dict\n# >> can be used to mask known transits before running TLS\n# if None\n# nothing happens\n# if dict \n# if one transit is already known, give for example: \n# known_transits = {'period':[1.3], 'duration':[2.1], 'epoch':[245800.0]}\n# if multiple transits are already known, give for example: \n# known_transits = {'name':['b','c'], 'period':[1.3, 21.0], 'duration':[2.1, 4.1], 'epoch':[245800.0, 245801.0]}\n# 'period' is the period of the known transit(s)\n# 'duration' is the total duration of the known transit(s), i.e. from first ingress point to last egrees point, in days\n# 'epoch' is the epoch of the known transit(s)\n \n# tls_kwargs : None, str or dict:\n# >> can be used to fine-tune the TLS algorithm\n# if None\n# the default parameters will be chosen (see below)\n# if 'default'\n# the default parameters will be chosen (see below)\n# if dict\n# a dictionary with the following keywords is expected; \n# missing keywords will be replaced with default values\n# R_star : float\n# radius of the star (e.g. median)\n# default 1 R_sun (from TLS)\n# R_star_min : float\n# minimum radius of the star (e.g. 1st percentile)\n# default 0.13 R_sun (from TLS)\n# R_star_max : float\n# maximum radius of the star (e.g. 99th percentile)\n# default 3.5 R_sun (from TLS)\n# M_star : float\n# mass of the star (e.g. median)\n# default 1. 
M_sun (from TLS)\n# M_star_min : float\n# minimum mass of the star (e.g. 1st percentile)\n# default 0.1 M_sun (from TLS)\n# M_star_max : float\n# maximum mass of the star (e.g. 99th percentile)\n# default 1. M_sun (from TLS) \n# u : list\n# quadratic limb darkening parameters\n# default [0.4804, 0.1867]\n# SNR_threshold : float\n# the SNR threshold at which to stop the TLS search\n# default 5\n# SDE_threshold : float\n# the SDE threshold at which to stop the TLS search\n# default -inf\n# FAP_threshold : float\n# the False Alarm Probability threshold at which to stop the TLS search\n# default inf\n# period_min : float\n# the minimum period to be searched (in days)\n# period_max : float\n# the maximum period to be searched (in days)\n \n# wotan_kwargs : None, str, or dict:\n# >> can be used to detrend the data before the TLS search\n# if None\n# the default detrending will run (see below)\n# if str is 'default'\n# the default detrending will run (see below)\n# if str is 'off'\n# no detrending will run\n# if dict\n# a dictionary with two sub-dictionaries is expected; \n# missing keywords will be replaced with default values\n# wotan_kwargs['slide_clip'] : dict\n# this dictionary contains all slide clipping arguments\n# window_length : float\n# slide clip window length\n# default 1\n# low : float\n# slide clip lower sigma\n# default 20\n# high : float\n# slide clip upper sigma\n# default 3\n# wotan_kwargs['flatten'] : dict\n# this dictionary contains contains all detrending arguments\n# method : str\n# detrending method\n# default 'biweight'\n# window_length : float\n# detrending window length in days\n# default 1 \n \n# options : None or dict, keywords:\n# >> can be used for any general options\n# if None\n# the default options will be used (see below)\n# if dict\n# a dictionary with the following keywords is expected;\n# missing keywords will be replaced with default values\n# show_plot : bool\n# can show a plot of each phase-folded transit candidate and TLS model in the terminal \n# default is True\n# save_plot : bool or str\n# can save a plot of each phase-folded transit candidate and TLS model into outdir\n# if True, will be set to '123'\n# if str, then: '1': detrended plot, '2': TLS plot, '3': all TLS plots, and any combinations thereof\n# default is True\n# save_csv : bool\n# can save a csv of the detrended lightcurve\n# default is True\n# outdir : string\n# if None\n# a new directory called \"results\" will be created in the current folder\n# default is \"tls_results_[wotan_flatten_method]_[wotan_flatten_window_length]\"\n \n# Returns:\n# -------\n# List of all TLS results\n# '''\n \n# #::: seeed\n# np.random.seed(42)\n \n \n# #::: handle inputs\n# def clean(time,flux,flux_err):\n# if flux_err is None:\n# ind = np.where( ~np.isnan(time*flux) )[0]\n# time = time[ind]\n# flux = flux[ind]\n# else:\n# ind = np.where( ~np.isnan(time*flux*flux_err) )[0]\n# time = time[ind]\n# flux = flux[ind]\n# flux_err = flux_err[ind]\n# return time, flux, flux_err\n \n# time, flux, flux_err = clean(time,flux,flux_err)\n# time_input = 1.*time\n# flux_input = 1.*flux #for plotting\n \n \n# if type(wotan_kwargs)==str and wotan_kwargs=='off': \n# detrend = False\n# else:\n# detrend = True\n# if (wotan_kwargs is None) or (type(wotan_kwargs)==str and wotan_kwargs=='default'): wotan_kwargs={} \n# if 'slide_clip' not in wotan_kwargs: wotan_kwargs['slide_clip'] = {}\n# if wotan_kwargs['slide_clip'] is not None:\n# if 'window_length' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['window_length'] = 
1.\n# if 'low' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['low'] = 20.\n# if 'high' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['high'] = 3.\n \n# if 'flatten' not in wotan_kwargs: wotan_kwargs['flatten'] = {}\n# if wotan_kwargs['flatten'] is not None:\n# if 'method' not in wotan_kwargs['flatten']: wotan_kwargs['flatten']['method'] = 'biweight'\n# if 'window_length' not in wotan_kwargs['flatten']: wotan_kwargs['flatten']['window_length'] = 1.\n# #the rest is filled automatically by Wotan\n \n# if tls_kwargs is None: tls_kwargs = {}\n# if 'show_progress_bar' not in tls_kwargs: tls_kwargs['show_progress_bar'] = False\n# if 'SNR_threshold' not in tls_kwargs: tls_kwargs['SNR_threshold'] = 5.\n# if 'SDE_threshold' not in tls_kwargs: tls_kwargs['SDE_threshold'] = -np.inf #don't trust SDE\n# if 'FAP_threshold' not in tls_kwargs: tls_kwargs['FAP_threshold'] = np.inf #don't trust FAP \n# tls_kwargs_original = {key: tls_kwargs[key] for key in tls_kwargs.keys() if key not in ['SNR_threshold','SDE_threshold','FAP_threshold']} #for the original tls\n# #the rest is filled automatically by TLS\n \n# if options is None: options = {}\n# if 'show_plot' not in options: options['show_plot'] = True\n# if type(options['show_plot'])==bool and (options['show_plot'] is True): options['show_plot']='123' #1: detrended plot, 2: TLS plot, 3: all TLS plots\n# if type(options['show_plot'])==bool and (options['show_plot'] is False): options['show_plot']='' #1: detrended plot, 2: TLS plot, 3: all TLS plots\n# if 'save_plot' not in options: options['save_plot'] = True\n# if type(options['save_plot'])==bool and (options['save_plot'] is True): options['save_plot']='123' #1: detrended plot, 2: TLS plot, 3: all TLS plots\n# if type(options['save_plot'])==bool and (options['save_plot'] is False): options['save_plot']='' #1: detrended plot, 2: TLS plot, 3: all TLS plots\n# if 'save_csv' not in options: options['save_csv'] = True\n# if 'outdir' not in options: \n# if detrend:\n# options['outdir'] = 'tls_results_'+wotan_kwargs['flatten']['method']+'_'+str(wotan_kwargs['flatten']['window_length'])\n# else:\n# options['outdir'] = 'tls_results_undetrended'\n# if 'quiet' not in options: options['quiet'] = True\n# if 'inj_period' not in options: options['inj_period'] = np.nan\n \n \n# #::: init\n# SNR = 1e12\n# SDE = 1e12\n# FAP = 0\n# FOUND_SIGNAL = False\n# results_all = [] \n# if len(options['outdir'])>0 and not os.path.exists(options['outdir']): os.makedirs(options['outdir']) \n \n \n# #::: logprint\n# with open( os.path.join(options['outdir'], 'logfile.log'), 'w' ) as f:\n# f.write('TLS search, UTC ' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + '\\n')\n# logprint('\\nWotan kwargs:', options=options)\n# logpprint(wotan_kwargs, options=options)\n# logprint('\\nTLS kwargs:', options=options)\n# logpprint(tls_kwargs, options=options)\n# logprint('\\nOptions:', options=options)\n# logpprint(options, options=options)\n \n# # timer1 = timer.time()\n# # print('t1', timer1 - timer0)\n \n# #::: apply a mask (if wished so)\n# if known_transits is not None:\n# for period, duration, T0 in zip(known_transits['period'], known_transits['duration'], known_transits['epoch']):\n# time, flux, flux_err = mask(time, flux, flux_err, period, duration, T0)\n \n \n# #::: global sigma clipping\n# flux = sigma_clip(flux, sigma_upper=3, sigma_lower=20)\n \n# # timer2 = timer.time()\n# # print('t2', timer2 - timer0)\n \n# #::: detrend (if wished so)\n# if detrend:\n \n# #::: slide clipping (super slow)\n# # if 
wotan_kwargs['slide_clip'] is not None: flux = slide_clip(time, flux, **wotan_kwargs['slide_clip']) #slide_clip is super slow (10 seconds for a TESS 2 min lightcurve for a single Sector)\n# # timer3a = timer.time()\n# # print('t3a', timer3a - timer0) \n \n# #::: fast slide clipping (super fast)\n# if wotan_kwargs['slide_clip'] is not None: \n# flux = slide_clip(time, flux, **wotan_kwargs['slide_clip']) #slide_clip is super fast (<1 seconds for a TESS 2 min lightcurve for a single Sector)\n# flux_clip = 1*flux\n# # timer3a = timer.time()\n# # print('t3a', timer3a - timer0) \n \n# #::: detrending (super fast)\n# if wotan_kwargs['flatten'] is not None: \n# flux, trend = flatten(time, flux, return_trend=True, **wotan_kwargs['flatten']) #flatten is super fast, (<1 second for a TESS 2 min lightcurve for a single Sector)\n# # timer3b = timer.time()\n# # print('t3b', timer3b - timer0) \n \n# #::: global sigma clipping on the flattened flux (super fast)\n# flux = sigma_clip(flux, sigma_upper=3, sigma_lower=20)\n# # timer3c = timer.time()\n# # print('t3c', timer3c - timer0) \n \n# if ('1' in options['show_plot']) or ('1' in options['save_plot']):\n# gone = np.isnan(time_input*flux_input)\n# print(time_input, gone)\n# axes = tessplot(time_input[gone], flux_input[gone], color='r')\n# tessplot(time, flux_clip, trend=trend, axes=axes, shade=False)\n# for ax in axes: ax.set_ylabel('Flux\\n(original)')\n# fig1 = plt.gcf()\n \n# axes = tessplot(time, flux_clip, trend=trend)\n# for ax in axes: ax.set_ylabel('Flux\\n(clipped)')\n# fig2 = plt.gcf()\n \n# axes = tessplot(time, flux)\n# fig3 = plt.gcf()\n# for ax in axes: ax.set_ylabel('Flux\\n(clipped & detrended)')\n \n# # fig, axes = plt.subplots(2,1, figsize=(40,8))\n# # brokenplot(time_input, flux_input, trend=trend, ax=axes[0])\n# # axes[0].set(ylabel='Flux (input)', xticklabels=[])\n# # brokenplot(time, trend, fmt='r-', ax=axes[0])\n# # axes[0].plot(time_input, flux_input, 'b.', rasterized=True)\n# # axes[0].plot(time, trend, 'r-', lw=2)\n# # brokenplot(time_input, flux_input, ax=axes[1], clip=True)\n# # brokenplot(time, trend, fmt='r-', ax=axes[1], clip=True)\n# # axes[1].set(ylabel='Flux (clipped)', xticklabels=[])\n# # brokenplot(time, flux, ax=axes[1])\n# # axes[1].plot(time, flux, 'b.', rasterized=True)\n# # axes[1].set(ylabel='Flux (detrended)', xlabel='Time (BJD)')\n# # axes[2].set(ylabel='Flux (detrended)')\n# if ('1' in options['save_plot']):\n# # try: \n# f = os.path.join(options['outdir'],'flux_'+wotan_kwargs['flatten']['method']+'.pdf')\n# with PdfPages(f) as pdf:\n# pdf.savefig( fig1 )\n# pdf.savefig( fig2 )\n# pdf.savefig( fig3 )\n# # fig.savefig(os.path.join(options['outdir'],'flux_'+wotan_kwargs['flatten']['method']+'.pdf'), bbox_inches='tight') #some matplotlib versions crash when saving pdf...\n# # except: \n# # fig.savefig(os.path.join(options['outdir'],'flux_'+wotan_kwargs['flatten']['method']+'.jpg'), bbox_inches='tight') #some matplotlib versions need pillow for jpg (conda install pillow)...\n \n# if ('1' in options['show_plot']):\n# plt.show()\n# else:\n# plt.close('all')\n \n# if options['save_csv']:\n# if flux_err is None: flux_err0 = np.nan*time\n# else: flux_err0 = flux_err\n# X = np.column_stack((time, flux, flux_err0, trend))\n# np.savetxt(os.path.join(options['outdir'],'flux_'+wotan_kwargs['flatten']['method']+'.csv'), X, delimiter=',', header='time,flux_detrended,flux_err,trend')\n \n# time_detrended = 1.*time #just for plotting\n# flux_detrended = 1.*flux #just for plotting\n \n# # timer3d = timer.time()\n# # print('t3d', 
timer3d - timer0) \n \n \n# #::: search for transits\n# i = 0\n# ind_trs = []\n# while (SNR >= tls_kwargs['SNR_threshold']) and (SDE >= tls_kwargs['SDE_threshold']) and (FAP <= tls_kwargs['FAP_threshold']) and (FOUND_SIGNAL==False):\n \n# if options['quiet']:\n# with open(os.devnull, 'w') as devnull:\n# with contextlib.redirect_stdout(devnull):\n# with warnings.catch_warnings():\n# warnings.simplefilter(\"ignore\")\n# model = tls(time, flux, flux_err)\n# results = model.power(**tls_kwargs_original)\n# else:\n# model = tls(time, flux, flux_err)\n# results = model.power(**tls_kwargs_original)\n \n# # timer4 = timer.time()\n# # print('t4', timer4 - timer0) \n \n# # plt.figure()\n# # plt.plot(time, flux, 'b.')\n# # pprint(tls_kwargs_original)\n# # pprint(results)\n# # err\n \n# if (results['snr'] >= tls_kwargs['SNR_threshold']) and (results['SDE'] >= tls_kwargs['SDE_threshold']) and (results['FAP'] <= tls_kwargs['FAP_threshold']):\n \n# #::: calculcate the correct_duration, as TLS sometimes returns unreasonable durations\n# ind_tr_phase = np.where( results['model_folded_model'] < 1. )[0]\n# correct_duration = results['period'] * (results['model_folded_phase'][ind_tr_phase[-1]] - results['model_folded_phase'][ind_tr_phase[0]])\n \n# #::: mark transit\n# ind_tr, ind_out = index_transits(time_input, results['T0'], results['period'], correct_duration)\n# ind_trs.append(ind_tr)\n \n# #::: mask out detected transits and append results\n# time1, flux1 = 1*time, 1*flux #for plotting\n# time, flux, flux_err = mask(time, flux, flux_err, results['period'], np.max((1.5*correct_duration)), results['T0'])\n# results = _to_dic(results)\n# results['correct_duration'] = correct_duration\n# results_all.append(results)\n \n# #::: write TLS stats to file\n# write_tls_results(os.path.join(options['outdir'],'tls_signal_'+str(i)+'.txt'), results)\n# # with open(os.path.join(options['outdir'],'tls_signal_'+str(i)+'.txt'), 'wt') as out:\n# # pprint(results, stream=out)\n \n# # timer5 = timer.time()\n# # print('t5', timer5 - timer0) \n \n# #::: individual TLS plots\n# if ('2' in options['show_plot']) or ('2' in options['save_plot']):\n# fig = plt.figure(figsize=(20,8), tight_layout=True)\n# gs = fig.add_gridspec(2,3)\n \n# ax = fig.add_subplot(gs[0,:])\n# ax.plot(time1, flux1, 'k.', color='silver', rasterized=True)\n# bintime, binflux, binflux_err, _ = rebin_err(time1, flux1, dt = 10./60/24, ferr_type='medsig', ferr_style='sem') #in 10 min intervals\n# ax.plot(bintime, binflux, 'b.', rasterized=True)\n# ax.plot(results['model_lightcurve_time'], results['model_lightcurve_model'], 'r-', lw=3)\n# ax.set(xlabel='Time (BJD)', ylabel='Flux')\n \n# ax = fig.add_subplot(gs[1,0])\n# ax.plot(results['folded_phase'], results['folded_y'], 'k.', color='silver', rasterized=True)\n# bintime, binflux, binflux_err, _ = rebin_err(results['folded_phase'], results['folded_y'], dt = 0.001*results['period'], ferr_type='medsig', ferr_style='sem')\n# ax.plot(bintime, binflux, 'b.', rasterized=True)\n# # plot_phase_folded_lightcurve(time1, flux1, results['period'], results['T0'], dt=0.002, ax=ax)\n# ax.plot(results['model_folded_phase'], results['model_folded_model'], 'r-', lw=3)\n# # ax.set(xlabel='Phase', ylabel='Flux')\n \n# ax = fig.add_subplot(gs[1,1])\n# ax.plot((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], 'k.', color='silver', rasterized=True)\n# bintime, binflux, binflux_err, _ = rebin_err((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], dt = 0.001*results['period']*24, 
ferr_type='medsig', ferr_style='sem')\n# ax.plot(bintime, binflux, 'bo', rasterized=True)\n# # plot_phase_folded_lightcurve(time1*24, flux1, results['period']*24, results['T0'], ax=ax, dt=0.002)\n# ax.plot((results['model_folded_phase']-0.5)*results['period']*24, results['model_folded_model'], 'r-', lw=3)\n# ax.set(xlim=[ -1.5*correct_duration*24, +1.5*correct_duration*24 ], xlabel='Time (h)', yticks=[])\n \n# ax = fig.add_subplot(gs[1,2])\n# ax.text( .02, 0.95, 'P = ' + np.format_float_positional(results['period'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.85, 'Depth = ' + np.format_float_positional(1e3*(1.-results['depth']),4) + ' ppt', ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.75, 'Duration = ' + np.format_float_positional(24*correct_duration,4) + ' h', ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.65, 'T_0 = ' + np.format_float_positional(results['T0'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.55, 'SNR = ' + np.format_float_positional(results['snr'],4), ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.45, 'SDE = ' + np.format_float_positional(results['SDE'],4), ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.35, 'FAP = ' + np.format_float_scientific(results['FAP'],4), ha='left', va='center', transform=ax.transAxes )\n# ax.set_axis_off()\n# if ('2' in options['save_plot']):\n# try: fig.savefig(os.path.join(options['outdir'],'tls_signal_'+str(i)+'.pdf'), bbox_inches='tight') #some matplotlib versions crash when saving pdf...\n# except: fig.savefig(os.path.join(options['outdir'],'tls_signal_'+str(i)+'.jpg'), bbox_inches='tight') #some matplotlib versions need pillow for jpg (conda install pillow)...\n# if ('2' in options['show_plot']):\n# plt.show(fig)\n# else:\n# plt.close(fig)\n \n# # timer6 = timer.time()\n# # print('t6', timer6 - timer0) \n \n# SNR = results['snr']\n# SDE = results['SDE']\n# FAP = results['FAP']\n# if is_multiple_of(results['period'],options['inj_period']): SNR = -np.inf #if run as part of an inejction-recovery test, then abort if it matches the injected period\n# i+=1\n \n \n \n# #::: full lightcurve plot\n# if ('3' in options['show_plot']) or ('3' in options['save_plot']):\n \n# if detrend:\n# fig, axes = plt.subplots(2,1, figsize=(40,8), tight_layout=True)\n# ax = axes[0]\n# ax.plot(time_input, flux_input, 'k.', color='grey', rasterized=True)\n# ax.plot(time_input, trend, 'r-', lw=2)\n# for number, ind_tr in enumerate(ind_trs):\n# ax.plot(time_input[ind_tr], flux_input[ind_tr], marker='.', linestyle='none', label='signal '+str(number))\n# ax.set(ylabel='Flux (input)', xticklabels=[])\n# ax.legend()\n\n# ax = axes[1]\n# ax.plot(time_detrended, flux_detrended, 'k.', color='grey', rasterized=True)\n# for number, ind_tr in enumerate(ind_trs):\n# ax.plot(time_detrended[ind_tr], flux_detrended[ind_tr], marker='.', linestyle='none', label='signal '+str(number))\n# ax.set(ylabel='Flux (detrended)', xlabel='Time (BJD)')\n# ax.legend()\n \n# else:\n# fig = plt.figure(figsize=(20,4), tight_layout=True)\n# fig, ax = plt.subplots(1,1, figsize=(40,4))\n# ax.plot(time_input, flux_input, 'k.', color='grey', rasterized=True)\n# ax.set(ylabel='Flux (input)', xlabel='Time (BJD)')\n# for number, ind_tr in enumerate(ind_trs):\n# ax.plot(time_input[ind_tr], flux_input[ind_tr], marker='.', linestyle='none', label='signal '+str(number))\n# ax.legend()\n \n# if ('3' in options['save_plot']):\n# try: 
fig.savefig(os.path.join(options['outdir'],'tls_signal_all.pdf'), bbox_inches='tight') #some matplotlib versions crash when saving pdf...\n# except: fig.savefig(os.path.join(options['outdir'],'tls_signal_all.jpg'), bbox_inches='tight') #some matplotlib versions need pillow for jpg (conda install pillow)...\n# if ('3' in options['show_plot']):\n# plt.show(fig)\n# else:\n# plt.close(fig) \n \n \n# return results_all\n\n\n\n###############################################################################\n#::: TLS search using tessio\n###############################################################################\ndef tls_search_by_tic(tic_id,\n tls_kwargs=None, SNR_threshold=5., known_transits=None,\n options=None):\n '''\n Summary:\n -------\n wrapper around tls_search()\n retrieves the SPOC PDC-SAP lightcurve\n retrieves all TIC catalog information from MAST\n calls tls_search()\n \n Inputs:\n -------\n tic_id : str\n TIC ID\n \n Optional Inputs:\n ----------------\n see tls_search()\n \n Returns:\n -------\n list of all TLS results\n '''\n \n #::: handle inputs\n if options is None: options = {}\n if 'show_plot' not in options: options['show_plot']=False\n if 'save_plot' not in options: options['save_plot']=False\n if 'outdir' not in options: options['outdir']=''\n \n #::: format inputs\n tic_id = str(int(tic_id))\n \n #::: load data and inject transit\n time, flux, flux_err = tessio.get(tic_id, pipeline='spoc', PDC=True, unpack=True)\n \n #::: load TIC info / tls kwargs\n tls_kwargs = get_tls_kwargs_by_tic(tic_id, tls_kwargs=tls_kwargs)\n \n return tls_search(time, flux, flux_err,\n tls_kwargs=tls_kwargs,\n SNR_threshold=SNR_threshold,\n known_transits=known_transits,\n options=options)\n\n\n\n###############################################################################\n#::: main\n###############################################################################\nif __name__ == '__main__':\n pass\n \n ###########################################################################\n #::: Example: search for a transit with TLS and tessio\n ###########################################################################\n # tic_id = '269701147'\n # SNR_threshold=5.,\n # known_transits = {'epoch':[2458715.3547, 2458726.0526, 2458743.5534],\n # 'period':[8.8806, 28.5810, 38.3497],\n # 'duration':[3.09/24., 4.45/24., 5.52/24.]\n # }\n \n # results_all = tls_search_by_tic(tic_id,\n # SNR_threshold=SNR_threshold,\n # known_transits=known_transits)\n # print(results_all)",
"\"\"\"\nPipeline to analyze allesfitter output for planet transit timings\n\nargument 1: allesfitter path\nargument 2: p-value threshold\nargument 3: Boolean to select to plot wout/with TESS or wout/with/only TESS\n\nTansu Daylan\nMIT Kavli Institute, Cambridge, MA, 02109, US\[email protected]\nwww.tansudaylan.com\n\"\"\"\n\nimport numpy as np\nimport scipy\nimport os, datetime, sys\n\nimport matplotlib.pyplot as plt\nfrom tdpy.util import summgene \n\nimport allesfitter\nimport allesfitter.postprocessing.plot_viol\nfrom allesfitter import config\n\nimport astropy\n\nclass gdatstrt(object):\n\n def __init__(self):\n self.boollockmodi = False\n pass\n \n def __setattr__(self, attr, valu):\n super(gdatstrt, self).__setattr__(attr, valu)\n\n\ndef plot(gdat, indxstar, indxpara=None, strgtype='evol'):\n \n if indxstar.size == 1:\n strg = gdat.liststrgstar[indxstar[0]] + '_'\n else:\n strg = ''\n \n print('strgtype')\n print(strgtype)\n \n listticklabl = []\n if strgtype == 'epocevol':\n chanlist = [[[] for m in gdat.indxstar] for i in gdat.indxruns]\n xpos = np.array(gdat.listyear)\n for i in gdat.indxruns:\n for m in indxstar:\n chanlist[i][m] = [gdat.timejwst[k][i][m] for k in gdat.indxyear]\n for k in gdat.indxyear:\n listticklabl.append('%s' % str(gdat.listyear[k]))\n else:\n chanlist = []\n numbxdat = gdat.numbruns * indxstar.size\n xpos = 0.6 * (np.arange(numbxdat) + 1.)\n\n for i in gdat.indxruns:\n for m in indxstar:\n if strgtype == 'jwstcomp':\n chanlist.append(gdat.timejwst[1][i][m])\n if strgtype == 'paracomp':\n for k in indxpara:\n chanlist.append((gdat.listobjtalle[i][m].posterior_params[gdat.liststrgparaconc[k]] - \\\n np.mean(gdat.listobjtalle[i][m].posterior_params[gdat.liststrgparaconc[k]])) * 24. * 60.)\n \n if strgtype == 'paracomp' or strgtype == 'jwstcomp':\n ticklabl = '%s, %s' % (gdat.liststrgstar[m], gdat.liststrgruns[i])\n listticklabl.append(ticklabl)\n else:\n ticklabl = '%s, %s' % (gdat.liststrgstar[m], gdat.liststrgruns[i])\n listticklabl.append(ticklabl)\n \n if xpos.size != len(listticklabl):\n raise Exception('')\n \n print('xpos')\n summgene(xpos)\n print('chanlist')\n print(chanlist)\n figr, axis = plt.subplots(figsize=(5, 4))\n if strgtype != 'epocevol':\n axis.violinplot(chanlist, xpos, showmedians=True, showextrema=False)\n else:\n for i in gdat.indxruns:\n for m in indxstar:\n axis.violinplot(chanlist[i][m], xpos, showmedians=True, showextrema=False)\n\n axis.set_xticks(xpos)\n if strgtype == 'jwstcomp':\n axis.set_ylabel('Transit time residual in 2023 [min]')\n strgbase = strgtype\n\n if strgtype == 'paracomp':\n if gdat.liststrgparaconc[indxpara] == 'b_period':\n axis.set_ylabel('P [min]')\n else:\n labl = gdat.listlablparaconc[indxpara[0]]\n axis.set_ylabel(labl)\n strgbase = '%04d' % indxpara\n \n if strgtype == 'epocevol':\n axis.set_xlabel('Year')\n axis.set_ylabel('Transit time residual [min]')\n strgbase = strgtype\n \n path = gdat.pathimag + 'viol_%s.%s' % (strgbase, gdat.strgplotextn)\n axis.set_xticklabels(listticklabl)\n plt.tight_layout()\n print('Writing to %s...' % path)\n print()\n figr.savefig(path)\n plt.close()\n \n\ndef plot_viol(pathbase, liststrgstar, liststrgruns, lablstrgruns, pathimag, pvalthrs=1e-3):\n\n strgtimestmp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')\n print('allesfitter postprocessing violin plot started at %s...' 
% strgtimestmp)\n \n # construct global object\n gdat = gdatstrt()\n \n # copy unnamed inputs to the global object\n #for attr, valu in locals().iter():\n for attr, valu in locals().items():\n if '__' not in attr and attr != 'gdat':\n setattr(gdat, attr, valu)\n\n # runs to be compared for each star\n gdat.numbruns = len(liststrgruns)\n gdat.indxruns = np.arange(gdat.numbruns)\n \n gdat.pathimag = pathimag\n gdat.liststrgstar = liststrgstar\n\n # stars\n numbstar = len(liststrgstar)\n gdat.indxstar = np.arange(numbstar)\n\n # plotting\n gdat.strgplotextn = 'png'\n\n # read parameter keys, labels and posterior from allesfitter output\n liststrgpara = [[] for i in gdat.indxruns]\n listlablpara = [[] for i in gdat.indxruns]\n gdat.listobjtalle = [[[] for m in gdat.indxstar] for i in gdat.indxruns]\n for i in gdat.indxruns:\n for m in gdat.indxstar:\n pathalle = pathbase + '%s/allesfits/allesfit_%s/' % (gdat.liststrgstar[m], gdat.liststrgruns[i])\n print('Reading from %s...' % pathalle)\n config.init(pathalle)\n liststrgpara[i] = np.array(config.BASEMENT.fitkeys)\n listlablpara[i] = np.array(config.BASEMENT.fitlabels)\n # read the chain\n print('pathalle')\n print(pathalle)\n gdat.listobjtalle[i][m] = allesfitter.allesclass(pathalle)\n \n # concatenate the keys, labels from different runs\n gdat.liststrgparaconc = np.concatenate(liststrgpara)\n gdat.liststrgparaconc = np.unique(gdat.liststrgparaconc)\n gdat.listlablparaconc = np.copy(gdat.liststrgparaconc)\n for k, strgparaconc in enumerate(gdat.liststrgparaconc):\n for i, strgruns in enumerate(liststrgruns):\n if strgparaconc in liststrgpara[i]:\n gdat.listlablparaconc[k] = listlablpara[i][np.where(liststrgpara[i] == strgparaconc)[0][0]]\n \n gdat.numbparaconc = len(gdat.liststrgparaconc)\n gdat.indxparaconc = np.arange(gdat.numbparaconc)\n for k, strgpara in enumerate(gdat.liststrgparaconc):\n booltemp = True\n for i in gdat.indxruns:\n if not strgpara in liststrgpara[i]:\n booltemp = False\n if not booltemp:\n continue\n \n ## violin plot\n ## mid-transit time prediction\n plot(gdat, gdat.indxstar, indxpara=np.array([k]), strgtype='paracomp')\n ## per-star \n #for m in gdat.indxstar:\n # plot(gdat, indxstar=np.array([m]), indxpara=k, strgtype='paracomp')\n \n # calculate the future evolution of epoch\n gdat.listyear = [2021, 2023, 2025]\n numbyear = len(gdat.listyear)\n gdat.indxyear = np.arange(numbyear)\n gdat.timejwst = [[[[] for m in gdat.indxstar] for i in gdat.indxruns] for k in gdat.indxyear]\n for k, year in enumerate(gdat.listyear):\n epocjwst = astropy.time.Time('%d-01-01 00:00:00' % year, format='iso').jd\n for i in gdat.indxruns:\n for m in gdat.indxstar:\n epoc = gdat.listobjtalle[i][m].posterior_params['b_epoch']\n peri = gdat.listobjtalle[i][m].posterior_params['b_period']\n indxtran = (epocjwst - epoc) / peri\n indxtran = np.mean(np.rint(indxtran))\n if indxtran.size != np.unique(indxtran).size:\n raise Exception('')\n gdat.timejwst[k][i][m] = epoc + peri * indxtran\n gdat.timejwst[k][i][m] -= np.mean(gdat.timejwst[k][i][m])\n gdat.timejwst[k][i][m] *= 24. 
* 60.\n \n listfigr = []\n listaxis = []\n\n # temporal evolution of mid-transit time prediction\n plot(gdat, gdat.indxstar, strgtype='epocevol')\n ## per-star \n #for m in gdat.indxstar:\n # plot(gdat, indxstar=np.array([m]), strgtype='epocevol')\n \n ## mid-transit time prediction\n plot(gdat, gdat.indxstar, strgtype='jwstcomp')\n ## per-star \n #for m in gdat.indxstar:\n # plot(gdat, indxstar=np.array([m]), strgtype='jwstcomp')\n \n return listfigr, listaxis\n\n\n \n\n\n",
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 15 15:57:46 2018\n\n@author:\nMaximilian N. Guenther\nMIT Kavli Institute for Astrophysics and Space Research, \nMassachusetts Institute of Technology,\n77 Massachusetts Avenue,\nCambridge, MA 02109, \nUSA\nEmail: [email protected]\nWeb: www.mnguenther.com\n\"\"\"\n\n#::: plotting settings\nimport seaborn as sns\nsns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)\nsns.set_style({\"xtick.direction\": \"in\",\"ytick.direction\": \"in\"})\nsns.set_context(rc={'lines.markeredgewidth': 1})\n\n#::: modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ellc\nfrom pprint import pprint\n\n\nnp.random.seed(42)\n\n\n\n###############################################################################\n#::: params\n###############################################################################\nparams = {\n 'b_radius_1':0.1,\n 'b_radius_2':0.01,\n 'b_sbratio':0.,\n 'b_incl':89.,\n 'b_epoch':1.1,\n 'b_period':3.4,\n 'b_K':0.1,\n 'b_q':1,\n 'ld_1_Leonardo':'quad',\n 'ldc_1_Leonardo':[0.3,0.1],\n 'ld_1_Michelangelo':'quad',\n 'ldc_1_Michelangelo':[0.5,0.4]\n }\na_1 = 0.019771142 * params['b_K'] * params['b_period']\nparams['b_a'] = (1.+1./params['b_q'])*a_1\npprint(params)\n\nq1 = (0.3 + 0.1)**2\nq2 = 0.5*0.3*(0.3 + 0.1)**(-1)\nprint('Leonardo q1 = '+str(q1))\nprint('Leonardo q1 = '+str(q2))\n\nq1 = (0.5 + 0.4)**2\nq2 = 0.5*0.5*(0.5 + 0.4)**(-1)\nprint('Michelangelo q1 = '+str(q1))\nprint('Michelangelo q1 = '+str(q2))\n\n\n\n###############################################################################\n#::: \"truth\" signals\n###############################################################################\nplanet = 'b'\n\ninst = 'Leonardo'\ntime_Leonardo = np.arange(0,10,5./60./24.)[::3]\ntime_Leonardo = time_Leonardo[ (time_Leonardo<2) | (time_Leonardo>4) ]\nflux_Leonardo = ellc.lc(\n t_obs = time_Leonardo, \n radius_1 = params[planet+'_radius_1'], \n radius_2 = params[planet+'_radius_2'], \n sbratio = params[planet+'_sbratio'],\n incl = params[planet+'_incl'],\n t_zero = params[planet+'_epoch'],\n period = params[planet+'_period'],\n ld_1 = params['ld_1_'+inst],\n ldc_1 = params['ldc_1_'+inst]\n )\nflux_Leonardo += 3e-4*np.sin(time_Leonardo/2.7) #red noise\nflux_Leonardo += np.random.normal(0,2e-3,size=len(flux_Leonardo)) #white noise\nflux_err_Leonardo = 2e-3*np.ones_like(flux_Leonardo) #white noise\nheader = 'time,flux,flux_err'\nX = np.column_stack(( time_Leonardo, flux_Leonardo, flux_err_Leonardo ))\nnp.savetxt('allesfit/Leonardo.csv', X, delimiter=',', header=header)\n\n\n\ninst = 'Michelangelo'\ntime_Michelangelo = np.arange(52,52.25,2./60./24.)[::2]\nflux_Michelangelo = ellc.lc(\n t_obs = time_Michelangelo, \n radius_1 = params[planet+'_radius_1'], \n radius_2 = params[planet+'_radius_2'], \n sbratio = params[planet+'_sbratio'],\n incl = params[planet+'_incl'],\n t_zero = params[planet+'_epoch'],\n period = params[planet+'_period'],\n ld_1 = params['ld_1_'+inst],\n ldc_1 = params['ldc_1_'+inst]\n )\nflux_Michelangelo += 2e-3*np.sin(time_Michelangelo*8) #red noise\nflux_Michelangelo += np.random.normal(0,5e-4,size=len(flux_Michelangelo)) #white noise\nflux_err_Michelangelo = 5e-4*np.ones_like(flux_Michelangelo) #white noise\nheader = 'time,flux,flux_err'\nX = np.column_stack(( time_Michelangelo, flux_Michelangelo, flux_err_Michelangelo ))\nnp.savetxt('allesfit/Michelangelo.csv', X, delimiter=',', header=header)\n\n\n\ninst = 'Donatello'\ntime_Donatello 
= [37.1, 38, 42, 55, 56, 58]\nrv_Donatello = ellc.rv(\n t_obs = time_Donatello, \n a = params[planet+'_a'],\n incl = params[planet+'_incl'], \n t_zero = params[planet+'_epoch'],\n period = params[planet+'_period'],\n q = params[planet+'_q'],\n flux_weighted = False,\n )[0]\nrv_Donatello += np.random.normal(0,6e-3,size=len(rv_Donatello)) #white noise\nrv_err_Donatello = 6e-3*np.ones_like(rv_Donatello) #white noise\nheader = 'time,flux,flux_err'\nX = np.column_stack(( time_Donatello, rv_Donatello, rv_err_Donatello ))\nnp.savetxt('allesfit/Donatello.csv', X, delimiter=',', header=header)\n\n\n\ninst = 'Raphael'\ntime_Raphael = [60, 60.5, 61, 61.5, 62, 62.5, 63]\n#time_Raphael = np.linspace(0,5,1000)\nrv_Raphael = ellc.rv(\n t_obs = time_Raphael, \n a = params[planet+'_a'],\n incl = params[planet+'_incl'], \n t_zero = params[planet+'_epoch'],\n period = params[planet+'_period'],\n q = params[planet+'_q'],\n flux_weighted = False,\n )[0]\nrv_Raphael += np.random.normal(0,1e-3,size=len(rv_Raphael)) #white noise\nrv_err_Raphael = 1e-3*np.ones_like(rv_Raphael) #white noise\nheader = 'time,flux,flux_err'\nX = np.column_stack(( time_Raphael, rv_Raphael, rv_err_Raphael ))\nnp.savetxt('allesfit/Raphael.csv', X, delimiter=',', header=header)\n\n\n\n\n###############################################################################\n#::: plot\n###############################################################################\nfig, axes = plt.subplots(2,2,figsize=(10,10))\naxes[0,0].plot(time_Leonardo, flux_Leonardo, 'b.', label='Leonardo')\naxes[0,0].legend()\naxes[0,0].set(xlabel='BJD', ylabel='Flux')\naxes[0,1].errorbar(time_Michelangelo, flux_Michelangelo, yerr=flux_err_Michelangelo, fmt='b.', label='Michelangelo')\naxes[0,1].legend()\naxes[0,1].set(xlabel='BJD', ylabel='Flux')\naxes[1,0].errorbar(time_Donatello, rv_Donatello, yerr=rv_err_Donatello, fmt='b.', label='Donatello')\naxes[1,0].legend()\naxes[1,0].set(xlabel='BJD', ylabel='RV (km/s)')\naxes[1,1].errorbar(time_Raphael, rv_Raphael, yerr=rv_err_Raphael, fmt='b.', label='Raphael')\naxes[1,1].legend()\naxes[1,1].set(xlabel='BJD', ylabel='RV (km/s)')\nplt.tight_layout()\nfig.savefig('allesfit/data.png', bbox_inches='tight')"
] | [
[
"numpy.format_float_positional",
"numpy.random.seed",
"numpy.isnan",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.interp",
"matplotlib.pyplot.close",
"numpy.format_float_scientific",
"numpy.array",
"numpy.where",
"numpy.float",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"numpy.arange",
"numpy.rint",
"matplotlib.pyplot.subplots",
"numpy.concatenate",
"numpy.copy",
"numpy.mean",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.where"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.ones_like",
"numpy.random.seed",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.sin",
"numpy.column_stack",
"numpy.savetxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
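The `tls_search` code in the record above repeatedly calls a `mask(time, flux, flux_err, period, duration, T0)` helper to hide known or already-detected transits before the next TLS iteration, but that helper is not part of the excerpt. Below is a minimal numpy sketch of such phase-based masking; the name `mask_transits` and the exact signature are illustrative assumptions, not the repository's actual implementation.

```python
import numpy as np

def mask_transits(time, flux, flux_err, period, duration, epoch):
    # Phase-fold so that phase 0 lands on the mid-transit time `epoch`,
    # with phase expressed in the range [-0.5, 0.5).
    phase = ((time - epoch) / period + 0.5) % 1.0 - 0.5
    # Keep only points farther than half a transit duration (in days)
    # from the nearest expected mid-transit.
    keep = np.abs(phase * period) > 0.5 * duration
    if flux_err is None:
        return time[keep], flux[keep], None
    return time[keep], flux[keep], flux_err[keep]

# Example: mask a known P = 3.4 d, T0 = 1.1 transit (the values simulated in
# the third file of this record) of 2 h duration before a new search.
t = np.linspace(0.0, 27.0, 10000)
f = np.ones_like(t)
t2, f2, _ = mask_transits(t, f, None, period=3.4, duration=2.0 / 24.0, epoch=1.1)
```

Note that the search loop in the record masks 1.5 times the fitted duration (`np.max((1.5*correct_duration))`) to leave a safety margin around each detected signal.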
IgnacioCarlucho/linear_nonlinear_control | [
"37a7d720f64c6441c7eda386fa2eb6948634e120"
] | [
"mpc/extend/main_track.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport copy\n\n# from mpc_func_with_cvxopt import MpcController as MpcController_cvxopt\nfrom extended_MPC import IterativeMpcController\nfrom animation import AnimDrawer\n# from control import matlab\nfrom coordinate_trans import coordinate_transformation_in_angle, coordinate_transformation_in_position\nfrom traj_func import make_sample_traj\nfrom func_curvature import calc_curvatures, calc_ideal_vel\n\nclass WheeledSystem():\n \"\"\"SampleSystem, this is the simulator\n Kinematic model of car\n\n Attributes\n -----------\n xs : numpy.ndarray\n system states, [x, y, phi, beta]\n history_xs : list\n time history of state\n tau : float\n time constant of tire\n FRONT_WHEEL_BASE : float\n REAR_WHEEL_BASE : float\n predict_xs : \n \"\"\"\n def __init__(self, init_states=None):\n \"\"\"\n Palameters\n -----------\n init_state : float, optional, shape(3, )\n initial state of system default is None\n \"\"\"\n self.NUM_STATE = 4\n self.xs = np.zeros(self.NUM_STATE)\n\n self.tau = 0.01\n\n self.FRONT_WHEELE_BASE = 1.0\n self.REAR_WHEELE_BASE = 1.0\n\n if init_states is not None:\n self.xs = copy.deepcopy(init_states)\n\n self.history_xs = [init_states]\n self.history_predict_xs = []\n\n def update_state(self, us, dt=0.01):\n \"\"\"\n Palameters\n ------------\n u : numpy.ndarray\n inputs of system in some cases this means the reference\n dt : float in seconds, optional\n sampling time of simulation, default is 0.01 [s]\n \"\"\"\n k0 = [0.0 for _ in range(self.NUM_STATE)]\n k1 = [0.0 for _ in range(self.NUM_STATE)]\n k2 = [0.0 for _ in range(self.NUM_STATE)]\n k3 = [0.0 for _ in range(self.NUM_STATE)]\n\n functions = [self._func_x_1, self._func_x_2, self._func_x_3, self._func_x_4]\n\n # solve Runge-Kutta\n for i, func in enumerate(functions):\n k0[i] = dt * func(self.xs[0], self.xs[1], self.xs[2], self.xs[3], us[0], us[1])\n\n for i, func in enumerate(functions):\n k1[i] = dt * func(self.xs[0] + k0[0]/2., self.xs[1] + k0[1]/2., self.xs[2] + k0[2]/2., self.xs[3] + k0[3]/2, us[0], us[1])\n \n for i, func in enumerate(functions):\n k2[i] = dt * func(self.xs[0] + k1[0]/2., self.xs[1] + k1[1]/2., self.xs[2] + k1[2]/2., self.xs[3] + k1[3]/2., us[0], us[1])\n \n for i, func in enumerate(functions):\n k3[i] = dt * func(self.xs[0] + k2[0], self.xs[1] + k2[1], self.xs[2] + k2[2], self.xs[3] + k2[3], us[0], us[1])\n \n self.xs[0] += (k0[0] + 2. * k1[0] + 2. * k2[0] + k3[0]) / 6.\n self.xs[1] += (k0[1] + 2. * k1[1] + 2. * k2[1] + k3[1]) / 6.\n self.xs[2] += (k0[2] + 2. * k1[2] + 2. * k2[2] + k3[2]) / 6.\n self.xs[3] += (k0[3] + 2. * k1[3] + 2. 
* k2[3] + k3[3]) / 6.\n \n # save\n save_states = copy.deepcopy(self.xs)\n self.history_xs.append(save_states)\n # print(self.xs)\n\n def predict_state(self, us, dt=0.01):\n \"\"\"make predict state by using optimal input made by MPC\n Paramaters\n -----------\n us : array-like, shape(2, N)\n optimal input made by MPC\n dt : float in seconds, optional\n sampling time of simulation, default is 0.01 [s]\n \"\"\"\n\n xs = copy.deepcopy(self.xs)\n predict_xs = [copy.deepcopy(xs)]\n\n for i in range(us.shape[1]):\n k0 = [0.0 for _ in range(self.NUM_STATE)]\n k1 = [0.0 for _ in range(self.NUM_STATE)]\n k2 = [0.0 for _ in range(self.NUM_STATE)]\n k3 = [0.0 for _ in range(self.NUM_STATE)]\n\n functions = [self._func_x_1, self._func_x_2, self._func_x_3, self._func_x_4]\n\n # solve Runge-Kutta\n for i, func in enumerate(functions):\n k0[i] = dt * func(xs[0], xs[1], xs[2], xs[3], us[0, i], us[1, i])\n\n for i, func in enumerate(functions):\n k1[i] = dt * func(xs[0] + k0[0]/2., xs[1] + k0[1]/2., xs[2] + k0[2]/2., xs[3] + k0[3]/2., us[0, i], us[1, i])\n \n for i, func in enumerate(functions):\n k2[i] = dt * func(xs[0] + k1[0]/2., xs[1] + k1[1]/2., xs[2] + k1[2]/2., xs[3] + k1[3]/2., us[0, i], us[1, i])\n \n for i, func in enumerate(functions):\n k3[i] = dt * func(xs[0] + k2[0], xs[1] + k2[1], xs[2] + k2[2], xs[3] + k2[3], us[0, i], us[1, i])\n \n xs[0] += (k0[0] + 2. * k1[0] + 2. * k2[0] + k3[0]) / 6.\n xs[1] += (k0[1] + 2. * k1[1] + 2. * k2[1] + k3[1]) / 6.\n xs[2] += (k0[2] + 2. * k1[2] + 2. * k2[2] + k3[2]) / 6.\n xs[3] += (k0[3] + 2. * k1[3] + 2. * k2[3] + k3[3]) / 6.\n\n predict_xs.append(copy.deepcopy(xs))\n\n self.history_predict_xs.append(np.array(predict_xs))\n\n def _func_x_1(self, y_1, y_2, y_3, y_4, u_1, u_2):\n \"\"\"\n Parameters\n ------------\n y_1 : float\n y_2 : float\n y_3 : float\n u_1 : float\n system input\n u_2 : float\n system input\n \"\"\"\n # y_dot = u_1 * math.cos(y_3 + y_4)\n y_dot = u_1 * math.cos(y_3)\n\n return y_dot\n \n def _func_x_2(self, y_1, y_2, y_3, y_4, u_1, u_2):\n \"\"\"\n Parameters\n ------------\n y_1 : float\n y_2 : float\n y_3 : float\n u_1 : float\n system input\n u_2 : float\n system input\n \"\"\"\n # y_dot = u_1 * math.sin(y_3 + y_4)\n y_dot = u_1 * math.sin(y_3)\n\n return y_dot\n \n def _func_x_3(self, y_1, y_2, y_3, y_4, u_1, u_2):\n \"\"\"\n Parameters\n ------------\n y_1 : float\n y_2 : float\n y_3 : float\n u_1 : float\n system input\n u_2 : float\n system input\n \"\"\"\n # y_dot = u_1 / self.REAR_WHEELE_BASE * math.sin(y_4)\n y_dot = u_1 * math.tan(y_4) / (self.REAR_WHEELE_BASE + self.FRONT_WHEELE_BASE)\n\n return y_dot\n\n def _func_x_4(self, y_1, y_2, y_3, y_4, u_1, u_2):\n \"\"\"Ad, Bd, W_D, Q, R\n ParAd, Bd, W_D, Q, R\n ---Ad, Bd, W_D, Q, R\n y_1 : float\n y_2 : float\n y_3 : float\n u_1 : float\n system input\n u_2 : float\n system input\n \"\"\"\n # y_dot = math.atan2(self.REAR_WHEELE_BASE * math.tan(u_2) ,self.REAR_WHEELE_BASE + self.FRONT_WHEELE_BASE)\n y_dot = - 1. 
/ self.tau * (y_4 - u_2)\n\n return y_dot\n\nclass SystemModel():\n \"\"\"\n Attributes\n -----------\n WHEEL_BASE : float\n wheel base of the car\n Ad_s : list\n list of system model matrix Ad\n Bd_s : list\n list of system model matrix Bd\n W_D_s : list\n list of system model matrix W_D_s\n Q : numpy.ndarray\n R : numpy.ndarray\n \"\"\"\n def __init__(self, tau = 0.01, dt = 0.01):\n \"\"\"\n Parameters\n -----------\n tau : time constant, optional\n dt : sampling time, optional\n \"\"\"\n self.dt = dt\n self.tau = tau\n self.WHEEL_BASE = 2.2\n\n self.Ad_s = []\n self.Bd_s = []\n self.W_D_s = []\n\n def calc_predict_sytem_model(self, V, curvatures, predict_step):\n \"\"\"\n calc next predict systemo models\n V : float\n current speed of car\n curvatures : list\n this is the next curvature's list\n predict_step : int\n predict step of MPC\n \"\"\"\n for i in range(predict_step):\n delta_r = math.atan2(self.WHEEL_BASE, 1. / curvatures[i])\n\n A12 = (V / self.WHEEL_BASE) / (math.cos(delta_r)**2)\n A22 = (1. - 1. / self.tau * self.dt)\n\n Ad = np.array([[1., V * self.dt, 0.], \n [0., 1., A12 * self.dt],\n [0., 0., A22]])\n\n Bd = np.array([[0.], [0.], [1. / self.tau]]) * self.dt\n\n # -v*curvature + v/L*(tan(delta_r)-delta_r*cos_delta_r_squared_inv);\n # W_D_0 = V / self.WHEEL_BASE * (delta_r / (math.cos(delta_r)**2)\n W_D_0 = -V * curvatures[i] + (V / self.WHEEL_BASE) * (math.tan(delta_r) - delta_r / (math.cos(delta_r)**2))\n\n W_D = np.array([[0.], [W_D_0], [0.]]) * self.dt\n\n self.Ad_s.append(Ad)\n self.Bd_s.append(Bd)\n self.W_D_s.append(W_D)\n\n # return self.Ad_s, self.Bd_s, self.W_D_s\n\ndef search_nearest_point(points, base_point):\n \"\"\"\n Parameters\n -----------\n points : numpy.ndarray, shape is (2, N)\n base_point : numpy.ndarray, shape is (2, 1)\n\n Returns\n -------\n nearest_index : \n nearest_point : \n \"\"\"\n distance_mat = np.sqrt(np.sum((points - base_point)**2, axis=0))\n\n index_min = np.argmin(distance_mat)\n\n return index_min, points[:, index_min]\n\n\ndef main():\n # parameters\n dt = 0.01\n simulation_time = 20 # in seconds\n PREDICT_STEP = 30\n iteration_num = int(simulation_time / dt)\n\n # make simulator with coninuous matrix\n init_xs_lead = np.array([0., 0., math.pi/5, 0.])\n lead_car = WheeledSystem(init_states=init_xs_lead)\n\n # make system model\n lead_car_system_model = SystemModel()\n\n # reference\n history_traj_ref = []\n history_angle_ref = []\n traj_ref_xs, traj_ref_ys = make_sample_traj(int(simulation_time/dt))\n traj_ref = np.array([traj_ref_xs, traj_ref_ys])\n \n # nearest point\n index_min, nearest_point = search_nearest_point(traj_ref, lead_car.xs[:2].reshape(2, 1))\n\n # get traj's curvature\n NUM_SKIP = 3\n MARGIN = 50\n angles, curvatures = calc_curvatures(traj_ref[:, index_min + MARGIN:index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN], PREDICT_STEP, NUM_SKIP)\n\n # save traj ref\n history_traj_ref.append(traj_ref[:, index_min + MARGIN:index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN])\n history_angle_ref.append(angles)\n\n # print(history_traj_ref)\n # input()\n\n # evaluation function weight\n Q = np.diag([1e2, 1., 1e3])\n R = np.diag([1e2])\n\n # System model update\n V = calc_ideal_vel(traj_ref, dt) # in pratical we should calc from the state\n lead_car_system_model.calc_predict_sytem_model(V, curvatures, PREDICT_STEP)\n\n # make controller with discreted matrix\n lead_controller = IterativeMpcController(lead_car_system_model, Q, R, PREDICT_STEP,\n dt_input_upper=np.array([1 * dt]), dt_input_lower=np.array([-1 * dt]),\n 
input_upper=np.array([1.]), input_lower=np.array([-1.]))\n\n\n # initialize\n lead_controller.initialize_controller()\n \n for i in range(iteration_num):\n print(\"simulation time = {0}\".format(i))\n\n ## lead\n # world traj\n lead_states = lead_car.xs\n\n # nearest point\n index_min, nearest_point = search_nearest_point(traj_ref, lead_car.xs[:2].reshape(2, 1))\n\n # end check\n if len(traj_ref_ys) <= index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN:\n print(\"break\")\n break \n\n # get traj's curvature\n angles, curvatures = calc_curvatures(traj_ref[:, index_min+MARGIN:index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN], PREDICT_STEP, NUM_SKIP)\n\n # save\n history_traj_ref.append(traj_ref[:, index_min + MARGIN:index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN])\n history_angle_ref.append(angles)\n\n # System model update\n V = calc_ideal_vel(traj_ref, dt) # in pratical we should calc from the state\n lead_car_system_model.calc_predict_sytem_model(V, curvatures, PREDICT_STEP)\n\n # transformation\n # car\n relative_car_position = coordinate_transformation_in_position(lead_states[:2].reshape(2, 1), nearest_point)\n relative_car_position = coordinate_transformation_in_angle(relative_car_position, angles[0])\n\n relative_car_angle = lead_states[2] - angles[0]\n relative_car_state = np.hstack((relative_car_position[1], relative_car_angle, lead_states[-1]))\n\n # traj_ref\n relative_traj = coordinate_transformation_in_position(traj_ref[:, index_min:index_min + PREDICT_STEP], nearest_point)\n relative_traj = coordinate_transformation_in_angle(relative_traj, angles[0])\n relative_ref_angle = np.array(angles) - angles[0]\n\n # make ref\n lead_reference = np.array([[relative_traj[1, -1], relative_ref_angle[-1], 0.] for i in range(PREDICT_STEP)]).flatten()\n\n print(\"relative car state = {}\".format(relative_car_state))\n print(\"nearest point = {}\".format(nearest_point))\n # input()\n\n # update system matrix\n lead_controller.update_system_model(lead_car_system_model)\n\n lead_opt_u, all_opt_u = lead_controller.calc_input(relative_car_state, lead_reference)\n\n lead_opt_u = np.hstack((np.array([V]), lead_opt_u))\n\n all_opt_u = np.stack((np.ones(PREDICT_STEP)*V, all_opt_u.flatten()))\n\n print(\"opt_u = {}\".format(lead_opt_u))\n print(\"all_opt_u = {}\".format(all_opt_u))\n \n # predict\n lead_car.predict_state(all_opt_u, dt=dt)\n\n # update\n lead_car.update_state(lead_opt_u, dt=dt)\n\n # print(lead_car.history_predict_xs)\n # input()\n\n # figures and animation\n lead_history_states = np.array(lead_car.history_xs)\n lead_history_predict_states = lead_car.history_predict_xs\n\n \"\"\"\n time_history_fig = plt.figure()\n x_fig = time_history_fig.add_subplot(311)\n y_fig = time_history_fig.add_subplot(312)\n theta_fig = time_history_fig.add_subplot(313)\n\n car_traj_fig = plt.figure()\n traj_fig = car_traj_fig.add_subplot(111)\n traj_fig.set_aspect('equal')\n\n x_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_states[:, 0], label=\"lead\")\n x_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_states[:, 0], label=\"follow\")\n x_fig.set_xlabel(\"time [s]\")\n x_fig.set_ylabel(\"x\")\n x_fig.legend()\n\n y_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_states[:, 1], label=\"lead\")\n y_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_states[:, 1], label=\"follow\")\n y_fig.plot(np.arange(0, simulation_time+0.01, dt), [4. 
for _ in range(iteration_num+1)], linestyle=\"dashed\")\n y_fig.set_xlabel(\"time [s]\")\n y_fig.set_ylabel(\"y\")\n y_fig.legend()\n\n theta_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_states[:, 2], label=\"lead\")\n theta_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_states[:, 2], label=\"follow\")\n theta_fig.plot(np.arange(0, simulation_time+0.01, dt), [0. for _ in range(iteration_num+1)], linestyle=\"dashed\")\n theta_fig.set_xlabel(\"time [s]\")\n theta_fig.set_ylabel(\"theta\")\n theta_fig.legend()\n\n time_history_fig.tight_layout()\n\n traj_fig.plot(lead_history_states[:, 0], lead_history_states[:, 1], label=\"lead\")\n traj_fig.plot(follow_history_states[:, 0], follow_history_states[:, 1], label=\"follow\")\n traj_fig.set_xlabel(\"x\")\n traj_fig.set_ylabel(\"y\")\n traj_fig.legend()\n plt.show()\n\n lead_history_us = np.array(lead_controller.history_us)\n follow_history_us = np.array(follow_controller.history_us)\n input_history_fig = plt.figure()\n u_1_fig = input_history_fig.add_subplot(111)\n\n u_1_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_us[:, 0], label=\"lead\")\n u_1_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_us[:, 0], label=\"follow\")\n u_1_fig.set_xlabel(\"time [s]\")\n u_1_fig.set_ylabel(\"u_omega\")\n \n input_history_fig.tight_layout()\n plt.show()\n \"\"\"\n\n animdrawer = AnimDrawer([lead_history_states, lead_history_predict_states, traj_ref, history_traj_ref, history_angle_ref])\n animdrawer.draw_anim()\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.diag",
"numpy.hstack",
"numpy.ones",
"numpy.argmin",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
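`WheeledSystem.update_state` and `predict_state` in the record above hand-roll the same classical fourth-order Runge-Kutta update, one state component at a time across four loops. The sketch below restates that integrator on the whole state vector at once; the kinematic-bicycle dynamics reuse the record's wheel base (1.0 m front + 1.0 m rear) and steering time constant tau = 0.01, but the function names here are illustrative, not the repository's API.

```python
import numpy as np

def rk4_step(f, x, u, dt):
    # Classical RK4: the same update rule as WheeledSystem.update_state,
    # with each stage pre-multiplied by dt exactly as in the record.
    k0 = dt * f(x, u)
    k1 = dt * f(x + 0.5 * k0, u)
    k2 = dt * f(x + 0.5 * k1, u)
    k3 = dt * f(x + k2, u)
    return x + (k0 + 2.0 * k1 + 2.0 * k2 + k3) / 6.0

def bicycle_dynamics(x, u, wheel_base=2.0, tau=0.01):
    # State x = [x, y, yaw, steer]; input u = [speed, steering reference],
    # mirroring _func_x_1 .. _func_x_4 in the record.
    v, steer_ref = u
    return np.array([
        v * np.cos(x[2]),               # x_dot
        v * np.sin(x[2]),               # y_dot
        v * np.tan(x[3]) / wheel_base,  # yaw_dot
        -(x[3] - steer_ref) / tau,      # first-order steering lag
    ])

x = np.array([0.0, 0.0, np.pi / 5.0, 0.0])  # same initial state as main()
for _ in range(100):                        # 1 s of simulation at dt = 0.01
    x = rk4_step(bicycle_dynamics, x, np.array([1.0, 0.1]), dt=0.01)
```

Vectorizing the stages this way removes the repeated per-component loops without changing the numerics.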
ili0820/Superresolution | [
"69d3f578d00c5521928c0614894d70b63ed42963"
] | [
"utility.py"
] | [
"import math\nimport time\nimport random\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lrs\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.device_count() == 1:\n torch.cuda.manual_seed(seed)\n else:\n torch.cuda.manual_seed_all(seed)\n \n\nclass timer():\n def __init__(self):\n self.acc = 0\n self.tic()\n\n def tic(self):\n self.t0 = time.time()\n\n def toc(self):\n return time.time() - self.t0\n\n def hold(self):\n self.acc += self.toc()\n\n def release(self):\n ret = self.acc\n self.acc = 0\n\n return ret\n\n def reset(self):\n self.acc = 0\n\n\ndef quantize(img, rgb_range):\n pixel_range = 255 / rgb_range\n return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)\n\n\ndef calc_psnr(sr, hr, scale, rgb_range, benchmark=False):\n if sr.size(-2) > hr.size(-2) or sr.size(-1) > hr.size(-1):\n print(\"the dimention of sr image is not equal to hr's! \")\n sr = sr[:,:,:hr.size(-2),:hr.size(-1)]\n diff = (sr - hr).data.div(rgb_range)\n\n if benchmark:\n shave = scale\n if diff.size(1) > 1:\n convert = diff.new(1, 3, 1, 1)\n convert[0, 0, 0, 0] = 65.738\n convert[0, 1, 0, 0] = 129.057\n convert[0, 2, 0, 0] = 25.064\n diff.mul_(convert).div_(256)\n diff = diff.sum(dim=1, keepdim=True)\n else:\n shave = scale + 6\n\n valid = diff[:, :, shave:-shave, shave:-shave]\n mse = valid.pow(2).mean()\n\n return -10 * math.log10(mse)\n\n\ndef make_optimizer(opt, my_model):\n trainable = filter(lambda x: x.requires_grad, my_model.parameters())\n optimizer_function = optim.Adam\n kwargs = {\n 'betas': (opt.beta1, opt.beta2),\n 'eps': opt.epsilon\n }\n kwargs['lr'] = opt.lr\n kwargs['weight_decay'] = opt.weight_decay\n \n return optimizer_function(trainable, **kwargs)\n\n\ndef make_dual_optimizer(opt, dual_models):\n dual_optimizers = []\n for dual_model in dual_models:\n temp_dual_optim = torch.optim.Adam(\n params=dual_model.parameters(),\n lr = opt.lr, \n betas = (opt.beta1, opt.beta2),\n eps = opt.epsilon,\n weight_decay=opt.weight_decay)\n dual_optimizers.append(temp_dual_optim)\n \n return dual_optimizers\n\n\ndef make_scheduler(opt, my_optimizer):\n scheduler = lrs.CosineAnnealingLR(\n my_optimizer,\n float(opt.epochs),\n eta_min=opt.eta_min\n )\n\n return scheduler\n\n\ndef make_dual_scheduler(opt, dual_optimizers):\n dual_scheduler = []\n for i in range(len(dual_optimizers)):\n scheduler = lrs.CosineAnnealingLR(\n dual_optimizers[i],\n float(opt.epochs),\n eta_min=opt.eta_min\n )\n dual_scheduler.append(scheduler)\n\n return dual_scheduler\n\n\ndef init_model(args):\n # Set the templates here\n if args.model.find('DRN-S') >= 0:\n if args.scale == 4:\n args.n_blocks = 30\n args.n_feats = 16\n elif args.scale == 8:\n args.n_blocks = 30\n args.n_feats = 8\n else:\n print('Use defaults n_blocks and n_feats.')\n args.dual = True\n\n if args.model.find('DRN-L') >= 0:\n if args.scale == 4:\n args.n_blocks = 40\n args.n_feats = 20\n elif args.scale == 8:\n args.n_blocks = 36\n args.n_feats = 10\n else:\n print('Use defaults n_blocks and n_feats.')\n args.dual = True\n\n\n"
] | [
[
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"torch.cuda.device_count"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
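`calc_psnr` in the record above computes PSNR as -10 * log10(MSE) of the range-normalized residual, after shaving a border of `scale` (or `scale + 6`) pixels and, on benchmark sets, collapsing RGB to luma with the 65.738/129.057/25.064 weights. The sketch below restates just the core formula on toy tensors; `psnr` is a hypothetical helper name, not part of the repository, and it deliberately omits the border shave and luma conversion.

```python
import math
import torch

def psnr(sr, hr, rgb_range=255.0):
    # Core of calc_psnr: normalize the residual by the pixel range,
    # then PSNR = -10 * log10(MSE).
    mse = ((sr - hr) / rgb_range).pow(2).mean().item()
    return -10.0 * math.log10(mse)

sr = torch.full((1, 3, 8, 8), 128.0)  # fake super-resolved output
hr = torch.full((1, 3, 8, 8), 130.0)  # fake ground truth
print(round(psnr(sr, hr), 2))         # ~42.11 dB for a uniform 2/255 error
```

Shaving the border before measuring matters in practice because upsampling artifacts concentrate at image edges and would otherwise dominate the MSE.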
seibert/metagraph-cuda | [
"ba29e59604cd34864cfed763c9ce9dc21d5b1377"
] | [
"metagraph_cuda/plugins/cugraph/types.py"
] | [
"import numpy as np\nfrom metagraph.wrappers import (\n EdgeSetWrapper,\n EdgeMapWrapper,\n CompositeGraphWrapper,\n BipartiteGraphWrapper,\n)\nfrom metagraph import dtypes\nfrom metagraph.types import (\n Graph,\n BipartiteGraph,\n EdgeSet,\n EdgeMap,\n)\nfrom .. import has_cugraph\nfrom typing import List, Set, Dict, Any\n\nif has_cugraph:\n import cugraph\n import cudf\n\n from ..cudf.types import CuDFNodeSet, CuDFNodeMap\n\n class CuGraphEdgeSet(EdgeSetWrapper, abstract=EdgeSet):\n def __init__(self, graph):\n self.value = graph\n\n class TypeMixin:\n @classmethod\n def _compute_abstract_properties(\n cls, obj, props: List[str], known_props: Dict[str, Any]\n ) -> Dict[str, Any]:\n ret = known_props.copy()\n\n # fast properties\n for prop in {\"is_directed\"} - ret.keys():\n if prop == \"is_directed\":\n ret[prop] = obj.value.is_directed()\n\n return ret\n\n @classmethod\n def assert_equal(\n cls,\n obj1,\n obj2,\n aprops1,\n aprops2,\n cprops1,\n cprops2,\n *,\n rel_tol=None,\n abs_tol=None,\n ):\n assert (\n aprops1 == aprops2\n ), f\"abstract property mismatch: {aprops1} != {aprops2}\"\n g1 = obj1.value\n g2 = obj2.value\n # Compare\n g1_type = type(g1.nodes())\n g2_type = type(g2.nodes())\n assert g1_type == g2_type, f\"node type mismatch: {g1_type} != {g2_type}\"\n nodes_equal = (g1.nodes() == g2.nodes()).all()\n if isinstance(nodes_equal, cudf.DataFrame):\n nodes_equal = nodes_equal.all()\n assert nodes_equal, f\"node mismatch: {g1.nodes()} != {g2.nodes()}\"\n assert len(g1.edges()) == len(\n g2.edges()\n ), f\"edge mismatch: {g1.edges()} != {g2.edges()}\"\n g1_edges_reindexed = g1.edges().set_index([\"src\", \"dst\"])\n g2_edges_reindexed = g2.edges().set_index([\"src\", \"dst\"])\n assert (\n g2_edges_reindexed.index.isin(g2_edges_reindexed.index).all().item()\n ), f\"edge mismatch: {g1.edges()} != {g2.edges()}\"\n\n class CuGraphEdgeMap(EdgeMapWrapper, abstract=EdgeMap):\n def __init__(self, graph):\n self.value = graph\n self._assert_instance(graph, cugraph.Graph)\n\n def _determine_dtype(self, all_values):\n all_types = {type(v) for v in all_values}\n if not all_types or (all_types - {float, int, bool}):\n return \"str\"\n for type_ in (float, int, bool):\n if type_ in all_types:\n return str(type_.__name__)\n\n class TypeMixin:\n @classmethod\n def _compute_abstract_properties(\n cls, obj, props: List[str], known_props: Dict[str, Any]\n ) -> Dict[str, Any]:\n ret = known_props.copy()\n\n # fast properties\n for prop in {\"is_directed\", \"dtype\"} - ret.keys():\n if prop == \"is_directed\":\n ret[prop] = obj.value.is_directed()\n if prop == \"dtype\":\n if obj.value.edgelist:\n obj_dtype = obj.value.view_edge_list().weights.dtype\n else:\n obj_dtype = obj.value.view_adj_list()[2].dtype\n ret[prop] = dtypes.dtypes_simplified[obj_dtype]\n\n # slow properties, only compute if asked\n slow_props = props - ret.keys()\n if \"has_negative_weights\" in slow_props:\n if obj.value.edgelist:\n weights = obj.value.view_edge_list().weights\n else:\n weights = obj.value.view_adj_list()[2]\n ret[\"has_negative_weights\"] = (weights < 0).any()\n\n return ret\n\n @classmethod\n def assert_equal(\n cls,\n obj1,\n obj2,\n aprops1,\n aprops2,\n cprops1,\n cprops2,\n *,\n rel_tol=1e-9,\n abs_tol=0.0,\n ):\n assert (\n aprops1 == aprops2\n ), f\"abstract property mismatch: {aprops1} != {aprops2}\"\n g1 = obj1.value\n g2 = obj2.value\n # Compare\n assert (\n g1.number_of_nodes() == g2.number_of_nodes()\n ), f\"{g1.number_of_nodes()} != {g2.number_of_nodes()}\"\n assert (\n g1.number_of_edges() == 
g2.number_of_edges()\n ), f\"{g1.number_of_edges()} != {g2.number_of_edges()}\"\n\n if g1.edgelist:\n g1_edge_list = g1.view_edge_list()\n g1_nodes = cudf.concat(\n [g1_edge_list[\"src\"], g1_edge_list[\"dst\"]]\n ).unique()\n g2_edge_list = g2.view_edge_list()\n g2_nodes = cudf.concat(\n [g2_edge_list[\"src\"], g2_edge_list[\"dst\"]]\n ).unique()\n assert (\n g1_nodes.isin(g2_nodes).all() and g2_nodes.isin(g1_nodes).all()\n ), \"g1 and g2 have different nodes\"\n assert len(g1_edge_list) == len(\n g2_edge_list\n ), f\"g1 and g2 have a different number of edges\"\n # TODO the below takes an additional possibly unneeded O(n) memory\n assert len(g1.edges()) == len(\n g2.edges()\n ), f\"edge mismatch: {g1.edges()} != {g2.edges()}\"\n g1_edges_reindexed = g1_edge_list.set_index(\n [\"src\", \"dst\", \"weights\"]\n )\n g2_edges_reindexed = g2_edge_list.set_index(\n [\"src\", \"dst\", \"weights\"]\n )\n assert (\n g2_edges_reindexed.index.isin(g2_edges_reindexed.index)\n .all()\n .item()\n ), f\"edge mismatch: {g1.edges()} != {g2.edges()}\"\n else:\n assert (\n g1.number_of_nodes() == g2.number_of_nodes()\n ), \"g1 and g2 have different nodes\"\n for i, g1_series in enumerate(g1.view_adj_list()):\n g2_series = g2.view_adj_list()[i]\n assert (g1_series == None) == (\n g2_series == None\n ), \"one of g1 or g2 is weighted while the other is not\"\n if g1_series != None:\n if np.issubdtype(g1_series.dtype.type, np.float):\n assert cupy.isclose(g1_series == g2_series)\n else:\n assert all(\n g1_series == g2_series\n ), \"g1 and g2 have different edges\"\n\n class CuGraph(CompositeGraphWrapper, abstract=Graph):\n def __init__(self, edges, nodes=None):\n if isinstance(edges, cugraph.Graph):\n if edges.edgelist:\n if edges.edgelist.weights:\n edges = CuGraphEdgeMap(edges)\n else:\n edges = CuGraphEdgeSet(edges)\n elif edges.adjlist:\n if edges.view_adj_list()[-1] is not None:\n edges = CuGraphEdgeMap(edges)\n else:\n edges = CuGraphEdgeSet(edges)\n self._assert_instance(edges, (CuGraphEdgeSet, CuGraphEdgeMap))\n if nodes is not None:\n self._assert_instance(nodes, (CuDFNodeSet, CuDFNodeMap))\n super().__init__(edges, nodes)\n\n class CuGraphBipartiteGraph(BipartiteGraphWrapper, abstract=BipartiteGraph):\n def __init__(self, graph):\n \"\"\"\n :param graph: cugraph.Graph instance s.t. 
cugraph.Graph.is_bipartite() returns True\n \"\"\"\n self._assert_instance(graph, cugraph.Graph)\n self._assert(graph.is_bipartite(), f\"{graph} is not bipartite\")\n nodes = graph.sets() # TODO consider storing this as an attribute\n self._assert(len(nodes) == 2, \"nodes must have length of 2\")\n self._assert_instance(nodes[0], cudf.Series)\n self._assert_instance(nodes[1], cudf.Series)\n # O(n^2), but cheaper than converting to Python sets\n common_nodes = nodes[0][nodes[0].isin(nodes[1])]\n if len(common_nodes) != 0:\n raise ValueError(\n f\"Node IDs found in both parts of the graph: {common_nodes.values.tolist()}\"\n )\n partition_nodes = cudf.concat([nodes[0], nodes[1]])\n unclaimed_nodes_mask = ~graph.nodes().isin(partition_nodes)\n if unclaimed_nodes_mask.any():\n unclaimed_nodes = graph.nodes()[unclaimed_nodes_mask].values.tolist()\n raise ValueError(\n f\"Node IDs found in graph, but not listed in either partition: {unclaimed_nodes}\"\n )\n # TODO handle node weights\n self.value = graph\n\n class TypeMixin:\n @classmethod\n def _compute_abstract_properties(\n cls, obj, props: Set[str], known_props: Dict[str, Any]\n ) -> Dict[str, Any]:\n ret = known_props.copy()\n\n if {\"edge_type\", \"edge_dtype\", \"edge_has_negative_weights\"} & (\n props - ret.keys()\n ):\n if obj.value.edgelist:\n edgelist = obj.value.view_edge_list()\n weights = (\n edgelist.weights if \"weights\" in edgelist.columns else None\n )\n else:\n weights = obj.value.view_adj_list()[2]\n\n # fast properties\n for prop in {\"is_directed\", \"edge_type\", \"edge_dtype\",} - ret.keys():\n if prop == \"is_directed\":\n ret[prop] = obj.value.is_directed()\n elif prop == \"edge_type\":\n ret[prop] = \"set\" if weights is None else \"map\"\n elif prop == \"edge_dtype\":\n ret[prop] = None if weights is None else dtypes.dtypes_simplified[weights.dtype]\n\n # slow properties, only compute if asked\n slow_props = props - ret.keys()\n if {\"node0_dtype\", \"node1_dtype\"} & slow_props:\n nodes = obj.value.sets()\n for prop in {\"node0_dtype\", \"node1_dtype\"} & slow_props:\n if prop == \"node0_dtype\":\n ret[prop] = dtypes.dtypes_simplified[nodes[0].dtype]\n elif prop == \"node1_dtype\":\n ret[prop] = dtypes.dtypes_simplified[nodes[1].dtype]\n slow_props = slow_props - ret.keys()\n if {\n \"node0_type\",\n \"node1_type\",\n \"edge_has_negative_weights\",\n } & slow_props:\n for prop in slow_props:\n if prop == \"node0_type\":\n # TODO properly handle when node weights are supported\n ret[prop] = \"set\"\n elif prop == \"node1_type\":\n # TODO properly handle when node weights are supported\n ret[prop] = \"set\"\n elif prop == \"edge_has_negative_weights\":\n ret[prop] = weights.lt(0).any()\n\n return ret\n\n @classmethod\n def assert_equal(\n cls,\n obj1,\n obj2,\n aprops1,\n aprops2,\n cprops1,\n cprops2,\n *,\n rel_tol=1e-9,\n abs_tol=0.0,\n ):\n assert aprops1 == aprops2, f\"property mismatch: {aprops1} != {aprops2}\"\n g1 = obj1.value\n g2 = obj2.value\n canonicalize_nodes = lambda series: series.set_index(series)\n obj1_nodes = [canonicalize_nodes(nodes) for nodes in obj1.value.sets()]\n obj2_nodes = [canonicalize_nodes(nodes) for nodes in obj2.value.sets()]\n # Compare\n assert len(obj1_nodes[0]) == len(\n obj2_nodes[0]\n ), f\"{len(obj1_nodes[0])} == {len(obj2_nodes[0])}\"\n assert len(obj1_nodes[1]) == len(\n obj2_nodes[1]\n ), f\"{len(obj1_nodes[1])} == {len(obj2_nodes[1])}\"\n assert all(\n obj1_nodes[0] == obj2_nodes[0]\n ), f\"{obj1_nodes[0]} != {obj2_nodes[0]}\"\n assert all(\n obj1_nodes[1] == obj2_nodes[1]\n ), f\"{obj1_nodes[1]} != {obj2_nodes[1]}\"\n assert (\n g1.number_of_edges() == 
g2.number_of_edges()\n ), f\"{g1.number_of_edges()} != {g2.number_of_edges()}\"\n if g1.edgelist:\n g1_edge_list = g1.view_edge_list()\n g2_edge_list = g2.view_edge_list()\n assert len(g1_edge_list) == len(\n g2_edge_list\n ), f\"g1 and g2 have a different number of edges\"\n assert len(g1_edge_list.columns) == len(\n g2_edge_list.columns\n ), \"one of g1 or g2 is weighted while the other is not\"\n columns = g1_edge_list.columns\n # TODO the below takes an additional possibly unneeded O(n) memory\n assert g1_edge_list.set_index(columns) == g2_edge_list.set_index(\n columns\n ), \"g1 and g2 have different edges\"\n\n else:\n for i, g1_series in enumerate(g1.view_adj_list()):\n g2_series = g1.view_adj_list()[i]\n assert (g1_series is None) == (\n g2_series is None\n ), \"one of g1 or g2 is weighted while the other is not\"\n if g1_series is not None:\n if np.issubdtype(g1_series.dtype.type, np.float):\n assert cupy.isclose(g1_series == g2_series)\n else:\n assert all(\n g1_series == g2_series\n ), \"g1 and g2 have different edges\"\n\n if aprops1.get(\"node0_type\") == \"map\":\n pass # TODO handle this when node weights are supported\n\n if aprops1.get(\"node1_type\") == \"map\":\n pass # TODO handle this when node weights are supported\n"
] | [
[
"numpy.issubdtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BillGatesNephew/Cirq | [
"fda14a5f6c65356dfabf8a5bcd599bf57e542041",
"fda14a5f6c65356dfabf8a5bcd599bf57e542041"
] | [
"cirq/testing/consistent_protocols_test.py",
"cirq/ops/fourier_transform.py"
] | [
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import AbstractSet, Sequence, Union\n\nimport pytest\n\nimport numpy as np\nimport sympy\n\nimport cirq\nfrom cirq._compat import proper_repr\nfrom cirq.type_workarounds import NotImplementedType\n\n\nclass GoodGate(cirq.SingleQubitGate):\n def __init__(\n self,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n ) -> None:\n self.phase_exponent = cirq.canonicalize_half_turns(phase_exponent)\n self.exponent = exponent\n\n def _has_unitary_(self):\n return not cirq.is_parameterized(self)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n if cirq.is_parameterized(self):\n return NotImplemented\n z = cirq.unitary(cirq.Z ** self.phase_exponent)\n x = cirq.unitary(cirq.X ** self.exponent)\n return np.dot(np.dot(z, x), np.conj(z))\n\n def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs) -> Union[np.ndarray, NotImplementedType]:\n if self.exponent != 1 or cirq.is_parameterized(self):\n return NotImplemented\n\n zero = cirq.slice_for_qubits_equal_to(args.axes, 0)\n one = cirq.slice_for_qubits_equal_to(args.axes, 1)\n c = np.exp(1j * np.pi * self.phase_exponent)\n\n args.target_tensor[one] *= c.conj()\n args.available_buffer[zero] = args.target_tensor[one]\n args.available_buffer[one] = args.target_tensor[zero]\n args.available_buffer[one] *= c\n\n return args.available_buffer\n\n def _decompose_(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q) ** self.phase_exponent\n x = cirq.X(q) ** self.exponent\n if cirq.is_parameterized(z):\n # coverage: ignore\n return NotImplemented\n return z ** -1, x, z\n\n def _pauli_expansion_(self) -> cirq.LinearDict[str]:\n if self._is_parameterized_():\n return NotImplemented\n phase_angle = np.pi * self.phase_exponent / 2\n angle = np.pi * self.exponent / 2\n global_phase = np.exp(1j * angle)\n return cirq.LinearDict(\n {\n 'I': global_phase * np.cos(angle),\n 'X': -1j * global_phase * np.sin(angle) * np.cos(2 * phase_angle),\n 'Y': -1j * global_phase * np.sin(angle) * np.sin(2 * phase_angle),\n }\n )\n\n def _phase_by_(self, phase_turns, qubit_index):\n assert qubit_index == 0\n return GoodGate(\n exponent=self.exponent, phase_exponent=self.phase_exponent + phase_turns * 2\n )\n\n def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'GoodGate':\n new_exponent = cirq.mul(self.exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n # coverage: ignore\n return NotImplemented\n return GoodGate(phase_exponent=self.phase_exponent, exponent=new_exponent)\n\n def __repr__(self):\n args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]\n if self.exponent != 1:\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n return 'GoodGate({})'.format(', '.join(args))\n\n def _is_parameterized_(self) -> bool:\n return cirq.is_parameterized(self.exponent) or cirq.is_parameterized(self.phase_exponent)\n\n def 
_parameter_names_(self) -> AbstractSet[str]:\n return cirq.parameter_names(self.exponent) | cirq.parameter_names(self.phase_exponent)\n\n def _resolve_parameters_(self, param_resolver) -> 'GoodGate':\n return GoodGate(\n phase_exponent=param_resolver.value_of(self.phase_exponent),\n exponent=param_resolver.value_of(self.exponent),\n )\n\n def _identity_tuple(self):\n return (GoodGate, self.phase_exponent, self.exponent)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n # coverage: ignore\n return NotImplemented\n return self._identity_tuple() == other._identity_tuple()\n\n\nclass BadGateIsParameterized(GoodGate):\n def _is_parameterized_(self) -> bool:\n return not super()._is_parameterized_()\n\n\nclass BadGateParameterNames(GoodGate):\n def _parameter_names_(self) -> AbstractSet[str]:\n return super()._parameter_names_() | {'not_a_param'}\n\n\nclass BadGateApplyUnitaryToTensor(GoodGate):\n def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs) -> Union[np.ndarray, NotImplementedType]:\n if self.exponent != 1 or cirq.is_parameterized(self):\n # coverage: ignore\n return NotImplemented\n\n zero = cirq.slice_for_qubits_equal_to(args.axes, 0)\n one = cirq.slice_for_qubits_equal_to(args.axes, 1)\n c = np.exp(1j * np.pi * self.phase_exponent)\n\n args.target_tensor[one] *= c\n args.available_buffer[zero] = args.target_tensor[one]\n args.available_buffer[one] = args.target_tensor[zero]\n args.available_buffer[one] *= c\n\n return args.available_buffer\n\n\nclass BadGateDecompose(GoodGate):\n def _decompose_(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q) ** self.phase_exponent\n x = cirq.X(q) ** (2 * self.exponent)\n if cirq.is_parameterized(z):\n # coverage: ignore\n return NotImplemented\n return z ** -1, x, z\n\n\nclass BadGatePauliExpansion(GoodGate):\n def _pauli_expansion_(self) -> cirq.LinearDict[str]:\n return cirq.LinearDict({'I': 10})\n\n\nclass BadGatePhaseBy(GoodGate):\n def _phase_by_(self, phase_turns, qubit_index):\n assert qubit_index == 0\n return BadGatePhaseBy(\n exponent=self.exponent, phase_exponent=self.phase_exponent + phase_turns * 4\n )\n\n\nclass BadGateRepr(GoodGate):\n def __repr__(self):\n args = ['phase_exponent={!r}'.format(2 * self.phase_exponent)]\n if self.exponent != 1:\n # coverage: ignore\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n return 'BadGateRepr({})'.format(', '.join(args))\n\n\nclass GoodEigenGate(cirq.EigenGate, cirq.SingleQubitGate):\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0])),\n (1, np.diag([0, 1])),\n ]\n\n def __repr__(self):\n return 'GoodEigenGate' '(exponent={}, global_shift={!r})'.format(\n proper_repr(self._exponent), self._global_shift\n )\n\n\nclass BadEigenGate(GoodEigenGate):\n def _eigen_shifts(self):\n return [0, 0]\n\n def __repr__(self):\n return 'BadEigenGate' '(exponent={}, global_shift={!r})'.format(\n proper_repr(self._exponent), self._global_shift\n )\n\n\ndef test_assert_implements_consistent_protocols():\n cirq.testing.assert_implements_consistent_protocols(\n GoodGate(phase_exponent=0.0), global_vals={'GoodGate': GoodGate}\n )\n\n cirq.testing.assert_implements_consistent_protocols(\n GoodGate(phase_exponent=0.25), global_vals={'GoodGate': GoodGate}\n )\n\n cirq.testing.assert_implements_consistent_protocols(\n GoodGate(phase_exponent=sympy.Symbol('t')), global_vals={'GoodGate': GoodGate}\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n 
BadGateIsParameterized(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGateParameterNames(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGateApplyUnitaryToTensor(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(BadGateDecompose(phase_exponent=0.25))\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGatePauliExpansion(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(BadGatePhaseBy(phase_exponent=0.25))\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGateRepr(phase_exponent=0.25), global_vals={'BadGateRepr': BadGateRepr}\n )\n\n\ndef test_assert_eigengate_implements_consistent_protocols():\n cirq.testing.assert_eigengate_implements_consistent_protocols(\n GoodEigenGate, global_vals={'GoodEigenGate': GoodEigenGate}\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_eigengate_implements_consistent_protocols(\n BadEigenGate, global_vals={'BadEigenGate': BadEigenGate}\n )\n\n\ndef test_assert_commutes_magic_method_consistent_with_unitaries():\n gate_op = cirq.CNOT(*cirq.LineQubit.range(2))\n with pytest.raises(TypeError):\n cirq.testing.assert_commutes_magic_method_consistent_with_unitaries(gate_op)\n\n exponents = [sympy.Symbol('s'), 0.1, 0.2]\n gates = [cirq.ZPowGate(exponent=e) for e in exponents]\n cirq.testing.assert_commutes_magic_method_consistent_with_unitaries(*gates)\n\n cirq.testing.assert_commutes_magic_method_consistent_with_unitaries(cirq.Z, cirq.CNOT)\n",
"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import AbstractSet, Any, Dict, Union\n\nimport numpy as np\nimport sympy\n\nimport cirq\nfrom cirq import value, _compat\nfrom cirq.ops import raw_types\nfrom cirq._compat import deprecated\n\n\[email protected]_equality\nclass QuantumFourierTransformGate(raw_types.Gate):\n \"\"\"Switches from the computational basis to the frequency basis.\"\"\"\n\n def __init__(self, num_qubits: int, *, without_reverse: bool = False):\n \"\"\"\n Args:\n num_qubits: The number of qubits the gate applies to.\n without_reverse: Whether or not to include the swaps at the end\n of the circuit decomposition that reverse the order of the\n qubits. These are technically necessary in order to perform the\n correct effect, but can almost always be optimized away by just\n performing later operations on different qubits.\n \"\"\"\n self._num_qubits = num_qubits\n self._without_reverse = without_reverse\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {\n 'cirq_type': self.__class__.__name__,\n 'num_qubits': self._num_qubits,\n 'without_reverse': self._without_reverse,\n }\n\n def _value_equality_values_(self):\n return self._num_qubits, self._without_reverse\n\n def num_qubits(self) -> int:\n return self._num_qubits\n\n def _decompose_(self, qubits):\n if len(qubits) == 0:\n return\n yield cirq.H(qubits[0])\n for i in range(1, len(qubits)):\n yield PhaseGradientGate(num_qubits=i, exponent=0.5).on(*qubits[:i][::-1]).controlled_by(\n qubits[i]\n )\n yield cirq.H(qubits[i])\n if not self._without_reverse:\n for i in range(len(qubits) // 2):\n yield cirq.SWAP(qubits[i], qubits[-i - 1])\n\n def _has_unitary_(self):\n return True\n\n def __str__(self) -> str:\n return 'qft[norev]' if self._without_reverse else 'qft'\n\n def __repr__(self) -> str:\n return (\n 'cirq.QuantumFourierTransformGate('\n f'num_qubits={self._num_qubits!r}, '\n f'without_reverse={self._without_reverse!r})'\n )\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n return cirq.CircuitDiagramInfo(\n wire_symbols=(str(self),) + tuple(f'#{k+1}' for k in range(1, self._num_qubits)),\n exponent_qubit_index=0,\n )\n\n\[email protected]_equality\nclass PhaseGradientGate(raw_types.Gate):\n \"\"\"Phases each state |k⟩ out of n by e^(2*pi*i*k/n*exponent).\"\"\"\n\n def __init__(self, *, num_qubits: int, exponent: Union[float, sympy.Basic]):\n self._num_qubits = num_qubits\n self.exponent = exponent\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {\n 'cirq_type': self.__class__.__name__,\n 'num_qubits': self._num_qubits,\n 'exponent': self.exponent,\n }\n\n def _value_equality_values_(self):\n return self._num_qubits, self.exponent\n\n def num_qubits(self) -> int:\n return self._num_qubits\n\n def _decompose_(self, qubits):\n for i, q in enumerate(qubits):\n yield cirq.Z(q) ** (self.exponent / 2 ** i)\n\n def _apply_unitary_(self, args: 'cirq.ApplyUnitaryArgs'):\n if isinstance(self.exponent, 
sympy.Basic):\n return NotImplemented\n\n n = int(np.product([args.target_tensor.shape[k] for k in args.axes]))\n for i in range(n):\n p = 1j ** (4 * i / n * self.exponent)\n args.target_tensor[args.subspace_index(big_endian_bits_int=i)] *= p\n\n return args.target_tensor\n\n def __pow__(self, power):\n new_exponent = cirq.mul(self.exponent, power, NotImplemented)\n if new_exponent is NotImplemented:\n # coverage: ignore\n return NotImplemented\n return PhaseGradientGate(num_qubits=self._num_qubits, exponent=new_exponent)\n\n def _unitary_(self):\n if isinstance(self.exponent, sympy.Basic):\n return NotImplemented\n\n size = 1 << self._num_qubits\n return np.diag([1j ** (4 * i / size * self.exponent) for i in range(size)])\n\n def _has_unitary_(self) -> bool:\n return not cirq.is_parameterized(self)\n\n def _is_parameterized_(self) -> bool:\n return cirq.is_parameterized(self.exponent)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return cirq.parameter_names(self.exponent)\n\n def _resolve_parameters_(self, resolver):\n new_exponent = cirq.resolve_parameters(self.exponent, resolver)\n if new_exponent is self.exponent:\n return self\n return PhaseGradientGate(num_qubits=self._num_qubits, exponent=new_exponent)\n\n def __str__(self) -> str:\n return f'Grad[{self._num_qubits}]' + (f'^{self.exponent}' if self.exponent != 1 else '')\n\n def __repr__(self) -> str:\n return (\n 'cirq.PhaseGradientGate('\n f'num_qubits={self._num_qubits!r}, '\n f'exponent={_compat.proper_repr(self.exponent)})'\n )\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n return cirq.CircuitDiagramInfo(\n wire_symbols=('Grad',) + tuple(f'#{k+1}' for k in range(1, self._num_qubits)),\n exponent=self.exponent,\n exponent_qubit_index=0,\n )\n\n\ndef qft(\n *qubits: 'cirq.Qid', without_reverse: bool = False, inverse: bool = False\n) -> 'cirq.Operation':\n \"\"\"The quantum Fourier transform.\n\n Transforms a qubit register from the computational basis to the frequency\n basis.\n\n The inverse quantum Fourier transform is `cirq.qft(*qubits)**-1` or\n equivalently `cirq.inverse(cirq.qft(*qubits))`.\n\n Args:\n qubits: The qubits to apply the qft to.\n without_reverse: When set, swap gates at the end of the qft are omitted.\n This reverses the qubit order relative to the standard qft effect,\n but makes the gate cheaper to apply.\n inverse: If set, the inverse qft is performed instead of the qft.\n Equivalent to calling `cirq.inverse` on the result, or raising it\n to the -1.\n\n Returns:\n A `cirq.Operation` applying the qft to the given qubits.\n \"\"\"\n result = QuantumFourierTransformGate(len(qubits), without_reverse=without_reverse).on(*qubits)\n if inverse:\n result = cirq.inverse(result)\n return result\n\n\n@deprecated(deadline='v0.10.0', fix='Use cirq.qft instead.')\ndef QFT(*args, **kwargs):\n return qft(*args, **kwargs)\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.conj",
"numpy.cos",
"numpy.sin",
"numpy.exp"
],
[
"numpy.product"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fubel/vision | [
"6845355fd80a48ca7ec80c06aa2d97d50f0b077d"
] | [
"torchvision/models/detection/keypoint_rcnn.py"
] | [
"import torch\nfrom torch import nn\n\nfrom torchvision.ops import MultiScaleRoIAlign\n\nfrom ._utils import overwrite_eps\nfrom ..utils import load_state_dict_from_url\n\nfrom .faster_rcnn import FasterRCNN\nfrom .backbone_utils import resnet_fpn_backbone, _validate_trainable_layers\n\n\n__all__ = [\n \"KeypointRCNN\", \"keypointrcnn_resnet50_fpn\"\n]\n\n\nclass KeypointRCNN(FasterRCNN):\n \"\"\"\n Implements Keypoint R-CNN.\n\n The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each\n image, and should be in 0-1 range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n\n - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x\n between 0 and W and values of y between 0 and H\n - labels (Int64Tensor[N]): the class label for each ground-truth box\n - keypoints (FloatTensor[N, K, 3]): the K keypoints location for each of the N instances, in the\n format [x, y, visibility], where visibility=0 means that the keypoint is not visible.\n\n The model returns a Dict[Tensor] during training, containing the classification and regression\n losses for both the RPN and the R-CNN, and the keypoint loss.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as\n follows:\n\n - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x\n between 0 and W and values of y between 0 and H\n - labels (Int64Tensor[N]): the predicted labels for each image\n - scores (Tensor[N]): the scores or each prediction\n - keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.\n\n Args:\n backbone (nn.Module): the network used to compute the features for the model.\n It should contain a out_channels attribute, which indicates the number of output\n channels that each feature map has (and it should be the same for all feature maps).\n The backbone should return a single Tensor or and OrderedDict[Tensor].\n num_classes (int): number of output classes of the model (including the background).\n If box_predictor is specified, num_classes should be None.\n min_size (int): minimum size of the image to be rescaled before feeding it to the backbone\n max_size (int): maximum size of the image to be rescaled before feeding it to the backbone\n image_mean (Tuple[float, float, float]): mean values used for input normalization.\n They are generally the mean values of the dataset on which the backbone has been trained\n on\n image_std (Tuple[float, float, float]): std values used for input normalization.\n They are generally the std values of the dataset on which the backbone has been trained on\n rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature\n maps.\n rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN\n rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training\n rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing\n rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training\n rpn_post_nms_top_n_test (int): number of proposals to keep after applying 
NMS during testing\n rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals\n rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be\n considered as positive during training of the RPN.\n rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be\n considered as negative during training of the RPN.\n rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN\n for computing the loss\n rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training\n of the RPN\n rpn_score_thresh (float): during inference, only return proposals with a classification score\n greater than rpn_score_thresh\n box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding boxes\n box_head (nn.Module): module that takes the cropped feature maps as input\n box_predictor (nn.Module): module that takes the output of box_head and returns the\n classification logits and box regression deltas.\n box_score_thresh (float): during inference, only return proposals with a classification score\n greater than box_score_thresh\n box_nms_thresh (float): NMS threshold for the prediction head. Used during inference\n box_detections_per_img (int): maximum number of detections per image, for all classes.\n box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be\n considered as positive during training of the classification head\n box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be\n considered as negative during training of the classification head\n box_batch_size_per_image (int): number of proposals that are sampled during training of the\n classification head\n box_positive_fraction (float): proportion of positive proposals in a mini-batch during training\n of the classification head\n bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the\n bounding boxes\n keypoint_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding boxes, which will be used for the keypoint head.\n keypoint_head (nn.Module): module that takes the cropped feature maps as input\n keypoint_predictor (nn.Module): module that takes the output of the keypoint_head and returns the\n heatmap logits\n\n Example::\n\n >>> import torch\n >>> import torchvision\n >>> from torchvision.models.detection import KeypointRCNN\n >>> from torchvision.models.detection.anchor_utils import AnchorGenerator\n >>>\n >>> # load a pre-trained model for classification and return\n >>> # only the features\n >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features\n >>> # KeypointRCNN needs to know the number of\n >>> # output channels in a backbone. For mobilenet_v2, it's 1280\n >>> # so we need to add it here\n >>> backbone.out_channels = 1280\n >>>\n >>> # let's make the RPN generate 5 x 3 anchors per spatial\n >>> # location, with 5 different sizes and 3 different aspect\n >>> # ratios. 
We have a Tuple[Tuple[int]] because each feature\n >>> # map could potentially have different sizes and\n >>> # aspect ratios\n >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),\n >>> aspect_ratios=((0.5, 1.0, 2.0),))\n >>>\n >>> # let's define which feature maps we will\n >>> # use to perform the region of interest cropping, as well as\n >>> # the size of the crop after rescaling.\n >>> # if your backbone returns a Tensor, featmap_names is expected to\n >>> # be ['0']. More generally, the backbone should return an\n >>> # OrderedDict[Tensor], and in featmap_names you can choose which\n >>> # feature maps to use.\n >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],\n >>> output_size=7,\n >>> sampling_ratio=2)\n >>>\n >>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],\n >>> output_size=14,\n >>> sampling_ratio=2)\n >>> # put the pieces together inside a KeypointRCNN model\n >>> model = KeypointRCNN(backbone,\n >>> num_classes=2,\n >>> rpn_anchor_generator=anchor_generator,\n >>> box_roi_pool=roi_pooler,\n >>> keypoint_roi_pool=keypoint_roi_pooler)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n \"\"\"\n def __init__(self, backbone, num_classes=None,\n # transform parameters\n min_size=None, max_size=1333,\n image_mean=None, image_std=None,\n # RPN parameters\n rpn_anchor_generator=None, rpn_head=None,\n rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,\n rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,\n rpn_nms_thresh=0.7,\n rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,\n rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,\n rpn_score_thresh=0.0,\n # Box parameters\n box_roi_pool=None, box_head=None, box_predictor=None,\n box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,\n box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,\n box_batch_size_per_image=512, box_positive_fraction=0.25,\n bbox_reg_weights=None,\n # keypoint parameters\n keypoint_roi_pool=None, keypoint_head=None, keypoint_predictor=None,\n num_keypoints=17):\n\n assert isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None)))\n if min_size is None:\n min_size = (640, 672, 704, 736, 768, 800)\n\n if num_classes is not None:\n if keypoint_predictor is not None:\n raise ValueError(\"num_classes should be None when keypoint_predictor is specified\")\n\n out_channels = backbone.out_channels\n\n if keypoint_roi_pool is None:\n keypoint_roi_pool = MultiScaleRoIAlign(\n featmap_names=['0', '1', '2', '3'],\n output_size=14,\n sampling_ratio=2)\n\n if keypoint_head is None:\n keypoint_layers = tuple(512 for _ in range(8))\n keypoint_head = KeypointRCNNHeads(out_channels, keypoint_layers)\n\n if keypoint_predictor is None:\n keypoint_dim_reduced = 512 # == keypoint_layers[-1]\n keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints)\n\n super(KeypointRCNN, self).__init__(\n backbone, num_classes,\n # transform parameters\n min_size, max_size,\n image_mean, image_std,\n # RPN-specific parameters\n rpn_anchor_generator, rpn_head,\n rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test,\n rpn_post_nms_top_n_train, rpn_post_nms_top_n_test,\n rpn_nms_thresh,\n rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n rpn_batch_size_per_image, rpn_positive_fraction,\n rpn_score_thresh,\n # Box parameters\n box_roi_pool, box_head, box_predictor,\n box_score_thresh, box_nms_thresh, box_detections_per_img,\n box_fg_iou_thresh, 
box_bg_iou_thresh,\n box_batch_size_per_image, box_positive_fraction,\n bbox_reg_weights)\n\n self.roi_heads.keypoint_roi_pool = keypoint_roi_pool\n self.roi_heads.keypoint_head = keypoint_head\n self.roi_heads.keypoint_predictor = keypoint_predictor\n\n\nclass KeypointRCNNHeads(nn.Sequential):\n def __init__(self, in_channels, layers):\n d = []\n next_feature = in_channels\n for out_channels in layers:\n d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))\n d.append(nn.ReLU(inplace=True))\n next_feature = out_channels\n super(KeypointRCNNHeads, self).__init__(*d)\n for m in self.children():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n nn.init.constant_(m.bias, 0)\n\n\nclass KeypointRCNNPredictor(nn.Module):\n def __init__(self, in_channels, num_keypoints):\n super(KeypointRCNNPredictor, self).__init__()\n input_features = in_channels\n deconv_kernel = 4\n self.kps_score_lowres = nn.ConvTranspose2d(\n input_features,\n num_keypoints,\n deconv_kernel,\n stride=2,\n padding=deconv_kernel // 2 - 1,\n )\n nn.init.kaiming_normal_(\n self.kps_score_lowres.weight, mode=\"fan_out\", nonlinearity=\"relu\"\n )\n nn.init.constant_(self.kps_score_lowres.bias, 0)\n self.up_scale = 2\n self.out_channels = num_keypoints\n\n def forward(self, x):\n x = self.kps_score_lowres(x)\n return torch.nn.functional.interpolate(\n x, scale_factor=float(self.up_scale), mode=\"bilinear\", align_corners=False, recompute_scale_factor=False\n )\n\n\nmodel_urls = {\n # legacy model for BC reasons, see https://github.com/pytorch/vision/issues/1606\n 'keypointrcnn_resnet50_fpn_coco_legacy':\n 'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth',\n 'keypointrcnn_resnet50_fpn_coco':\n 'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth',\n}\n\n\ndef keypointrcnn_resnet50_fpn(pretrained=False, progress=True,\n num_classes=2, num_keypoints=17,\n pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):\n \"\"\"\n Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.\n\n The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each\n image, and should be in ``0-1`` range. Different images can have different sizes.\n\n The behavior of the model changes depending on whether it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as targets (a list of dictionaries),\n containing:\n\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``\n between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``\n - labels (``Int64Tensor[N]``): the class label for each ground-truth box\n - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoint locations for each of the ``N`` instances, in the\n format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible.\n\n The model returns a ``Dict[Tensor]`` during training, containing the classification and regression\n losses for both the RPN and the R-CNN, and the keypoint loss.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a ``List[Dict[Tensor]]``, one for each input image. 
The fields of the ``Dict`` are as\n follows:\n\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``\n between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``\n - labels (``Int64Tensor[N]``): the predicted labels for each image\n - scores (``Tensor[N]``): the scores of each prediction\n - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format.\n\n Keypoint R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.\n\n Example::\n\n >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n >>>\n >>> # optionally, if you want to export the model to ONNX:\n >>> torch.onnx.export(model, x, \"keypoint_rcnn.onnx\", opset_version = 11)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n num_keypoints (int): number of keypoints, default 17\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.\n \"\"\"\n trainable_backbone_layers = _validate_trainable_layers(\n pretrained or pretrained_backbone, trainable_backbone_layers, 5, 3)\n\n if pretrained:\n # no need to download the backbone if pretrained is set\n pretrained_backbone = False\n backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers)\n model = KeypointRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)\n if pretrained:\n key = 'keypointrcnn_resnet50_fpn_coco'\n if pretrained == 'legacy':\n key += '_legacy'\n state_dict = load_state_dict_from_url(model_urls[key],\n progress=progress)\n model.load_state_dict(state_dict)\n overwrite_eps(model, 0.0)\n return model\n"
] | [
[
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |