repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
kklkodi/empirikushazi | [
"8780a9c4e3e4603e636da2fccbe6f2d8a5afad4b"
] | [
"hw_demo_estimation/data_manipulation.py"
] | [
"import pandas as pd\n\n\ndef compute_directed_edges(edges_w_features):\n \"\"\"\n Converts the undirected edge information into a directed format, by duplicating each edge and flipping the node\n attributes to make it point in the opposite direction. This makes computation from the viewpoint of each node\n simpler.\n :param edges_w_features:\n :return:\n \"\"\"\n opposite = edges_w_features.copy()\n # flipping the attributes of the endpoints\n opposite[[\"smaller_id\", \"greater_id\", \"AGE_x\", \"AGE_y\", \"gender_x\", \"gender_y\"]] = \\\n opposite[[\"greater_id\", \"smaller_id\", \"AGE_y\", \"AGE_x\", \"gender_y\", \"gender_x\"]]\n directed = pd.concat([edges_w_features, opposite], ignore_index=True)\n return directed\n\n\ndef add_nbrs_by_gender(nodes, directed_edges):\n \"\"\"\n Adds one column for each gender to the nodes table, which contain the number of neighbors of the given gender\n for each ndoe. Unknown-gender neighbors are not counted into either gender.\n :param nodes: Node feature data as DataFrame\n :param directed_edges: Edge data as DataFrame\n :return: the nodes DataFrame with the columns 0_nbrs and 1_nbrs added to it\n \"\"\"\n w_nbrs = nodes.copy()\n w_nbrs = w_nbrs.set_index(\"user_id\")\n nbrs = compute_nbrs_with_gender(directed_edges, 0.0)\n w_nbrs = w_nbrs.merge(nbrs, on=\"user_id\")\n nbrs = compute_nbrs_with_gender(directed_edges, 1.0)\n w_nbrs = w_nbrs.merge(nbrs, on=\"user_id\")\n return w_nbrs\n\n\ndef compute_nbrs_with_gender(directed_edges, gender):\n \"\"\"\n Counts the number of neighbors with the given gender for each node.\n :param directed_edges: directed edge information as a DataFrame\n :param gender: which gender the counted neighbors should have\n :return: A table containing a single column with the number of filtered neighbors.\n \"\"\"\n nbrs = directed_edges[directed_edges[\"gender_y\"] == gender].groupby(\"smaller_id\").count()[\"greater_id\"].to_frame()\n nbrs = nbrs.rename_axis(\"user_id\").rename(columns={\"greater_id\": (\"%d_nbrs\" % gender)})\n return nbrs"
] | [
[
"pandas.concat"
]
] |
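The edge-flipping helper in the first sample can be sanity-checked on a hand-built table. A minimal sketch, assuming the package layout shown above is importable and using invented values:

```python
import pandas as pd

# Assumes the module path from the sample above is on the import path.
from hw_demo_estimation.data_manipulation import compute_directed_edges

# Toy undirected edge table with the column layout used in the sample
# (all values invented for illustration).
edges_w_features = pd.DataFrame({
    "smaller_id": [1, 1],
    "greater_id": [2, 3],
    "AGE_x":      [25, 25],
    "AGE_y":      [30, 40],
    "gender_x":   [0.0, 0.0],
    "gender_y":   [1.0, 0.0],
})

directed = compute_directed_edges(edges_w_features)
# Each undirected edge now appears once per direction, with the endpoint
# attributes swapped on the flipped copy.
print(len(directed))  # 4
```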
Jianningli/MIA | [
"bf9b90b9972cd43f885f676c926a984bf38b743a"
] | [
"source/EncoderDecoder_boundaryloss.py"
] | [
"from __future__ import print_function, division\r\n\r\nimport os\r\nimport numpy as np\r\nfrom keras.layers import BatchNormalization, Activation\r\nfrom keras.layers import Input, Dense, Flatten, Dropout\r\nfrom keras.layers.advanced_activations import LeakyReLU\r\nfrom keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\r\nfrom keras.models import Sequential, Model\r\nfrom keras.models import load_model\r\nfrom keras.optimizers import Adam\r\nfrom sklearn.metrics import hamming_loss\r\nfrom utils import mkdirs\r\nfrom glob import glob\r\nimport random\r\nimport nrrd\r\nfrom scipy.ndimage import zoom\r\nfrom keras import backend as K\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom scipy.ndimage import distance_transform_edt as distance\r\n\r\n\r\n\r\n''' Boundary loss function adapted from https://github.com/LIVIAETS/boundary-loss.\r\n Credit goes to the original authors\r\n'''\r\n\r\n\r\n\r\ndef surface_loss_keras(y_true, y_pred):\r\n multipled = y_pred * y_true\r\n return K.mean(multipled)\r\n\r\n\r\n\r\ndef build_generator():\r\n\r\n model = Sequential()\r\n\r\n # Encoder\r\n model.add(Conv3D(32, kernel_size=5, strides=2, input_shape=vol_shape, padding=\"same\"))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(BatchNormalization(momentum=0.8))\r\n model.add(Conv3D(64, kernel_size=5, strides=2, padding=\"same\"))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(BatchNormalization(momentum=0.8))\r\n model.add(Conv3D(128, kernel_size=5, strides=2, padding=\"same\"))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(BatchNormalization(momentum=0.8))\r\n model.add(Conv3D(512, kernel_size=1, strides=2, padding=\"same\"))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(Dropout(0.5))\r\n\r\n # Decoder\r\n model.add(UpSampling3D())\r\n model.add(Deconv3D(256, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('relu'))\r\n model.add(BatchNormalization(momentum=0.8))\r\n model.add(Deconv3D(128, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('relu'))\r\n model.add(BatchNormalization(momentum=0.8))\r\n\r\n model.add(UpSampling3D())\r\n model.add(Deconv3D(64, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('relu'))\r\n model.add(BatchNormalization(momentum=0.8))\r\n\r\n model.add(UpSampling3D())\r\n model.add(Deconv3D(channels, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('tanh'))\r\n model.add(BatchNormalization(momentum=0.8))\r\n\r\n model.add(UpSampling3D())\r\n model.add(Deconv3D(channels, kernel_size=5, padding=\"same\"))\r\n model.add(Activation('tanh'))\r\n\r\n\r\n model.summary()\r\n\r\n\r\n return model\r\n\r\n\r\n\r\ndef resizing(label):\r\n a,b,c=label.shape\r\n resized_data = zoom(label,(128/a,128/b,64/c),order=2, mode='constant') \r\n return resized_data\r\n\r\ndef resizing_up(label):\r\n resized_data = zoom(label,(4,4,2),order=2, mode='constant') \r\n return resized_data\r\n\r\n\r\ndef save_model(MODEL_DIR):\r\n def save(model, model_name):\r\n model_path = os.path.join(MODEL_DIR, \"%s.h5\" % model_name)\r\n model.save(model_path)\r\n save(generator, \"boundaryloss\")\r\n\r\n\r\ndef calc_dist_map(seg):\r\n res = np.zeros_like(seg)\r\n posmask = seg.astype(np.bool)\r\n\r\n if posmask.any():\r\n negmask = ~posmask\r\n res = distance(negmask) * negmask - (distance(posmask) - 1) * posmask\r\n\r\n return res\r\n\r\n\r\ndef train(generator,MODEL_DIR, epochs, batch_size=16, sample_interval=50):\r\n\r\n\r\n ipt=np.load('ipt_85_128_128_64.npy')\r\n gt=np.load('gt_denoised.npy')\r\n\r\n for epoch in range(epochs):\r\n 
print(epoch)\r\n idx = np.random.randint(0, ipt.shape[0], batch_size)\r\n masked_vols=ipt[idx]\r\n missing_parts=gt[idx]\r\n \r\n gt_dist_transform=np.array([calc_dist_map(y) for y in missing_parts]).astype(np.float32)\r\n \r\n print('masked_vols:',masked_vols.shape)\r\n print('missing_parts:',missing_parts.shape)\r\n print('gt_dist_transform:',gt_dist_transform.shape)\r\n # Train Generator\r\n g_loss = generator.train_on_batch(masked_vols, gt_dist_transform)\r\n print(g_loss)\r\n if epoch % sample_interval == 0:\r\n save_model(MODEL_DIR)\r\n\r\n\r\n\r\n\r\ndef evaluate(testdir,test_results_dir):\r\n print('evaluating the model...')\r\n\r\n test_list=glob('{}/*.nrrd'.format(testdir))\r\n for i in range(len(test_list)):\r\n data,h=nrrd.read(test_list[i])\r\n data=data[:,:,data.shape[2]-128:data.shape[2]]\r\n data=resizing(data)\r\n data=np.expand_dims(np.expand_dims(data,axis=0),axis=4)\r\n gen_missing = generator.predict(data)\r\n\r\n gen_missing=(gen_missing>0)\r\n gen_missing=gen_missing+1-1\r\n gen_missing_up=resizing_up(gen_missing[0,:,:,:,0])\r\n filename1=test_results_dir+test_list[i][-10:-5]+'.nrrd'\r\n nrrd.write(filename1,gen_missing[0,:,:,:,0],h)\r\n filename2=test_results_dir+'resized/'+test_list[i][-10:-5]+'.nrrd'\r\n nrrd.write(filename2,gen_missing_up,h)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n vol_rows = 128\r\n vol_cols = 128\r\n vol_height = 64\r\n mask_height = 128\r\n mask_width = 128\r\n mask_length = 64\r\n channels = 1\r\n num_classes = 2\r\n vol_shape = (vol_rows, vol_cols, vol_height, channels)\r\n missing_shape = (mask_height, mask_width, mask_length, channels)\r\n test_dir=\"../defective_skull_test\"\r\n test_results_dir=\"../results_ae_boundary/\"\r\n MODEL_DIR = '../boundarylosss'\r\n mkdirs(MODEL_DIR)\r\n try:\r\n generator = load_model('../boundaryloss.h5',custom_objects={'surface_loss_keras': surface_loss_keras})\r\n print(\"Loaded checkpoints\")\r\n except:\r\n generator = build_generator()\r\n print(\"No checkpoints found\")\r\n\r\n masked_vol = Input(shape=vol_shape)\r\n optimizer = Adam(0.0002, 0.5)\r\n generator.compile(loss=surface_loss_keras,optimizer=optimizer)\r\n train(generator,MODEL_DIR,epochs=3000, batch_size=4, sample_interval=200)\r\n #evaluate(test_dir,test_results_dir)\r\n\r\n\r\n"
] | [
[
"numpy.zeros_like",
"numpy.load",
"scipy.ndimage.distance_transform_edt",
"scipy.ndimage.zoom",
"numpy.expand_dims",
"numpy.random.randint"
]
] |
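The distance-map helper that feeds the boundary loss in the second sample runs standalone. A minimal sketch, restated with plain `bool` because newer NumPy releases removed the `np.bool` alias used above; the toy mask is invented:

```python
import numpy as np
from scipy.ndimage import distance_transform_edt as distance

def calc_dist_map(seg):
    # Signed distance map: positive outside the mask, zero on its border,
    # negative in its interior (same logic as the sample above).
    res = np.zeros_like(seg)
    posmask = seg.astype(bool)  # plain bool instead of the removed np.bool
    if posmask.any():
        negmask = ~posmask
        res = distance(negmask) * negmask - (distance(posmask) - 1) * posmask
    return res

seg = np.zeros((8, 8), dtype=np.float32)
seg[2:5, 2:5] = 1.0
dist = calc_dist_map(seg)
print(dist.min(), dist.max())  # negative in the mask interior, positive outside
```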
johnhendrick/adventofcode2021 | [
"04d884d65eebc0178ddb79b1ff2a5c088c349e5e"
] | [
"adventcode/day7.py"
] | [
"import numpy as np\nfrom utils import read_file\n\nfile_path = './input/day7.txt'\n\n\ndef parse_file(file_content):\n return [int(x) for x in file_content.split(',')]\n\n\ndef fuel(steps):\n return steps*(steps+1)/2\n\n\ndata = parse_file(read_file(file_path))\n\nnp_data = np.array(data)\nmedian = np.median(np_data)\n\ndistance_sum = np.abs(np_data - median).sum()\nprint(distance_sum)\n\n# part 2\nsimulation = []\npossible_mid = list(range(min(np_data), max(np_data)+1))\n\nfor mid in possible_mid:\n fuel_req = fuel(np.abs(np_data-mid)).sum()\n simulation.append(fuel_req)\n\nprint('part 2 ', min(simulation))\n"
] | [
[
"numpy.array",
"numpy.abs",
"numpy.median"
]
] |
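The day-7 cost functions are easy to verify against the small crab-position list from the puzzle statement (assumed here; the script itself reads ./input/day7.txt):

```python
import numpy as np

# Example positions from the puzzle description (assumed for illustration).
positions = np.array([16, 1, 2, 0, 4, 2, 7, 1, 2, 14])

# Part 1: linear cost is minimised at the median.
part1 = int(np.abs(positions - np.median(positions)).sum())

# Part 2: triangular cost n*(n+1)/2, brute-forced over all candidate midpoints
# (integer division keeps the totals as ints).
def fuel(steps):
    return steps * (steps + 1) // 2

part2 = min(fuel(np.abs(positions - mid)).sum()
            for mid in range(positions.min(), positions.max() + 1))
print(part1, part2)  # 37 168 for this example list
```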
cjluo/money-monkey | [
"b43e3c6df4221d14d78e1f6c8487ec2308286be1"
] | [
"model_presenter.py"
] | [
"import matplotlib\n# Do not use X for plotting\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import DateFormatter\nfrom matplotlib.ticker import FormatStrFormatter\nfrom tempfile import NamedTemporaryFile\n\n\ndef plot_to_file(symbol, timestamp, close, score):\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n ax1.plot(timestamp, close, color='r', marker='.', label=\"close\")\n ax2.plot(timestamp, score, color='b', marker='.', label=\"score\")\n plt.title(\"%s: score %0.2f\" % (symbol, score[-1]))\n\n fig.autofmt_xdate()\n ax1.xaxis.set_major_formatter(DateFormatter(\"%H:%M\"))\n ax1.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))\n\n h1, l1 = ax1.get_legend_handles_labels()\n h2, l2 = ax2.get_legend_handles_labels()\n ax1.legend(h1 + h2, l1 + l2)\n\n jpg_file = NamedTemporaryFile(delete=False, suffix='.jpg')\n jpg_file.close()\n\n fig.set_dpi(100)\n fig.set_size_inches(10, 4)\n fig.set_tight_layout(True)\n\n fig.savefig(jpg_file.name, quality=50)\n plt.close(fig)\n return jpg_file.name\n"
] | [
[
"matplotlib.dates.DateFormatter",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.use"
]
] |
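model_presenter.py passes `quality=50` straight to `savefig`; recent Matplotlib releases deprecated and then dropped that keyword in favour of `pil_kwargs`. A minimal sketch of the newer form, assuming Pillow is installed:

```python
import matplotlib
matplotlib.use('Agg')  # no X, as in the sample above
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [3, 1, 2], color='r', marker='.')

# JPEG quality now travels via pil_kwargs rather than a quality= keyword.
fig.savefig('demo.jpg', pil_kwargs={'quality': 50})
plt.close(fig)
```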
TheRensselaerIDEA/ExplainableAI | [
"ea7a24c660120b61dc3d346ac0a0bc845c5eb0cf"
] | [
"build/lib/explainableAI/metrics/TAUOPTIMAL.py"
] | [
"import numpy as np\ndef optimalTau(probabilities, ylabels):\n \n \"\"\" Finds the Optimal tau based on the F1 score\"\"\"\n \n #STEP 1 SORT PROBABILITIES AND LABELS\n sortedIndexes = np.argsort( probabilities )\n probabilities1 = probabilities[ sortedIndexes ]\n ylabels1 = ylabels[ sortedIndexes ]\n \n #INITIALIZE THRESHOLD TO BE 0\n #SO EVERY POINT IS PREDICTED AS CLASS 1\n \n # initialPrediction = np.ones( probabilities1.shape[0] ) #matrix with all 1's - INITIAL PREDICTION\n \n TP = len( np.where( ylabels1 == 1)[0] ) #AT THE BEGGINING THE TRUE POSITIVES ARE THE SAME \n #AS THE POSITIVE LABELS OF THE DATASET\n \n FN = 0 #AT THE BEGGINING WE HAVE 0 POSITIVE POINTS CLASSIFIED AS NEGATIVE\n #XIAO HERE YOU WILL PUT ylabels == -1\n FP = len( np.where( ylabels1 == -1)[0] )\n \n precision = TP/(TP + FP)\n recall = TP/ (TP + FN)\n \n# print(precision, recall, TP, FN, FP)\n# return\n f1 = ( 2*precision*recall )/( precision + recall ) \n \n threshold = probabilities1.min()-0.1\n prob_F1 = [[threshold, f1]]\n \n for i, probability in enumerate( probabilities1 ):\n \n #print( \" Iteration: {}\".format(i))\n \n \n if ylabels1[i] == 1:\n \n TP -= 1\n FN += 1\n \n if ylabels1[i] == -1: #FOR XIAO HERE -1\n FP -= 1\n \n if (TP + FP == 0):\n \n precision = 0\n \n else:\n precision = TP/(TP + FP)\n \n recall = TP/ (TP + FN)\n \n if (precision + recall) == 0:\n \n f1new = 0\n \n else:\n \n f1new = ( 2*precision*recall )/( precision + recall ) \n \n prob_F1.append( [probability, f1new] ) #thresholds with F1 scores if you want to draw a graph\n \n if f1new >= f1 :\n threshold = probability\n f1 = f1new\n prec = precision\n rec = recall\n \n \n return threshold, f1, np.array(prob_F1), prec, rec\n"
] | [
[
"numpy.array",
"numpy.where",
"numpy.argsort"
]
] |
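`optimalTau` expects a score vector and labels coded as 1 / -1 and returns the chosen threshold together with the full (threshold, F1) sweep. A tiny usage sketch with invented scores, assuming the package layout shown above:

```python
import numpy as np

# Assumes the module path from the sample above is importable.
from explainableAI.metrics.TAUOPTIMAL import optimalTau

probabilities = np.array([0.10, 0.40, 0.35, 0.80, 0.70, 0.20])
ylabels = np.array([-1, -1, 1, 1, 1, -1])  # labels coded as 1 / -1

threshold, f1, prob_f1_curve, precision, recall = optimalTau(probabilities, ylabels)
print(threshold, f1, precision, recall)
# prob_f1_curve holds (threshold, F1) pairs and can be plotted to inspect the sweep.
```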
tranbaohieu/SAFL_pytorch | [
"4d582974e40031fd595c663489f631dff1abbb5a"
] | [
"demo.py"
] | [
"from __future__ import absolute_import\nimport sys\nsys.path.append('./')\nimport timeit\nimport argparse\nimport os\nimport os.path as osp\nimport numpy as np\nimport math\nimport time\nfrom PIL import Image, ImageFile\n\nimport torch\nfrom torch import nn, optim\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom config import get_args\nfrom lib import datasets, evaluation_metrics, models\nfrom lib.models.model_builder import ModelBuilder\nfrom lib.datasets.dataset import LmdbDataset, AlignCollate\nfrom lib.loss import SequenceCrossEntropyLoss\nfrom lib.trainers import Trainer\nfrom lib.evaluators import Evaluator\nfrom lib.utils.logging import Logger, TFLogger\nfrom lib.utils.serialization import load_checkpoint, save_checkpoint\nfrom lib.utils.osutils import make_symlink_if_not_exists\nfrom lib.evaluation_metrics.metrics import get_str_list\nfrom lib.utils.labelmaps import get_vocabulary, labels2strs\n\nglobal_args = get_args(sys.argv[1:])\n\ndef image_process(image_path, imgH=32, imgW=100, keep_ratio=False, min_ratio=1):\n img = Image.open(image_path).convert('RGB')\n\n if keep_ratio:\n w, h = img.size\n ratio = w / float(h)\n imgW = int(np.floor(ratio * imgH))\n imgW = max(imgH * min_ratio, imgW)\n\n img = img.resize((imgW, imgH), Image.BILINEAR)\n img = transforms.ToTensor()(img)\n img.sub_(0.5).div_(0.5)\n\n return img\n\nclass DataInfo(object):\n \"\"\"\n Save the info about the dataset.\n This a code snippet from dataset.py\n \"\"\"\n def __init__(self, voc_type):\n super(DataInfo, self).__init__()\n self.voc_type = voc_type\n\n assert voc_type in ['LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS']\n self.EOS = 'EOS'\n self.PADDING = 'PADDING'\n self.UNKNOWN = 'UNKNOWN'\n self.voc = get_vocabulary(voc_type, EOS=self.EOS, PADDING=self.PADDING, UNKNOWN=self.UNKNOWN)\n self.char2id = dict(zip(self.voc, range(len(self.voc))))\n self.id2char = dict(zip(range(len(self.voc)), self.voc))\n\n self.rec_num_classes = len(self.voc)\n\n\ndef main(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n cudnn.benchmark = True\n torch.backends.cudnn.deterministic = True\n\n args.cuda = args.cuda and torch.cuda.is_available()\n # args.cuda = False\n if args.cuda:\n print('using cuda.')\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n else:\n torch.set_default_tensor_type('torch.FloatTensor')\n \n # Create data loaders\n if args.height is None or args.width is None:\n args.height, args.width = (32, 100)\n\n dataset_info = DataInfo(args.voc_type)\n\n # Create model\n model = ModelBuilder(arch=args.arch, rec_num_classes=dataset_info.rec_num_classes,\n sDim=args.decoder_sdim, attDim=args.attDim, max_len_labels=args.max_len,\n eos=dataset_info.char2id[dataset_info.EOS], STN_ON=args.STN_ON, encoder_block=4, decoder_block=4)\n\n # Load from checkpoint\n if args.resume:\n checkpoint = load_checkpoint(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n\n if args.cuda:\n device = torch.device(\"cuda\")\n model = model.to(device)\n model = nn.DataParallel(model)\n\n #Save model \n torch.save(model, \"model.pth\")\n # Evaluation\n model.eval()\n img = image_process(args.image_path)\n with torch.no_grad():\n img = img.to(device)\n input_dict = {}\n input_dict['images'] = img.unsqueeze(0)\n # TODO: testing should be more clean.\n # to be compatible with the lmdb-based testing, need to construct some meaningless variables.\n rec_targets = 
torch.IntTensor(1, args.max_len).fill_(1)\n rec_targets[:,args.max_len-1] = dataset_info.char2id[dataset_info.EOS]\n input_dict['rec_targets'] = rec_targets\n input_dict['rec_lengths'] = [args.max_len]\n start = timeit.timeit()\n output_dict = model(input_dict)\n end = timeit.timeit()\n pred_rec = output_dict['output']['pred_rec']\n import cv2\n from matplotlib import cm\n import matplotlib.pyplot as plt\n rec_im = output_dict['output']['rectified_images'].squeeze().transpose(2, 0)\n rec_im = rec_im.transpose(1, 0)\n rec_im = (rec_im*0.5 + 0.5)*255\n rec_im = rec_im.cpu().detach().numpy()\n print(rec_im.shape)\n # new_im = Image.fromarray(rec_im)\n\n # plt.imsave(\"rec_im.png\", rec_im)\n # print(rec_im*255)\n cv2.imwrite(\"rec.png\", rec_im)\n pred_str, _ = get_str_list(pred_rec, input_dict['rec_targets'], dataset=dataset_info)\n print('Recognition result: {0}'.format(pred_str[0]))\n print('{:f}'.format(end-start))\n\n\nif __name__ == '__main__':\n # parse the config\n args = get_args(sys.argv[1:])\n main(args)"
] | [
[
"torch.cuda.manual_seed_all",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.save",
"numpy.random.seed",
"torch.no_grad",
"numpy.floor",
"torch.set_default_tensor_type",
"torch.cuda.is_available",
"torch.IntTensor",
"torch.nn.DataParallel",
"torch.device"
]
] |
erthalion/ansible-ycsb | [
"86916fbc4128629df77090b49819a9a4c4d15ea4"
] | [
"parse.py"
] | [
"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport glob\nimport json\nimport logging\nimport itertools\nimport toolz\nimport statistics\n\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text', usetex=True)\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import spline\n\nfrom parser import Parser\nfrom base import (FailedExperiment, MetricNotFound, RESULT_PATTERN,\n MetricData, PlotNames)\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef compare_result_file(file_name):\n match = RESULT_PATTERN.search(file_name)\n if match:\n return int(match.group(1))\n else:\n logger.error(f\"Cannot find threads in file {file_name}\")\n\n\ndef thread_info(threads, db, stage, metric):\n PATH = \"workload?_threads_{}_*/*/{}_{}\"\n\n def _get_metric(data):\n return getattr(Parser, metric)(data)\n\n def get_metric(file_name, data):\n try:\n return _get_metric(data)\n except FailedExperiment:\n logger.error(f\"Experiment for {db} with {threads} threads \" +\n f\"from {file_name} is failed\")\n\n return 0\n\n except MetricNotFound:\n logger.error(\n f\"Experiment for {db} with {threads} \" +\n f\"threads from {file_name} does not have metric {metric}\"\n )\n\n return 0\n except Exception as ex:\n print(f\"Got an Exception {ex} parsing {file_name}\")\n\n def get_median_metric(thread, file_names):\n data_list = [\n (file_name, json.loads(open(file_name).read()))\n for file_name in file_names\n ]\n\n metrics = [get_metric(*row) for row in data_list]\n metrics = list(filter(toolz.identity, metrics))\n val = statistics.mean(metrics)\n logger.debug(\"Metrics for thread {thread} : {metrics}\")\n logger.debug(\"Median for thread {thread} : {val}\")\n return val\n\n data_files = sorted(glob.glob(PATH.format(threads, stage, db)), key=compare_result_file)\n data_files_by_threads = toolz.groupby(compare_result_file, data_files)\n return [\n get_median_metric(thread, file_names)\n for thread, file_names in data_files_by_threads.items()\n ]\n\n\ndef main(db, stage, metric, threads=None):\n if threads is None:\n threads = \"*\"\n\n if db:\n metric_values = thread_info(threads, db, stage, metric)\n return [MetricData(metric, metric_values, db)]\n else:\n pg_metric_values = thread_info(threads, Databases.PG.value, stage, metric)\n mysql_metric_values = thread_info(threads, Databases.MYSQL.value, stage, metric)\n mongodb_metric_values = thread_info(threads, Databases.MONGO.value, stage, metric)\n return [\n MetricData(metric, pg_metric_values, Databases.PG.value),\n MetricData(metric, mysql_metric_values, Databases.MYSQL.value),\n MetricData(metric, mongodb_metric_values, Databases.MONGODB.value),\n ]\n\n\ndef print_metrics(metrics):\n for metric in metrics:\n print(f\"{metric.name} {metric.db} {metric.values}\")\n\n\ndef get_metric_option(metric):\n return \"_\".join(metric.name.split(\"_\")[2:])\n\n\ndef plot_metrics(metrics):\n plt, ax = prepare_plot(PlotNames.get(get_metric_option(metrics[0]), \"\"))\n\n for metric in metrics:\n ox, values = interpolate_metric(metric)\n plot_values(ax, ox, values, metric.db)\n\n ax.legend(shadow=True)\n plt.savefig(f\"{metric.db}_{metric.name}.png\")\n\n\ndef interpolate_metric(metric):\n interpolated_x = np.linspace(1, 100, 100)\n original_x = [1] + list(range(10, 110, 10))\n return (interpolated_x, spline(original_x, metric.values, interpolated_x))\n\n\ndef prepare_plot(plot_name):\n ax = plt.subplot()\n ax.set_facecolor(\"#eaeaf2\")\n ax.grid(color='#ffffff', linestyle='-')\n 
plt.title(plot_name)\n return plt, ax\n\n\ndef plot_values(ax, ox, oy, db):\n ax.plot(ox, oy, '#8172b2', label=db, linewidth=2)\n\n\nif __name__ == \"__main__\":\n\n args = iter(sys.argv[1:])\n db = next(args, None)\n stage = next(args, None)\n metric = next(args, None)\n threads = next(args, None)\n plot = bool(os.environ.get(\"PLOT\", 0))\n\n if os.environ.get(\"DEBUG\"):\n logger.setLevel(os.environ.get(\"LOG_LEVEL\", logging.INFO))\n\n if plot:\n plot_metrics(main(db, stage, metric, threads))\n else:\n print_metrics(main(db, stage, metric, threads))\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.rc",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"scipy.interpolate.spline",
"numpy.linspace"
]
] |
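`interpolate_metric` in parse.py calls `scipy.interpolate.spline`, which current SciPy releases no longer provide; `make_interp_spline` is the usual replacement. A minimal sketch with invented metric values:

```python
import numpy as np
from scipy.interpolate import make_interp_spline

original_x = [1] + list(range(10, 110, 10))  # same 11-point grid as interpolate_metric
values = np.array([120, 300, 450, 520, 560, 580, 590, 595, 600, 602, 603])
interpolated_x = np.linspace(1, 100, 100)

# Cubic B-spline through the 11 measured points, evaluated on the dense grid.
smooth = make_interp_spline(original_x, values, k=3)(interpolated_x)
print(smooth.shape)  # (100,)
```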
CnBDM-Su/LV_XNN | [
"52f1ab2041d734a4b35066a7d2ffef1a3da7d792"
] | [
"scripts/benchmark/xgb_test.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 19 10:59:51 2020\n\n@author: suyu\n\"\"\"\nfrom sklearn.model_selection import train_test_split,GridSearchCV,PredefinedSplit\nfrom sklearn.metrics import make_scorer,mean_squared_error,roc_auc_score,mean_absolute_error,log_loss\nfrom xgboost import XGBClassifier,XGBRegressor\nimport numpy as np\nimport pandas as pd\nimport sys\nsys.path.append('../')\nfrom gammli.DataReader import data_initialize\n\ndef xgb(wc, data, meta_info_ori, task_type=\"Regression\", random_state=0):\n \n train, test = train_test_split(data, test_size=0.2, random_state=0)\n tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t= data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=True)\n \n x = np.concatenate([tr_x,val_x])\n y = np.concatenate([tr_y,val_y])\n \n val_fold = np.ones(x.shape[0])\n val_fold[:tr_x.shape[0]] = -1\n if task_type == \"Regression\":\n\n base = XGBRegressor(n_estimators=100, random_state=random_state)\n grid = GridSearchCV(base, param_grid={\"max_depth\": (3, 4, 5, 6, 7, 8)},\n scoring={\"mse\": make_scorer(mean_squared_error, greater_is_better=False)},\n cv=PredefinedSplit(val_fold), refit=False, n_jobs=-1, error_score=np.nan)\n grid.fit(x, y.ravel())\n model = grid.estimator.set_params(**grid.cv_results_[\"params\"][np.where((grid.cv_results_[\"rank_test_mse\"] == 1))[0][0]])\n cold_mae = []\n cold_rmse = []\n warm_mae = []\n warm_rmse = []\n for times in range(10):\n \n train, test = train_test_split(data, test_size=0.2, random_state=times)\n tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t = data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=False)\n \n model.fit(tr_x, tr_y.ravel())\n pred_test = model.predict(te_x).reshape([-1, 1])\n pred_test = sy.inverse_transform(pred_test.reshape(-1,1))\n te_y = sy_t.inverse_transform(te_y.reshape(-1,1))\n \n if wc == 'warm':\n if len([(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')])!=1:\n warm_y = te_y[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]\n warm_pred = pred_test[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]\n else:\n warm_y = te_y\n warm_pred= pred_test\n warm_mae.append(mean_absolute_error(warm_y,warm_pred))\n warm_rmse.append(mean_squared_error(warm_y,warm_pred)**0.5)\n \n if wc == 'cold':\n try:\n [(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')] != [True]\n print('no cold samples')\n continue\n except:\n cold_y = te_y[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]\n cold_pred = pred_test[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]\n cold_mae.append(mean_absolute_error(cold_y,cold_pred))\n cold_rmse.append(mean_squared_error(cold_y,cold_pred)**0.5)\n\n if wc == 'warm':\n \n i_result = np.array(['xgboost',np.mean(warm_mae),np.mean(warm_rmse),np.std(warm_mae),np.std(warm_rmse)]).reshape(1,-1)\n result = pd.DataFrame(i_result,columns=['model','warm_mae','warm_rmse','std_warm_mae','std_warm_rmse'])\n\n if wc == 'cold': \n i_result = np.array(['xgboost',np.mean(cold_mae),np.mean(cold_rmse),np.std(cold_mae),np.std(cold_rmse)]).reshape(1,-1)\n result = pd.DataFrame(i_result,columns=['model','cold_mae','cold_rmse','std_cold_mae','std_cold_rmse',])\n \n\n return result\n\n\n elif task_type == \"Classification\":\n\n base = XGBClassifier(n_estimators=100, random_state=random_state)\n grid = GridSearchCV(base, param_grid={\"max_depth\": (3, 4, 5, 6, 7, 8)},\n scoring={\"auc\": 
make_scorer(roc_auc_score, needs_proba=True)},\n cv=PredefinedSplit(val_fold), refit=False, n_jobs=-1, error_score=np.nan)\n grid.fit(x, y.ravel())\n model = grid.estimator.set_params(**grid.cv_results_[\"params\"][np.where((grid.cv_results_[\"rank_test_auc\"] == 1))[0][0]])\n \n cold_auc = []\n cold_logloss = []\n warm_auc = []\n warm_logloss = []\n for times in range(10):\n \n train, test = train_test_split(data, test_size=0.2, random_state=times)\n tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info , sy, sy_t= data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=False)\n\n model.fit(tr_x, tr_y.ravel())\n pred_test = model.predict_proba(te_x)[:,-1].reshape([-1, 1])\n \n if wc == 'warm':\n if len([(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')])!=1:\n warm_y = te_y[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]\n warm_pred = pred_test[(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')]\n else:\n warm_y = te_y\n warm_pred= pred_test\n warm_auc.append(roc_auc_score(warm_y,warm_pred))\n warm_logloss.append(log_loss(warm_y,warm_pred)) \n \n if wc == 'cold':\n \n try:\n [(te_Xi[:,1] != 'cold') & (te_Xi[:,0] != 'cold')] != [True]\n print('no cold samples')\n continue\n except:\n cold_y = te_y[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]\n cold_pred = pred_test[(te_Xi[:,1] == 'cold') | (te_Xi[:,0] == 'cold')]\n cold_auc.append(roc_auc_score(cold_y,cold_pred))\n cold_logloss.append(log_loss(cold_y,cold_pred))\n\n if wc == 'warm':\n i_result = np.array(['xgboost',np.mean(warm_auc),np.mean(warm_logloss),np.std(warm_auc),np.std(warm_logloss)]).reshape(1,-1)\n result = pd.DataFrame(i_result,columns=['model','warm_auc','warm_logloss','std_warm_auc','std_warm_logloss'])\n\n if wc == 'cold':\n i_result = np.array(['xgboost',np.mean(cold_auc),np.mean(cold_logloss),np.std(cold_auc),np.std(cold_logloss)]).reshape(1,-1)\n result = pd.DataFrame(i_result,columns=['model','cold_auc','cold_logloss','std_cold_auc','std_cold_logloss'])\n \n\n return result"
] | [
[
"numpy.ones",
"sklearn.model_selection.PredefinedSplit",
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.mean_absolute_error",
"pandas.DataFrame",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.make_scorer",
"numpy.where",
"sklearn.metrics.log_loss",
"numpy.std",
"numpy.concatenate",
"sklearn.model_selection.train_test_split",
"numpy.mean"
]
] |
Cyber-Neuron/inception_v3 | [
"d3f566ccfc17f4779900a9f2d81dd593b3100df5"
] | [
"inception/inception/inception_train.py"
] | [
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A library to train Inception using multiple GPU's with synchronous updates.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nfrom datetime import datetime\nimport os.path\nimport re\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom inception import image_processing\nfrom inception import inception_model as inception\nfrom inception.slim import slim\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',\n \"\"\"Directory where to write event logs \"\"\"\n \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 10000000,\n \"\"\"Number of batches to run.\"\"\")\ntf.app.flags.DEFINE_string('subset', 'train',\n \"\"\"Either 'train' or 'validation'.\"\"\")\n\n# Flags governing the hardware employed for running TensorFlow.\ntf.app.flags.DEFINE_integer('num_gpus', 1,\n \"\"\"How many GPUs to use.\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n \"\"\"Whether to log device placement.\"\"\")\n\n# Flags governing the type of training.\ntf.app.flags.DEFINE_boolean('fine_tune', False,\n \"\"\"If set, randomly initialize the final layer \"\"\"\n \"\"\"of weights in order to train the network on a \"\"\"\n \"\"\"new task.\"\"\")\ntf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',\n \"\"\"If specified, restore this pretrained model \"\"\"\n \"\"\"before beginning any training.\"\"\")\n\n# **IMPORTANT**\n# Please note that this learning rate schedule is heavily dependent on the\n# hardware architecture, batch size and any changes to the model architecture\n# specification. Selecting a finely tuned learning rate schedule is an\n# empirical process that requires some experimentation. Please see README.md\n# more guidance and discussion.\n#\n# With 8 Tesla K40's and a batch size = 256, the following setup achieves\n# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs).\n# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997.\ntf.app.flags.DEFINE_float('initial_learning_rate', 0.1,\n \"\"\"Initial learning rate.\"\"\")\ntf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,\n \"\"\"Epochs after which learning rate decays.\"\"\")\ntf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,\n \"\"\"Learning rate decay factor.\"\"\")\n\n# Constants dictating the learning rate schedule.\nRMSPROP_DECAY = 0.9 # Decay term for RMSProp.\nRMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.\nRMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.\n\n\ndef _tower_loss(images, labels, num_classes, scope):\n \"\"\"Calculate the total loss on a single tower running the ImageNet model.\n\n We perform 'batch splitting'. This means that we cut up a batch across\n multiple GPU's. 
For instance, if the batch size = 32 and num_gpus = 2,\n then each tower will operate on an batch of 16 images.\n\n Args:\n images: Images. 4D tensor of size [batch_size, FLAGS.image_size,\n FLAGS.image_size, 3].\n labels: 1-D integer Tensor of [batch_size].\n num_classes: number of classes\n scope: unique prefix string identifying the ImageNet tower, e.g.\n 'tower_0'.\n\n Returns:\n Tensor of shape [] containing the total loss for a batch of data\n \"\"\"\n # When fine-tuning a model, we do not restore the logits but instead we\n # randomly initialize the logits. The number of classes in the output of the\n # logit is the number of classes in specified Dataset.\n restore_logits = not FLAGS.fine_tune\n\n # Build inference Graph.\n logits = inception.inference(images, num_classes, for_training=True,\n restore_logits=restore_logits,\n scope=scope)\n\n # Build the portion of the Graph calculating the losses. Note that we will\n # assemble the total_loss using a custom function below.\n split_batch_size = images.get_shape().as_list()[0]\n inception.loss(logits, labels, batch_size=split_batch_size)\n\n # Assemble all of the losses for the current tower only.\n losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)\n\n # Calculate the total loss for the current tower.\n regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n total_loss = tf.add_n(losses + regularization_losses, name='total_loss')\n\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summmary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. This helps the clarity of presentation on TensorBoard.\n loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.scalar_summary(loss_name +' (raw)', l)\n tf.scalar_summary(loss_name, loss_averages.average(l))\n\n with tf.control_dependencies([loss_averages_op]):\n total_loss = tf.identity(total_loss)\n return total_loss\n\n\ndef _average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n\n Note that this function provides a synchronization point across all towers.\n\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(0, grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\n\ndef train(dataset):\n \"\"\"Train on dataset for a number of steps.\"\"\"\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n # Create a variable to count the number of train() calls. This equals the\n # number of batches processed * FLAGS.num_gpus.\n global_step = tf.get_variable(\n 'global_step', [],\n initializer=tf.constant_initializer(0), trainable=False)\n\n # Calculate the learning rate schedule.\n num_batches_per_epoch = (dataset.num_examples_per_epoch() /\n FLAGS.batch_size)\n decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,\n global_step,\n decay_steps,\n FLAGS.learning_rate_decay_factor,\n staircase=True)\n\n # Create an optimizer that performs gradient descent.\n opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,\n momentum=RMSPROP_MOMENTUM,\n epsilon=RMSPROP_EPSILON)\n\n # Get images and labels for ImageNet and split the batch across GPUs.\n assert FLAGS.batch_size % FLAGS.num_gpus == 0, (\n 'Batch size must be divisible by number of GPUs')\n split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)\n\n # Override the number of preprocessing threads to account for the increased\n # number of GPU towers.\n num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus\n images, labels = image_processing.distorted_inputs(\n dataset,\n num_preprocess_threads=num_preprocess_threads)\n\n input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))\n\n # Number of classes in the Dataset label set plus 1.\n # Label 0 is reserved for an (unused) background class.\n num_classes = dataset.num_classes() + 1\n \n # Split the batch of images and labels for towers.\n images_splits = tf.split(0, FLAGS.num_gpus, images)\n labels_splits = tf.split(0, FLAGS.num_gpus, labels)\n\n # Calculate the gradients for each model tower.\n tower_grads = []\n for i in xrange(FLAGS.num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:\n # Force all Variables to reside on the CPU.\n with slim.arg_scope([slim.variables.variable], device='/cpu:0'):\n # Calculate the loss for one tower of the ImageNet model. This\n # function constructs the entire ImageNet model but shares the\n # variables across all towers.\n loss = _tower_loss(images_splits[i], labels_splits[i], num_classes,\n scope)\n\n # Reuse variables for the next tower.\n tf.get_variable_scope().reuse_variables()\n\n # Retain the summaries from the final tower.\n summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)\n\n # Retain the Batch Normalization updates operations only from the\n # final tower. Ideally, we should grab the updates from all towers\n # but these stats accumulate extremely fast so we can ignore the\n # other stats from the other towers without significant detriment.\n batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,\n scope)\n\n # Calculate the gradients for the batch of data on this ImageNet\n # tower.\n grads = opt.compute_gradients(loss)\n\n # Keep track of the gradients across all towers.\n tower_grads.append(grads)\n\n # We must calculate the mean of each gradient. 
Note that this is the\n # synchronization point across all towers.\n grads = _average_gradients(tower_grads)\n\n # Add a summaries for the input processing and global_step.\n summaries.extend(input_summaries)\n\n # Add a summary to track the learning rate.\n summaries.append(tf.scalar_summary('learning_rate', lr))\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n summaries.append(\n tf.histogram_summary(var.op.name + '/gradients', grad))\n\n # Apply the gradients to adjust the shared variables.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n summaries.append(tf.histogram_summary(var.op.name, var))\n\n # Track the moving averages of all trainable variables.\n # Note that we maintain a \"double-average\" of the BatchNormalization\n # global statistics. This is more complicated then need be but we employ\n # this for backward-compatibility with our previous models.\n variable_averages = tf.train.ExponentialMovingAverage(\n inception.MOVING_AVERAGE_DECAY, global_step)\n\n # Another possiblility is to use tf.slim.get_variables().\n variables_to_average = (tf.trainable_variables() +\n tf.moving_average_variables())\n variables_averages_op = variable_averages.apply(variables_to_average)\n\n # Group all updates to into a single train op.\n batchnorm_updates_op = tf.group(*batchnorm_updates)\n train_op = tf.group(apply_gradient_op, variables_averages_op,\n batchnorm_updates_op)\n\n # Create a saver.\n saver = tf.train.Saver(tf.all_variables())\n\n # Build the summary operation from the last tower summaries.\n summary_op = tf.merge_summary(summaries)\n\n # Build an initialization operation to run below.\n init = tf.initialize_all_variables()\n\n # Start running operations on the Graph. allow_soft_placement must be set to\n # True to build towers on GPU, as some of the ops do not have GPU\n # implementations.\n sess = tf.Session(config=tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=FLAGS.log_device_placement))\n sess.run(init)\n\n if FLAGS.pretrained_model_checkpoint_path:\n assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)\n variables_to_restore = tf.get_collection(\n slim.variables.VARIABLES_TO_RESTORE)\n restorer = tf.train.Saver(variables_to_restore)\n restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)\n print('%s: Pre-trained model restored from %s' %\n (datetime.now(), FLAGS.pretrained_model_checkpoint_path))\n\n # Start the queue runners.\n tf.train.start_queue_runners(sess=sess)\n\n summary_writer = tf.train.SummaryWriter(\n FLAGS.train_dir,\n graph_def=sess.graph.as_graph_def(add_shapes=True))\n\n for step in xrange(FLAGS.max_steps):\n start_time = time.time()\n _, loss_value = sess.run([train_op, loss])\n duration = time.time() - start_time\n\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 10 == 0:\n examples_per_sec = FLAGS.batch_size / float(duration)\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n print(format_str % (datetime.now(), step, loss_value,\n examples_per_sec, duration))\n\n if step % 100 == 0:\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Save the model checkpoint periodically.\n if step % 500 == 0 or (step + 1) == FLAGS.max_steps:\n checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n"
] | [
[
"tensorflow.initialize_all_variables",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.name_scope",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.concat",
"tensorflow.identity",
"tensorflow.all_variables",
"tensorflow.get_variable_scope",
"tensorflow.split",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.device",
"tensorflow.Graph",
"tensorflow.train.exponential_decay",
"numpy.isnan",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.scalar_summary",
"tensorflow.constant_initializer",
"tensorflow.add_n",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.get_collection",
"tensorflow.histogram_summary",
"tensorflow.expand_dims",
"tensorflow.train.start_queue_runners",
"tensorflow.train.Saver",
"tensorflow.merge_summary",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.group",
"tensorflow.ConfigProto",
"tensorflow.control_dependencies",
"tensorflow.reduce_mean",
"tensorflow.trainable_variables",
"tensorflow.gfile.Exists",
"tensorflow.moving_average_variables"
]
] |
rhoadesScholar/daisy | [
"78cdd2ed0d67647a6602fb53cc952214450f3753"
] | [
"examples/visualize.py"
] | [
"#!/usr/bin/env python\n\nfrom funlib.show.neuroglancer import add_layer\nimport argparse\nimport daisy\nimport glob\nimport neuroglancer\nimport os\nimport webbrowser\nimport numpy as np\nimport zarr\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--file',\n '-f',\n type=str,\n action='append',\n help=\"The path to the container to show\")\nparser.add_argument(\n '--datasets',\n '-d',\n type=str,\n nargs='+',\n action='append',\n help=\"The datasets in the container to show\")\nparser.add_argument(\n '--graphs',\n '-g',\n type=str,\n nargs='+',\n action='append',\n help=\"The graphs in the container to show\")\nparser.add_argument(\n '--no-browser',\n '-n',\n type=bool,\n nargs='?',\n default=False,\n const=True,\n help=\"If set, do not open a browser, just print a URL\")\n\nargs = parser.parse_args()\n\nneuroglancer.set_server_bind_address('0.0.0.0')\nviewer = neuroglancer.Viewer()\n\ndef to_slice(slice_str):\n\n values = [int(x) for x in slice_str.split(':')]\n if len(values) == 1:\n return values[0]\n\n return slice(*values)\n\ndef parse_ds_name(ds):\n\n tokens = ds.split('[')\n\n if len(tokens) == 1:\n return ds, None\n\n ds, slices = tokens\n slices = list(map(to_slice, slices.rstrip(']').split(',')))\n\n return ds, slices\n\nclass Project:\n\n def __init__(self, array, dim, value):\n self.array = array\n self.dim = dim\n self.value = value\n self.shape = array.shape[:self.dim] + array.shape[self.dim + 1:]\n self.dtype = array.dtype\n\n def __getitem__(self, key):\n slices = key[:self.dim] + (self.value,) + key[self.dim:]\n ret = self.array[slices]\n return ret\n\ndef slice_dataset(a, slices):\n\n dims = a.roi.dims\n\n for d, s in list(enumerate(slices))[::-1]:\n\n if isinstance(s, slice):\n raise NotImplementedError(\"Slicing not yet implemented!\")\n else:\n index = (s - a.roi.get_begin()[d])//a.voxel_size[d]\n a.data = Project(a.data, d, index)\n a.roi = daisy.Roi(\n a.roi.get_begin()[:d] + a.roi.get_begin()[d + 1:],\n a.roi.get_shape()[:d] + a.roi.get_shape()[d + 1:])\n a.voxel_size = a.voxel_size[:d] + a.voxel_size[d + 1:]\n\n return a\n\ndef open_dataset(f, ds):\n original_ds = ds\n ds, slices = parse_ds_name(ds)\n slices_str = original_ds[len(ds):]\n\n try:\n dataset_as = []\n if all(key.startswith(\"s\") for key in zarr.open(f)[ds].keys()):\n raise AttributeError(\"This group is a multiscale array!\")\n for key in zarr.open(f)[ds].keys():\n dataset_as.extend(open_dataset(f, f\"{ds}/{key}{slices_str}\"))\n return dataset_as\n except AttributeError as e:\n # dataset is an array, not a group\n pass\n\n print(\"ds :\", ds)\n print(\"slices:\", slices)\n try:\n zarr.open(f)[ds].keys()\n is_multiscale = True\n except:\n is_multiscale = False\n\n if not is_multiscale:\n a = daisy.open_ds(f, ds)\n\n if slices is not None:\n a = slice_dataset(a, slices)\n\n if a.roi.dims == 2:\n print(\"ROI is 2D, recruiting next channel to z dimension\")\n a.roi = daisy.Roi((0,) + a.roi.get_begin(), (a.shape[-3],) + a.roi.get_shape())\n a.voxel_size = daisy.Coordinate((1,) + a.voxel_size)\n\n if a.roi.dims == 4:\n print(\"ROI is 4D, stripping first dimension and treat as channels\")\n a.roi = daisy.Roi(a.roi.get_begin()[1:], a.roi.get_shape()[1:])\n a.voxel_size = daisy.Coordinate(a.voxel_size[1:])\n\n if a.data.dtype == np.int64 or a.data.dtype == np.int16:\n print(\"Converting dtype in memory...\")\n a.data = a.data[:].astype(np.uint64)\n\n return [(a, ds)]\n else:\n return [([daisy.open_ds(f, f\"{ds}/{key}\") for key in zarr.open(f)[ds].keys()], ds)]\n\nfor f, datasets in 
zip(args.file, args.datasets):\n\n arrays = []\n for ds in datasets:\n try:\n\n print(\"Adding %s, %s\" % (f, ds))\n dataset_as = open_dataset(f, ds)\n\n except Exception as e:\n\n print(type(e), e)\n print(\"Didn't work, checking if this is multi-res...\")\n\n scales = glob.glob(os.path.join(f, ds, 's*'))\n if len(scales) == 0:\n print(f\"Couldn't read {ds}, skipping...\")\n raise e\n print(\"Found scales %s\" % ([\n os.path.relpath(s, f)\n for s in scales\n ],))\n a = [\n open_dataset(f, os.path.relpath(scale_ds, f))\n for scale_ds in scales\n ]\n for a in dataset_as:\n arrays.append(a)\n\n with viewer.txn() as s:\n for array, dataset in arrays:\n add_layer(s, array, dataset)\n\nif args.graphs:\n for f, graphs in zip(args.file, args.graphs):\n\n for graph in graphs:\n\n graph_annotations = []\n try:\n ids = daisy.open_ds(f, graph + '-ids').data\n loc = daisy.open_ds(f, graph + '-locations').data\n except:\n loc = daisy.open_ds(f, graph).data\n ids = None\n dims = loc.shape[-1]\n loc = loc[:].reshape((-1, dims))\n if ids is None:\n ids = range(len(loc))\n for i, l in zip(ids, loc):\n if dims == 2:\n l = np.concatenate([[0], l])\n graph_annotations.append(\n neuroglancer.EllipsoidAnnotation(\n center=l[::-1],\n radii=(5, 5, 5),\n id=i))\n graph_layer = neuroglancer.AnnotationLayer(\n annotations=graph_annotations,\n voxel_size=(1, 1, 1))\n\n with viewer.txn() as s:\n s.layers.append(name='graph', layer=graph_layer)\n\nurl = str(viewer)\nprint(url)\nif os.environ.get(\"DISPLAY\") and not args.no_browser:\n webbrowser.open_new(url)\n\nprint(\"Press ENTER to quit\")\ninput()"
] | [
[
"numpy.concatenate"
]
] |
aelnouby/summerschool2015 | [
"c96da4af353fc1b0c1a7e3a08863c6de89072b19"
] | [
"convnets/logistic_sgd.py"
] | [
"\"\"\"\nThis tutorial introduces logistic regression using Theano and stochastic\ngradient descent.\n\nLogistic regression is a probabilistic, linear classifier. It is parametrized\nby a weight matrix :math:`W` and a bias vector :math:`b`. Classification is\ndone by projecting data points onto a set of hyperplanes, the distance to\nwhich is used to determine a class membership probability.\n\nMathematically, this can be written as:\n\n.. math::\n P(Y=i|x, W,b) &= softmax_i(W x + b) \\\\\n &= \\frac {e^{W_i x + b_i}} {\\sum_j e^{W_j x + b_j}}\n\n\nThe output of the model or prediction is then done by taking the argmax of\nthe vector whose i'th element is P(Y=i|x).\n\n.. math::\n\n y_{pred} = argmax_i P(Y=i|x,W,b)\n\n\nThis tutorial presents a stochastic gradient descent optimization method\nsuitable for large datasets.\n\n\nReferences:\n\n - textbooks: \"Pattern Recognition and Machine Learning\" -\n Christopher M. Bishop, section 4.3.2\n\n\"\"\"\nfrom __future__ import print_function\nimport gzip\nimport os\nimport sys\nimport time\n\nimport numpy\nimport six\nfrom six.moves import cPickle, xrange\n\nimport theano\nimport theano.tensor as T\n\n__docformat__ = 'restructedtext en'\n\n\nclass LogisticRegression(object):\n \"\"\"Multi-class Logistic Regression Class\n\n The logistic regression is fully described by a weight matrix :math:`W`\n and bias vector :math:`b`. Classification is done by projecting data\n points onto a set of hyperplanes, the distance to which is used to\n determine a class membership probability.\n \"\"\"\n\n def __init__(self, input, n_in, n_out):\n \"\"\" Initialize the parameters of the logistic regression\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in\n which the labels lie\n\n \"\"\"\n # start-snippet-1\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\n self.W = theano.shared(\n value=numpy.zeros(\n (n_in, n_out),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n # initialize the baises b as a vector of n_out 0s\n self.b = theano.shared(\n value=numpy.zeros(\n (n_out,),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # symbolic expression for computing the matrix of class-membership\n # probabilities\n # Where:\n # W is a matrix where column-k represent the separation hyper plain for\n # class-k\n # x is a matrix where row-j represents input training sample-j\n # b is a vector where element-k represent the free parameter of hyper\n # plain-k\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\n\n # symbolic description of how to compute prediction as class whose\n # probability is maximal\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\n # end-snippet-1\n\n # parameters of the model\n self.params = [self.W, self.b]\n\n def negative_log_likelihood(self, y):\n \"\"\"Return the mean of the negative log-likelihood of the prediction\n of this model under a given target distribution.\n\n .. 
math::\n\n \\frac{1}{|\\mathcal{D}|} \\mathcal{L} (\\theta=\\{W,b\\}, \\mathcal{D}) =\n \\frac{1}{|\\mathcal{D}|} \\sum_{i=0}^{|\\mathcal{D}|}\n \\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\\\\n \\ell (\\theta=\\{W,b\\}, \\mathcal{D})\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n\n Note: we use the mean instead of the sum so that\n the learning rate is less dependent on the batch size\n \"\"\"\n # start-snippet-2\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\n # end-snippet-2\n\n def errors(self, y):\n \"\"\"Return a float representing the number of errors in the minibatch\n over the total number of examples of the minibatch ; zero one\n loss over the size of the minibatch\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n \"\"\"\n\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.neq operator returns a vector of 0s and 1s, where 1\n # represents a mistake in prediction\n return T.mean(T.neq(self.y_pred, y))\n else:\n raise NotImplementedError()\n\n\ndef load_data(dataset):\n ''' Loads the dataset\n\n :type dataset: string\n :param dataset: the path to the dataset (here MNIST)\n '''\n\n #############\n # LOAD DATA #\n #############\n\n # Download the MNIST dataset if it is not present\n data_dir, data_file = os.path.split(dataset)\n if data_dir == \"\" and not os.path.isfile(dataset):\n # Check if dataset is in the data directory.\n if \"__file__\" in globals():\n new_path = os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"data\",\n )\n if os.path.isdir(new_path):\n new_path = os.path.join(new_path, dataset)\n if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':\n dataset = new_path\n\n if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':\n from six.moves.urllib.request import urlretrieve\n origin = (\n 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'\n )\n print('Downloading data from %s' % origin)\n urlretrieve(origin, dataset)\n\n print('... loading data')\n\n # Load the dataset\n f = gzip.open(dataset, 'rb')\n if six.PY3:\n train_set, valid_set, test_set = cPickle.load(f, encoding='latin1')\n else:\n train_set, valid_set, test_set = cPickle.load(f)\n f.close()\n # train_set, valid_set, test_set format: tuple(input, target)\n # input is an numpy.ndarray of 2 dimensions (a matrix)\n # witch row's correspond to an example. target is a\n # numpy.ndarray of 1 dimensions (vector)) that have the same length as\n # the number of rows in the input. 
It should give the target\n # target to the example with the same index in the input.\n\n def shared_dataset(data_xy, borrow=True):\n \"\"\" Function that loads the dataset into shared variables\n\n The reason we store our dataset in shared variables is to allow\n Theano to copy it into the GPU memory (when code is run on GPU).\n Since copying data into the GPU is slow, copying a minibatch everytime\n is needed (the default behaviour if the data is not in a shared\n variable) would lead to a large decrease in performance.\n \"\"\"\n data_x, data_y = data_xy\n shared_x = theano.shared(numpy.asarray(data_x,\n dtype=theano.config.floatX),\n borrow=borrow)\n shared_y = theano.shared(numpy.asarray(data_y,\n dtype=theano.config.floatX),\n borrow=borrow)\n # When storing data on the GPU it has to be stored as floats\n # therefore we will store the labels as ``floatX`` as well\n # (``shared_y`` does exactly that). But during our computations\n # we need them as ints (we use labels as index, and if they are\n # floats it doesn't make sense) therefore instead of returning\n # ``shared_y`` we will have to cast it to int. This little hack\n # lets ous get around this issue\n return shared_x, T.cast(shared_y, 'int32')\n\n test_set_x, test_set_y = shared_dataset(test_set)\n valid_set_x, valid_set_y = shared_dataset(valid_set)\n train_set_x, train_set_y = shared_dataset(train_set)\n\n rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),\n (test_set_x, test_set_y)]\n return rval\n\n\ndef sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,\n dataset='mnist.pkl.gz',\n batch_size=600):\n \"\"\"\n Demonstrate stochastic gradient descent optimization of a log-linear\n model\n\n This is demonstrated on MNIST.\n\n :type learning_rate: float\n :param learning_rate: learning rate used (factor for the stochastic\n gradient)\n\n :type n_epochs: int\n :param n_epochs: maximal number of epochs to run the optimizer\n\n :type dataset: string\n :param dataset: the path of the MNIST dataset file from\n http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz\n\n \"\"\"\n datasets = load_data(dataset)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print('... 
building the model')\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n\n # generate symbolic variables for input (x and y represent a\n # minibatch)\n x = T.matrix('x') # data, presented as rasterized images\n y = T.ivector('y') # labels, presented as 1D vector of [int] labels\n\n # construct the logistic regression class\n # Each MNIST image has size 28*28\n classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)\n\n # the cost we minimize during training is the negative log likelihood of\n # the model in symbolic format\n cost = classifier.negative_log_likelihood(y)\n\n # compiling a Theano function that computes the mistakes that are made by\n # the model on a minibatch\n test_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n validate_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n # compute the gradient of cost with respect to theta = (W,b)\n g_W = T.grad(cost=cost, wrt=classifier.W)\n g_b = T.grad(cost=cost, wrt=classifier.b)\n\n # start-snippet-3\n # specify how to update the parameters of the model as a list of\n # (variable, update expression) pairs.\n updates = [(classifier.W, classifier.W - learning_rate * g_W),\n (classifier.b, classifier.b - learning_rate * g_b)]\n\n # compiling a Theano function `train_model` that returns the cost, but in\n # the same time updates the parameter of the model based on the rules\n # defined in `updates`\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n # end-snippet-3\n\n ###############\n # TRAIN MODEL #\n ###############\n print('... 
training the model')\n # early-stopping parameters\n patience = 5000 # look as this many examples regardless\n patience_increase = 2 # wait this much longer when a new best is found\n\n # a relative improvement of this much is considered significant\n improvement_threshold = 0.995\n\n # go through this many minibatches before checking the network on\n # the validation set; in this case we check every epoch\n validation_frequency = min(n_train_batches, patience / 2)\n\n best_validation_loss = numpy.inf\n test_score = 0.\n start_time = time.clock()\n\n done_looping = False\n epoch = 0\n while (epoch < n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in xrange(n_train_batches):\n\n train_model(minibatch_index)\n # iteration number\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute zero-one loss on validation set\n validation_losses = [validate_model(i)\n for i in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n\n print(\n 'epoch %i, minibatch %i/%i, validation error %f %%' %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n # if we got the best validation score until now\n if this_validation_loss < best_validation_loss:\n # improve patience if loss improvement is good enough\n if this_validation_loss < best_validation_loss * \\\n improvement_threshold:\n patience = max(patience, iter * patience_increase)\n\n best_validation_loss = this_validation_loss\n # test it on the test set\n\n test_losses = [test_model(i)\n for i in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print(\n (\n ' epoch %i, minibatch %i/%i, test error of'\n ' best model %f %%'\n ) %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n test_score * 100.\n )\n )\n\n if patience <= iter:\n done_looping = True\n break\n\n end_time = time.clock()\n print(\n (\n 'Optimization complete with best validation score of %f %%,'\n 'with test performance %f %%'\n )\n % (best_validation_loss * 100., test_score * 100.)\n )\n print('The code run for %d epochs, with %f epochs/sec' % (\n epoch, 1. * epoch / (end_time - start_time)))\n\n print('The code ran for %.1fs' % ((end_time - start_time)),\n file=sys.stderr)\n\n # Call Python GC to make sure the GPU memory is freed. That way,\n # we are sure next call will have enough memory.\n import gc\n for i in range(4):\n gc.collect()\n"
] | [
[
"numpy.mean",
"numpy.asarray",
"numpy.zeros"
]
] |
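A minimal NumPy sketch of what `negative_log_likelihood` in the Theano logistic-regression row above computes symbolically: the loss is the negative of the mean log-likelihood of the correct labels, matching the `-T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])` line. The `probs` and `labels` values below are toy inputs invented for illustration, not data from the row.

```python
import numpy as np

def negative_log_likelihood(p_y_given_x: np.ndarray, y: np.ndarray) -> float:
    """Mean negative log-likelihood of the true labels over a minibatch."""
    rows = np.arange(y.shape[0])                       # [0, 1, ..., n-1]
    return float(-np.mean(np.log(p_y_given_x[rows, y])))

# Toy minibatch: 3 examples, 2 classes (illustrative values only).
probs = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
labels = np.array([0, 1, 0])
print(negative_log_likelihood(probs, labels))          # ~0.2798
```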
fxdupe/graphmatchingtools | [
"4503a04c4a0822315535e6ab3cd698417859908d"
] | [
"graph_matching_tools/algorithms/multiway/hippi.py"
] | [
"\"\"\"\nHiPPI algorithm as described in ICCV 2019 paper\n\n.. moduleauthor:: François-Xavier Dupé\n\"\"\"\nimport numpy as np\n\nimport graph_matching_tools.algorithms.multiway.utils as utils\n\n\ndef hippi_multiway_matching(s, sizes, knode, u_dim, iterations=100, tolerance=1e-6, init=None):\n \"\"\"\n HiPPI method for multi-graph matching based on a power method\n :param np.ndarray s: the bulk matrix with the adjacency matrices on the diagonal\n :param list sizes: the number of nodes of the different graphs (in order)\n :param np.ndarray knode: the node affinity matrix\n :param int u_dim: the dimension of the universe of nodes\n :param int iterations: the maximal number of iterations\n :param float tolerance: the tolerance for convergence\n :param np.ndarray init: the initialization, random if None\n :return: the universe of node projection for all the nodes\n \"\"\"\n if init is None:\n u = np.ones((s.shape[0], u_dim)) / u_dim + 1e-3 * np.random.randn(s.shape[0], u_dim)\n else:\n u = init\n\n w = knode.T @ s @ knode\n vi = w @ u @ u.T @ w @ u\n fu = np.trace(u.T @ vi)\n\n for i in range(iterations):\n u = utils.u_projector(vi, sizes)\n vi = w @ u @ u.T @ w @ u\n n_fu = np.trace(u.T @ vi)\n if np.abs(n_fu - fu) < tolerance:\n break\n fu = n_fu\n\n return u\n"
] | [
[
"numpy.random.randn",
"numpy.trace",
"numpy.abs",
"numpy.ones"
]
] |
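The `hippi_multiway_matching` docstring above spells out the expected inputs: a bulk matrix `s` with the graphs' adjacency matrices on the diagonal, their `sizes` in order, a node-affinity matrix `knode`, and a universe dimension. A small sketch of how such inputs could be assembled for two toy graphs, assuming only NumPy and SciPy; the identity `knode` is a placeholder, and the commented-out call requires the `graph_matching_tools` package from that repository.

```python
import numpy as np
from scipy.linalg import block_diag

sizes = [3, 4]                                  # nodes per graph, in order
adjs = [np.random.rand(n, n) for n in sizes]
adjs = [(a + a.T) / 2 for a in adjs]            # symmetric toy adjacency matrices
s = block_diag(*adjs)                           # bulk matrix, adjacencies on the diagonal
knode = np.eye(sum(sizes))                      # placeholder node-affinity matrix
print(s.shape, knode.shape)                     # (7, 7) (7, 7)

# u = hippi_multiway_matching(s, sizes, knode, u_dim=3)  # needs graph_matching_tools
```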
summerRainn/DeepLearningNotes | [
"6657694d5e22e73969e47699b4e31a28385d0f19"
] | [
"Note-6 A3CNet/Note-6.2.1 代码阅读顺序/sonnet/python/modules/base_test.py"
] | [
"# Copyright 2017 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Tests for sonnet.python.modules.base.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport pickle\nimport numpy as np\nimport six\nfrom sonnet.python.modules import base\nimport tensorflow as tf\n\nlogging = tf.logging\n\n\nclass ModuleWithClassKeys(base.AbstractModule):\n \"\"\"Dummy module that defines some keys as class attributes.\"\"\"\n POSSIBLE_INITIALIZER_KEYS = {\"foo\", \"bar\"}\n\n\nclass ModuleWithNoInitializerKeys(base.AbstractModule):\n \"\"\"Dummy module without any intiailizer keys.\"\"\"\n pass\n\n\nclass ModuleWithCustomInitializerKeys(base.AbstractModule):\n \"\"\"Dummy module that overrides get_possible_initializer_keys.\"\"\"\n\n @classmethod\n def get_possible_initializer_keys(cls, custom_key):\n return {\"foo\"} if custom_key else {\"bar\"}\n\n\nclass IdentityModule(base.AbstractModule):\n \"\"\"Sonnet module that builds a single `tf.identity` op.\"\"\"\n\n def _build(self, inputs):\n return tf.identity(inputs)\n\n\nclass NoInitIdentityModule(base.AbstractModule):\n \"\"\"Sonnet module that inherits `base.AbstractModule.__init__`.\"\"\"\n\n def _build(self, inputs):\n return tf.identity(inputs)\n\n\nclass NoSuperInitIdentityModule(base.AbstractModule):\n \"\"\"Sonnet module that doesn't call `base.AbstractModule.__init__`.\"\"\"\n\n def __init__(self):\n pass # Don't call superclass initializer.\n\n def _build(self, inputs):\n return tf.identity(inputs)\n\n\nclass SimpleModule(base.AbstractModule):\n \"\"\"Simple module with variables created in constructor and build.\"\"\"\n\n def __init__(self, custom_getter=None, name=\"simple_module\"):\n\n super(SimpleModule, self).__init__(custom_getter=custom_getter,\n name=name)\n\n with self._enter_variable_scope():\n self._b = tf.get_variable(\"b\", dtype=tf.float32, shape=[10, 10])\n\n def _build(self, inputs):\n self._w = tf.get_variable(\"w\", dtype=tf.float32, shape=[10, 10])\n\n return self._w * inputs + self._b\n\n\nclass ComplexModule(base.AbstractModule):\n \"\"\"Complex module consisting of two sub modules.\"\"\"\n\n def __init__(self, custom_getter=None, name=\"complex_module\"):\n\n super(ComplexModule, self).__init__(custom_getter=custom_getter,\n name=name)\n\n with self._enter_variable_scope():\n self._a = SimpleModule(name=\"linear_1\")\n\n def _build(self, inputs):\n self._b = SimpleModule(name=\"linear_2\")\n\n return self._b(self._a(inputs)) # pylint: disable=not-callable\n\n\nclass AbstractModuleTest(tf.test.TestCase):\n\n def testInitializerKeys(self):\n keys = ModuleWithClassKeys.get_possible_initializer_keys()\n self.assertEqual(keys, {\"foo\", \"bar\"})\n keys = ModuleWithNoInitializerKeys.get_possible_initializer_keys()\n self.assertEqual(keys, set())\n msg = (\"missing 1 required positional argument\" if six.PY3\n 
else \"takes exactly 2 arguments\")\n self.assertRaisesRegexp(\n TypeError, msg,\n ModuleWithCustomInitializerKeys.get_possible_initializer_keys)\n keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(True)\n self.assertEqual(keys, {\"foo\"})\n keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(False)\n self.assertEqual(keys, {\"bar\"})\n\n def testMultipleGraphs(self):\n id_mod = IdentityModule(name=\"identity\")\n # gpylint incorrectly thinks IdentityModule is not callable, so disable.\n # pylint: disable=not-callable\n with tf.Graph().as_default() as graph:\n id_mod(tf.placeholder(dtype=tf.float32, shape=[42]))\n self.assertEqual(id_mod._graph, graph)\n\n with tf.Graph().as_default():\n with self.assertRaisesRegexp(base.DifferentGraphError,\n \"Cannot connect module\"):\n id_mod(tf.placeholder(dtype=tf.float32, shape=[42]))\n # pylint: enable=not-callable\n\n def testNameScopeRecording(self):\n id_mod = IdentityModule(name=\"foo\")\n\n # Connect inside different name scope contexts, check that each is recorded.\n # pylint: disable=not-callable\n id_mod(tf.placeholder(dtype=tf.float32, shape=[22]))\n self.assertIn(id_mod.name_scopes, ((\"foo\",), (\"foo_1\",)))\n with tf.name_scope(\"blah\"):\n id_mod(tf.placeholder(dtype=tf.float32, shape=[23]))\n self.assertIn(id_mod.name_scopes,\n ((\"foo\", \"blah/foo\"), (\"foo_1\", \"blah/foo\")))\n with tf.name_scope(\"baz\"):\n id_mod(tf.placeholder(dtype=tf.float32, shape=[24]))\n # pylint: enable=not-callable\n self.assertIn(id_mod.name_scopes,\n ((\"foo\", \"blah/foo\", \"baz/foo\"),\n (\"foo_1\", \"blah/foo\", \"baz/foo\")))\n\n def testSubgraphsRecording(self):\n id_mod = IdentityModule(name=\"foo\")\n\n with self.assertRaisesRegexp(base.NotConnectedError,\n \"not instantiated yet\"):\n id_mod.last_connected_subgraph()\n\n # pylint: disable=not-callable\n inputs = tf.placeholder(dtype=tf.float32, shape=[21])\n outputs = id_mod(inputs)\n with tf.name_scope(\"blah\"):\n blah_inputs = tf.placeholder(dtype=tf.float32, shape=[22])\n blah_outputs = id_mod(blah_inputs)\n with tf.name_scope(\"baz\"):\n baz_inputs = tf.placeholder(dtype=tf.float32, shape=[23])\n baz_outputs = id_mod(baz_inputs)\n # pylint: enable=not-callable\n subgraphs = id_mod.connected_subgraphs\n self.assertEqual(id_mod.last_connected_subgraph.name_scope, \"baz/foo\")\n self.assertIs(id_mod.last_connected_subgraph, subgraphs[2])\n self.assertIs(subgraphs[0].builder, id_mod)\n self.assertIn(subgraphs[0].name_scope, (\"foo\", \"foo_1\"))\n self.assertEqual(subgraphs[1].name_scope, \"blah/foo\")\n self.assertEqual(subgraphs[2].name_scope, \"baz/foo\")\n self.assertIs(subgraphs[0].inputs.args[0], inputs)\n self.assertIs(subgraphs[1].inputs.args[0], blah_inputs)\n self.assertIs(subgraphs[2].inputs.args[0], baz_inputs)\n self.assertIs(subgraphs[0].outputs, outputs)\n self.assertIs(subgraphs[1].outputs, blah_outputs)\n self.assertIs(subgraphs[2].outputs, baz_outputs)\n\n def testInitNoNamedArgs(self):\n \"\"\"Tests if calling __init__ without named args raises a ValueError.\"\"\"\n with self.assertRaises(ValueError):\n NoInitIdentityModule(\"foobar\")\n\n def testInitInvalidTypeArgs(self):\n \"\"\"Tests if calling __init__ without a string name raises a TypeError.\"\"\"\n with self.assertRaises(TypeError):\n NoInitIdentityModule(name=123)\n\n def testInitNoArgs(self):\n \"\"\"Tests if calling __init__ with no args uses correct defaults.\"\"\"\n module = NoInitIdentityModule()\n self.assertEqual(module.module_name, \"no_init_identity_module\")\n\n def 
testInitNoSuper(self):\n \"\"\"Tests if a __call__ with no __init__ raises an error.\"\"\"\n module = NoSuperInitIdentityModule()\n with self.assertRaises(base.NotInitializedError):\n module(tf.constant([1])) # pylint: disable=not-callable\n\n def testPicklingNotSupported(self):\n module = IdentityModule()\n with self.assertRaisesRegexp(base.NotSupportedError,\n \"cannot be serialized\"):\n # Writing the object to a string will fail.\n pickle.dumps(module)\n\n def testCustomGetter(self):\n\n connection_count = {\"x\": 0}\n def custom_getter(getter, name, *args, **kwargs):\n connection_count[\"x\"] += 1\n return getter(name, *args, **kwargs)\n\n inputs = tf.placeholder(tf.float32, [10, 10])\n\n with tf.variable_scope(\"scope\"):\n module = SimpleModule(name=\"mod1\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(0, connection_count[\"x\"])\n\n module = SimpleModule(custom_getter=custom_getter, name=\"mod2\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(2, connection_count[\"x\"]) # w & b\n\n module = SimpleModule(custom_getter={\"w\": custom_getter}, name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(3, connection_count[\"x\"]) # w\n\n module = SimpleModule(custom_getter={\"w.*\": custom_getter}, name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(4, connection_count[\"x\"]) # w\n\n module = SimpleModule(custom_getter={\".*\": custom_getter}, name=\"mod4\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(6, connection_count[\"x\"]) # w & b\n\n err = r\"More than one custom_getter matched scope/mod5/w \\(w\\):.*\"\n with self.assertRaisesRegexp(KeyError, err):\n module = SimpleModule(\n custom_getter={\".*\": custom_getter, \"w.*\": custom_getter},\n name=\"mod5\")\n module(inputs) # pylint: disable=not-callable\n\n err = \"Given custom_getter is not callable.\"\n with self.assertRaisesRegexp(TypeError, err):\n module = SimpleModule(custom_getter=0, name=\"mod6\")\n with self.assertRaisesRegexp(TypeError, err):\n module = SimpleModule(custom_getter={\"w\": 0}, name=\"mod7\")\n\n def testCustomGetterNested(self):\n\n def custom_getter(getter, name, *args, **kwargs):\n kwargs[\"trainable\"] = False\n return getter(name, *args, **kwargs)\n\n inputs = tf.placeholder(tf.float32, [10, 10])\n\n with tf.variable_scope(\"scope\"):\n module = ComplexModule(name=\"mod1\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(4, len(tf.trainable_variables()))\n\n module = ComplexModule(custom_getter=custom_getter, name=\"mod2\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(4, len(tf.trainable_variables())) # All variables.\n\n module = ComplexModule(custom_getter={\".*/w\": custom_getter},\n name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n trainable_names = [v.op.name for v in tf.trainable_variables()]\n self.assertEqual(6, len(trainable_names)) # linear_1/w and linear_2/w.\n self.assertIn(\"scope/mod3/linear_1/b\", trainable_names)\n self.assertIn(\"scope/mod3/linear_2/b\", trainable_names)\n\n module = ComplexModule(custom_getter={\".*/b\": custom_getter}, name=\"mod4\")\n module(inputs) # pylint: disable=not-callable\n trainable_names = [v.op.name for v in tf.trainable_variables()]\n self.assertEqual(8, len(trainable_names)) # linear_1/b and linear_2/b.\n self.assertIn(\"scope/mod4/linear_1/w\", trainable_names)\n self.assertIn(\"scope/mod4/linear_2/w\", trainable_names)\n\n module = ComplexModule(custom_getter={\".*\": 
custom_getter}, name=\"mod5\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(8, len(tf.trainable_variables())) # All variables.\n\n module = ComplexModule(custom_getter={\"w\": custom_getter}, name=\"mod6\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(12, len(tf.trainable_variables())) # No variables.\n\n\ndef _make_model_with_params(inputs, output_size):\n weight_shape = [inputs.get_shape().as_list()[-1], output_size]\n weight = tf.get_variable(\"w\", shape=weight_shape, dtype=inputs.dtype)\n return tf.matmul(inputs, weight)\n\n\nclass ModuleTest(tf.test.TestCase):\n\n def testFunctionType(self):\n with self.assertRaises(TypeError) as cm:\n base.Module(build=\"not_a_function\")\n\n self.assertEqual(str(cm.exception), \"Input 'build' must be callable.\")\n\n def testSharing(self):\n batch_size = 3\n in_size = 4\n inputs1 = tf.placeholder(tf.float32, shape=[batch_size, in_size])\n inputs2 = tf.placeholder(tf.float32, shape=[batch_size, in_size])\n\n build = functools.partial(_make_model_with_params, output_size=10)\n model = base.Module(build)\n self.assertEqual(model.scope_name, \"make_model_with_params\")\n outputs1 = model(inputs1)\n outputs2 = model(inputs2)\n input_data = np.random.rand(batch_size, in_size)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n outputs1, outputs2 = sess.run(\n [outputs1, outputs2],\n feed_dict={inputs1: input_data,\n inputs2: input_data})\n self.assertAllClose(outputs1, outputs2)\n\n def testCustomGetter(self):\n def simple_module_build(inputs):\n w = tf.get_variable(\"w\", dtype=tf.float32, shape=[10, 10])\n b = tf.get_variable(\"b\", dtype=tf.float32, shape=[10, 10])\n return w * inputs + b\n\n connection_count = {\"x\": 0}\n\n def custom_getter(getter, name, *args, **kwargs):\n connection_count[\"x\"] += 1\n return getter(name, *args, **kwargs)\n\n create_module = functools.partial(base.Module, build=simple_module_build)\n\n inputs = tf.placeholder(tf.float32, [10, 10])\n\n with tf.variable_scope(\"scope\"):\n module = create_module(name=\"mod1\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(0, connection_count[\"x\"])\n\n module = create_module(custom_getter=custom_getter, name=\"mod2\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(2, connection_count[\"x\"]) # w & b\n\n module = create_module(custom_getter={\"w\": custom_getter}, name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(3, connection_count[\"x\"]) # w\n\n module = create_module(custom_getter={\"w.*\": custom_getter}, name=\"mod3\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(4, connection_count[\"x\"]) # w\n\n module = create_module(custom_getter={\".*\": custom_getter}, name=\"mod4\")\n module(inputs) # pylint: disable=not-callable\n self.assertEqual(6, connection_count[\"x\"]) # w & b\n\n err = r\"More than one custom_getter matched scope/mod5/w \\(w\\):.*\"\n with self.assertRaisesRegexp(KeyError, err):\n module = create_module(\n custom_getter={\".*\": custom_getter, \"w.*\": custom_getter},\n name=\"mod5\")\n module(inputs) # pylint: disable=not-callable\n\n err = \"Given custom_getter is not callable.\"\n with self.assertRaisesRegexp(TypeError, err):\n module = create_module(custom_getter=0, name=\"mod6\")\n with self.assertRaisesRegexp(TypeError, err):\n module = create_module(custom_getter={\"w\": 0}, name=\"mod7\")\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.Graph",
"tensorflow.trainable_variables",
"tensorflow.name_scope",
"numpy.random.rand",
"tensorflow.identity",
"tensorflow.constant",
"tensorflow.get_variable",
"tensorflow.test.main"
]
] |
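Several of the Sonnet tests above revolve around `custom_getter`: a callable that intercepts every variable-creation request, may record or modify it, and then delegates to the original getter. A dependency-free sketch of that interception pattern, assuming nothing from TensorFlow or Sonnet; `default_getter` and `make_counting_getter` are invented names used only for illustration.

```python
def default_getter(name, **kwargs):
    # Stand-in for a variable factory such as tf.get_variable.
    return {"name": name, **kwargs}

def make_counting_getter(counter):
    def custom_getter(getter, name, **kwargs):
        counter[name] = counter.get(name, 0) + 1  # intercept ...
        return getter(name, **kwargs)             # ... then delegate
    return custom_getter

counts = {}
getter = make_counting_getter(counts)
getter(default_getter, "w", shape=(10, 10))
getter(default_getter, "b", shape=(10,))
print(counts)                                     # {'w': 1, 'b': 1}
```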
h77h7/tvm-04.26 | [
"1bd8e6b921f392ae29b7672159326d94d40d6922"
] | [
"tutorials/language/reduction.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nReduction\n=========\n**Author**: `Tianqi Chen <https://tqchen.github.io>`_\n\nThis is an introduction material on how to do reduction in TVM.\nAssociative reduction operators like sum/max/min are typical\nconstruction blocks of linear algebra operations.\n\nIn this tutorial, we will demonstrate how to do reduction in TVM.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport tvm\nimport tvm.testing\nfrom tvm import te\nimport numpy as np\n\n######################################################################\n# Describe Sum of Rows\n# --------------------\n# Assume we want to compute sum of rows as our example.\n# In numpy semantics this can be written as :code:`B = numpy.sum(A, axis=1)`\n#\n# The following lines describe the row sum operation.\n# To create a reduction formula, we declare a reduction axis using\n# :any:`te.reduce_axis`. :any:`te.reduce_axis` takes in the range of reductions.\n# :any:`te.sum` takes in the expression to be reduced as well as the reduction\n# axis and compute the sum of value over all k in the declared range.\n#\n# The equivalent C code is as follows:\n#\n# .. code-block:: c\n#\n# for (int i = 0; i < n; ++i) {\n# B[i] = 0;\n# for (int k = 0; k < m; ++k) {\n# B[i] = B[i] + A[i][k];\n# }\n# }\n#\nn = te.var(\"n\")\nm = te.var(\"m\")\nA = te.placeholder((n, m), name=\"A\")\nk = te.reduce_axis((0, m), \"k\")\nB = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name=\"B\")\n\n######################################################################\n# Schedule the Reduction\n# ----------------------\n# There are several ways to schedule a reduction.\n# Before doing anything, let us print out the IR code of default schedule.\n#\ns = te.create_schedule(B.op)\nprint(tvm.lower(s, [A, B], simple_mode=True))\n\n######################################################################\n# You can find that the IR code is quite like the C code.\n# The reduction axis is similar to a normal axis, it can be splitted.\n#\n# In the following code we split both the row axis of B as well\n# axis by different factors. 
The result is a nested reduction.\n#\nko, ki = s[B].split(B.op.reduce_axis[0], factor=16)\nxo, xi = s[B].split(B.op.axis[0], factor=32)\nprint(tvm.lower(s, [A, B], simple_mode=True))\n\n######################################################################\n# If we are building a GPU kernel, we can bind the rows of B to GPU threads.\ns[B].bind(xo, te.thread_axis(\"blockIdx.x\"))\ns[B].bind(xi, te.thread_axis(\"threadIdx.x\"))\nprint(tvm.lower(s, [A, B], simple_mode=True))\n\n######################################################################\n# Reduction Factoring and Parallelization\n# ---------------------------------------\n# One problem of building a reduction is that we cannot simply\n# parallelize over the reduction axis. We need to divide the computation\n# of the reduction, store the local reduction result in a temporal array\n# before doing a reduction over the temp array.\n#\n# The rfactor primitive does such rewrite of the computation.\n# In the following schedule, the result of B is written to a temporary\n# result B.rf. The factored dimension becomes the first dimension of B.rf.\n#\ns = te.create_schedule(B.op)\nko, ki = s[B].split(B.op.reduce_axis[0], factor=16)\nBF = s.rfactor(B, ki)\nprint(tvm.lower(s, [A, B], simple_mode=True))\n\n######################################################################\n# The scheduled operator of B also get rewritten to be sum over\n# the first axis of reduced result of B.f\n#\nprint(s[B].op.body)\n\n######################################################################\n# Cross Thread Reduction\n# ----------------------\n# We can now parallelize over the factored axis.\n# Here the reduction axis of B is marked to be a thread.\n# TVM allows reduction axis to be marked as thread if it is the only\n# axis in reduction and cross thread reduction is possible in the device.\n#\n# This is indeed the case after the factoring.\n# We can directly compute BF at the reduction axis as well.\n# The final generated kernel will divide the rows by blockIdx.x and threadIdx.y\n# columns by threadIdx.x and finally do a cross thread reduction over threadIdx.x\n#\nxo, xi = s[B].split(s[B].op.axis[0], factor=32)\ns[B].bind(xo, te.thread_axis(\"blockIdx.x\"))\ns[B].bind(xi, te.thread_axis(\"threadIdx.y\"))\ntx = te.thread_axis(\"threadIdx.x\")\ns[B].bind(s[B].op.reduce_axis[0], tx)\ns[BF].compute_at(s[B], s[B].op.reduce_axis[0])\ns[B].set_store_predicate(tx.var.equal(0))\nfcuda = tvm.build(s, [A, B], \"cuda\")\nprint(fcuda.imported_modules[0].get_source())\n\n######################################################################\n# Verify the correctness of result kernel by comparing it to numpy.\n#\nnn = 128\ndev = tvm.gpu(0)\na = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev)\nb = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)\nfcuda(a, b)\ntvm.testing.assert_allclose(b.asnumpy(), np.sum(a.asnumpy(), axis=1), rtol=1e-4)\n\n######################################################################\n# Describe Convolution via 2D Reduction\n# -------------------------------------\n# In TVM, we can describe convolution via 2D reduction in a simple way.\n# Here is an example for 2D convolution with filter size = [3, 3] and strides = [1, 1].\n#\nn = te.var(\"n\")\nInput = te.placeholder((n, n), name=\"Input\")\nFilter = te.placeholder((3, 3), name=\"Filter\")\ndi = te.reduce_axis((0, 3), name=\"di\")\ndj = te.reduce_axis((0, 3), name=\"dj\")\nOutput = te.compute(\n (n - 2, n - 2),\n lambda i, j: te.sum(Input[i + di, j + dj] * Filter[di, dj], 
axis=[di, dj]),\n name=\"Output\",\n)\ns = te.create_schedule(Output.op)\nprint(tvm.lower(s, [Input, Filter, Output], simple_mode=True))\n\n######################################################################\n# .. _general-reduction:\n#\n# Define General Commutative Reduction Operation\n# ----------------------------------------------\n# Besides the built-in reduction operations like :any:`te.sum`,\n# :any:`tvm.te.min` and :any:`tvm.te.max`, you can also define your\n# commutative reduction operation by :any:`te.comm_reducer`.\n#\n\nn = te.var(\"n\")\nm = te.var(\"m\")\nproduct = te.comm_reducer(lambda x, y: x * y, lambda t: tvm.tir.const(1, dtype=t), name=\"product\")\nA = te.placeholder((n, m), name=\"A\")\nk = te.reduce_axis((0, m), name=\"k\")\nB = te.compute((n,), lambda i: product(A[i, k], axis=k), name=\"B\")\n\n######################################################################\n# .. note::\n#\n# Sometimes we would like to perform reduction that involves multiple\n# values like :code:`argmax`, which can be done by tuple inputs.\n# See :ref:`reduction-with-tuple-inputs` for more detail.\n\n######################################################################\n# Summary\n# -------\n# This tutorial provides a walk through of reduction schedule.\n#\n# - Describe reduction with reduce_axis.\n# - Use rfactor to factor out axis if we need parallelism.\n# - Define new reduction operation by :any:`te.comm_reducer`\n"
] | [
[
"numpy.random.uniform",
"numpy.zeros"
]
] |
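The TVM reduction tutorial above describes the row sum `B = numpy.sum(A, axis=1)` and quotes its C loop form before scheduling it. A NumPy-only reference of the same computation, useful as a mental model for what the generated kernel must reproduce; the 128x128 shape and the 1e-4 tolerance simply mirror the tutorial's own verification step.

```python
import numpy as np

a = np.random.uniform(size=(128, 128)).astype("float32")
b_ref = np.sum(a, axis=1)                 # B = numpy.sum(A, axis=1)

# Explicit loop form matching the C snippet quoted in the tutorial.
b_loop = np.zeros(128, dtype="float32")
for i in range(128):
    for k in range(128):
        b_loop[i] += a[i, k]

assert np.allclose(b_ref, b_loop, rtol=1e-4)
print(b_ref[:3])
```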
AkshatSh/BinarizedNMT | [
"7fa15149fdfcad6b1fd0956157c3730f3dcd781f"
] | [
"translation/models/AttentionQRNN.py"
] | [
"import sys\nsys.path.append(\"..\")\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport random\nimport argparse\n\ntry:\n from torchqrnn import QRNN\nexcept:\n # to stop python 3.7.x breaking\n QRNN = None\n\nfrom models.EncoderDecoder import (\n EncoderModel,\n DecoderModel,\n EncoderDecoderModel,\n DecoderOutputType,\n)\n\nfrom models.components.attention import (\n AttentionModule\n)\n\nfrom vocab import Vocabulary\n\nfrom constants import (\n UNKNOWN_TOKEN,\n PAD_TOKEN,\n)\n\nclass EncoderQRNN(EncoderModel):\n def __init__(\n self,\n src_vocab: Vocabulary,\n hidden_size: int,\n num_layers: int,\n dropout: float,\n ):\n super(EncoderQRNN, self).__init__()\n self.input_size = len(src_vocab)\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.dropout = dropout\n\n self.embedding = nn.Embedding(\n len(src_vocab),\n hidden_size,\n )\n self.lstm = QRNN(\n input_size=hidden_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n )\n \n def forward(\n self,\n src_tokens: torch.Tensor,\n src_lengths: torch.Tensor,\n hidden: torch.Tensor = None,\n ) -> torch.Tensor:\n embedded = self.embedding(src_tokens)\n # print(embedded.shape)\n #packed = nn.utils.rnn.pack_padded_sequence(embedded, src_lengths, batch_first=True)\n #packed = packed.t()\n embedded = embedded.transpose(0, 1)\n outputs, hidden = self.lstm(embedded, hidden)\n outputs = outputs.transpose(0, 1)\n #outputs, outputs_length = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)\n\n # sum up bidirectional outputs to keep hidden size the same\n #outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:]\n # print('output: ', outputs.shape)\n return outputs, hidden\n\nclass AttentionDecoderQRNN(DecoderModel):\n def __init__(\n self,\n trg_vocab: Vocabulary,\n hidden_size: int,\n num_layers: int,\n dropout: float,\n teacher_student_ratio: float,\n ):\n super(AttentionDecoderQRNN, self).__init__()\n\n self.hidden_size = hidden_size\n self.output_size = len(trg_vocab)\n self.num_layers = num_layers\n self.dropout = dropout\n self.teacher_student_ratio = teacher_student_ratio\n self.trg_vocab = trg_vocab\n\n # layers\n self.embedding = nn.Embedding(\n len(trg_vocab),\n hidden_size,\n )\n\n self.dropout = nn.Dropout(dropout)\n\n self.attn = AttentionModule('general', hidden_size)\n\n self.lstm = QRNN(\n input_size=hidden_size * 2,\n hidden_size=hidden_size,\n num_layers=num_layers,\n )\n\n self.out = nn.Linear(\n hidden_size,\n len(trg_vocab),\n )\n \n def forward(\n self,\n prev_tokens: torch.Tensor,\n encoder_out: tuple,\n ) -> torch.Tensor:\n encoder_outputs, last_hidden = encoder_out\n batch_size, seq_len = prev_tokens.shape\n if random.random() <= self.teacher_student_ratio:\n return self.teacher_forward(\n last_hidden,\n encoder_outputs,\n prev_tokens,\n )\n else:\n return self.student_forward(\n last_hidden,\n encoder_outputs,\n seq_len,\n )\n \n def forward_eval(\n self,\n prev_tokens: torch.Tensor,\n encoder_out: tuple,\n intermediate: torch.Tensor,\n ) -> torch.Tensor:\n encoder_outputs, last_hidden = encoder_out\n return self.teacher_forward(\n last_hidden if intermediate is None else intermediate,\n encoder_outputs,\n prev_tokens,\n )\n\n def teacher_forward(\n self,\n final_hidden: torch.Tensor,\n encoder_outputs: torch.Tensor,\n prev_tokens: torch.Tensor,\n ) -> torch.Tensor:\n batch_size, seq_len = prev_tokens.shape\n final_hidden = final_hidden[:self.num_layers]\n final_encoder_hidden = final_hidden\n\n # embedded_prev_tokens: (batch, seq_len, 
trg_vocab)\n embedded_prev_tokens = self.embedding(prev_tokens)\n embedded_prev_tokens = self.dropout(embedded_prev_tokens)\n\n decoder_outputs = []\n last_hidden = final_hidden\n \n for i in range(seq_len):\n attn_weights = self.attn(last_hidden[-1], encoder_outputs)\n\n # encoder_outputs: (batch, seq_len, dim)\n # attn_weights = (batch, seq_len)\n context = attn_weights.transpose(1,2).bmm(encoder_outputs)\n #print(encoder_outputs.shape)\n\n #print(embedded_prev_tokens.shape, context.shape)\n lstm_input = torch.cat((embedded_prev_tokens[:, i:i+1, :], context), dim=2)\n lstm_input = lstm_input.transpose(0, 1)\n output, last_hidden = self.lstm(lstm_input, last_hidden)\n output = output.transpose(0, 1)\n decoder_outputs.append(output)\n decoder_outputs = torch.cat(decoder_outputs, dim=1)\n out = self.out(decoder_outputs)\n return out, last_hidden \n \n def student_forward(\n self,\n final_hidden: torch.Tensor,\n encoder_outputs: torch.Tensor,\n seq_len: int,\n ) -> torch.Tensor:\n batch_size = encoder_outputs.shape[0]\n final_hidden = final_hidden[:self.num_layers]\n device = final_hidden.device\n\n prev_output = torch.zeros((batch_size, 1)).long().to(device)\n prev_output[:, 0] = self.trg_vocab.stoi['<sos>']\n final_encoder_hidden = final_hidden\n\n decoder_outputs = []\n last_hidden = final_hidden\n \n for i in range(seq_len):\n attn_weights = self.attn(last_hidden[-1], encoder_outputs)\n\n # encoder_outputs: (batch, seq_len, dim)\n # attn_weights = (batch, seq_len)\n context = attn_weights.transpose(1,2).bmm(encoder_outputs)\n\n embedded_prev_tokens = self.embedding(prev_output)\n embedded_prev_tokens = self.dropout(embedded_prev_tokens)\n\n lstm_input = torch.cat((embedded_prev_tokens, context), dim=2)\n output, last_hidden = self.lstm(lstm_input, last_hidden)\n output = self.out(output)\n decoder_outputs.append(output)\n topi = output.data.max(2)[1]\n prev_output = topi\n decoder_outputs = torch.cat(decoder_outputs, dim=1)\n return decoder_outputs, last_hidden \n\ndef build_model(\n src_vocab: Vocabulary,\n trg_vocab: Vocabulary,\n encoder_embed_dim: int,\n encoder_hidden_dim: int,\n encoder_dropout: float,\n encoder_num_layers: int,\n decoder_embed_dim: int,\n decoder_hidden_dim: int,\n decoder_dropout: float,\n decoder_num_layers: int,\n teacher_student_ratio: float,\n) -> nn.Module:\n encoder = EncoderQRNN(\n src_vocab=src_vocab,\n hidden_size=encoder_hidden_dim,\n num_layers=encoder_num_layers,\n dropout=encoder_dropout,\n )\n\n decoder = AttentionDecoderQRNN(\n trg_vocab=trg_vocab,\n hidden_size=decoder_hidden_dim,\n num_layers=decoder_num_layers,\n dropout=decoder_dropout,\n teacher_student_ratio=teacher_student_ratio,\n )\n\n return EncoderDecoderModel(\n encoder,\n decoder,\n src_vocab,\n trg_vocab,\n )\n\ndef add_args(parser: argparse.ArgumentParser) -> None:\n parser.add_argument('--encoder_embed_dim', type=int, default=512, help='Embedding dimension for the encoder')\n parser.add_argument('--encoder_hidden_dim', type=int, default=512, help='The hidden (feature size) for the encoder')\n parser.add_argument('--encoder_dropout', type=float, default=0.2, help='the encoder dropout to apply')\n parser.add_argument('--decoder_embed_dim', type=int, default=512, help='the decoder embedding dimension')\n parser.add_argument('--decoder_hidden_dim', type=int, default=512, help='the hidden (feature size) for the decoder')\n parser.add_argument('--decoder_dropout', type=float, default=0.2, help='the decoder dropout')\n parser.add_argument('--encoder_layers', type=int, default=4, help='the 
number of layers in the encoder')\n parser.add_argument('--decoder_layers', type=int, default=4, help='the number of layers in the decoder')\n parser.add_argument('--teacher_student_ratio', type=float, default=1.0, help='the ratio of teacher to student to use')\n"
] | [
[
"torch.zeros",
"torch.cat",
"torch.nn.Dropout"
]
] |
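The QRNN decoder above delegates its scoring to `AttentionModule('general', hidden_size)`, whose implementation is not included in the row. As a rough sketch of what a Luong-style "general" attention typically looks like (an assumption: the repository's module may differ), scores are a bilinear product of the decoder state and each encoder output, normalised over the sequence; the returned `(batch, seq_len, 1)` weights match how the decoder uses `attn_weights.transpose(1, 2).bmm(encoder_outputs)`.

```python
import torch
from torch import nn

class GeneralAttention(nn.Module):
    def __init__(self, hidden_size: int):
        super().__init__()
        self.w = nn.Linear(hidden_size, hidden_size, bias=False)

    def forward(self, decoder_hidden, encoder_outputs):
        # decoder_hidden: (batch, dim); encoder_outputs: (batch, seq_len, dim)
        scores = torch.bmm(encoder_outputs, self.w(decoder_hidden).unsqueeze(2))
        return torch.softmax(scores, dim=1)       # (batch, seq_len, 1)

attn = GeneralAttention(8)
weights = attn(torch.randn(2, 8), torch.randn(2, 5, 8))
print(weights.shape)                              # torch.Size([2, 5, 1])
```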
imjoseangel/100DaysOfCode | [
"bff90569033e2b02a56e893bd45727125962aeb3"
] | [
"python/mltraining/cross-validation/leave-p-out.py"
] | [
"# Example of LOOCV and LPOCV splitting\n\nimport numpy\nfrom sklearn.model_selection import LeaveOneOut, LeavePOut\n\n# Configurable constants\nP_VAL = 2\n\n\ndef print_result(split_data):\n \"\"\"\n Prints the result of either a LPOCV or LOOCV operation\n Args:\n split_data: The resulting (train, test) split data\n \"\"\"\n for train, test in split_data:\n output_train = ''\n output_test = ''\n\n dash = [\"-\"] * (len(train) + len(test))\n\n # Build our output for display from the resulting split\n for i in train:\n output_train = \"{}({}: {}) \".format(output_train, i, data[i])\n\n for i in test:\n dash[i] = \"T\"\n output_test = \"{}({}: {}) \".format(output_test, i, data[i])\n\n print(\"[ {} ]\".format(\" \".join(dash)))\n print(\"Train: {}\".format(output_train))\n print(\"Test: {}\\n\".format(output_test))\n\n\n# Create some data to split with\ndata = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n\n# Our two methods\nloocv = LeaveOneOut()\nlpocv = LeavePOut(p=P_VAL)\n\nsplit_loocv = loocv.split(data)\nsplit_lpocv = lpocv.split(data)\n\nprint(\"\"\"\\\nThe Leave-P-Out method works by using every combination of P points as test\ndata.\nThe following output shows the result of splitting some sample data by\nLeave-One-Out and Leave-P-Out methods.\nA bar displaying the current train-test split as well as the actual data\npoints are displayed for each split.\nIn the bar, \"-\" is a training point and \"T\" is a test point.\n\"\"\")\n\nprint(\"Data:\\n{}\\n\".format(data))\n\nprint(\"Leave-One-Out:\\n\")\nprint_result(split_loocv)\n\nprint(\"Leave-P-Out (where p = {}):\\n\".format(P_VAL))\nprint_result(split_lpocv)\n"
] | [
[
"numpy.array",
"sklearn.model_selection.LeaveOneOut",
"sklearn.model_selection.LeavePOut"
]
] |
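The leave-p-out example above prints every split; the split counts themselves follow from combinatorics: leave-one-out yields n splits and leave-p-out yields C(n, p). A short check using the same 4-point sample array and scikit-learn's `get_n_splits`.

```python
from math import comb
import numpy as np
from sklearn.model_selection import LeaveOneOut, LeavePOut

data = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
print(LeaveOneOut().get_n_splits(data))   # 4  (one split per sample)
print(LeavePOut(p=2).get_n_splits(data))  # 6
print(comb(4, 2))                         # 6, i.e. C(n, p)
```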
Jacob-Barhak/holoviews | [
"5df0269595ca7befca202f9d05522c68983dc974",
"5df0269595ca7befca202f9d05522c68983dc974"
] | [
"holoviews/tests/plotting/bokeh/testelementplot.py",
"holoviews/tests/plotting/matplotlib/testrasterplot.py"
] | [
"from unittest import SkipTest\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom bokeh.core.properties import value\nfrom holoviews.core import Dimension, DynamicMap, NdOverlay, HoloMap\nfrom holoviews.element import Curve, Image, Scatter, Labels\nfrom holoviews.streams import Stream, PointDraw\nfrom holoviews.plotting.util import process_cmap\nfrom holoviews.util import render\n\nfrom .testplot import TestBokehPlot, bokeh_renderer\nfrom ...utils import LoggingComparisonTestCase\n\ntry:\n from bokeh.document import Document\n from bokeh.models import tools\n from bokeh.models import (FuncTickFormatter, PrintfTickFormatter,\n NumeralTickFormatter, LogTicker)\nexcept:\n pass\n\n\n\nclass TestElementPlot(LoggingComparisonTestCase, TestBokehPlot):\n\n def test_element_show_frame_disabled(self):\n curve = Curve(range(10)).opts(plot=dict(show_frame=False))\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.outline_line_alpha, 0)\n\n def test_element_xaxis_top(self):\n curve = Curve(range(10)).options(xaxis='top')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertTrue(xaxis in plot.state.above)\n\n def test_element_xaxis_bare(self):\n curve = Curve(range(10)).options(xaxis='bare')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.minor_tick_line_color, None)\n self.assertEqual(xaxis.major_tick_line_color, None)\n self.assertTrue(xaxis in plot.state.below)\n\n def test_element_xaxis_bottom_bare(self):\n curve = Curve(range(10)).options(xaxis='bottom-bare')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.minor_tick_line_color, None)\n self.assertEqual(xaxis.major_tick_line_color, None)\n self.assertTrue(xaxis in plot.state.below)\n\n def test_element_xaxis_top_bare(self):\n curve = Curve(range(10)).options(xaxis='top-bare')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(xaxis.minor_tick_line_color, None)\n self.assertEqual(xaxis.major_tick_line_color, None)\n self.assertTrue(xaxis in plot.state.above)\n\n def test_element_yaxis_right(self):\n curve = Curve(range(10)).options(yaxis='right')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertTrue(yaxis in plot.state.right)\n\n def test_element_yaxis_bare(self):\n curve = Curve(range(10)).options(yaxis='bare')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertEqual(yaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.minor_tick_line_color, None)\n self.assertEqual(yaxis.major_tick_line_color, None)\n self.assertTrue(yaxis in plot.state.left)\n\n def test_element_yaxis_left_bare(self):\n curve = Curve(range(10)).options(yaxis='left-bare')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertEqual(yaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.minor_tick_line_color, None)\n 
self.assertEqual(yaxis.major_tick_line_color, None)\n self.assertTrue(yaxis in plot.state.left)\n\n def test_element_yaxis_right_bare(self):\n curve = Curve(range(10)).options(yaxis='right-bare')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertEqual(yaxis.axis_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.major_label_text_font_size, value('0pt'))\n self.assertEqual(yaxis.minor_tick_line_color, None)\n self.assertEqual(yaxis.major_tick_line_color, None)\n self.assertTrue(yaxis in plot.state.right)\n\n def test_element_title_format(self):\n title_str = ('Label: {label}, group: {group}, '\n 'dims: {dimensions}, type: {type}')\n e = Scatter(\n [],\n label='the_label',\n group='the_group',\n ).opts(title=title_str)\n title = 'Label: the_label, group: the_group, dims: , type: Scatter'\n self.assertEqual(render(e).title.text, title)\n\n def test_element_xformatter_string(self):\n curve = Curve(range(10)).options(xformatter='%d')\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertIsInstance(xaxis.formatter, PrintfTickFormatter)\n self.assertEqual(xaxis.formatter.format, '%d')\n\n def test_element_yformatter_string(self):\n curve = Curve(range(10)).options(yformatter='%d')\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertIsInstance(yaxis.formatter, PrintfTickFormatter)\n self.assertEqual(yaxis.formatter.format, '%d')\n\n def test_element_xformatter_function(self):\n try:\n import pscript # noqa\n except:\n raise SkipTest('Test requires pscript')\n def formatter(value):\n return str(value) + ' %'\n curve = Curve(range(10)).options(xformatter=formatter)\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertIsInstance(xaxis.formatter, FuncTickFormatter)\n\n def test_element_yformatter_function(self):\n try:\n import pscript # noqa\n except:\n raise SkipTest('Test requires pscript')\n def formatter(value):\n return str(value) + ' %'\n curve = Curve(range(10)).options(yformatter=formatter)\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertIsInstance(yaxis.formatter, FuncTickFormatter)\n\n def test_element_xformatter_instance(self):\n formatter = NumeralTickFormatter()\n curve = Curve(range(10)).options(xformatter=formatter)\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertIs(xaxis.formatter, formatter)\n\n def test_element_yformatter_instance(self):\n formatter = NumeralTickFormatter()\n curve = Curve(range(10)).options(yformatter=formatter)\n plot = bokeh_renderer.get_plot(curve)\n yaxis = plot.handles['yaxis']\n self.assertIs(yaxis.formatter, formatter)\n\n def test_empty_element_visibility(self):\n curve = Curve([])\n plot = bokeh_renderer.get_plot(curve)\n self.assertTrue(plot.handles['glyph_renderer'].visible)\n\n def test_element_no_xaxis(self):\n curve = Curve(range(10)).opts(plot=dict(xaxis=None))\n plot = bokeh_renderer.get_plot(curve).state\n self.assertFalse(plot.xaxis[0].visible)\n\n def test_element_no_yaxis(self):\n curve = Curve(range(10)).opts(plot=dict(yaxis=None))\n plot = bokeh_renderer.get_plot(curve).state\n self.assertFalse(plot.yaxis[0].visible)\n\n def test_element_xrotation(self):\n curve = Curve(range(10)).opts(plot=dict(xrotation=90))\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].major_label_orientation, np.pi/2)\n\n def test_element_yrotation(self):\n curve = Curve(range(10)).opts(plot=dict(yrotation=90))\n plot = 
bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.yaxis[0].major_label_orientation, np.pi/2)\n\n def test_element_xlabel_override(self):\n curve = Curve(range(10)).options(xlabel='custom x-label')\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].axis_label, 'custom x-label')\n\n def test_element_ylabel_override(self):\n curve = Curve(range(10)).options(ylabel='custom y-label')\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.yaxis[0].axis_label, 'custom y-label')\n\n def test_element_labelled_x_disabled(self):\n curve = Curve(range(10)).options(labelled=['y'])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].axis_label, '')\n self.assertEqual(plot.yaxis[0].axis_label, 'y')\n\n def test_element_labelled_y_disabled(self):\n curve = Curve(range(10)).options(labelled=['x'])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].axis_label, 'x')\n self.assertEqual(plot.yaxis[0].axis_label, '')\n\n def test_element_labelled_both_disabled(self):\n curve = Curve(range(10)).options(labelled=[])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertEqual(plot.xaxis[0].axis_label, '')\n self.assertEqual(plot.yaxis[0].axis_label, '')\n\n def test_static_source_optimization(self):\n global data\n data = np.ones((5, 5))\n img = Image(data)\n def get_img(test):\n global data\n data *= test\n return img\n stream = Stream.define(str('Test'), test=1)()\n dmap = DynamicMap(get_img, streams=[stream])\n plot = bokeh_renderer.get_plot(dmap, doc=Document())\n source = plot.handles['source']\n self.assertEqual(source.data['image'][0].mean(), 1)\n stream.event(test=2)\n self.assertTrue(plot.static_source)\n self.assertEqual(source.data['image'][0].mean(), 2)\n self.assertNotIn(source, plot.current_handles)\n\n def test_stream_cleanup(self):\n stream = Stream.define(str('Test'), test=1)()\n dmap = DynamicMap(lambda test: Curve([]), streams=[stream])\n plot = bokeh_renderer.get_plot(dmap)\n self.assertTrue(bool(stream._subscribers))\n plot.cleanup()\n self.assertFalse(bool(stream._subscribers))\n\n def test_element_formatter_xaxis(self):\n try:\n import pscript # noqa\n except:\n raise SkipTest('Test requires pscript')\n def formatter(x):\n return '%s' % x\n curve = Curve(range(10), kdims=[Dimension('x', value_format=formatter)])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertIsInstance(plot.xaxis[0].formatter, FuncTickFormatter)\n\n def test_element_formatter_yaxis(self):\n try:\n import pscript # noqa\n except:\n raise SkipTest('Test requires pscript')\n def formatter(x):\n return '%s' % x\n curve = Curve(range(10), vdims=[Dimension('y', value_format=formatter)])\n plot = bokeh_renderer.get_plot(curve).state\n self.assertIsInstance(plot.yaxis[0].formatter, FuncTickFormatter)\n\n def test_element_grid_custom_xticker(self):\n curve = Curve([1, 2, 3]).opts(xticks=[0.5, 1.5], show_grid=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertIs(plot.state.xgrid[0].ticker, plot.state.xaxis[0].ticker)\n\n def test_element_grid_custom_yticker(self):\n curve = Curve([1, 2, 3]).opts(yticks=[0.5, 2.5], show_grid=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertIs(plot.state.ygrid[0].ticker, plot.state.yaxis[0].ticker)\n\n def test_element_grid_options(self):\n grid_style = {'grid_line_color': 'blue', 'grid_line_width': 1.5, 'ygrid_bounds': (0.3, 0.7),\n 'minor_xgrid_line_color': 'lightgray', 'xgrid_line_dash': [4, 4]}\n curve = Curve(range(10)).options(show_grid=True, 
gridstyle=grid_style)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.xgrid[0].grid_line_color, 'blue')\n self.assertEqual(plot.state.xgrid[0].grid_line_width, 1.5)\n self.assertEqual(plot.state.xgrid[0].grid_line_dash, [4, 4])\n self.assertEqual(plot.state.xgrid[0].minor_grid_line_color, 'lightgray')\n self.assertEqual(plot.state.ygrid[0].grid_line_color, 'blue')\n self.assertEqual(plot.state.ygrid[0].grid_line_width, 1.5)\n self.assertEqual(plot.state.ygrid[0].bounds, (0.3, 0.7))\n\n def test_change_cds_columns(self):\n lengths = {'a': 1, 'b': 2, 'c': 3}\n curve = DynamicMap(lambda a: Curve(range(lengths[a]), a), kdims=['a']).redim.values(a=['a', 'b', 'c'])\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(sorted(plot.handles['source'].data.keys()), ['a', 'y'])\n self.assertEqual(plot.state.xaxis[0].axis_label, 'a')\n plot.update(('b',))\n self.assertEqual(sorted(plot.handles['source'].data.keys()), ['b', 'y'])\n self.assertEqual(plot.state.xaxis[0].axis_label, 'b')\n\n def test_update_cds_columns(self):\n curve = DynamicMap(lambda a: Curve(range(10), a), kdims=['a']).redim.values(a=['a', 'b', 'c'])\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(sorted(plot.handles['source'].data.keys()), ['a', 'y'])\n self.assertEqual(plot.state.xaxis[0].axis_label, 'a')\n plot.update(('b',))\n self.assertEqual(sorted(plot.handles['source'].data.keys()), ['a', 'b', 'y'])\n self.assertEqual(plot.state.xaxis[0].axis_label, 'b')\n\n def test_categorical_axis_fontsize(self):\n curve = Curve([('A', 1), ('B', 2)]).options(fontsize={'minor_xticks': '6pt', 'xticks': 18})\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.major_label_text_font_size, '6pt')\n self.assertEqual(xaxis.group_text_font_size, {'value': '18pt'})\n\n def test_categorical_axis_fontsize_both(self):\n curve = Curve([('A', 1), ('B', 2)]).options(fontsize={'xticks': 18})\n plot = bokeh_renderer.get_plot(curve)\n xaxis = plot.handles['xaxis']\n self.assertEqual(xaxis.major_label_text_font_size, {'value': '18pt'})\n self.assertEqual(xaxis.group_text_font_size, {'value': '18pt'})\n\n def test_cftime_transform_gregorian_no_warn(self):\n try:\n import cftime\n except:\n raise SkipTest('Test requires cftime library')\n gregorian_dates = [cftime.DatetimeGregorian(2000, 2, 28),\n cftime.DatetimeGregorian(2000, 3, 1),\n cftime.DatetimeGregorian(2000, 3, 2)]\n curve = Curve((gregorian_dates, [1, 2, 3]))\n plot = bokeh_renderer.get_plot(curve)\n xs = plot.handles['cds'].data['x']\n self.assertEqual(xs.astype('int64'),\n np.array([951696000000, 951868800000, 951955200000]))\n\n def test_cftime_transform_noleap_warn(self):\n try:\n import cftime\n except:\n raise SkipTest('Test requires cftime library')\n gregorian_dates = [cftime.DatetimeNoLeap(2000, 2, 28),\n cftime.DatetimeNoLeap(2000, 3, 1),\n cftime.DatetimeNoLeap(2000, 3, 2)]\n curve = Curve((gregorian_dates, [1, 2, 3]))\n plot = bokeh_renderer.get_plot(curve)\n xs = plot.handles['cds'].data['x']\n self.assertEqual(xs.astype('int64'),\n np.array([951696000000, 951868800000, 951955200000]))\n substr = (\n \"Converting cftime.datetime from a non-standard calendar \"\n \"(noleap) to a standard calendar for plotting. 
This may \"\n \"lead to subtle errors in formatting dates, for accurate \"\n \"tick formatting switch to the matplotlib backend.\")\n self.log_handler.assertEndsWith('WARNING', substr)\n\n def test_active_tools_drag(self):\n curve = Curve([1, 2, 3]).options(active_tools=['box_zoom'])\n plot = bokeh_renderer.get_plot(curve)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_drag, tools.BoxZoomTool)\n\n def test_active_tools_scroll(self):\n curve = Curve([1, 2, 3]).options(active_tools=['wheel_zoom'])\n plot = bokeh_renderer.get_plot(curve)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_scroll, tools.WheelZoomTool)\n\n def test_active_tools_tap(self):\n curve = Curve([1, 2, 3]).options(active_tools=['tap'], tools=['tap'])\n plot = bokeh_renderer.get_plot(curve)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_tap, tools.TapTool)\n\n def test_active_tools_draw_stream(self):\n scatter = Scatter([1, 2, 3]).options(active_tools=['point_draw'])\n PointDraw(source=scatter)\n plot = bokeh_renderer.get_plot(scatter)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_tap, tools.PointDrawTool)\n self.assertIsInstance(toolbar.active_drag, tools.PointDrawTool)\n\n def test_hover_tooltip_update(self):\n hmap = HoloMap({'a': Curve([1, 2, 3], vdims='a'), 'b': Curve([1, 2, 3], vdims='b')}).opts(\n tools=['hover'])\n plot = bokeh_renderer.get_plot(hmap)\n self.assertEqual(plot.handles['hover'].tooltips, [('x', '@{x}'), ('a', '@{a}')])\n plot.update(('b',))\n self.assertEqual(plot.handles['hover'].tooltips, [('x', '@{x}'), ('b', '@{b}')])\n\n def test_categorical_dimension_values(self):\n curve = Curve([('C', 1), ('B', 3)]).redim.values(x=['A', 'B', 'C'])\n plot = bokeh_renderer.get_plot(curve)\n x_range = plot.handles['x_range']\n self.assertEqual(x_range.factors, ['A', 'B', 'C'])\n\n def test_categorical_dimension_type(self):\n curve = Curve([]).redim.type(x=str)\n plot = bokeh_renderer.get_plot(curve)\n x_range = plot.handles['x_range']\n self.assertEqual(x_range.factors, [])\n\n #################################################################\n # Aspect tests\n #################################################################\n\n def test_element_aspect(self):\n curve = Curve([1, 2, 3]).opts(aspect=2)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 300)\n self.assertEqual(plot.state.frame_width, 600)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_width(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 200)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_height(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, height=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 800)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_width_height(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, height=400, width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, 400)\n 
self.assertEqual(plot.state.plot_width, 400)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.aspect_ratio, None)\n self.log_handler.assertContains('WARNING', \"aspect value was ignored\")\n\n def test_element_aspect_frame_width(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 200)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_frame_height(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_height=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 800)\n self.assertEqual(plot.state.aspect_ratio, None)\n\n def test_element_aspect_frame_width_frame_height(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_height=400, frame_width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_ratio, None)\n self.log_handler.assertContains('WARNING', \"aspect value was ignored\")\n\n def test_element_data_aspect(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=1.5)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 300)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 1.5)\n\n def test_element_data_aspect_width(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=2, width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n def test_element_data_aspect_height(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=2, height=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n def test_element_data_aspect_width_height(self):\n curve = Curve([0, 2, 3]).opts(data_aspect=2, height=400, width=400)\n plot = bokeh_renderer.get_plot(curve)\n x_range, y_range = plot.handles['x_range'], plot.handles['y_range']\n self.assertEqual(plot.state.plot_height, 400)\n self.assertEqual(plot.state.plot_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n self.assertEqual(x_range.start, -2)\n self.assertEqual(x_range.end, 4)\n self.assertEqual(y_range.start, 0)\n self.assertEqual(y_range.end, 3)\n\n def test_element_data_aspect_frame_width(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 800)\n 
self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n def test_element_data_aspect_frame_height(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_height=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 200)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n def test_element_data_aspect_frame_width_frame_height(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_height=400, frame_width=400)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.aspect_scale, 2)\n\n #################################################################\n # Aspect tests\n #################################################################\n\n def test_element_responsive(self):\n curve = Curve([1, 2, 3]).opts(responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'stretch_both')\n\n def test_element_width_responsive(self):\n curve = Curve([1, 2, 3]).opts(width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, 400)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'stretch_height')\n\n def test_element_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, 400)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'stretch_width')\n\n def test_element_frame_width_responsive(self):\n curve = Curve([1, 2, 3]).opts(frame_width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'stretch_height')\n\n def test_element_frame_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(frame_height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'stretch_width')\n\n def test_element_aspect_responsive(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'scale_both')\n\n def test_element_aspect_width_responsive(self):\n curve = 
Curve([1, 2, 3]).opts(aspect=2, width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 200)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n\n def test_element_aspect_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 800)\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n\n def test_element_width_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(height=400, width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.plot_height, 400)\n self.assertEqual(plot.state.plot_width, 400)\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n self.assertEqual(plot.state.frame_height, None)\n self.assertEqual(plot.state.frame_width, None)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n\n def test_element_aspect_frame_width_responsive(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n self.assertEqual(plot.state.plot_height, None)\n self.assertEqual(plot.state.plot_width, None)\n self.assertEqual(plot.state.frame_height, 200)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n\n def test_element_aspect_frame_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(aspect=2, frame_height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 800)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_frame_width_frame_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(frame_height=400, frame_width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_data_aspect_responsive(self):\n curve = Curve([0, 2]).opts(data_aspect=1, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.aspect_ratio, 0.5)\n self.assertEqual(plot.state.aspect_scale, 1)\n self.assertEqual(plot.state.sizing_mode, 'scale_both')\n\n def test_element_data_aspect_and_aspect_responsive(self):\n curve = Curve([0, 2]).opts(data_aspect=1, aspect=2, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.aspect_ratio, 0.5)\n self.assertEqual(plot.state.aspect_scale, 1)\n self.assertEqual(plot.state.sizing_mode, 'scale_both')\n x_range = plot.handles['x_range']\n y_range = plot.handles['y_range']\n self.assertEqual(x_range.start, 0)\n self.assertEqual(x_range.end, 1)\n 
self.assertEqual(y_range.start, 0)\n self.assertEqual(y_range.end, 2)\n\n def test_element_data_aspect_width_responsive(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=2, width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_data_aspect_height_responsive(self):\n curve = Curve([0, 0.5, 1, 1.5]).opts(data_aspect=2, height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_data_aspect_frame_width_responsive(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_width=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 800)\n self.assertEqual(plot.state.frame_width, 400)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n def test_element_data_aspect_frame_height_responsive(self):\n curve = Curve([1, 2, 3]).opts(data_aspect=2, frame_height=400, responsive=True)\n plot = bokeh_renderer.get_plot(curve)\n self.assertEqual(plot.state.frame_height, 400)\n self.assertEqual(plot.state.frame_width, 200)\n self.assertEqual(plot.state.sizing_mode, 'fixed')\n self.log_handler.assertContains('WARNING', \"responsive mode could not be enabled\")\n\n\n\nclass TestColorbarPlot(TestBokehPlot):\n\n def test_colormapper_symmetric(self):\n img = Image(np.array([[0, 1], [2, 3]])).options(symmetric=True)\n plot = bokeh_renderer.get_plot(img)\n cmapper = plot.handles['color_mapper']\n self.assertEqual(cmapper.low, -3)\n self.assertEqual(cmapper.high, 3)\n\n def test_colormapper_color_levels(self):\n cmap = process_cmap('viridis', provider='bokeh')\n img = Image(np.array([[0, 1], [2, 3]])).options(color_levels=5, cmap=cmap)\n plot = bokeh_renderer.get_plot(img)\n cmapper = plot.handles['color_mapper']\n self.assertEqual(len(cmapper.palette), 5)\n self.assertEqual(cmapper.palette, ['#440154', '#440255', '#440357', '#450558', '#45065A'])\n\n def test_colormapper_transparent_nan(self):\n img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'NaN': 'transparent'})\n plot = bokeh_renderer.get_plot(img)\n cmapper = plot.handles['color_mapper']\n self.assertEqual(cmapper.nan_color, 'rgba(0, 0, 0, 0)')\n\n def test_colormapper_min_max_colors(self):\n img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'min': 'red', 'max': 'blue'})\n plot = bokeh_renderer.get_plot(img)\n cmapper = plot.handles['color_mapper']\n self.assertEqual(cmapper.low_color, 'red')\n self.assertEqual(cmapper.high_color, 'blue')\n\n def test_custom_colorbar_ticker(self):\n ticker = LogTicker()\n img = Image(np.array([[0, 1], [2, 3]])).options(colorbar=True, colorbar_opts=dict(ticker=ticker))\n plot = bokeh_renderer.get_plot(img)\n colorbar = plot.handles['colorbar']\n self.assertIs(colorbar.ticker, ticker)\n\n def test_explicit_categorical_cmap_on_integer_data(self):\n explicit_mapping = OrderedDict([(0, 'blue'), (1, 'red'), (2, 'green'), (3, 'purple')])\n points = Scatter(([0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]), vdims=['y', 
'Category']).options(\n color_index='Category', cmap=explicit_mapping\n )\n plot = bokeh_renderer.get_plot(points)\n cmapper = plot.handles['color_mapper']\n cds = plot.handles['cds']\n self.assertEqual(cds.data['Category_str__'], ['0', '1', '2', '3'])\n self.assertEqual(cmapper.factors, ['0', '1', '2', '3'])\n self.assertEqual(cmapper.palette, ['blue', 'red', 'green', 'purple'])\n\n\nclass TestOverlayPlot(TestBokehPlot):\n\n def test_overlay_projection_clashing(self):\n overlay = Curve([]).options(projection='polar') * Curve([]).options(projection='custom')\n with self.assertRaises(Exception):\n bokeh_renderer.get_plot(overlay)\n\n def test_overlay_projection_propagates(self):\n overlay = Curve([]) * Curve([]).options(projection='custom')\n plot = bokeh_renderer.get_plot(overlay)\n self.assertEqual([p.projection for p in plot.subplots.values()], ['custom', 'custom'])\n\n def test_overlay_gridstyle_applies(self):\n grid_style = {'grid_line_color': 'blue', 'grid_line_width': 2}\n overlay = (Scatter([(10,10)]).options(gridstyle=grid_style, show_grid=True, size=20)\n * Labels([(10, 10, 'A')]))\n plot = bokeh_renderer.get_plot(overlay)\n self.assertEqual(plot.state.xgrid[0].grid_line_color, 'blue')\n self.assertEqual(plot.state.xgrid[0].grid_line_width, 2)\n\n def test_ndoverlay_legend_muted(self):\n overlay = NdOverlay({i: Curve(np.random.randn(10).cumsum()) for i in range(5)}).options(legend_muted=True)\n plot = bokeh_renderer.get_plot(overlay)\n for sp in plot.subplots.values():\n self.assertTrue(sp.handles['glyph_renderer'].muted)\n\n def test_overlay_legend_muted(self):\n overlay = (Curve(np.random.randn(10).cumsum(), label='A') *\n Curve(np.random.randn(10).cumsum(), label='B')).options(legend_muted=True)\n plot = bokeh_renderer.get_plot(overlay)\n for sp in plot.subplots.values():\n self.assertTrue(sp.handles['glyph_renderer'].muted)\n\n def test_active_tools_drag(self):\n curve = Curve([1, 2, 3])\n scatter = Scatter([1, 2, 3])\n overlay = (scatter * curve).options(active_tools=['box_zoom'])\n plot = bokeh_renderer.get_plot(overlay)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_drag, tools.BoxZoomTool)\n\n def test_active_tools_scroll(self):\n curve = Curve([1, 2, 3])\n scatter = Scatter([1, 2, 3])\n overlay = (scatter * curve).options(active_tools=['wheel_zoom'])\n plot = bokeh_renderer.get_plot(overlay)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_scroll, tools.WheelZoomTool)\n\n def test_active_tools_tap(self):\n curve = Curve([1, 2, 3])\n scatter = Scatter([1, 2, 3]).options(tools=['tap'])\n overlay = (scatter * curve).options(active_tools=['tap'])\n plot = bokeh_renderer.get_plot(overlay)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_tap, tools.TapTool)\n\n def test_active_tools_draw_stream(self):\n curve = Curve([1, 2, 3])\n scatter = Scatter([1, 2, 3]).options(active_tools=['point_draw'])\n PointDraw(source=scatter)\n overlay = (scatter * curve)\n plot = bokeh_renderer.get_plot(overlay)\n toolbar = plot.state.toolbar\n self.assertIsInstance(toolbar.active_tap, tools.PointDrawTool)\n self.assertIsInstance(toolbar.active_drag, tools.PointDrawTool)\n\n def test_categorical_overlay_dimension_values(self):\n curve = Curve([('C', 1), ('B', 3)]).redim.values(x=['A', 'B', 'C'])\n scatter = Scatter([('A', 2)])\n plot = bokeh_renderer.get_plot(curve*scatter)\n x_range = plot.handles['x_range']\n self.assertEqual(x_range.factors, ['A', 'B', 'C'])\n\n def test_categorical_overlay_dimension_values_skip_factor(self):\n 
curve = Curve([('C', 1), ('B', 3)])\n scatter = Scatter([('A', 2)])\n plot = bokeh_renderer.get_plot((curve*scatter).redim.values(x=['A', 'C']))\n x_range = plot.handles['x_range']\n self.assertEqual(x_range.factors, ['A', 'C'])\n",
"import numpy as np\n\nfrom holoviews.element import Raster, Image\n\nfrom .testplot import TestMPLPlot, mpl_renderer\n\ntry:\n from matplotlib.colors import ListedColormap\nexcept:\n pass\n\n\nclass TestRasterPlot(TestMPLPlot):\n\n def test_raster_invert_axes(self):\n arr = np.array([[0, 1, 2], [3, 4, 5]])\n raster = Raster(arr).opts(invert_axes=True)\n plot = mpl_renderer.get_plot(raster)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_array().data, arr.T[::-1])\n self.assertEqual(artist.get_extent(), [0, 2, 0, 3])\n\n def test_image_invert_axes(self):\n arr = np.array([[0, 1, 2], [3, 4, 5]])\n raster = Image(arr).opts(invert_axes=True)\n plot = mpl_renderer.get_plot(raster)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_array().data, arr.T[::-1, ::-1])\n self.assertEqual(artist.get_extent(), [-0.5, 0.5, -0.5, 0.5])\n\n def test_image_listed_cmap(self):\n colors = ['#ffffff','#000000']\n img = Image(np.array([[0, 1, 2], [3, 4, 5]])).opts(cmap=colors)\n plot = mpl_renderer.get_plot(img)\n artist = plot.handles['artist']\n cmap = artist.get_cmap()\n self.assertIsInstance(cmap, ListedColormap)\n self.assertEqual(cmap.colors, colors)\n\n def test_image_cbar_extend_both(self):\n img = Image(np.array([[0, 1], [2, 3]])).redim(z=dict(range=(1,2)))\n plot = mpl_renderer.get_plot(img.opts(colorbar=True))\n self.assertEqual(plot.handles['cbar'].extend, 'both')\n\n def test_image_cbar_extend_min(self):\n img = Image(np.array([[0, 1], [2, 3]])).redim(z=dict(range=(1, None)))\n plot = mpl_renderer.get_plot(img.opts(colorbar=True))\n self.assertEqual(plot.handles['cbar'].extend, 'min')\n\n def test_image_cbar_extend_max(self):\n img = Image(np.array([[0, 1], [2, 3]])).redim(z=dict(range=(None, 2)))\n plot = mpl_renderer.get_plot(img.opts(colorbar=True))\n self.assertEqual(plot.handles['cbar'].extend, 'max')\n\n def test_image_cbar_extend_clim(self):\n img = Image(np.array([[0, 1], [2, 3]])).opts(\n clim=(np.nan, np.nan), colorbar=True)\n plot = mpl_renderer.get_plot(img)\n self.assertEqual(plot.handles['cbar'].extend, 'neither')\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.random.randn"
],
[
"numpy.array"
]
] |
andreped/GSI-RADS | [
"3582ed84216266e05cb71f6223ce9814a6df203f"
] | [
"diagnosis/src/Utils/volume_utilities.py"
] | [
"import numpy as np\nfrom copy import deepcopy\nfrom skimage.transform import resize\nfrom scipy.ndimage import binary_fill_holes\nfrom skimage.measure import regionprops\nfrom diagnosis.src.Utils.configuration_parser import *\n\n\ndef crop_MR(volume, parameters):\n original_volume = np.copy(volume)\n volume[volume >= 0.2] = 1\n volume[volume < 0.2] = 0\n volume = volume.astype(np.uint8)\n volume = binary_fill_holes(volume).astype(np.uint8)\n regions = regionprops(volume)\n min_row, min_col, min_depth, max_row, max_col, max_depth = regions[0].bbox\n print('cropping params', min_row, min_col, min_depth, max_row, max_col, max_depth)\n\n cropped_volume = original_volume[min_row:max_row, min_col:max_col, min_depth:max_depth]\n bbox = [min_row, min_col, min_depth, max_row, max_col, max_depth]\n\n return cropped_volume, bbox\n\n\ndef resize_volume(volume, new_slice_size, slicing_plane, order=1):\n new_volume = None\n if len(new_slice_size) == 2:\n if slicing_plane == 'axial':\n new_val = int(volume.shape[2] * (new_slice_size[1] / volume.shape[1]))\n new_volume = resize(volume, (new_slice_size[0], new_slice_size[1], new_val), order=order)\n elif slicing_plane == 'sagittal':\n new_val = new_slice_size[0]\n new_volume = resize(volume, (new_val, new_slice_size[0], new_slice_size[1]), order=order)\n elif slicing_plane == 'coronal':\n new_val = new_slice_size[0]\n new_volume = resize(volume, (new_slice_size[0], new_val, new_slice_size[1]), order=order)\n elif len(new_slice_size) == 3:\n new_volume = resize(volume, new_slice_size, order=order)\n return new_volume\n\n\ndef __intensity_normalization_MRI(volume, parameters):\n result = deepcopy(volume).astype('float32')\n if parameters.intensity_clipping_range[1] - parameters.intensity_clipping_range[0] != 100:\n limits = np.percentile(volume, q=parameters.intensity_clipping_range)\n result[volume < limits[0]] = limits[0]\n result[volume > limits[1]] = limits[1]\n\n if parameters.normalization_method == 'zeromean':\n mean_val = np.mean(result)\n var_val = np.std(result)\n tmp = (result - mean_val) / var_val\n result = tmp\n else:\n min_val = np.min(result)\n max_val = np.max(result)\n if (max_val - min_val) != 0:\n tmp = (result - min_val) / (max_val - min_val)\n result = tmp\n # else:\n # result = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))\n\n return result\n\n\ndef intensity_normalization(volume, parameters):\n return __intensity_normalization_MRI(volume, parameters)\n\n\ndef padding_for_inference(data, slab_size, slicing_plane):\n new_data = data\n if slicing_plane == 'axial':\n missing_dimension = (slab_size - (data.shape[2] % slab_size)) % slab_size\n if missing_dimension != 0:\n new_data = np.pad(data, ((0, 0), (0, 0), (0, missing_dimension), (0, 0)), mode='edge')\n elif slicing_plane == 'sagittal':\n missing_dimension = (slab_size - (data.shape[0] % slab_size)) % slab_size\n if missing_dimension != 0:\n new_data = np.pad(data, ((0, missing_dimension), (0, 0), (0, 0), (0, 0)), mode='edge')\n elif slicing_plane == 'coronal':\n missing_dimension = (slab_size - (data.shape[1] % slab_size)) % slab_size\n if missing_dimension != 0:\n new_data = np.pad(data, ((0, 0), (0, missing_dimension), (0, 0), (0, 0)), mode='edge')\n\n return new_data, missing_dimension\n\n\ndef padding_for_inference_both_ends(data, slab_size, slicing_plane):\n new_data = data\n padding_val = int(slab_size / 2)\n if slicing_plane == 'axial':\n new_data = np.pad(data, ((0, 0), (0, 0), (padding_val, padding_val), (0, 0)), mode='edge')\n elif slicing_plane == 
'sagittal':\n new_data = np.pad(data, ((padding_val, padding_val), (0, 0), (0, 0), (0, 0)), mode='edge')\n elif slicing_plane == 'coronal':\n new_data = np.pad(data, ((0, 0), (padding_val, padding_val), (0, 0), (0, 0)), mode='edge')\n\n return new_data\n"
] | [
[
"scipy.ndimage.binary_fill_holes",
"numpy.copy",
"numpy.max",
"numpy.min",
"numpy.std",
"numpy.pad",
"numpy.percentile",
"numpy.mean"
]
] |
2742195759/Paddle | [
"ce034db1834af85539b22ab68492df9972ff3e69"
] | [
"python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py"
] | [
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom scipy.special import expit, erf\nimport paddle.fluid.core as core\nfrom paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16\nfrom paddle.fluid.tests.unittests.test_activation_op import TestActivation, TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu, TestSwish, TestHardSwish, TestRelu6, TestSigmoid\nfrom paddle.fluid.tests.unittests.test_gelu_op import gelu\nfrom mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd\n\n\nclass TestMKLDNNReluDim2(TestRelu):\n def setUp(self):\n super(TestMKLDNNReluDim2, self).setUp()\n\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNRelu6Dim2(TestRelu6):\n def setUp(self):\n super(TestMKLDNNRelu6Dim2, self).setUp()\n self.attrs.update({\"use_mkldnn\": True})\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNLeakyReluDim2(TestLeakyRelu):\n def setUp(self):\n super(TestMKLDNNLeakyReluDim2, self).setUp()\n\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNGeluDim2(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.float32\n\n x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)\n out = gelu(x, False)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNGeluDim2Approx(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.float32\n\n x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)\n out = gelu(x, True)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True, \"approximate\": True}\n\n\nclass TestMKLDNNTanhDim2(TestTanh):\n def setUp(self):\n super(TestMKLDNNTanhDim2, self).setUp()\n\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSqrtDim2(TestSqrt):\n def setUp(self):\n super(TestMKLDNNSqrtDim2, self).setUp()\n\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNAbsDim2(TestAbs):\n def setUp(self):\n super(TestMKLDNNAbsDim2, self).setUp()\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSwishDim2(TestSwish):\n def setUp(self):\n super(TestMKLDNNSwishDim2, self).setUp()\n\n self.attrs[\"use_mkldnn\"] = True\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNHardSwishDim2(TestHardSwish):\n def setUp(self):\n super(TestMKLDNNHardSwishDim2, self).setUp()\n\n self.attrs[\"use_mkldnn\"] = True\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSigmoidDim2(TestSigmoid):\n def setUp(self):\n super(TestMKLDNNSigmoidDim2, 
self).setUp()\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNReluDim4(TestRelu):\n def setUp(self):\n super(TestMKLDNNReluDim4, self).setUp()\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(\"float32\")\n # The same reason with TestAbs\n x[np.abs(x) < 0.005] = 0.02\n out = np.maximum(x, 0)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNLeakyReluDim4(TestLeakyRelu):\n def setUp(self):\n super(TestMKLDNNLeakyReluDim4, self).setUp()\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(\"float32\")\n # The same reason with TestAbs\n x[np.abs(x) < 0.005] = 0.02\n out = np.maximum(x, 0.02 * x)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNGeluDim4(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.float32\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)\n out = gelu(x, False)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNGeluDim4Approx(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.float32\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)\n out = gelu(x, True)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True, \"approximate\": True}\n\n\[email protected](not core.supports_bfloat16(),\n \"place does not support BF16 evaluation\")\nclass TestMKLDNNGeluBf16Dim4(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.uint16\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)\n out = convert_float_to_uint16(gelu(x, False))\n\n self.inputs = {'X': convert_float_to_uint16(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n def test_check_output(self):\n self.check_output_with_place(core.CPUPlace())\n\n def test_check_grad(self):\n pass\n\n\[email protected](not core.supports_bfloat16(),\n \"place does not support BF16 evaluation\")\nclass TestMKLDNNGeluBf16Dim4Approx(TestActivation):\n def setUp(self):\n self.op_type = \"gelu\"\n self.dtype = np.uint16\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)\n out = convert_float_to_uint16(gelu(x, True))\n\n self.inputs = {'X': convert_float_to_uint16(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True, \"approximate\": True}\n\n def test_check_output(self):\n self.check_output_with_place(core.CPUPlace())\n\n def test_check_grad(self):\n pass\n\n\nclass TestMKLDNNTanhDim4(TestTanh):\n def setUp(self):\n super(TestMKLDNNTanhDim4, self).setUp()\n\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(\"float32\")\n }\n self.outputs = {'Out': np.tanh(self.inputs['X'])}\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNSqrtDim4(TestSqrt):\n def setUp(self):\n super(TestMKLDNNSqrtDim4, self).setUp()\n\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(\"float32\")\n }\n self.outputs = {'Out': np.sqrt(self.inputs['X'])}\n self.attrs = {\"use_mkldnn\": True}\n\n\nclass TestMKLDNNAbsDim4(TestAbs):\n def setUp(self):\n super(TestMKLDNNAbsDim4, self).setUp()\n\n x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(\"float32\")\n # The same 
reason with TestAbs\n x[np.abs(x) < 0.005] = 0.02\n self.inputs = {'X': x}\n self.outputs = {'Out': np.abs(self.inputs['X'])}\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSwishDim4(TestSwish):\n def setUp(self):\n super(TestMKLDNNSwishDim4, self).setUp()\n\n x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)\n beta = 2.3\n out = x * expit(beta * x)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True, \"beta\": beta}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\ndef ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):\n return (x * np.minimum(np.maximum(x + offset, 0.), threshold) /\n scale).astype(x.dtype)\n\n\nclass TestMKLDNNHardSwishDim4(TestHardSwish):\n def setUp(self):\n super(TestMKLDNNHardSwishDim4, self).setUp()\n\n x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)\n threshold = 6.0\n scale = 6.0\n offset = 3.0\n x[np.abs(x + offset) < 0.005] = 0.02\n x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02\n\n out = ref_hardswish(x, threshold, scale, offset)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestMKLDNNSigmoidDim4(TestSigmoid):\n def setUp(self):\n super(TestMKLDNNSigmoidDim4, self).setUp()\n\n x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)\n out = 1 / (1 + np.exp(-x))\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n self.attrs = {\"use_mkldnn\": True}\n\n\n# Check if primitives already exist in backward\nclass TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):\n def setUp(self):\n super(TestMKLDNNAbsPrimitivesAlreadyExist, self).setUp()\n\n np.random.seed(123)\n self.op_type = 'abs'\n self.x = np.random.uniform(-1, 1, [2, 2]).astype(np.float32)\n self.out = np.abs(self.x)\n self.out_grad = np.random.random_sample(self.x.shape).astype(np.float32)\n self.x_grad = self.__abs_bwd(self.x, self.out_grad)\n\n # Abs grad calculation\n def __abs_bwd(self, x, out_grad):\n return out_grad * np.sign(x)\n\n def test_check(self):\n check_if_mkldnn_primitives_exist_in_bwd(\n self, self.op_type, self.x, self.out, self.out_grad, self.x_grad)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.sqrt",
"numpy.random.uniform",
"numpy.random.random_sample",
"numpy.sign",
"numpy.random.seed",
"numpy.abs",
"numpy.exp",
"scipy.special.expit",
"numpy.maximum",
"numpy.tanh"
]
] |
Term-inator/Brain-Tumor-Detection | [
"b59715092cca7a17b589b5d906983eb42ee4ad87"
] | [
"run.py"
] | [
"# ====================================================\n# main\n# ====================================================\nimport os\nimport shutil\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import GroupKFold\n\nfrom utils import get_score, seed_torch\nfrom train import train_loop, set_params\nfrom logger import init_logger, close_logger, Logger\n\n\ntarget_cols_map = {\n 'tumor': ['label'],\n 'T1SS': ['label'],\n 'T2SS': ['label'],\n 'T1': ['T1'],\n '2label': ['label', 'T1'],\n 'randT1': ['label', 'T1'],\n 'randTumor': ['label', 'T1']\n}\n\ndata_path_map = {\n 'tumor': 'RealTrain/',\n 'T1SS': 'T1TrainSameSize/',\n 'T2SS': 'T2TrainSameSize/',\n 'T1': 'RealTrain/',\n '2label': 'RealTrain/',\n 'randT1': 'RealTrainRandomT1/',\n 'randTumor': 'RealTrainRandomTumor/'\n}\n\n\nclass Params:\n n_fold = 4\n trn_fold = [0, 1, 2]\n\n debug = False\n train = True\n\n type = None\n target_cols = None\n data_path = None\n output_dir = None\n seed = None\n\n def __init__(self, type, seed, epochs):\n Params.type = type\n output_base_path = '../output/'\n data_base_path = '../input/'\n\n Params.target_cols = target_cols_map[type]\n Params.data_path = data_base_path + data_path_map[type]\n\n Params.target_size = len(Params.target_cols)\n Params.seed = seed\n Params.epochs = epochs\n Params.output_dir = output_base_path + f'{type}_seed{seed}-ep{epochs}/'\n # ====================================================\n # Directory settings\n # ====================================================\n if os.path.exists(Params.output_dir):\n shutil.rmtree(Params.output_dir)\n os.makedirs(Params.output_dir)\n\n\nif Params.debug:\n Params.epochs = 1\n\n\ndef main():\n train = pd.read_csv(Params.data_path + 'data.csv')\n # print(train.head())\n init_logger(Params.output_dir + 'train.log')\n\n seed_torch(seed=Params.seed)\n\n # ====================================================\n # Split Train Test\n # ====================================================\n folds = train.copy()\n if Params.type != 'T1SS' and Params.type != 'T2SS':\n Fold = GroupKFold(n_splits=Params.n_fold)\n groups = folds['filename'].values\n for n, (train_index, val_index) in enumerate(Fold.split(folds, folds[Params.target_cols], groups)):\n folds.loc[val_index, 'fold'] = int(n)\n folds['fold'] = folds['fold'].astype(int)\n # print(folds.groupby('fold').size())\n # print(folds)\n\n tst_idx = folds[folds['fold'] == Params.n_fold - 1].index\n\n test_fold = folds.loc[tst_idx].reset_index(drop=True)\n _test_fold = test_fold.copy(deep=True)\n\n train_folds = folds[folds['fold'].isin([i for i in range(Params.n_fold - 1)])]\n\n # print(train_folds.groupby('fold').size())\n # print(train_folds)\n\n def get_test_result(test_scores):\n Logger().info(f'Scores: {np.round(np.mean(test_scores, axis=0), decimals=4)}')\n\n def get_result(result_df):\n preds = result_df[[f'pred_{c}' for c in Params.target_cols]].values\n labels = result_df[Params.target_cols].values\n score, scores = get_score(labels, preds)\n Logger().info(f'Score: {score:<.4f} Scores: {np.round(scores, decimals=4)}')\n\n set_params(Params)\n all_test_scores = []\n\n if Params.train:\n # train\n oof_df = pd.DataFrame()\n for fold in range(Params.n_fold - 1):\n if fold in Params.trn_fold:\n _oof_df, test_scores = train_loop(train_folds, fold, _test_fold)\n oof_df = pd.concat([oof_df, _oof_df])\n all_test_scores.append(test_scores)\n Logger().info(f\"========== fold: {fold} result ==========\")\n get_result(_oof_df)\n # test result\n Logger().info(f\"\\n========== TEST 
==========\")\n get_test_result(np.array(all_test_scores))\n # CV result\n Logger().info(f\"========== CV ==========\")\n get_result(oof_df)\n # save result\n oof_df.to_csv(Params.output_dir + 'result.csv', index=False)\n\n close_logger()\n\n\nseed_list = [31, 37, 41, 42, 43, 47, 53]\nseeds = [53]\ntype_list = ['tumor', 'T1SS', 'T2SS', 'T1', '2label', 'randT1', 'randTumor']\ntypes = ['randTumor']\n\nif __name__ == '__main__':\n for seed in seeds:\n for type in types:\n for epochs in range(10, 61, 10):\n Params(type, seed, epochs)\n print(f'target_cols: {Params.target_cols}')\n print(f'data_path: {Params.data_path}, output_dir: {Params.output_dir}')\n print(f'seed: {seed}, epochs: {epochs}')\n main()\n\n"
] | [
[
"pandas.read_csv",
"sklearn.model_selection.GroupKFold",
"pandas.DataFrame",
"pandas.concat",
"numpy.array",
"numpy.round",
"numpy.mean"
]
] |
AlexFaernon/refactoring | [
"5aafbbcadd92f3fe1fb3b6a6cfb2fd79d6bef9a4"
] | [
"filter.py"
] | [
"from PIL import Image\r\nimport numpy as np\r\n\r\ncorrect_path = False\r\nCELL_X_OFFSET = 10\r\nCELL_Y_OFFSET = 10\r\nGREY_GRADATION = 50\r\nwhile not correct_path:\r\n img_path = input(\"Enter path to image for filtering: \")\r\n try:\r\n img = Image.open(img_path)\r\n correct_path = True\r\n except:\r\n print(\"Incorrect path/file format, please try again\")\r\nimg_matrix = np.array(img)\r\nlen_x = len(img_matrix)\r\nlen_y = len(img_matrix[1])\r\ncell_x = 0\r\nwhile cell_x < len_x:\r\n cell_y = 0\r\n while cell_y < len_y:\r\n sum_RGB = 0\r\n sum_RGB += np.sum(img_matrix[cell_x:cell_x + CELL_X_OFFSET, cell_y:cell_y + CELL_Y_OFFSET]) // 3\r\n sum_RGB = int(sum_RGB // 100)\r\n grey_matrix = np.zeros((CELL_X_OFFSET, CELL_Y_OFFSET, 3))\r\n grey_matrix[:] = int(sum_RGB // GREY_GRADATION) * GREY_GRADATION\r\n img_matrix[cell_x:cell_x + CELL_X_OFFSET, cell_y:cell_y + CELL_Y_OFFSET] = grey_matrix\r\n cell_y = cell_y + CELL_Y_OFFSET\r\n cell_x = cell_x + CELL_X_OFFSET\r\nres = Image.fromarray(img_matrix)\r\ncorrect_path = False\r\nwhile not correct_path:\r\n res_path = input(\"Enter path to resulting image: \")\r\n try:\r\n res.save(res_path)\r\n correct_path = True\r\n except:\r\n print(\"Incorrect path/file format, please try again\")\r\n"
] | [
[
"numpy.array",
"numpy.sum",
"numpy.zeros"
]
] |
swiftfish/flexitext | [
"9863650b662bef6dd63222f0b9bade2e72f4d762"
] | [
"flexitext/flexitext.py"
] | [
"import matplotlib.pyplot as plt\n\nfrom matplotlib.offsetbox import AnnotationBbox\n\nfrom flexitext.parser import make_texts\nfrom flexitext.textgrid import make_text_grid\n\n\nclass FlexiText:\n \"\"\"Handle storing and drawing of formatted text.\n\n Parameters\n ----------\n\n texts: tuple or list of flexitext.Text instances\n These objects represent the text together with their styles.\n \"\"\"\n\n HORIZONTAL_ALIGNMENT = {\"center\": 0.5, \"left\": 0, \"right\": 1}\n VERTICAL_ALIGNMENT = {\"center\": 0.5, \"top\": 1, \"bottom\": 0}\n\n def __init__(self, *texts):\n self.texts = texts\n\n def plot(\n self,\n x,\n y,\n ha=\"left\",\n va=\"center\",\n ma=\"left\",\n mva=\"baseline\",\n xycoords=\"axes fraction\",\n ax=None,\n ):\n \"\"\"Draw text with multiple formats.\n\n Parameters\n ----------\n x: float\n The horizontal position to place the text. By default, this is in axes fraction\n coordinates.\n y: float\n The vertical position to place the text. By default, this is in axes fraction\n coordinates.\n ha: str\n Horizontal alignment. Must be one of `'center'`, `'right'`, or `'left'`.\n va: str\n Horizontal alignment. Must be one of `'center'`, `'top'`, or `'bottom'`.\n ma: str\n Alignment for multiline texts. The layout of the bounding box of all the lines is\n determined by the `ha` and `va` properties. This property controls the alignment of the\n text lines within that box.\n mva: str\n Vertical alignment for text within multiline texts. Can be one of `\"top\"`, `\"bottom\"`,\n `\"left\"`, `\"right\"`, `\"center\"`, or `\"baseline\"`. Defaults to `\"baseline\"`.\n xycoords: str\n The coordinate system for `x` and `y`. Must be one of `'axes fraction'` or\n `'figure fraction'`.\n ax: matplotlib.axes.Axes\n Matplotlib `Axes`. The default value means the `Axes` is obtained with `plt.gca()`\n\n Returns\n -------\n annotation_box: matplotlib.offsetbox.AnnotationBbox\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n if xycoords == \"axes fraction\":\n parent = ax\n elif xycoords == \"figure fraction\":\n parent = ax.figure\n xycoords = ax.figure.transFigure\n else:\n raise ValueError(\n f\"'xycoords' must be one of 'axes fraction' or 'figure fraction', not {xycoords}\"\n )\n\n offsetbox = self._make_offset_box(ma, mva)\n box_alignment = self._make_box_alignment(ha, va)\n annotation_box = AnnotationBbox(\n offsetbox,\n (x, y),\n xycoords=xycoords,\n frameon=False,\n box_alignment=box_alignment,\n pad=0,\n )\n\n parent.add_artist(annotation_box)\n return annotation_box\n\n def _make_box_alignment(self, ha, va):\n \"\"\"Convert ha and va to a touple of two numbers\"\"\"\n ha = self.HORIZONTAL_ALIGNMENT[ha]\n va = self.VERTICAL_ALIGNMENT[va]\n return (ha, va)\n\n def _make_offset_box(self, mha, mva):\n \"\"\"Create grid with formatted text\"\"\"\n return make_text_grid(self.texts, mha, mva)\n\n\ndef flexitext(\n x,\n y,\n s,\n ha=\"left\",\n va=\"center\",\n ma=\"left\",\n mva=\"baseline\",\n xycoords=\"axes fraction\",\n ax=None,\n):\n \"\"\"Draw text with multiple formats.\n\n Parameters\n ----------\n x: float\n The horizontal position to place the text. By default, this is in axes fraction\n coordinates.\n y: float\n The vertical position to place the text. By default, this is in axes fraction\n coordinates.\n ha: str\n Horizontal alignment. Must be one of `'center'`, `'right'`, or `'left'`.\n va: str\n Horizontal alignment. Must be one of `'center'`, `'top'`, or `'bottom'`.\n ma: str\n Alignment for multiline texts. 
The layout of the bounding box of all the lines is\n determined by the `ha` and `va` properties. This property controls the alignment of the\n text lines within that box.\n mva: str\n Vertical alignment for text within multiline texts. Can be one of `\"top\"`, `\"bottom\"`,\n `\"left\"`, `\"right\"`, `\"center\"`, or `\"baseline\"`. Defaults to `\"baseline\"`.\n xycoords: str\n The coordinate system for `x` and `y`. Must be one of `'axes fraction'` or\n `'figure fraction'`.\n ax: matplotlib.axes.Axes\n Matplotlib `Axes`. The default value means the `Axes` is obtained with `plt.gca()`\n\n Returns\n -------\n annotation_box: matplotlib.offsetbox.AnnotationBbox\n \"\"\"\n return FlexiText(*make_texts(s)).plot(x, y, ha, va, ma, mva, xycoords, ax)\n"
] | [
[
"matplotlib.offsetbox.AnnotationBbox",
"matplotlib.pyplot.gca"
]
] |
mwmoura/Concreto-armado | [
"487d9d513cfa4ba24c765fa8015bf5fc43cd60f1"
] | [
"Concreto/MomxCurv2.py"
] | [
"# Diagrama Momento x curvatura para seções transversais de concreto armado\n#\n# ENTRADA DE DADOS\n# Chamar biblioteca matemática\nimport numpy as np\n#\ndef tensao(esl):\n # Calcula a tensão no aço\n # es = módulo de elasticidade do aço em kN/cm2\n # esl = deformação de entrada\n # fyd = tensão de escoamento de cálculo em kN/cm2\n # tsl = tensão de saída em kN/cm2\n # \n # Trabalhando com deformação positiva\n ess = np.abs(esl)\n eyd = fyd / es\n if ess < eyd:\n tsl = es * ess\n else:\n tsl = fyd\n # Trocando o sinal se necessário\n if esl < 0:\n tsl = -tsl\n return tsl\n\ndef tensaoc (ecl):\n # Calcula a tensão no concreto\n # e0 = deformação do início do patamar de plastificação\n # ecl = deformação de entrada\n # tcd = resistência de cálculo em kN/cm2\n # tcl = tensão de saída em kN/cm2\n # \n ecs = np.abs(ecl)\n e0 = 2 / 1000\n eta = ecs / e0\n if ecs < e0:\n tcl = tcd * (2 * eta - eta ** 2)\n else:\n tcl = tcd\n return tcl\n#\ndef funcao(x):\n #\n # Calcula o valor da função f(x) dada na equaçãoo (6.3.10) do Volume 1 de Curso de Concreto Armado\n # O valor de saída é a variável f\n # \n # Constantes para o cálculo das deformações das camadas de armadura\n xl = eu * di[0] / (eu + 10)\n if x <= xl:\n # A linha neutra está no domínio 2 (C É A CURVATURA)\n c = 0.01 / (di[0] - x)\n else:\n # A linha neutra está nos domínios 3 ou 4\n c = eu / (1000 * x)\n # Resultante de compressão no concreto\n rc = alamb * b * x * tcd\n f = rc\n # Superpondo a contribuição das armaduras\n for i in range (0, n, 1):\n esi = c * (x - di[i])\n tsl = tensao(esi)\n tens[i] = tsl\n f = f + asi[i] * tsl\n # Transformando f em adimensional para testar a convergência\n f = f / (b * di[0] * tcd)\n return f\n#\n#fck=float(input('Resistência característica à compressão do concreto em MPa = '))\nfck = 20\n#fyk=float(input('Tensão de escoamento característica do aço em MPa = '))\nfyk = 500\n#es=float(input('Módulo de elasticidade do aço em GPa = '))\nes = 200\n#gamac=float(input('Coeficientes parciais de segurança para o concreto = '))\ngamac = 1.4\n#gamas=float(input('Coeficientes parciais de segurança para o aço = '))\ngamas = 1.15\n#b =float(input('Largura da seção transversal em cm = '))\nb = 15\n#n =int(input('Número de camadas de armadura = '))\nn = 1\nprint('Inserir dados referentes as camadas de armadura.')\nprint('As camadas são numeradas de baixo para cima e separadas por , .')\nasi = list(range(n))\ndi = list(range(n))\nprint('Dados das camadas de armadura.')\nprint('As camadas são inseridas de baixo para cima.')\nfor i in range (0, n, 1):\n print('Altura útil da camada',(i+1),'.')\n #di[i] = float(input('Valor: '))\n di[i] = 36\nfor i in range (0, n, 1):\n print('Área de aço da camada',(i+1),'.')\n #asi[i] = float(input('Valor: '))\n asi[i] = 2\ndi = np.asarray(di)\nasi = np.asarray(asi)\n#print (di[0])\n#print (asi[0])\n#\n# FIM DA ENTRADA DE DADOS\n#\n# INÍCIO DOS CÁLCULOS\n# \n# Parâmetros do diagrama retangular (PODE SAIR)\n'''if fck <= 50:\n alamb = 0.8\n alfac = 0.85\n eu = 3.5\nelse:\n alamb = 0.8 - (fck - 50) / 400\n alfac = 0.85 * (1 - (fck - 50) / 200)\n eu = 2.6 + 35 * ((90 - fck) / 100) ** 4'''\neu = 3.5\nalfac = 0.85\n#\n# Conversão de unidades: transformando para kN e cm\nfck = fck / 10\nfyk = fyk / 10\nes = 100 * es\n#\n# Resistências de cálculo\nfcd = fck / gamac\ntcd = alfac * fcd\nfyd = fyk / gamas\n#\n# Cálculo do momento de ruína através do processo iterativo da bissecante\n#\n# Valor inicial para a linha neutra\nxi = 0\ntens = list(range(n))\ntens = np.asarray(tens)\ntsl = 0.\nf = 
0.\n# Chamar sub-rotina\nf = funcao(xi)\nfi = f\n# Valor final para a linha neutra\nxf = di[0]\n# Chamar sub-rotina\nf = funcao(xf)\nff = f\n# Processo iterativo da bissecante\nfk = 1\nwhile np.abs(fk) > 0.001:\n xk = (xi * ff - xf * fi) / (ff - fi)\n f = funcao(xk)\n fk = f\n prod = fk * fi\n if prod > 0:\n xi = xk\n fi = fk\n else:\n xf = xk\n ff = fk\n# Convergência alcançada\n# xk é a raiz da função f(x) dada na equação (6.3.10) do Volume 1 de Curso de Concreto Armado\n# Momento de ruina de cálculo\nx = xk\nrc = alamb * b * x * tcd\nzc = di[0] - 0.5 * alamb * x\namu = rc * zc\nfor i in range (0, n, 1):\n amu = amu + asi[i] * tens[i] * (di[0] - di[i])\n# Passando o momento para kN.m\namu = amu / 100\n# Convertendo a saída para duas casas decimais\namu = round(amu, 3)\nprint('O momento resistente é de', amu, 'kN.m.')"
] | [
[
"numpy.abs",
"numpy.asarray"
]
] |
vhn0912/python-snippets | [
"80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038"
] | [
"notebook/numpy_swap_select.py"
] | [
"import numpy as np\n\na = np.arange(10, 35).reshape(5, 5)\nprint(a)\n# [[10 11 12 13 14]\n# [15 16 17 18 19]\n# [20 21 22 23 24]\n# [25 26 27 28 29]\n# [30 31 32 33 34]]\n\ncol_swap = a[:, [3, 2, 4, 0, 1]]\nprint(col_swap)\n# [[13 12 14 10 11]\n# [18 17 19 15 16]\n# [23 22 24 20 21]\n# [28 27 29 25 26]\n# [33 32 34 30 31]]\n\ncol_inverse = a[:, ::-1]\nprint(col_inverse)\n# [[14 13 12 11 10]\n# [19 18 17 16 15]\n# [24 23 22 21 20]\n# [29 28 27 26 25]\n# [34 33 32 31 30]]\n\ncol_select = a[:, [2, 4, 0]]\nprint(col_select)\n# [[12 14 10]\n# [17 19 15]\n# [22 24 20]\n# [27 29 25]\n# [32 34 30]]\n\ncol_select2 = a[:, [2, 2, 2]]\nprint(col_select2)\n# [[12 12 12]\n# [17 17 17]\n# [22 22 22]\n# [27 27 27]\n# [32 32 32]]\n\nrow_swap = a[[3, 2, 4, 0, 1], :]\nprint(row_swap)\n# [[25 26 27 28 29]\n# [20 21 22 23 24]\n# [30 31 32 33 34]\n# [10 11 12 13 14]\n# [15 16 17 18 19]]\n\nrow_swap = a[[3, 2, 4, 0, 1]]\nprint(row_swap)\n# [[25 26 27 28 29]\n# [20 21 22 23 24]\n# [30 31 32 33 34]\n# [10 11 12 13 14]\n# [15 16 17 18 19]]\n\nrow_inverse = a[::-1]\nprint(row_inverse)\n# [[30 31 32 33 34]\n# [25 26 27 28 29]\n# [20 21 22 23 24]\n# [15 16 17 18 19]\n# [10 11 12 13 14]]\n\nrow_select = a[[2, 4, 0]]\nprint(row_select)\n# [[20 21 22 23 24]\n# [30 31 32 33 34]\n# [10 11 12 13 14]]\n\nrow_select2 = a[[2, 2, 2]]\nprint(row_select2)\n# [[20 21 22 23 24]\n# [20 21 22 23 24]\n# [20 21 22 23 24]]\n"
] | [
[
"numpy.arange"
]
] |
BaconBoi95/tensorflow | [
"484e8acedceebec8d7ea5fb008d4c367041c9cff"
] | [
"tensorflow/python/debug/lib/check_numerics_callback_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.debug.lib import check_numerics_callback\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras import optimizer_v2\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import gradient_checker_v2\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\n\n\nclass LimitStringLengthTest(test_util.TensorFlowTestCase):\n\n def testLimitStringLengthWithExplicitLimit(self):\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"\", max_len=2), \"\")\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"e\", max_len=2), \"e\")\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"de\", max_len=2), \"de\")\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"abcde\", max_len=2),\n \"...de\")\n\n def testLimitStringLengthWithNoLimit(self):\n self.assertEqual(check_numerics_callback.limit_string_length(\n \"A\" * 100 + \"B\", max_len=None), \"A\" * 100 + \"B\")\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"\", max_len=None), \"\")\n\n def testLimitStringLengthWithDefaultLimit(self):\n self.assertEqual(\n check_numerics_callback.limit_string_length(\"A\" * 50 + \"B\"),\n \"...\" + \"A\" * 49 + \"B\")\n\n\nclass CheckNumericsCallbackTest(test_util.TensorFlowTestCase):\n\n def _assertRaisesInvalidArgumentErrorAndGetMessage(self, func):\n caught = None\n try:\n func()\n except errors.InvalidArgumentError as error:\n caught = error\n self.assertTrue(caught, \"Failed to catch expected InvalidArgumentError\")\n return caught.message\n\n def testCatchEagerOpFloat32Inf(self):\n \"\"\"Test catching Infinity in eager op execution: float32.\"\"\"\n with check_numerics_callback.check_numerics():\n x = constant_op.constant([2.0, 3.0])\n y = constant_op.constant([1.0, 0.0])\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: x / y)\n # Check the content of the error message.\n self.assertTrue(re.search(r\"eagerly-executing op.*\\\"RealDiv\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertIn(\"shape: (2,)\\n\", message)\n self.assertIn(\"# of +Inf elements: 1\\n\", message)\n 
self.assertIn(\"0: %s\" % x, message)\n self.assertIn(\"1: %s\" % y, message)\n\n def testCatchEagerOpFloat16NaN(self):\n \"\"\"Test catching Infinity in eager op execution: float16.\"\"\"\n with check_numerics_callback.check_numerics():\n def log1p(x):\n y = 1.0 + x\n return math_ops.log(y)\n x = constant_op.constant([[-1.0]], dtype=dtypes.float16)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: log1p(x))\n # Check the content of the error message.\n self.assertTrue(re.search(r\"eagerly-executing op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float16\", message))\n self.assertIn(\"shape: (1, 1)\\n\", message)\n self.assertIn(\"# of -Inf elements: 1\\n\", message)\n self.assertTrue(re.search(r\"Input tensor.*0\\.\", message))\n\n def testNoCatchEagerOpExecution(self):\n \"\"\"Test running multiple steps of eager execution without Inf/NaN.\"\"\"\n with check_numerics_callback.check_numerics():\n x = constant_op.constant([2.0, 3.0])\n y = constant_op.constant([1.0, 0.0])\n self.assertAllClose((x + y) * (x - y), [3.0, 9.0])\n\n def testCatchFunctionOpInfFloat64(self):\n \"\"\"Test catching infinites generated in a FuncGraph.\"\"\"\n with check_numerics_callback.check_numerics():\n @def_function.function\n def divide_sum_with_diff(x, y):\n w1 = x + y\n w2 = x - y\n u = w1 / w2\n return u * 2.0\n x = constant_op.constant(2.0, dtype=dtypes.float64)\n y = constant_op.constant(2.0, dtype=dtypes.float64)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: divide_sum_with_diff(x, y))\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"RealDiv\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float64\", message))\n self.assertIn(\"shape: ()\\n\", message)\n self.assertIn(\"Input tensors (2):\", message)\n # Check that the correct input ops are printed.\n self.assertTrue(re.search(r\"0:.*Tensor.*add:0\", message))\n self.assertTrue(re.search(r\"1:.*Tensor.*sub:0\", message))\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"u = w1 / w2\", message)\n\n def testControlFlowGraphWithNaNBFloat16(self):\n \"\"\"Test catching bfloat16 NaNs in a control-flow-v2 FuncGraph.\"\"\"\n @def_function.function\n def my_conditional(x):\n with check_numerics_callback.check_numerics():\n if math_ops.less(math_ops.reduce_sum(x), 0.0):\n return math_ops.log(x)\n else:\n return math_ops.log(-x)\n x = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.bfloat16)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: my_conditional(x))\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*bfloat16\", message))\n self.assertIn(\"shape: (3,)\\n\", message)\n # Check that the correct input op is printed.\n self.assertTrue(re.search(r\"Input tensor.*Tensor.*Neg\", message))\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"return math_ops.log(-x)\", message)\n self.assertTrue(message.endswith(\"\\n\"))\n\n def testOverflowInTfFunction(self):\n \"\"\"Test catching Infinity caused by overflow in a tf.function with while.\"\"\"\n with check_numerics_callback.check_numerics():\n\n @def_function.function\n def accumulation_function(counter, lim, accum):\n while math_ops.less(counter, lim):\n accum.assign(accum * 
2.0)\n counter.assign_add(1)\n\n counter = variables.Variable(0, dtype=dtypes.int32)\n # Repeated `* 2.0` overflows a float32 tensor in 128 steps. So the\n # 1000-step limit is sufficient.\n lim = constant_op.constant(1000, dtype=dtypes.int32)\n accum = variables.Variable(1.0)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: accumulation_function(counter, lim, accum))\n\n self.assertAllClose(counter.numpy(), 128)\n # Check the content of the error message.\n # The overflow to +Infinity happens during the `* 2.0` operation.\n self.assertTrue(re.search(r\"graph op.*\\\"Mul\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertIn(\"shape: ()\\n\", message)\n # Check that the correct input op is printed.\n self.assertIn(\"Input tensors (2):\", message)\n # Check that the correct input ops are printed.\n self.assertTrue(re.search(r\"0:.*Tensor.*ReadVariableOp:0\", message))\n self.assertTrue(re.search(r\"1:.*Tensor.*mul/y:0\", message))\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"accum.assign(accum * 2.0)\", message)\n\n def testKerasModelHealthyPredictAndFitCalls(self):\n \"\"\"Test a simple healthy keras model runs fine under the callback.\"\"\"\n with check_numerics_callback.check_numerics():\n model = models.Sequential()\n model.add(layers.Dense(\n units=100,\n input_shape=(5,),\n activation=\"relu\",\n kernel_initializer=\"ones\"))\n model.add(layers.BatchNormalization())\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(\n units=1,\n activation=\"linear\",\n kernel_initializer=\"ones\"))\n\n model.compile(\n loss=\"mse\", optimizer=optimizer_v2.gradient_descent.SGD(1e-3))\n\n batch_size = 16\n xs = array_ops.zeros([batch_size, 5])\n ys = array_ops.ones([batch_size, 1])\n\n outputs = model.predict(xs)\n self.assertEqual(outputs.shape, (batch_size, 1))\n\n epochs = 100\n history = model.fit(xs, ys, epochs=epochs, verbose=0)\n self.assertEqual(len(history.history[\"loss\"]), epochs)\n\n def testKerasModelUnhealthyPredictAndFitCallsWithLargeLearningRate(self):\n \"\"\"Test keras model training crashes with Infinity is caught by callback.\"\"\"\n with check_numerics_callback.check_numerics():\n model = models.Sequential()\n # Use weight initializers for deterministic behavior during test.\n model.add(layers.Dense(\n units=100,\n input_shape=(5,),\n activation=\"relu\",\n kernel_initializer=\"ones\"))\n model.add(layers.Dense(\n units=1,\n activation=\"linear\",\n kernel_initializer=\"ones\"))\n\n lr = 1e3 # Intentionally huge learning rate.\n model.compile(loss=\"mse\", optimizer=optimizer_v2.gradient_descent.SGD(lr))\n\n batch_size = 16\n xs = array_ops.zeros([batch_size, 5])\n ys = array_ops.ones([batch_size, 1])\n\n outputs = model.predict(xs)\n self.assertEqual(outputs.shape, (batch_size, 1))\n\n epochs = 100\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: model.fit(xs, ys, epochs=epochs, verbose=0))\n\n # Check the content of the error message.\n # Let's not hardcode the op name for future-proof.\n self.assertTrue(re.search(r\"graph op.*\\\".*\\\"\", message))\n self.assertTrue(re.search(r\"dtype:.*float32\", message))\n self.assertTrue(re.search(r\"shape:.*\\(.*\\)\", message))\n # Check that the correct input op is printed.\n self.assertTrue(re.search(r\"Input tensor.*\", message))\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of 
op's creation\", message))\n self.assertIn(\"lambda: model.fit(xs, ys,\", message)\n\n def testInfInCustomKerasLayerWithTfFunctionPredictCall(self):\n \"\"\"Test catching Infinity in a custom layer, w/ tf.function.\"\"\"\n\n with check_numerics_callback.check_numerics():\n class DivByXLayer(layers.Layer):\n\n @def_function.function\n def call(self, x):\n \"\"\"The computation performed by the for-test custom layer.\n\n Generates Infinity by intention.\n\n Args:\n x: Input tensor of scalar shape.\n\n Returns:\n A scalar tensor.\n \"\"\"\n one_over_x = 1.0 / x\n return one_over_x\n\n model = models.Sequential()\n model.add(DivByXLayer(input_shape=[5]))\n\n # TODO(b/140245224): Currently the model must be compiled prior to\n # predict() being called(). Or keras will fall back to V1 behavior.\n # Remove this after the bug is fixed.\n model.compile(loss=\"mse\", optimizer=\"sgd\")\n\n xs = array_ops.ones([1, 5])\n # Calling the model with non-zero inputs should be fine.\n self.assertAllClose(model.predict(xs), [[1.0, 1.0, 1.0, 1.0, 1.0]])\n\n xs = array_ops.zeros([1, 5])\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: model.predict(xs))\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"RealDiv\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertTrue(re.search(r\"shape: \\(.*, 5\\)\", message))\n # # Check that the correct input op is printed.\n self.assertIn(\"Input tensors (2):\", message)\n # # # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"one_over_x = 1.0 / x\", message)\n\n def testInfInCustomKerasLayerWithoutTfFuntionPredictCall(self):\n \"\"\"Test catching Infinity in a custom layer, w/o tf.function.\"\"\"\n\n with check_numerics_callback.check_numerics():\n class DivByXLayer(layers.Layer):\n\n # Not using the tf.function decorator here.\n def call(self, x):\n \"\"\"The computation performed by the for-test custom layer.\n\n Generates Infinity by intention.\n\n Args:\n x: Input tensor of scalar shape.\n\n Returns:\n A scalar tensor.\n \"\"\"\n one_over_x = 1.0 / x\n return one_over_x\n\n model = models.Sequential()\n model.add(DivByXLayer(input_shape=[5]))\n\n # TODO(b/140245224): Currently the model must be compiled prior to\n # predict() being called(). 
Or keras will fall back to V1 behavior.\n # Remove this after the bug is fixed.\n model.compile(loss=\"mse\", optimizer=\"sgd\")\n\n xs = array_ops.ones([1, 5])\n # Calling the model with non-zero inputs should be fine.\n self.assertAllClose(model.predict(xs), [[1.0, 1.0, 1.0, 1.0, 1.0]])\n\n xs = array_ops.zeros([1, 5])\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: model.predict(xs))\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"RealDiv\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertTrue(re.search(r\"shape: \\(.*, 5\\)\", message))\n # Check that the correct input op is printed.\n self.assertIn(\"Input tensors (2):\", message)\n # Check that the correct line for op creation is printed.\n self.assertTrue(re.search(r\"Stack trace of op's creation\", message))\n self.assertIn(\"one_over_x = 1.0 / x\", message)\n\n def testCatchInfinityInDatasetMapFunction(self):\n \"\"\"Test that callback catches NaN in a tf.dataset map function.\"\"\"\n with check_numerics_callback.check_numerics():\n\n def generate_nan(x):\n \"\"\"Intetionally generates NaNs by taking log of negative number.\"\"\"\n casted_x = math_ops.cast(x, dtypes.float32)\n return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x\n\n dataset = dataset_ops.Dataset.range(10).map(generate_nan)\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n iterator.next)\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertIn(\"shape: (2, 2)\\n\", message)\n self.assertTrue(re.search(r\"Input tensor.*Tensor.*Log/x:0\", message))\n self.assertIn(\n \"-> | return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x\",\n message)\n\n def testCustomGradietWithNaNWithTfFunction(self):\n \"\"\"Test that callback catches NaN in a gradient function during backprop.\"\"\"\n with check_numerics_callback.check_numerics():\n @custom_gradient.custom_gradient\n def func_with_bad_grad(x):\n output = math_ops.sin(x)\n @def_function.function\n def grad(dy):\n # `dy` will come in as 1.0. Taking log of -1.0 leads to NaN.\n return math_ops.log(-dy)\n return output, grad\n\n x = constant_op.constant(-2.0, dtype=dtypes.float16)\n def f(x):\n return func_with_bad_grad(x)\n\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: gradient_checker_v2.compute_gradient(f, [x]))\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float16\", message))\n self.assertIn(\"shape: ()\\n\", message)\n self.assertTrue(re.search(r\"Input tensor.*Tensor.*Neg:0\", message))\n self.assertIn(\"-> | return math_ops.log(-dy)\", message)\n\n # TODO(cais): Tests for Infs and NaNs during distributed execution.\n # TODO(cais): Benchmark the slowdown due to callbacks and inserted nodes.\n\n\nif __name__ == \"__main__\":\n ops.enable_eager_execution()\n googletest.main()\n"
] | [
[
"tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.python.debug.lib.check_numerics_callback.limit_string_length",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.keras.models.Sequential",
"tensorflow.python.ops.gradient_checker_v2.compute_gradient",
"tensorflow.python.debug.lib.check_numerics_callback.check_numerics",
"tensorflow.python.keras.optimizer_v2.gradient_descent.SGD",
"tensorflow.python.framework.ops.enable_eager_execution",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.sin",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.keras.layers.BatchNormalization"
]
] |
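The file in the row above exercises TensorFlow's check_numerics_callback through internal test helpers. As an illustrative aside (not part of the extracted file), the same behaviour is reachable through the public tf.debugging.enable_check_numerics API; this sketch assumes a TF 2.x eager environment and uses made-up tensor values.

# Minimal sketch of the public API behind check_numerics_callback (assumed: TF 2.x, eager mode).
import tensorflow as tf

tf.debugging.enable_check_numerics()
try:
    # log of a negative number yields NaN, which the numerics check turns
    # into an InvalidArgumentError carrying an origin stack trace.
    tf.math.log(tf.constant([-1.0, 1.0]))
except tf.errors.InvalidArgumentError as err:
    print("caught:", type(err).__name__)
finally:
    tf.debugging.disable_check_numerics()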
AdityaSidharta/kaggle_humpback_new_whale | [
"779d60746f8eba99d0336836200150fa7a08388e"
] | [
"model/dataset.py"
] | [
"import os\n\nimport torch\nfrom skimage import io\nfrom skimage.color import gray2rgb\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import Compose, Resize, RandomHorizontalFlip, \\\n RandomVerticalFlip, RandomAffine, Normalize, ToTensor, ToPILImage, Grayscale\n\ntrain_transform = Compose([\n ToPILImage(),\n Resize((224, 224)),\n Grayscale(3),\n RandomHorizontalFlip(),\n RandomVerticalFlip(),\n RandomAffine(degrees=30),\n ToTensor(),\n Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n])\n\ntest_transform = Compose([\n ToPILImage(),\n Resize((224, 224)),\n Grayscale(3),\n ToTensor(),\n Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n])\n\n\nclass TrainDataset(Dataset):\n def __init__(self, image_label, ohe_label, train_path, train_tsfm, device):\n self.image_label = image_label\n self.ohe_label = ohe_label\n self.train_path = train_path\n self.train_tsfm = train_tsfm\n self.device = device\n\n def __len__(self):\n return len(self.image_label)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.train_path, self.image_label[idx])\n img_array = io.imread(img_path)\n if len(img_array.shape) == 2:\n img_array = gray2rgb(img_array)\n assert img_array.shape[2] == 3\n img_tensor = self.train_tsfm(img_array)\n img_tensor = img_tensor.type(torch.float).to(self.device)\n label = self.ohe_label[idx, :]\n label_tensor = torch.Tensor(label)\n label_tensor = label_tensor.type(torch.float).to(self.device)\n return img_tensor, label_tensor\n\n\nclass TestDataset(Dataset):\n def __init__(self, image_label, test_path, test_tsfm, device):\n self.image_label = image_label\n self.test_path = test_path\n self.test_tsfm = test_tsfm\n self.device = device\n\n def __len__(self):\n return len(self.image_label)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.test_path, self.image_label[idx])\n img_array = io.imread(img_path)\n if len(img_array.shape) == 2:\n img_array = gray2rgb(img_array)\n assert img_array.shape[2] == 3\n img_tensor = self.test_tsfm(img_array)\n img_tensor = img_tensor.type(torch.float).to(self.device)\n return img_tensor\n"
] | [
[
"torch.Tensor"
]
] |
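TrainDataset/TestDataset in the row above lean on a torchvision Compose pipeline. The sketch below is an aside (assuming torchvision is installed) that runs an equivalent test-time pipeline on a random uint8 array standing in for io.imread output, just to confirm the resulting tensor shape.

# Standalone shape check for the torchvision preprocessing used above.
import numpy as np
import torch
from torchvision.transforms import Compose, Grayscale, Normalize, Resize, ToPILImage, ToTensor

tsfm = Compose([
    ToPILImage(),
    Resize((224, 224)),
    Grayscale(3),                      # 3-channel grayscale, as in the dataset file
    ToTensor(),
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# A random HxWxC uint8 array stands in for a real whale image.
fake_img = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
out = tsfm(fake_img)
assert out.shape == torch.Size([3, 224, 224])
print(out.shape, out.dtype)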
NVlabs/UMR | [
"d858c4ddd56bdac6e3342609f9c02618c279b990"
] | [
"utils/geometry.py"
] | [
"# -----------------------------------------------------------\n# Code adapted from: https://github.com/akanazawa/cmr/blob/master/utils/geometry.py\n# \n# MIT License\n# \n# Copyright (c) 2018 akanazawa\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# -----------------------------------------------------------\n\n# Geometry stuff.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\ndef triangle_direction_intersection(tri, trg):\n '''\n Finds where an origin-centered ray going in direction trg intersects a triangle.\n Args:\n tri: 3 X 3 vertex locations. tri[0, :] is 0th vertex.\n Returns:\n alpha, beta, gamma\n '''\n p0 = np.copy(tri[0, :])\n # Don't normalize\n d1 = np.copy(tri[1, :]) - p0;\n d2 = np.copy(tri[2, :]) - p0;\n d = trg / np.linalg.norm(trg)\n\n mat = np.stack([d1, d2, d], axis=1)\n\n try:\n inv_mat = np.linalg.inv(mat)\n except np.linalg.LinAlgError:\n return False, 0\n\n a_b_mg = -1*np.matmul(inv_mat, p0)\n is_valid = (a_b_mg[0] >= 0) and (a_b_mg[1] >= 0) and ((a_b_mg[0] + a_b_mg[1]) <= 1) and (a_b_mg[2] < 0)\n if is_valid:\n return True, -a_b_mg[2]*d\n else:\n return False, 0\n\n\ndef project_verts_on_mesh(verts, mesh_verts, mesh_faces):\n verts_out = np.copy(verts)\n for nv in range(verts.shape[0]):\n max_norm = 0\n vert = np.copy(verts_out[nv, :])\n for f in range(mesh_faces.shape[0]):\n face = mesh_faces[f]\n tri = mesh_verts[face, :]\n # is_v=True if it does intersect and returns the point\n is_v, pt = triangle_direction_intersection(tri, vert)\n # Take the furthest away intersection point\n if is_v and np.linalg.norm(pt) > max_norm:\n max_norm = np.linalg.norm(pt)\n verts_out[nv, :] = pt\n\n return verts_out\n"
] | [
[
"numpy.matmul",
"numpy.linalg.inv",
"numpy.copy",
"numpy.stack",
"numpy.linalg.norm"
]
] |
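triangle_direction_intersection in the row above reduces the origin-ray/triangle test to a 3x3 linear solve. The numbers in this standalone numpy check are made up, but they verify the same parameterisation p0 + a*d1 + b*d2 = t*d on a concrete triangle.

# Numeric check of the ray/triangle solve used in triangle_direction_intersection above.
import numpy as np

tri = np.array([[1.0, 0.0, 0.0],   # p0
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0]])  # triangle on the plane x + y + z = 1
trg = np.array([1.0, 1.0, 1.0])    # ray direction from the origin

p0 = tri[0]
d1, d2 = tri[1] - p0, tri[2] - p0
d = trg / np.linalg.norm(trg)

# p0 + a*d1 + b*d2 = t*d  <=>  [d1 d2 d] @ [a, b, -t] = -p0
a, b, neg_t = np.linalg.solve(np.stack([d1, d2, d], axis=1), -p0)
hit = -neg_t * d

print(a, b, hit)                                        # a = b = 1/3, hit = (1/3, 1/3, 1/3)
assert np.allclose(hit, [1 / 3, 1 / 3, 1 / 3])
assert a >= 0 and b >= 0 and a + b <= 1 and neg_t < 0   # same validity test as the file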
r-vu/barc | [
"7f90d4f8689df7410f0bf4be8843cfe216da1c9f"
] | [
"workspace/src/labs/src/lab2/plot.py"
] | [
"import rosbag\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport matplotlib.patches as patches\n\nbag = rosbag.Bag(os.path.expanduser(\"/d/Documents/classes/me131/_2020-02-10-20-44-36.bag\"))\n\n\ntopics = bag.get_type_and_topic_info()[1].keys()\ntypes = []\nfor i in range(0,len(bag.get_type_and_topic_info()[1].values())):\n types.append(bag.get_type_and_topic_info()[1].values()[i][0])\n if bag.get_type_and_topic_info()[1].values()[i][0] == 'barc/ECU':\n dimEcu = bag.get_type_and_topic_info()[1].values()[i][1]\n if bag.get_type_and_topic_info()[1].values()[i][0] == 'labs/Z_DynBkMdl':\n dimxy = bag.get_type_and_topic_info()[1].values()[i][1]\n\n\nx_raw = np.zeros((dimxy, 1))\nv_raw = np.zeros((dimxy, 1))\nv_des = 8*np.ones((dimxy,1))\n\ncounter = 0\nfor counter, (topic, msg, t) in enumerate( bag.read_messages(topics=['/z_vhcl']) ) : \n x_raw[counter] = msg.x\n v_raw[counter] = msg.v_x\n\nplt.figure(1)\nplt.plot(x_raw, v_raw, label = 'Actual Velocity')\nplt.plot(x_raw, v_des, label = 'Desired Velocity')\nplt.ylabel('Velocity [m/s]')\nplt.ylim((0,12))\nplt.xlabel('Longitudinal position [m]')\nplt.title('Longitudinal Velocity Tracking')\nplt.legend()\nplt.show()\n\nbag.close()\n"
] | [
[
"numpy.ones",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
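The plotting half of the rosbag script above is easy to exercise without ROS. This sketch substitutes synthetic arrays for the bag messages (rosbag itself needs a ROS install); the velocity profile is made up.

# Plot-only sketch of the velocity-tracking figure above, with synthetic data.
import numpy as np
import matplotlib.pyplot as plt

x_raw = np.linspace(0.0, 50.0, 200)             # longitudinal position [m]
v_raw = 8.0 * (1.0 - np.exp(-x_raw / 10.0))     # made-up measured velocity
v_des = np.full_like(x_raw, 8.0)                # desired velocity

plt.plot(x_raw, v_raw, label='Actual Velocity')
plt.plot(x_raw, v_des, label='Desired Velocity')
plt.xlabel('Longitudinal position [m]')
plt.ylabel('Velocity [m/s]')
plt.ylim((0, 12))
plt.title('Longitudinal Velocity Tracking')
plt.legend()
plt.show()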
jwbrun/ASGDSimulator | [
"ecab2cc83986f08b21bc85151cece85a08fcce82"
] | [
"convNet/trainer.py"
] | [
"import time\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nfrom torch.nn.utils import clip_grad_norm_\nfrom utils.meters import AverageMeter, accuracy\n\n\ndef _flatten_duplicates(inputs, target, batch_first=True):\n if batch_first:\n target = target.view(-1, 1).expand(-1, inputs.size(1))\n else:\n target = target.view(1, -1).expand(inputs.size(0), -1)\n inputs = inputs.flatten(0, 1)\n target = target.flatten(0, 1)\n return inputs, target\n\n\nclass Trainer(object):\n\n def __init__(self, model, criterion, optimizer=None,\n device_ids=[0], device=torch.cuda, dtype=torch.float,\n distributed=False, local_rank=-1, adapt_grad_norm=None,\n grad_clip=-1, print_freq=100):\n self._model = model\n self.criterion = criterion\n self.epoch = 0\n self.training_steps = 0\n self.optimizer = optimizer\n self.device = device\n self.dtype = dtype\n self.local_rank = local_rank\n self.print_freq = print_freq\n self.grad_clip = grad_clip\n self.grad_scale = None\n self.adapt_grad_norm = adapt_grad_norm\n\n if distributed:\n self.model = nn.parallel.DistributedDataParallel(model,\n device_ids=device_ids,\n output_device=device_ids[0])\n elif device_ids and len(device_ids) > 1:\n self.model = nn.DataParallel(model, device_ids)\n else:\n self.model = model\n\n def _grad_norm(self, inputs_batch, target_batch, chunk_batch=1):\n self.model.zero_grad()\n for inputs, target in zip(inputs_batch.chunk(chunk_batch, dim=0),\n target_batch.chunk(chunk_batch, dim=0)):\n target = target.to(self.device)\n inputs = inputs.to(self.device, dtype=self.dtype)\n\n # compute output\n output = self.model(inputs)\n loss = self.criterion(output, target)\n\n if chunk_batch > 1:\n loss = loss / chunk_batch\n\n loss.backward() # accumulate gradient\n grad = clip_grad_norm_(self.model.parameters(), float('inf'))\n return grad\n\n def _step(self, inputs_batch, target_batch, training=False, chunk_batch=1):\n outputs = []\n total_loss = 0\n\n if training:\n self.optimizer.zero_grad()\n self.optimizer.update(self.epoch, self.training_steps)\n\n for inputs, target in zip(inputs_batch.chunk(chunk_batch, dim=0),\n target_batch.chunk(chunk_batch, dim=0)):\n target = target.to(self.device)\n inputs = inputs.to(self.device, dtype=self.dtype)\n if training:\n self.optimizer.pre_forward()\n\n # compute output\n output = self.model(inputs)\n loss = self.criterion(output, target)\n grad = None\n\n if chunk_batch > 1:\n loss = loss / chunk_batch\n\n if isinstance(output, list) or isinstance(output, tuple):\n output = output[0]\n\n outputs.append(output.detach())\n\n if training:\n self.optimizer.pre_backward()\n if self.grad_scale is not None:\n loss = loss * self.grad_scale\n loss.backward() # accumulate gradient\n\n total_loss += float(loss)\n\n if training: # post gradient accumulation\n if self.grad_clip > 0:\n grad = clip_grad_norm_(self.model.parameters(), self.grad_clip)\n self.optimizer.step() # SGD step\n self.training_steps += 1\n\n outputs = torch.cat(outputs, dim=0)\n return outputs, total_loss, grad\n\n def forward(self, data_loader, num_steps=None, training=False, duplicates=1, chunk_batch=1):\n meters = {name: AverageMeter()\n for name in ['step', 'data', 'loss', 'prec1', 'prec5']}\n if training and self.grad_clip > 0:\n meters['grad'] = AverageMeter()\n\n def meter_results(meters):\n results = {name: meter.avg for name, meter in meters.items()}\n results['error1'] = 100. - results['prec1']\n results['error5'] = 100. 
- results['prec5']\n return results\n\n end = time.time()\n\n for i, (inputs, target) in enumerate(data_loader):\n if training and duplicates > 1 and self.adapt_grad_norm is not None \\\n and i % self.adapt_grad_norm == 0:\n grad_mean = 0\n num = inputs.size(1)\n for j in range(num):\n grad_mean += float(self._grad_norm(inputs.select(1, j), target))\n grad_mean /= num\n grad_all = float(self._grad_norm(\n *_flatten_duplicates(inputs, target)))\n self.grad_scale = grad_mean / grad_all\n logging.info('New loss scale: %s', self.grad_scale)\n\n # measure data loading time\n meters['data'].update(time.time() - end)\n if duplicates > 1: # multiple versions for each sample (dim 1)\n inputs, target = _flatten_duplicates(inputs, target)\n\n output, loss, grad = self._step(inputs, target,\n training=training,\n chunk_batch=chunk_batch)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output, target, topk=(1, 5))\n meters['loss'].update(float(loss), inputs.size(0))\n meters['prec1'].update(float(prec1), inputs.size(0))\n meters['prec5'].update(float(prec5), inputs.size(0))\n if grad is not None:\n meters['grad'].update(float(grad), inputs.size(0))\n\n # measure elapsed time\n meters['step'].update(time.time() - end)\n end = time.time()\n\n if i % self.print_freq == 0:\n report = str('{phase} - Epoch: [{0}][{1}/{2}]\\t'\n 'Time {meters[step].val:.3f} ({meters[step].avg:.3f})\\t'\n 'Data {meters[data].val:.3f} ({meters[data].avg:.3f})\\t'\n 'Loss {meters[loss].val:.4f} ({meters[loss].avg:.4f})\\t'\n 'Prec@1 {meters[prec1].val:.3f} ({meters[prec1].avg:.3f})\\t'\n 'Prec@5 {meters[prec5].val:.3f} ({meters[prec5].avg:.3f})\\t'\n .format(\n self.epoch, i, len(data_loader),\n phase='TRAINING' if training else 'EVALUATING',\n meters=meters))\n if 'grad' in meters.keys():\n report += 'Grad {meters[grad].val:.3f} ({meters[grad].avg:.3f})'\\\n .format(meters=meters)\n logging.info(report)\n\n if num_steps is not None and i >= num_steps:\n break\n\n return meter_results(meters)\n\n def train(self, data_loader, duplicates=1, chunk_batch=1):\n # switch to train mode\n self.model.train()\n\n return self.forward(data_loader, duplicates=duplicates, training=True, chunk_batch=chunk_batch)\n\n def validate(self, data_loader, duplicates=1):\n # switch to evaluate mode\n self.model.eval()\n with torch.no_grad():\n return self.forward(data_loader, duplicates=duplicates, training=False)\n"
] | [
[
"torch.nn.DataParallel",
"torch.nn.parallel.DistributedDataParallel",
"torch.no_grad",
"torch.cat"
]
] |
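The _flatten_duplicates helper in the Trainer above merges the "duplicates" dimension into the batch so each augmented copy gets its own label. This aside (assuming torch is installed) reproduces the batch_first branch on dummy tensors to show the resulting shapes.

# Shape check for the duplicate-flattening trick used in the Trainer above.
import torch


def flatten_duplicates(inputs, target):
    # Same idea as _flatten_duplicates(batch_first=True) in the dataset row.
    target = target.view(-1, 1).expand(-1, inputs.size(1))
    return inputs.flatten(0, 1), target.flatten(0, 1)


inputs = torch.zeros(8, 4, 3, 32, 32)   # batch of 8, 4 augmented copies each
target = torch.arange(8)

flat_in, flat_tgt = flatten_duplicates(inputs, target)
print(flat_in.shape, flat_tgt.shape)            # [32, 3, 32, 32] and [32]
assert flat_tgt.tolist()[:4] == [0, 0, 0, 0]    # each label repeated once per copy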
ryu577/survival | [
"0c6bf3dfcbb570ffd9e053bee56c86541acc01f0"
] | [
"survival/misc/misc.py"
] | [
"import numpy as np\n\n\ndef solve_hazard_eqn(fn, val, minval=10.0, maxval=900.0, interval=1.0):\n\t'''\n\tFinds the approximate point where a function crosses a value from below.\n\t'''\n\tprev_val = fn(minval)\n\n\tfor i in np.arange(minval+interval, maxval, interval):\n\t\tnext_val = fn(i)\n\t\tif next_val < val and val < prev_val:\n\t\t\treturn i-interval/2\n\t\tprev_val = next_val\n\tif next_val > val:\n\t\treturn maxval\n\telse:\n\t\treturn minval\n\n\ndef get_opt_tau(fn, pc_cost, q=1.0):\n\tpc_haz = q/pc_cost\n\tprev_haz = fn(9.5)\n\tmax_haz = 0\n\tans = 0.0\n\tfor t in np.arange(10,900,0.5):\n\t\tcurr_haz = fn(t)\n\t\tif curr_haz < pc_haz and prev_haz > pc_haz:\n\t\t\treturn t-0.5/2\n\t\tprev_haz = curr_haz\n\t\tif curr_haz > max_haz:\n\t\t\tmax_haz = curr_haz\n\tif max_haz < pc_haz:\n\t\treturn 10\n\telse:\n\t\treturn t\n\n\n"
] | [
[
"numpy.arange"
]
] |
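solve_hazard_eqn in the row above is a plain grid scan for the point where a decreasing function passes a threshold. The sketch below runs a lightly condensed copy of it on a made-up exponential hazard to show the half-step accuracy of the scan.

# Standalone run of the grid-scan crossing finder from survival/misc/misc.py above.
import numpy as np


def solve_hazard_eqn(fn, val, minval=10.0, maxval=900.0, interval=1.0):
    """Scan [minval, maxval] and return roughly where fn drops through val."""
    prev_val = fn(minval)
    for i in np.arange(minval + interval, maxval, interval):
        next_val = fn(i)
        if next_val < val < prev_val:
            return i - interval / 2
        prev_val = next_val
    return maxval if next_val > val else minval


# Made-up hazard exp(-t/100) crosses 0.5 at t = 100*ln(2) ~ 69.3.
crossing = solve_hazard_eqn(lambda t: np.exp(-t / 100.0), 0.5)
print(crossing)                                   # 69.5, within half a grid step
assert abs(crossing - 100.0 * np.log(2)) < 1.0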
LavinaVRovine/hazard | [
"e0408374dc0b76f8b9a0107f5f12cca2d4c033ef"
] | [
"predictions/lol_predictor.py"
] | [
"import pandas as pd\nimport mlflow.sklearn\nfrom sqlalchemy import create_engine\nfrom config import DATABASE_URI\n\nfrom predictions.common_predictor import CommonPredictor\nfrom config import ROOT_DIR\n\npd.set_option(\"display.width\", 1000)\npd.set_option(\"display.max_columns\", 50)\n\n\nclass LoLPredictor(CommonPredictor):\n def __init__(self, debug: bool = False):\n super().__init__(debug=debug)\n self.training_columns = [\n \"avg\",\n \"gold_per_minute\",\n \"gold_differential_per_minute\",\n \"gold_differential_at_15\",\n \"cs_per_minute\",\n \"cs_differential_at_15\",\n \"tower_differential_at_15\",\n \"tower_ratio\",\n \"first_tower\",\n \"damage_per_minute\",\n \"kills_per_game\",\n \"deaths_per_game\",\n \"kda\",\n \"dragon_game\",\n \"dragons_15\",\n \"nashors_game\",\n \"wards_per_minute\",\n \"c_avg\",\n \"c_gold_per_minute\",\n \"c_gold_differential_per_minute\",\n \"c_gold_differential_at_15\",\n \"c_cs_differential_at_15\",\n \"c_tower_differential_at_15\",\n \"c_tower_ratio\",\n \"c_first_tower\",\n \"c_damage_per_minute\",\n \"c_kills_per_game\",\n \"c_deaths_per_game\",\n \"c_kda\",\n \"c_dragon_game\",\n \"c_dragons_15\",\n \"c_nashors_game\",\n ]\n self.y_col_name = \"main_team_won\"\n\n\nif __name__ == \"__main__\":\n\n print()\n\n mlflow.set_tracking_uri(f\"file:///{ROOT_DIR}/mlruns\")\n mlflow.set_experiment(\"hazard_lol\")\n lol = LoLPredictor()\n con = create_engine(DATABASE_URI + \"lol\", echo=False)\n df_all = pd.read_sql(\"SELECT * FROM averaged_predictions\", con=con)\n lol.main_train(df_all, run_name=\"save run\", n_runs=30)\n print()\n # todo musi byt v current run\n # mlflow.sklearn.save_model(lol.model, path=f\"{ROOT_DIR}/models/ttt\", conda_env=f\"{ROOT_DIR}/environment.yaml\")\n # mlflow.sklearn.log_model(lol.model, artifact_path=f\"{ROOT_DIR}/ttt\", conda_env=f\"{ROOT_DIR}/environment.yaml\")\n"
] | [
[
"pandas.read_sql",
"pandas.set_option"
]
] |
saiabinesh/EdgeNets | [
"2b232d3f7fb60658755dad1ebca0ffc895cc795e"
] | [
"nn_layers/efficient_dwise_conv.py"
] | [
"#============================================\n__author__ = \"Sachin Mehta\"\n__maintainer__ = \"Sachin Mehta\"\n#============================================\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom nn_layers.cnn_utils import activation_fn, CBR, Shuffle, BR\nimport math\n\n\nclass EffDWSepConv(nn.Module):\n '''\n This class implements the volume-wise seperable convolutions\n '''\n def __init__(self, channel_in, channel_out, kernel_size=3):\n super().__init__()\n self.conv_channel = CBR(channel_in, channel_in, kSize=kernel_size, stride=1, groups=channel_in)\n\n # project from channel_in to Channel_out\n groups_proj = math.gcd(channel_in, channel_out)\n self.proj_layer = CBR(channel_in, channel_out, kSize=3, stride=1, groups=groups_proj)\n\n self.linear_comb_layer = nn.Sequential(\n nn.AdaptiveAvgPool2d(output_size=1),\n nn.Conv2d(channel_in, channel_out, kernel_size=1, bias=False),\n nn.Sigmoid()\n )\n\n self.channel_in = channel_in\n self.channel_out = channel_out\n self.ksize=kernel_size\n\n def forward(self, x):\n '''\n :param x: input of dimension C x H x W\n :return: output of dimension C1 x H x W\n '''\n bsz, channels, height, width = x.size()\n x = self.conv_channel(x)\n proj_out =self.proj_layer(x)\n linear_comb_out = self.linear_comb_layer(x)\n return proj_out * linear_comb_out\n\n def __repr__(self):\n s = '{name}(in_channels={channel_in}, out_channels={channel_out}, kernel_size={ksize})'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n\n\nclass StridedEffDWise(nn.Module):\n '''\n This class implements the strided volume-wise seperable convolutions\n '''\n def __init__(self, channel_in, kernel_size=3):\n '''\n :param channel_in: # of input channels\n :param channel_out: # of output channels\n :param height: Height of the input volume\n :param width: Width of the input volume\n :param kernel_size: Kernel size. We use the same kernel size of 3 for each dimension. Larger kernel size would increase the FLOPs and Parameters\n :param dilation: It's a list with 3 elements, each element corresponding to a dilation rate for each dimension.\n :param shuffle: Shuffle the feature maps in the volume-wise separable convolutions\n :param weight_avg: Waighted average for fusing the feature maps in volume-wise separable convolutions\n :param res_conn: Residual connection in the volume-wise separabel convolutions\n :param proj: Want to project the feature maps from channel_in to channel_out or not\n '''\n super().__init__()\n\n self.pool_layer = CBR(channel_in, channel_in, 3, stride=2, groups=channel_in)\n self.dw_layer = EffDWSepConv(channel_in, channel_in, kernel_size=kernel_size)\n self.channel_in = channel_in\n self.channel_out = 2*channel_in\n self.ksize = kernel_size\n\n def forward(self, x):\n x = self.pool_layer(x)\n return torch.cat([x, self.dw_layer(x)], 1)\n\n def __repr__(self):\n s = '{name}(in_channels={channel_in}, out_channels={channel_out}, kernel_size={ksize}, ' \\\n 'width={width}, height={height})'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n\nif __name__ == '__main__':\n import numpy as np\n channel_in = 3\n channel_out = 30\n width = 112\n height = 112\n bsz = 2\n input = torch.Tensor(bsz, channel_in, height, width)._fill_(1)\n model = EffDWSepConv(channel_in, channel_out)\n model.eval()\n\n input = torch.Tensor(bsz, channel_in, 56, 56)._fill_(1)\n out = model(input)\n\n n_params = sum([np.prod(p.size()) for p in model.parameters()])\n print('Params: {}'.format(n_params))\n"
] | [
[
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Sigmoid",
"torch.nn.Conv2d",
"torch.Tensor"
]
] |
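EffDWSepConv in the row above combines a depthwise convolution, a grouped projection, and a squeeze-style sigmoid gate. The sketch below is a simplified re-statement of that idea, not the repo's module: the CBR helper is replaced by plain Conv2d + BatchNorm2d + PReLU, which is an assumption made purely for a self-contained shape check.

# Simplified, standalone take on the EffDWSepConv idea above.
import math
import torch
from torch import nn


class TinyEffDWSep(nn.Module):
    def __init__(self, c_in, c_out, ksize=3):
        super().__init__()
        self.dw = nn.Sequential(                      # depthwise conv block
            nn.Conv2d(c_in, c_in, ksize, padding=ksize // 2, groups=c_in, bias=False),
            nn.BatchNorm2d(c_in), nn.PReLU(c_in))
        self.proj = nn.Sequential(                    # grouped channel projection
            nn.Conv2d(c_in, c_out, 3, padding=1, groups=math.gcd(c_in, c_out), bias=False),
            nn.BatchNorm2d(c_out), nn.PReLU(c_out))
        self.gate = nn.Sequential(                    # squeeze-style sigmoid gate
            nn.AdaptiveAvgPool2d(1), nn.Conv2d(c_in, c_out, 1, bias=False), nn.Sigmoid())

    def forward(self, x):
        x = self.dw(x)
        return self.proj(x) * self.gate(x)


x = torch.randn(2, 3, 56, 56)
print(TinyEffDWSep(3, 30)(x).shape)    # torch.Size([2, 30, 56, 56])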
selim-karaduman/pytorch-drl-algs | [
"c354399893eae41fe820134e5e57d0d152d9fe5a"
] | [
"pytorch_drl/utils/exploration.py"
] | [
"import numpy as np\nfrom pytorch_drl.utils.schedule import LinearSchedule\n\nclass OrnsteinUhlenbeck:\n\n def __init__(self, x_size, mu=0, \n sigma_init=0.2, sigma_final=0.2, \n sigma_horizon=1, theta=0.2, dt=1e-2):\n self.mu = mu\n self.x_size = x_size\n self.dt = dt\n self.theta = theta\n self.x = np.zeros(x_size) + mu\n self.sigma = LinearSchedule(sigma_init, sigma_final, sigma_horizon)\n\n def set(self, x):\n self.x = x\n\n def step(self):\n dw = np.random.randn(*self.x_size) * np.sqrt(self.dt)\n dx = self.theta * (self.mu - self.x) * self.dt + self.sigma.value * dw\n self.x = self.x + dx\n self.sigma.step()\n return self.x\n\n def reset(self):\n self.x = self.x*0 + self.mu\n\nclass Gaussian:\n\n def __init__(self, x_size, mu=0,\n sigma_init=0.2, sigma_final=0.2, sigma_horizon=1):\n self.mu = mu\n self.x_size = x_size\n self.sigma = LinearSchedule(sigma_init, sigma_final, sigma_horizon)\n\n def step(self):\n x = np.random.randn(*self.x_size) * self.sigma.value + self.mu\n self.sigma.step()\n return x\n"
] | [
[
"numpy.sqrt",
"numpy.random.randn",
"numpy.zeros"
]
] |
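The OrnsteinUhlenbeck class above drives exploration noise with the update dx = theta*(mu - x)*dt + sigma*sqrt(dt)*N(0,1). This numpy-only aside replays that rule with a fixed sigma (the LinearSchedule is dropped, an assumption made for the sketch) to show its mean-reverting behaviour.

# Standalone Ornstein-Uhlenbeck noise sketch matching the update rule above.
import numpy as np


def ou_step(x, mu=0.0, theta=0.2, sigma=0.2, dt=1e-2):
    # dx = theta * (mu - x) * dt + sigma * sqrt(dt) * N(0, 1)
    return x + theta * (mu - x) * dt + sigma * np.sqrt(dt) * np.random.randn(*x.shape)


x = np.zeros(4)
trace = []
for _ in range(1000):
    x = ou_step(x)
    trace.append(x.copy())

trace = np.array(trace)
print(trace.mean(), trace.std())   # hovers around mu = 0 with bounded spread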
phoenx34/wav.snipe | [
"d6d0b26440e5913dfbd5ea33b53ff226d405339c"
] | [
"development/librosa/core/constantq.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''Constant-Q transforms'''\nfrom __future__ import division\n\nimport numpy as np\nimport scipy.fftpack as fft\n\nfrom . import audio\nfrom .time_frequency import cqt_frequencies, note_to_hz\nfrom .spectrum import stft\nfrom .pitch import estimate_tuning\nfrom .. import cache\nfrom .. import filters\nfrom .. import util\nfrom ..util.exceptions import ParameterError\n\n__all__ = ['cqt', 'hybrid_cqt', 'pseudo_cqt']\n\n\n@cache(level=20)\ndef cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,\n bins_per_octave=12, tuning=0.0, filter_scale=1,\n norm=1, sparsity=0.01, window='hann',\n scale=True,\n real=util.Deprecated()):\n '''Compute the constant-Q transform of an audio signal.\n\n This implementation is based on the recursive sub-sampling method\n described by [1]_.\n\n .. [1] Schoerkhuber, Christian, and Anssi Klapuri.\n \"Constant-Q transform toolbox for music processing.\"\n 7th Sound and Music Computing Conference, Barcelona, Spain. 2010.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n hop_length : int > 0 [scalar]\n number of samples between successive CQT columns.\n\n fmin : float > 0 [scalar]\n Minimum frequency. Defaults to C1 ~= 32.70 Hz\n\n n_bins : int > 0 [scalar]\n Number of frequency bins, starting at `fmin`\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave\n\n tuning : None or float in `[-0.5, 0.5)`\n Tuning offset in fractions of a bin (cents).\n\n If `None`, tuning will be automatically estimated from the signal.\n\n filter_scale : float > 0\n Filter scale factor. Small values (<1) use shorter windows\n for improved time resolution.\n\n norm : {inf, -inf, 0, float > 0}\n Type of norm to use for basis function normalization.\n See `librosa.util.normalize`.\n\n sparsity : float in [0, 1)\n Sparsify the CQT basis by discarding up to `sparsity`\n fraction of the energy in each basis.\n\n Set `sparsity=0` to disable sparsification.\n\n window : str, tuple, number, or function\n Window specification for the basis filters.\n See `filters.get_window` for details.\n\n scale : bool\n If `True`, scale the CQT response by square-root the length of\n each channel's filter. This is analogous to `norm='ortho'` in FFT.\n\n If `False`, do not scale the CQT. This is analogous to\n `norm=None` in FFT.\n\n real : [DEPRECATED]\n .. warning:: This parameter name deprecated in librosa 0.5.0\n It will be removed in librosa 0.6.0.\n\n\n Returns\n -------\n CQT : np.ndarray [shape=(n_bins, t), dtype=np.complex or np.float]\n Constant-Q value each frequency at each time.\n\n Raises\n ------\n ParameterError\n If `hop_length` is not an integer multiple of\n `2**(n_bins / bins_per_octave)`\n\n Or if `y` is too short to support the frequency range of the CQT.\n\n See Also\n --------\n librosa.core.resample\n librosa.util.normalize\n\n Notes\n -----\n This function caches at level 20.\n\n Examples\n --------\n Generate and plot a constant-Q power spectrum\n\n >>> import matplotlib.pyplot as plt\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> C = librosa.cqt(y, sr=sr)\n >>> librosa.display.specshow(librosa.logamplitude(C**2, ref_power=np.max),\n ... sr=sr, x_axis='time', y_axis='cqt_note')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Constant-Q power spectrum')\n >>> plt.tight_layout()\n\n\n Limit the frequency range\n\n >>> C = librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),\n ... 
n_bins=60)\n >>> C\n array([[ 8.827e-04, 9.293e-04, ..., 3.133e-07, 2.942e-07],\n [ 1.076e-03, 1.068e-03, ..., 1.153e-06, 1.148e-06],\n ...,\n [ 1.042e-07, 4.087e-07, ..., 1.612e-07, 1.928e-07],\n [ 2.363e-07, 5.329e-07, ..., 1.294e-07, 1.611e-07]])\n\n\n Using a higher frequency resolution\n\n >>> C = librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),\n ... n_bins=60 * 2, bins_per_octave=12 * 2)\n >>> C\n array([[ 1.536e-05, 5.848e-05, ..., 3.241e-07, 2.453e-07],\n [ 1.856e-03, 1.854e-03, ..., 2.397e-08, 3.549e-08],\n ...,\n [ 2.034e-07, 4.245e-07, ..., 6.213e-08, 1.463e-07],\n [ 4.896e-08, 5.407e-07, ..., 9.176e-08, 1.051e-07]])\n '''\n\n # How many octaves are we dealing with?\n n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))\n n_filters = min(bins_per_octave, n_bins)\n\n len_orig = len(y)\n\n if fmin is None:\n # C1 by default\n fmin = note_to_hz('C1')\n\n if tuning is None:\n tuning = estimate_tuning(y=y, sr=sr)\n\n # First thing, get the freqs of the top octave\n freqs = cqt_frequencies(n_bins, fmin,\n bins_per_octave=bins_per_octave)[-bins_per_octave:]\n\n fmin_t = np.min(freqs)\n fmax_t = np.max(freqs)\n\n # Determine required resampling quality\n Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)\n filter_cutoff = fmax_t * (1 + 0.5 * filters.window_bandwidth(window) / Q)\n nyquist = sr / 2.0\n if filter_cutoff < audio.BW_FASTEST * nyquist:\n res_type = 'kaiser_fast'\n else:\n res_type = 'kaiser_best'\n\n y, sr, hop_length = __early_downsample(y, sr, hop_length,\n res_type,\n n_octaves,\n nyquist, filter_cutoff, scale)\n\n cqt_resp = []\n\n if res_type != 'kaiser_fast':\n\n # Do the top octave before resampling to allow for fast resampling\n fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin_t,\n n_filters,\n bins_per_octave,\n tuning,\n filter_scale,\n norm,\n sparsity,\n window=window)\n\n # Compute the CQT filter response and append it to the stack\n cqt_resp.append(__cqt_response(y, n_fft, hop_length, fft_basis))\n\n fmin_t /= 2\n fmax_t /= 2\n n_octaves -= 1\n\n filter_cutoff = fmax_t * (1 + 0.5 * filters.window_bandwidth(window) / Q)\n\n res_type = 'kaiser_fast'\n\n # Make sure our hop is long enough to support the bottom octave\n num_twos = __num_two_factors(hop_length)\n if num_twos < n_octaves - 1:\n raise ParameterError('hop_length must be a positive integer '\n 'multiple of 2^{0:d} for {1:d}-octave CQT'\n .format(n_octaves - 1, n_octaves))\n\n # Now do the recursive bit\n fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin_t,\n n_filters,\n bins_per_octave,\n tuning,\n filter_scale,\n norm,\n sparsity,\n window=window)\n\n my_y, my_sr, my_hop = y, sr, hop_length\n\n # Iterate down the octaves\n for i in range(n_octaves):\n\n # Resample (except first time)\n if i > 0:\n if len(my_y) < 2:\n raise ParameterError('Input signal length={} is too short for '\n '{:d}-octave CQT'.format(len_orig,\n n_octaves))\n\n # The additional scaling of sqrt(2) here is to implicitly rescale\n # the filters\n my_y = np.sqrt(2) * audio.resample(my_y, my_sr, my_sr/2.0,\n res_type=res_type,\n scale=True)\n my_sr /= 2.0\n my_hop //= 2\n\n # Compute the cqt filter response and append to the stack\n cqt_resp.append(__cqt_response(my_y, n_fft, my_hop, fft_basis))\n\n C = __trim_stack(cqt_resp, n_bins)\n\n if scale:\n lengths = filters.constant_q_lengths(sr, fmin,\n n_bins=n_bins,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n window=window,\n filter_scale=filter_scale)\n C /= np.sqrt(lengths[:, np.newaxis])\n\n return C\n\n\n@cache(level=20)\ndef hybrid_cqt(y, sr=22050, 
hop_length=512, fmin=None, n_bins=84,\n bins_per_octave=12, tuning=0.0, filter_scale=1,\n norm=1, sparsity=0.01, window='hann', scale=True):\n '''Compute the hybrid constant-Q transform of an audio signal.\n\n Here, the hybrid CQT uses the pseudo CQT for higher frequencies where\n the hop_length is longer than half the filter length and the full CQT\n for lower frequencies.\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n hop_length : int > 0 [scalar]\n number of samples between successive CQT columns.\n\n fmin : float > 0 [scalar]\n Minimum frequency. Defaults to C1 ~= 32.70 Hz\n\n n_bins : int > 0 [scalar]\n Number of frequency bins, starting at `fmin`\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave\n\n tuning : None or float in `[-0.5, 0.5)`\n Tuning offset in fractions of a bin (cents).\n\n If `None`, tuning will be automatically estimated from the signal.\n\n filter_scale : float > 0\n Filter filter_scale factor. Larger values use longer windows.\n\n sparsity : float in [0, 1)\n Sparsify the CQT basis by discarding up to `sparsity`\n fraction of the energy in each basis.\n\n Set `sparsity=0` to disable sparsification.\n\n window : str, tuple, number, or function\n Window specification for the basis filters.\n See `filters.get_window` for details.\n\n\n Returns\n -------\n CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]\n Constant-Q energy for each frequency at each time.\n\n Raises\n ------\n ParameterError\n If `hop_length` is not an integer multiple of\n `2**(n_bins / bins_per_octave)`\n\n Or if `y` is too short to support the frequency range of the CQT.\n\n See Also\n --------\n cqt\n pseudo_cqt\n\n Notes\n -----\n This function caches at level 20.\n\n '''\n\n if fmin is None:\n # C1 by default\n fmin = note_to_hz('C1')\n\n if tuning is None:\n tuning = estimate_tuning(y=y, sr=sr)\n\n # Get all CQT frequencies\n freqs = cqt_frequencies(n_bins, fmin,\n bins_per_octave=bins_per_octave,\n tuning=tuning)\n\n # Compute the length of each constant-Q basis function\n lengths = filters.constant_q_lengths(sr, fmin,\n n_bins=n_bins,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n filter_scale=filter_scale,\n window=window)\n\n # Determine which filters to use with Pseudo CQT\n # These are the ones that fit within 2 hop lengths after padding\n pseudo_filters = 2.0**np.ceil(np.log2(lengths)) < 2 * hop_length\n\n n_bins_pseudo = int(np.sum(pseudo_filters))\n\n n_bins_full = n_bins - n_bins_pseudo\n cqt_resp = []\n\n if n_bins_pseudo > 0:\n fmin_pseudo = np.min(freqs[pseudo_filters])\n\n cqt_resp.append(pseudo_cqt(y, sr,\n hop_length=hop_length,\n fmin=fmin_pseudo,\n n_bins=n_bins_pseudo,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n filter_scale=filter_scale,\n norm=norm,\n sparsity=sparsity,\n window=window,\n scale=scale))\n\n if n_bins_full > 0:\n cqt_resp.append(np.abs(cqt(y, sr,\n hop_length=hop_length,\n fmin=fmin,\n n_bins=n_bins_full,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n filter_scale=filter_scale,\n norm=norm,\n sparsity=sparsity,\n window=window,\n scale=scale)))\n\n return __trim_stack(cqt_resp, n_bins)\n\n\n@cache(level=20)\ndef pseudo_cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,\n bins_per_octave=12, tuning=0.0, filter_scale=1,\n norm=1, sparsity=0.01, window='hann', scale=True):\n '''Compute the pseudo constant-Q transform of an audio signal.\n\n This uses a single fft size that is the smallest power of 2 that is greater\n than or 
equal to the max of:\n\n 1. The longest CQT filter\n 2. 2x the hop_length\n\n Parameters\n ----------\n y : np.ndarray [shape=(n,)]\n audio time series\n\n sr : number > 0 [scalar]\n sampling rate of `y`\n\n hop_length : int > 0 [scalar]\n number of samples between successive CQT columns.\n\n fmin : float > 0 [scalar]\n Minimum frequency. Defaults to C1 ~= 32.70 Hz\n\n n_bins : int > 0 [scalar]\n Number of frequency bins, starting at `fmin`\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave\n\n tuning : None or float in `[-0.5, 0.5)`\n Tuning offset in fractions of a bin (cents).\n\n If `None`, tuning will be automatically estimated from the signal.\n\n filter_scale : float > 0\n Filter filter_scale factor. Larger values use longer windows.\n\n sparsity : float in [0, 1)\n Sparsify the CQT basis by discarding up to `sparsity`\n fraction of the energy in each basis.\n\n Set `sparsity=0` to disable sparsification.\n\n window : str, tuple, number, or function\n Window specification for the basis filters.\n See `filters.get_window` for details.\n\n\n Returns\n -------\n CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]\n Pseudo Constant-Q energy for each frequency at each time.\n\n Raises\n ------\n ParameterError\n If `hop_length` is not an integer multiple of\n `2**(n_bins / bins_per_octave)`\n\n Or if `y` is too short to support the frequency range of the CQT.\n\n Notes\n -----\n This function caches at level 20.\n\n '''\n\n if fmin is None:\n # C1 by default\n fmin = note_to_hz('C1')\n\n if tuning is None:\n tuning = estimate_tuning(y=y, sr=sr)\n\n fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin, n_bins,\n bins_per_octave,\n tuning, filter_scale,\n norm, sparsity,\n hop_length=hop_length,\n window=window)\n\n fft_basis = np.abs(fft_basis)\n\n # Compute the magnitude STFT with Hann window\n D = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length))\n\n # Project onto the pseudo-cqt basis\n C = fft_basis.dot(D)\n\n if scale:\n C /= np.sqrt(n_fft)\n else:\n lengths = filters.constant_q_lengths(sr, fmin,\n n_bins=n_bins,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n window=window,\n filter_scale=filter_scale)\n\n C *= np.sqrt(lengths[:, np.newaxis] / n_fft)\n\n return C\n\n\n@cache(level=10)\ndef __cqt_filter_fft(sr, fmin, n_bins, bins_per_octave, tuning,\n filter_scale, norm, sparsity, hop_length=None,\n window='hann'):\n '''Generate the frequency domain constant-Q filter basis.'''\n\n basis, lengths = filters.constant_q(sr,\n fmin=fmin,\n n_bins=n_bins,\n bins_per_octave=bins_per_octave,\n tuning=tuning,\n filter_scale=filter_scale,\n norm=norm,\n pad_fft=True,\n window=window)\n\n # Filters are padded up to the nearest integral power of 2\n n_fft = basis.shape[1]\n\n if (hop_length is not None and\n n_fft < 2.0**(1 + np.ceil(np.log2(hop_length)))):\n\n n_fft = int(2.0 ** (1 + np.ceil(np.log2(hop_length))))\n\n # re-normalize bases with respect to the FFT window length\n basis *= lengths[:, np.newaxis] / float(n_fft)\n\n # FFT and retain only the non-negative frequencies\n fft_basis = fft.fft(basis, n=n_fft, axis=1)[:, :(n_fft // 2)+1]\n\n # sparsify the basis\n fft_basis = util.sparsify_rows(fft_basis, quantile=sparsity)\n\n return fft_basis, n_fft, lengths\n\n\ndef __trim_stack(cqt_resp, n_bins):\n '''Helper function to trim and stack a collection of CQT responses'''\n\n # cleanup any framing errors at the boundaries\n max_col = min([x.shape[1] for x in cqt_resp])\n\n cqt_resp = np.vstack([x[:, :max_col] for x in cqt_resp][::-1])\n\n # Finally, clip out any bottom 
frequencies that we don't really want\n # Transpose magic here to ensure column-contiguity\n return np.ascontiguousarray(cqt_resp[-n_bins:].T).T\n\n\ndef __cqt_response(y, n_fft, hop_length, fft_basis):\n '''Compute the filter response with a target STFT hop.'''\n\n # Compute the STFT matrix\n D = stft(y, n_fft=n_fft, hop_length=hop_length, window=np.ones)\n\n # And filter response energy\n return fft_basis.dot(D)\n\n\ndef __early_downsample_count(nyquist, filter_cutoff, hop_length, n_octaves):\n '''Compute the number of early downsampling operations'''\n\n downsample_count1 = max(0, int(np.ceil(np.log2(audio.BW_FASTEST * nyquist /\n filter_cutoff)) - 1) - 1)\n\n num_twos = __num_two_factors(hop_length)\n downsample_count2 = max(0, num_twos - n_octaves + 1)\n\n return min(downsample_count1, downsample_count2)\n\n\ndef __early_downsample(y, sr, hop_length, res_type, n_octaves,\n nyquist, filter_cutoff, scale):\n '''Perform early downsampling on an audio signal, if it applies.'''\n\n downsample_count = __early_downsample_count(nyquist, filter_cutoff,\n hop_length, n_octaves)\n\n if downsample_count > 0 and res_type == 'kaiser_fast':\n downsample_factor = 2**(downsample_count)\n\n hop_length //= downsample_factor\n\n if len(y) < downsample_factor:\n raise ParameterError('Input signal length={:d} is too short for '\n '{:d}-octave CQT'.format(len(y), n_octaves))\n\n new_sr = sr / float(downsample_factor)\n y = audio.resample(y, sr, new_sr,\n res_type=res_type,\n scale=True)\n\n # If we're not going to length-scale after CQT, we\n # need to compensate for the downsampling factor here\n if not scale:\n y *= np.sqrt(downsample_factor)\n\n sr = new_sr\n\n return y, sr, hop_length\n\n\ndef __num_two_factors(x):\n \"\"\"Return how many times integer x can be evenly divided by 2.\n\n Returns 0 for non-positive integers.\n \"\"\"\n if x <= 0:\n return 0\n num_twos = 0\n while x % 2 == 0:\n num_twos += 1\n x //= 2\n\n return num_twos\n"
] | [
[
"numpy.vstack",
"numpy.sum",
"numpy.log2",
"scipy.fftpack.fft",
"numpy.abs",
"numpy.max",
"numpy.min",
"numpy.sqrt",
"numpy.ascontiguousarray"
]
] |
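The constantq.py copy above implements the recursive-subsampling CQT. As a usage aside through the public librosa API (assuming librosa is installed; the test tone is made up), this sketch checks that a 440 Hz sine peaks near the expected bin when fmin is C1 with 12 bins per octave.

# Usage sketch for the cqt() implementation above, via the public librosa API.
import numpy as np
import librosa

sr = 22050
t = np.arange(sr * 2) / sr
y = np.sin(2 * np.pi * 440.0 * t).astype(np.float32)   # 2 s of A4

C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C1'), n_bins=84))
peak_bin = int(np.median(C.argmax(axis=0)))

# With 12 bins per octave starting at C1, A4 (440 Hz) should land near bin 45.
expected = int(round(12 * np.log2(440.0 / librosa.note_to_hz('C1'))))
print(peak_bin, expected)
assert abs(peak_bin - expected) <= 1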
liudengfeng/zipline | [
"01fdd51d83efeb3453e92b7d02c255a06eba49ac"
] | [
"zipline/pipeline/fundamentals/localdata_wy.py"
] | [
"\"\"\"\n\n查询本地数据\n\n尽管`bcolz`最终会丢失时区信息,但写入时依旧将时间列转换为UTC时区。\n除asof_date、timestamp列外,其余时间列无需转换\n\n\"\"\"\n\nimport re\nimport warnings\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom cnswd.cninfo.utils import _rename\nfrom cnswd.mongodb import get_db\nfrom cnswd.setting.constants import MAX_WORKER\nfrom cnswd.utils.tools import filter_a\n\nfrom ..common import AD_FIELD_NAME, TS_FIELD_NAME\nfrom .constants import SW_SECTOR_MAPS\n\n# from cnswd.store import (ClassifyTreeStore, DataBrowseStore, MarginStore,\n# TctGnStore, WyStockDailyStore)\nLOCAL_TZ = 'Asia/Shanghai'\nwarnings.filterwarnings('ignore')\n\nSTOCK_PAT = re.compile(r\"^\\d{6}$\")\nA_STOCK_PAT = re.compile(r\"^[036]\\d{5}$\")\n\nNUM_MAPS = {\n 1: '一级',\n 2: '二级',\n 3: '三级',\n 4: '四级',\n}\n\nTO_DORP_PAT_0 = re.compile(r'^[(]?[一二三四五六七八九][)]?([((]\\d[))])?[、]?')\nTO_DORP_PAT_1 = re.compile(r'^[1-9]、|[(()][1-9][))]')\nTO_DORP_PAT_2 = re.compile(r'[、:()-]|\\_|\\(|\\)')\n\nMATCH_ONLY_A = {\n '$match': {\n '$expr': {\n '$in': [\n {\n '$substrBytes': [\n '$股票代码', 0, 1\n ]\n }, [\n '0', '3', '6'\n ]\n ]\n }\n }\n}\n\n# region 辅助函数\n\n\ndef _to_timestamp(df):\n # 无需 tz 信息\n for col in [AD_FIELD_NAME, TS_FIELD_NAME]:\n if col in df.columns:\n # df[col] = df[col].map(lambda x: pd.Timestamp(\n # x, tz=LOCAL_TZ).tz_convert('UTC').to_pydatetime())\n df[col] = df[col].map(pd.Timestamp)\n return df\n\n\ndef _normalized_col_name(x):\n \"\"\"规范列财务报告项目在`pipeline`中的列名称\n\n 去除列名称中的前导数字,中间符号,保留文字及尾部数字\n \"\"\"\n # 去除前导序号\n x = re.sub(TO_DORP_PAT_0, '', x)\n x = re.sub(TO_DORP_PAT_1, '', x)\n x = re.sub(TO_DORP_PAT_2, '', x)\n return x\n\n\ndef _select_only_a(df, only_A, code_col='股票代码'):\n \"\"\"仅含A股数据\"\"\"\n if only_A:\n cond1 = df[code_col].str.startswith('2')\n cond2 = df[code_col].str.startswith('9')\n df = df.loc[~(cond1 | cond2), :]\n return df\n\n\n# endregion\n\n# region 静态数据\n\n\ndef get_stock_info(only_A=True):\n \"\"\"股票基础信息\"\"\"\n db = get_db('cninfo')\n collection = db['基本资料']\n projection = {\n '_id': 0,\n '股票代码': 1,\n '上市日期': 1,\n # 与行业分类重复\n # '申万行业一级名称': 1,\n # '申万行业二级名称': 1,\n # '申万行业三级名称': 1,\n # '证监会一级行业名称': 1,\n # '证监会二级行业名称': 1,\n '省份': 1,\n '城市': 1,\n '注册资本': 1,\n '上市状态': 1,\n '律师事务所': 1,\n '会计师事务所': 1,\n '上市地点': 1,\n }\n sort = {'股票代码': 1}\n pipeline = [\n {'$project': projection},\n {'$sort': sort}\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n df = pd.DataFrame.from_records(\n collection.aggregate(pipeline))\n df.drop_duplicates('股票代码', inplace=True)\n # 剔除未上市、未交易的无效股票\n cond1 = ~ df['上市日期'].isnull()\n cond2 = df['上市日期'] <= pd.Timestamp('today')\n df = df.loc[cond1 & cond2, :]\n df['timestamp'] = df['上市日期']\n df['asof_date'] = df['上市日期'] - pd.Timedelta(days=1)\n df = _to_timestamp(df)\n # 注册资本转换 -> 十分位数\n df['注册资本十分位数'] = pd.qcut(np.log(df['注册资本'].values), 10, labels=False)\n df.rename(columns={'股票代码': 'sid'}, inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n return df\n\n\ndef get_bom_maps(cate, pattern):\n r\"\"\"行业父类编码映射\n\n Example:\n\n >>> pattern = re.compile(r\"^Z\\d{2}$\")\n >>> get_bom_maps('国证行业分类', pattern)\n {'Z01': '能源',\n 'Z02': '原材料',\n 'Z03': '工业',\n 'Z04': '可选消费',\n 'Z05': '主要消费',\n 'Z06': '医药卫生',\n 'Z07': '金融',\n 'Z08': '信息技术',\n 'Z09': '电信业务',\n 'Z10': '公用事业',\n 'Z11': '房地产'}\n \"\"\"\n db = get_db()\n collection = db['分类BOM']\n pipeline = [\n {\n '$match': {\n '分类方式': cate,\n '分类编码': {\n '$regex': pattern\n }\n }\n },\n {\n '$project': {'_id': 0}\n }\n ]\n maps = {}\n for d in 
collection.aggregate(pipeline):\n maps[d['分类编码']] = d['分类名称']\n return maps\n\n\ndef get_industry_stock_list(cate, only_A):\n db = get_db()\n collection = db['股票分类']\n pipeline = [\n {\n '$match': {\n '分类方式': cate\n }\n }, {\n '$unwind': {\n 'path': '$股票列表'\n }\n }, {\n '$project': {\n '_id': 0,\n '分类编码': 1,\n '分类名称': 1,\n '股票代码': \"$股票列表\"\n }\n }\n ]\n if only_A:\n pipeline.append(MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n return df\n\n\ndef get_cn_industry(only_A=True):\n \"\"\"获取国证四级行业分类\"\"\"\n cate = '国证行业分类'\n col_names = {\n '分类编码': '国证四级行业编码',\n '分类名称': '国证四级行业',\n '股票代码': 'sid',\n }\n df = get_industry_stock_list('国证行业分类', only_A)\n if df.empty:\n msg = '在本地数据库中无法获取行业分类数据。\\n'\n msg += '这将导致股票分类数据缺失。\\n'\n msg += '运行`stock clsf`提取网络数据并存储在本地数据库。'\n warnings.warn(msg)\n return pd.DataFrame(columns=col_names.values())\n df.rename(columns=col_names, inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n for level in (1, 2, 3):\n pattern_str = r\"^Z\\d{\" + str(level*2) + \"}$\"\n pattern = re.compile(pattern_str)\n maps = get_bom_maps(cate, pattern)\n digit = level * 2 + 1\n u_num = NUM_MAPS[level]\n code_col = '国证{}行业编码'.format(u_num)\n name_col = '国证{}行业'.format(u_num)\n df[code_col] = df['国证四级行业编码'].map(lambda x: x[:digit])\n df[name_col] = df['国证四级行业编码'].map(lambda x: maps[x[:digit]])\n return df\n\n\ndef get_sw_industry(only_A=True):\n \"\"\"获取申万行业三级分类\"\"\"\n cate = \"申万行业分类\"\n col_names = {\n '分类编码': '申万三级行业编码',\n '分类名称': '申万三级行业',\n '股票代码': 'sid',\n }\n df = get_industry_stock_list(cate, only_A)\n if df.empty:\n msg = '在本地数据库中无法获取行业分类数据。\\n'\n msg += '这将导致股票分类数据缺失。\\n'\n msg += '运行`stock clsf`提取网络数据并存储在本地数据库。'\n warnings.warn(msg)\n return pd.DataFrame(columns=col_names.values())\n df.rename(columns=col_names, inplace=True)\n # S90 为无效数据\n cond = df['申万三级行业编码'] == 'S90'\n df = df[~cond]\n df['sid'] = df['sid'].map(lambda x: int(x))\n for level in (1, 2):\n pattern_str = r\"^S\\d{\" + str(level*2) + \"}$\"\n pattern = re.compile(pattern_str)\n maps = get_bom_maps(cate, pattern)\n digit = level * 2 + 1\n u_num = NUM_MAPS[level]\n code_col = '申万{}行业编码'.format(u_num)\n name_col = '申万{}行业'.format(u_num)\n df[code_col] = df['申万三级行业编码'].map(lambda x: x[:digit])\n df[name_col] = df['申万三级行业编码'].map(lambda x: maps.get(x[:digit], '综合'))\n sw_code_maps = {v: k for k, v in SW_SECTOR_MAPS.items()}\n df['sw_sector'] = df['申万一级行业编码'].map(\n lambda x: sw_code_maps[x]).astype('int64')\n return df\n\n\ndef get_zjh_industry(only_A=True):\n \"\"\"获取证监会行业二级分类\"\"\"\n cate = '证监会行业分类'\n col_names = {\n '分类编码': '证监会二级行业编码',\n '分类名称': '证监会二级行业',\n '股票代码': 'sid',\n }\n df = get_industry_stock_list(cate, only_A)\n if df.empty:\n msg = '在本地数据库中无法获取行业分类数据。\\n'\n msg += '这将导致股票分类数据缺失。\\n'\n msg += '运行`stock clsf`提取网络数据并存储在本地数据库。'\n warnings.warn(msg)\n return pd.DataFrame(columns=col_names.values())\n df.rename(columns=col_names, inplace=True)\n # 混杂了申万编码,剔除\n cond = df['证监会二级行业编码'].str.len() == 3\n df = df[cond]\n df['sid'] = df['sid'].map(lambda x: int(x))\n for level in (1, ):\n pattern_str = r\"^[A-R]$\"\n pattern = re.compile(pattern_str)\n maps = get_bom_maps(cate, pattern)\n digit = (level-1) * 2 + 1\n u_num = NUM_MAPS[level]\n code_col = '证监会{}行业编码'.format(u_num)\n name_col = '证监会{}行业'.format(u_num)\n df[code_col] = df['证监会二级行业编码'].map(lambda x: x[:digit])\n df[name_col] = df['证监会二级行业编码'].map(lambda x: maps.get(x[:digit], '综合'))\n return df\n\n\ndef concept_categories():\n \"\"\"概念类别映射{代码:名称}\"\"\"\n db = get_db()\n collection = db['同花顺概念']\n 
pipeline = [\n {\n '$project': {\n '_id': 0,\n '概念编码': 1,\n '概念名称': 1,\n }\n }\n ]\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n try:\n df.columns = ['code', 'name']\n except ValueError:\n raise NotImplementedError('本地数据库中\"股票概念数据\"为空,请运行`stock thsgn`')\n df.sort_values('code', inplace=True)\n return df.set_index('code').to_dict()['name']\n\n\ndef field_code_concept_maps():\n \"\"\"\n 概念映射二元组\n\n Returns\n -------\n res : 元组\n 第一项:原始概念编码 -> 数据集字段编码(新编码)\n 第二项:数据集字段编码 -> 概念名称\n\n Example\n -------\n 第一项:{'00010002': 'A001', '00010003': 'A002', '00010004': 'A003', ...\n 第二项:{'A001': '参股金融', 'A002': '可转债', 'A003': '上证红利'...\n\n \"\"\"\n vs = concept_categories()\n no, key = pd.factorize(list(vs.keys()), sort=True)\n id_maps = {v: 'A{}'.format(str(k + 1).zfill(3)) for k, v in zip(no, key)}\n name_maps = {v: vs[k] for (k, v) in id_maps.items()}\n return id_maps, name_maps\n\n\ndef get_concept_info(only_A=True):\n \"\"\"股票概念编码信息\n\n Keyword Arguments:\n only_A {bool} -- 只包含A股代码 (default: {True})\n\n Returns:\n pd.DataFrame -- 股票概念编码信息表\n\n Example:\n >>> get_concept_info().head(3)\n sid A001 A002 A003 A004 A005 ... A205\n 1 False False False False False ... False\n 2 False False False False False ... False\n 4 False False False True False ... False\n \"\"\"\n db = get_db()\n collection = db['同花顺概念']\n pipeline = [\n {\n '$unwind': {\n 'path': '$股票列表'\n }\n }, {\n '$project': {\n '_id': 0,\n '概念编码': 1,\n # '概念名称': 1,\n '股票列表': 1\n }\n }\n ]\n ds = collection.aggregate(pipeline)\n\n def func(x):\n if only_A:\n return A_STOCK_PAT.match(x['股票列表'])\n else:\n return STOCK_PAT.match(x['股票列表'])\n\n ds = filter(func, ds)\n df = pd.DataFrame.from_records(ds)\n df.rename(columns={'股票列表': 'sid'}, inplace=True)\n\n out = pd.pivot_table(df,\n values='概念编码',\n index='sid',\n columns='概念编码',\n aggfunc=np.count_nonzero,\n fill_value=0)\n\n id_maps, _ = field_code_concept_maps()\n out.rename(columns=id_maps, inplace=True)\n out = out.astype('bool').reset_index()\n out['sid'] = out['sid'].map(lambda x: int(x))\n return out\n\n\n# endregion\n\n# region 动态数据\n\ndef _change_hist(code, db=None):\n # 深发展A -> 深发展A\n if db is None:\n db = get_db('wy_stock_daily')\n collection = db[code]\n if collection.estimated_document_count() == 0:\n return pd.DataFrame()\n records = collection.find(\n projection={'_id': 0, '名称': 1, '日期': 1},\n sort=[('日期', 1), ('名称', 1,)])\n df = pd.DataFrame.from_records(records)\n df['名称'] = df['名称'].map(_rename)\n cond = df['名称'] != df['名称'].shift(1)\n df = df.loc[cond, :]\n df.rename(columns={'日期': 'asof_date', '名称': '股票简称'}, inplace=True)\n df['sid'] = int(code)\n return df\n\n\ndef get_short_name_changes(only_A=True):\n \"\"\"股票简称变动历史\"\"\"\n db = get_db('wy_stock_daily')\n codes = db.list_collection_names()\n if only_A:\n codes = filter_a(codes)\n func = partial(_change_hist, db=db)\n # 3878只股票 用时 48s\n with ThreadPoolExecutor(MAX_WORKER) as pool:\n r = pool.map(func, codes)\n df = pd.concat(r, ignore_index=True)\n return df\n\n\ndef get_margin_data(only_A=True):\n \"\"\"融资融券数据\"\"\"\n db = get_db('cninfo')\n collection = db['融资融券明细']\n projection = {\n '_id': 0,\n '股票简称': 0,\n }\n # sort = [('股票代码', 1), ('交易日期', 1)]\n df = pd.DataFrame.from_records(\n collection.find(projection=projection))\n df = _select_only_a(df, only_A, '股票代码')\n df.rename(columns={'交易日期': 'timestamp', '股票代码': 'sid'}, inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n # 设置晚8小时\n df['asof_date'] = df['timestamp'] - pd.Timedelta(hours=8)\n df.sort_values(['sid', 'timestamp'], 
inplace=True, ignore_index=True)\n return df\n\n\ndef get_dividend_data(only_A=True):\n \"\"\"现金股利\"\"\"\n db = get_db('cninfo')\n collection = db['分红指标']\n # 使用股权登记日作为 asof_date\n # 此指标仅用于计算年度股息之用,不涉及到所谓知晓日期\n pipeline = [\n {\n '$project': {\n '_id': 0,\n '股票代码': 1,\n '分红年度': 1,\n 'A股股权登记日': 1,\n '派息比例(人民币)': 1\n }\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n cols = {'股票代码': 'sid', 'A股股权登记日': 'asof_date', '派息比例(人民币)': '每股派息'}\n df.rename(columns=cols, inplace=True)\n # 首先将日期缺失值默认为分红年度后一个季度\n cond = df['asof_date'].isnull()\n df.loc[cond, 'asof_date'] = df.loc[cond, '分红年度'] + pd.Timedelta(days=45)\n # 重要:对未分派的记录,不得舍弃\n # 派息NaN -> 0.0 不影响实际意义,加快读写速度\n values = {'每股派息': 0.0}\n df.fillna(value=values, inplace=True)\n # 数值更改为每股派息\n df['每股派息'] = df['每股派息'] / 10.0\n df.sort_values(['sid', 'asof_date'], inplace=True, ignore_index=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n # datetime -> timestamp\n df = _to_timestamp(df)\n df[TS_FIELD_NAME] = df[AD_FIELD_NAME]\n return df\n\n\n# endregion\n\n# region 定期财务报告\n\n# 废弃\n# def _fix_sid_ad_ts(df, col='报告年度', ndays=45):\n# \"\"\"\n# 修复截止日期、公告日期。\n# 如果`asof_date`为空,则使用`col`的值\n# `timestamp`在`col`的值基础上加`ndays`天\"\"\"\n# df['sid'] = df['sid'].map(lambda x: int(x))\n# cond = df.asof_date.isna()\n# df.loc[cond, 'asof_date'] = df.loc[cond, col]\n# df.loc[cond, 'timestamp'] = df.loc[cond, col] + pd.Timedelta(days=ndays)\n# # 由于存在数据不完整的情形,当timestamp为空,在asof_date基础上加ndays\n# cond1 = df.timestamp.isna()\n# df.loc[cond1,\n# 'timestamp'] = df.loc[cond1, 'asof_date'] + pd.Timedelta(days=ndays)\n# # 1991-12-31 时段数据需要特别修正\n# cond2 = df.timestamp.map(lambda x: x.is_quarter_end)\n# cond3 = df.asof_date == df.timestamp\n# df.loc[cond2 & cond3,\n# 'timestamp'] = df.loc[cond2 & cond3,\n# 'asof_date'] + pd.Timedelta(days=ndays)\n\n\ndef _periodly_report(only_A, item_name):\n # 一般而言,定期财务报告截止日期与报告年度相同\n # 但不排除数据更正等情形下,报告年度与截止日期不一致\n to_drop = [\n '_id', '股票简称', '机构名称', '合并类型编码', '合并类型', '报表来源编码', '报表来源',\n '备注', '截止日期', '开始日期'\n ]\n db = get_db('cninfo')\n collection = db[item_name]\n pipeline = [\n {\n '$project': {k: 0 for k in to_drop}\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n # 规范列名称\n df.columns = df.columns.map(_normalized_col_name)\n df.rename(columns={\n \"股票代码\": \"sid\",\n \"报告年度\": \"asof_date\",\n \"公告日期\": \"timestamp\"\n },\n inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n df.sort_values(['sid', 'asof_date'], inplace=True)\n return df\n\n\ndef get_p_balance_data(only_A=True):\n \"\"\"报告期资产负债表\"\"\"\n item_name = '个股报告期资产负债表'\n df = _periodly_report(only_A, item_name)\n return df\n\n\ndef get_p_income_data(only_A=True):\n \"\"\"报告期利润表\"\"\"\n item_name = '个股报告期利润表'\n df = _periodly_report(only_A, item_name)\n return df\n\n\ndef get_p_cash_flow_data(only_A=True):\n \"\"\"报告期现金流量表\"\"\"\n item_name = '个股报告期现金表'\n df = _periodly_report(only_A, item_name)\n return df\n\n\ndef _financial_report_announcement_date(only_A):\n \"\"\"\n 获取财报公告日期,供其他计算类型的表使用\n\n 注:\n 季度报告、财务指标根据定期报告计算得来,数据中不含公告日期。\n 使用定期报告的公告日期作为`timestamp`\n \"\"\"\n db = get_db('cninfo')\n collection = db['个股报告期资产负债表']\n pipeline = [\n {\n '$project': {\n '_id': 0,\n '股票代码': 1,\n '公告日期': 1,\n '报告年度': 1,\n }\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n df.sort_values(['股票代码', '报告年度'], inplace=True, ignore_index=True)\n 
return df\n\n\ndef _get_report(only_A, item_name, to_drop, col='报告年度', keys=['股票代码', '报告年度']):\n \"\"\"\n 获取财务报告数据\n\n 使用报告期资产负债表的公告日期\n \"\"\"\n if '_id' not in to_drop:\n to_drop.append('_id')\n\n db = get_db('cninfo')\n collection = db[item_name]\n pipeline = [\n {\n '$project': {k: 0 for k in to_drop}\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n dates = _financial_report_announcement_date(only_A)\n if col != '报告年度':\n # 处理行业排名\n df['报告年度'] = df.pop(col)\n # 合并使用 公告日期\n df = df.join(dates.set_index(keys), on=keys)\n # 规范列名称\n df.columns = df.columns.map(_normalized_col_name)\n\n df.rename(columns={\n \"股票代码\": \"sid\",\n \"报告年度\": \"asof_date\",\n \"公告日期\": \"timestamp\"\n },\n inplace=True)\n df['sid'] = df['sid'].map(lambda x: int(x))\n df.sort_values(['sid', 'asof_date'], inplace=True)\n return df\n\n\n# endregion\n\n# region 单季度财务报告\n\n\ndef get_q_income_data(only_A=True):\n \"\"\"个股单季财务利润表\"\"\"\n item_name = '个股单季财务利润表'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\ndef get_q_cash_flow_data(only_A=True):\n \"\"\"个股单季现金流量表\"\"\"\n item_name = '个股单季现金流量表'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\n# endregion\n\n# region TTM\n\n\ndef get_ttm_income_data(only_A=True):\n \"\"\"个股TTM财务利润表\"\"\"\n item_name = '个股TTM财务利润表'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\ndef get_ttm_cash_flow_data(only_A=True):\n \"\"\"个股TTM现金流量表\"\"\"\n item_name = '个股TTM现金流量表'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\n# endregion\n\n# region 财务指标\n\n\ndef get_periodly_financial_indicator_data(only_A=True):\n \"\"\"个股报告期指标表\"\"\"\n item_name = '个股报告期指标表'\n to_drop = [\n '股票简称', '机构名称', '开始日期', '数据来源编码', '数据来源', 'last_refresh_time', '备注'\n ]\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\ndef get_financial_indicator_ranking_data(only_A=True):\n \"\"\"\n 财务指标行业排名\n\n 级别说明:申银万国二级行业\n \"\"\"\n item_name = '财务指标行业排名'\n to_drop = ['股票简称', '行业ID', '行业级别', '级别说明', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\ndef get_quarterly_financial_indicator_data(only_A=True):\n \"\"\"个股单季财务指标\"\"\"\n item_name = '个股单季财务指标'\n to_drop = ['股票简称', '开始日期', '合并类型编码', '合并类型', '备注']\n df = _get_report(only_A, item_name, to_drop)\n return df\n\n\n# endregion\n\n# region 业绩预告\n\n\ndef get_performance_forecaste_data(only_A=True):\n \"\"\"上市公司业绩预告\"\"\"\n item_name = '上市公司业绩预告'\n # 简化写入量,保留`业绩类型`\n to_drop = ['_id', '股票简称', '业绩类型编码', '业绩变化原因', '报告期最新记录标识', '备注']\n db = get_db('cninfo')\n collection = db[item_name]\n pipeline = [\n {\n '$project': {k: 0 for k in to_drop}\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n\n # 业绩预告反映未来事件\n\n cond = df['公告日期'].isnull()\n df.loc[cond, '公告日期'] = df.loc[cond, '报告年度'] - pd.Timedelta(days=45)\n # 保留`报告年度`列\n df.rename(columns={\n \"股票代码\": \"sid\",\n # \"报告年度\": \"asof_date\",\n \"公告日期\": \"timestamp\",\n }, inplace=True)\n # 将 asof_date 定义为前一小时\n df['asof_date'] = df['timestamp'] - pd.Timedelta(hours=1)\n df['sid'] = df['sid'].map(lambda x: int(x))\n # 深证信原始数据中 股票代码 \"002746\"\n # 公告日期 2013-10-13 报告年度 2016-09-30\n # 即做出提前三年的业绩预告,有违常理,需删除\n # 一般而言,业绩预告不会领先报告年度一个季度发布\n cond = df['timestamp'] - 
df['asof_date'] < pd.Timedelta(days=90)\n df = df.loc[cond, :]\n return df\n\n\n# endregion\n\n# region 股东股本\n\n\ndef get_shareholding_concentration_data(only_A=True):\n \"\"\"持股集中度\"\"\"\n item_name = '持股集中度'\n df = _get_report(only_A, item_name, [], col='截止日期')\n df.rename(columns={\n \"A股户数\": \"A股户数\",\n \"B股户数\": \"B股户数\",\n \"H股户数\": \"H股户数\",\n },\n inplace=True)\n # 更改为逻辑类型\n df['前十大股东'] = df['前十大股东'] == '前十大股东'\n df.sort_values(['sid', 'asof_date'], inplace=True)\n return df\n\n\n# endregion\n\n# region 投资评级\n\n\ndef get_investment_rating_data(only_A=True):\n \"\"\"投资评级\"\"\"\n item_name = '投资评级'\n to_drop = ['_id', '前一次投资评级', '股票简称', '投资评级',\n '评级变化', '是否首次评级', \"目标价格(下限)\", \"目标价格(上限)\"]\n db = get_db('cninfo')\n collection = db[item_name]\n pipeline = [\n {\n '$project': {k: 0 for k in to_drop}\n }\n ]\n if only_A:\n pipeline.insert(0, MATCH_ONLY_A)\n ds = collection.aggregate(pipeline)\n df = pd.DataFrame.from_records(ds)\n\n df.rename(columns={\n \"股票代码\": \"sid\",\n \"发布日期\": \"asof_date\",\n \"投资评级(经调整)\": \"投资评级\",\n },\n inplace=True)\n df.dropna(subset=['投资评级'], inplace=True)\n df['timestamp'] = df['asof_date']\n # 至少相差一小时\n df['asof_date'] -= pd.Timedelta(hours=1)\n df['sid'] = df['sid'].map(lambda x: int(x))\n return df\n\n\n# endregion\n"
] | [
[
"pandas.DataFrame",
"pandas.Timedelta",
"pandas.DataFrame.from_records",
"numpy.log",
"pandas.concat",
"pandas.Timestamp",
"pandas.pivot_table"
]
] |
DuckerMan/ru-gpts | [
"cbe8b1e4ae3b88642292590f51c5eacdea7b6846"
] | [
"utils.py"
] | [
"# coding=utf-8\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for logging and serialization\"\"\"\n\nimport os\nimport random\nimport time\n\nimport numpy as np\nimport torch\nfrom torch.nn.parallel.distributed import DistributedDataParallel as torchDDP\n\nimport mpu\nfrom fp16 import FP16_Optimizer\n\n\ndef print_rank_0(message):\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(message, flush=True)\n else:\n print(message, flush=True)\n\n\ndef print_args(args):\n \"\"\"Print arguments.\"\"\"\n\n print('arguments:', flush=True)\n for arg in vars(args):\n dots = '.' * (29 - len(arg))\n print(' {} {} {}'.format(arg, dots, getattr(args, arg)), flush=True)\n\n\ndef print_params_min_max_norm(optimizer, iteration):\n \"\"\"Print min, max, and norm of all parameters.\"\"\"\n index = 0\n rank = torch.distributed.get_rank()\n string = 'iteration, rank, index, model-parallel,min, max, norm\\n'\n optimizer_ = optimizer\n if isinstance(optimizer, FP16_Optimizer):\n optimizer_ = optimizer.optimizer\n for param_group in optimizer_.param_groups:\n for param in param_group['params']:\n index += 1\n min_ = param.data.min()\n max_ = param.data.max()\n norm = param.data.norm()\n string += '{:7d}, {:4d}, {:4d}, {:2d}, '.format(\n iteration, rank, index, int(param.model_parallel))\n string += '{:.6E}, {:.6E}, {:.6E}\\n'.format(min_, max_, norm)\n print(string, flush=True)\n\n\nclass Timers:\n \"\"\"Group of timers.\"\"\"\n\n class Timer:\n \"\"\"Timer.\"\"\"\n\n def __init__(self, name):\n self.name_ = name\n self.elapsed_ = 0.0\n self.started_ = False\n self.start_time = time.time()\n\n def start(self):\n \"\"\"Start the timer.\"\"\"\n assert not self.started_, 'timer has already been started'\n torch.cuda.synchronize()\n self.start_time = time.time()\n self.started_ = True\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n assert self.started_, 'timer is not started'\n torch.cuda.synchronize()\n self.elapsed_ += (time.time() - self.start_time)\n self.started_ = False\n\n def reset(self):\n \"\"\"Reset timer.\"\"\"\n self.elapsed_ = 0.0\n self.started_ = False\n\n def elapsed(self, reset=True):\n \"\"\"Calculate the elapsed time.\"\"\"\n started_ = self.started_\n # If the timing in progress, end it first.\n if self.started_:\n self.stop()\n # Get the elapsed time.\n elapsed_ = self.elapsed_\n # Reset the elapsed time\n if reset:\n self.reset()\n # If timing was in progress, set it back.\n if started_:\n self.start()\n return elapsed_\n\n def __init__(self):\n self.timers = {}\n\n def __call__(self, name):\n if name not in self.timers:\n self.timers[name] = self.Timer(name)\n return self.timers[name]\n\n def log(self, names, normalizer=1.0, reset=True):\n \"\"\"Log a group of timers.\"\"\"\n assert normalizer > 0.0\n string = 'time (ms)'\n for name in names:\n elapsed_time = self.timers[name].elapsed(\n reset=reset) * 1000.0 / normalizer\n string += ' | {}: {:.2f}'.format(name, 
elapsed_time)\n print_rank_0(string)\n\n\ndef report_memory(name):\n \"\"\"Simple GPU memory report.\"\"\"\n\n mega_bytes = 1024.0 * 1024.0\n string = name + ' memory (MB)'\n string += ' | allocated: {}'.format(\n torch.cuda.memory_allocated() / mega_bytes)\n string += ' | max allocated: {}'.format(\n torch.cuda.max_memory_allocated() / mega_bytes)\n string += ' | cached: {}'.format(torch.cuda.memory_cached() / mega_bytes)\n string += ' | max cached: {}'.format(\n torch.cuda.max_memory_cached() / mega_bytes)\n print_rank_0(string)\n\n\ndef get_checkpoint_name(checkpoints_path, iteration, release=False, zero=False):\n if release:\n d = 'release'\n else:\n d = 'iter_{:07d}'.format(iteration)\n if zero:\n dp_rank = mpu.get_data_parallel_rank()\n d += '_zero_dp_rank_{}'.format(dp_rank)\n return os.path.join(checkpoints_path, d,\n 'mp_rank_{:02d}'.format(mpu.get_model_parallel_rank()),\n 'model_optim_rng.pt')\n\n\ndef ensure_directory_exists(filename):\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n\ndef get_checkpoint_tracker_filename(checkpoints_path):\n return os.path.join(checkpoints_path, 'latest_checkpointed_iteration.txt')\n\n\ndef save_zero_checkpoint(args, iteration, optimizer):\n zero_sd = {'iteration': iteration,\n 'optimizer_state_dict': optimizer.state_dict()}\n zero_checkpoint_name = get_checkpoint_name(args.save, iteration, zero=True)\n ensure_directory_exists(zero_checkpoint_name)\n torch.save(zero_sd, zero_checkpoint_name)\n print(' successfully saved {}'.format(zero_checkpoint_name))\n\n\ndef save_checkpoint(iteration, model, optimizer,\n lr_scheduler, args):\n \"\"\"Save a model checkpoint.\"\"\"\n if args.deepspeed:\n save_ds_checkpoint(iteration, model, args)\n else:\n # Only rank zer0 of the data parallel writes to the disk.\n if isinstance(model, torchDDP):\n model = model.module\n\n if mpu.get_data_parallel_rank() == 0:\n checkpoint_name = get_checkpoint_name(args.save, iteration)\n print('global rank {} is saving checkpoint at iteration {:7d} to {}'.\n format(torch.distributed.get_rank(), iteration, checkpoint_name))\n\n sd = {}\n sd['iteration'] = iteration\n sd['model'] = model.state_dict()\n\n # Optimizer stuff.\n if not args.no_save_optim:\n if optimizer is not None:\n sd['optimizer'] = optimizer.state_dict()\n if lr_scheduler is not None:\n sd['lr_scheduler'] = lr_scheduler.state_dict()\n\n # rng states.\n if not args.no_save_rng:\n sd['random_rng_state'] = random.getstate()\n sd['np_rng_state'] = np.random.get_state()\n sd['torch_rng_state'] = torch.get_rng_state()\n sd['cuda_rng_state'] = torch.cuda.get_rng_state()\n sd['rng_tracker_states'] = mpu.get_cuda_rng_tracker().get_states()\n\n ensure_directory_exists(checkpoint_name)\n torch.save(sd, checkpoint_name)\n print(' successfully saved {}'.format(checkpoint_name))\n\n # Wait so everyone is done (necessary)\n torch.distributed.barrier()\n # And update the latest iteration\n if torch.distributed.get_rank() == 0:\n tracker_filename = get_checkpoint_tracker_filename(args.save)\n with open(tracker_filename, 'w') as f:\n f.write(str(iteration))\n # Wait so everyone is done (not necessary)\n torch.distributed.barrier()\n\n\ndef save_ds_checkpoint(iteration, model, args):\n \"\"\"Save a model checkpoint.\"\"\"\n\n sd = {}\n sd['iteration'] = iteration\n # rng states.\n if not args.no_save_rng:\n sd['random_rng_state'] = random.getstate()\n sd['np_rng_state'] = np.random.get_state()\n sd['torch_rng_state'] = torch.get_rng_state()\n sd['cuda_rng_state'] = 
torch.cuda.get_rng_state()\n sd['rng_tracker_states'] = mpu.get_cuda_rng_tracker().get_states()\n\n model.save_checkpoint(args.save, iteration, client_state=sd)\n\n\ndef get_checkpoint_iteration(args):\n # Read the tracker file and set the iteration.\n tracker_filename = get_checkpoint_tracker_filename(args.load)\n if not os.path.isfile(tracker_filename):\n print_rank_0('WARNING: could not find the metadata file {} '.format(\n tracker_filename))\n print_rank_0(' will not load any checkpoints and will start from '\n 'random')\n return 0, False, False\n iteration = 0\n release = False\n with open(tracker_filename, 'r') as f:\n metastring = f.read().strip()\n try:\n iteration = int(metastring)\n except ValueError:\n release = metastring == 'release'\n if not release:\n print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(\n tracker_filename))\n exit()\n\n assert iteration > 0 or release, 'error parsing metadata file {}'.format(\n tracker_filename)\n\n return iteration, release, True\n\n\ndef load_checkpoint(model, optimizer, lr_scheduler, args):\n \"\"\"Load a model checkpoint.\"\"\"\n\n iteration, release, success = get_checkpoint_iteration(args)\n\n if not success:\n return 0\n\n if args.deepspeed:\n raise NotImplemented(\"No installed deep speed\")\n\n else:\n\n # Checkpoint.\n checkpoint_name = get_checkpoint_name(args.load, iteration, release)\n\n if mpu.get_data_parallel_rank() == 0:\n print('global rank {} is loading checkpoint {}'.format(\n torch.distributed.get_rank(), checkpoint_name))\n\n if args.load_openai:\n from utils import move_weights\n from model import DistributedDataParallel as DDP\n from fp16 import FP16_Module\n model_path = args.load\n from transformers import GPT2LMHeadModel\n print('global rank {} is loading openai weights {}'.format(\n torch.distributed.get_rank(), model_path))\n model.cpu()\n gpt2model = GPT2LMHeadModel.from_pretrained(model_path, cache_dir='gpt2_weights')\n model2fill = model\n while isinstance(model2fill, (DDP, FP16_Module)):\n model2fill = model2fill.module\n move_weights(model2fill, gpt2model)\n model.cuda(torch.cuda.current_device())\n sd = {}\n else:\n sd = torch.load(checkpoint_name, map_location='cpu')\n\n if isinstance(model, torchDDP):\n model = model.module\n\n # Model.\n try:\n model.load_state_dict(sd['model'])\n except KeyError:\n print_rank_0('A metadata file exists but unable to load model '\n 'from checkpoint {}, exiting'.format(checkpoint_name))\n exit()\n\n # Optimizer.\n if not release and not args.finetune and not args.no_load_optim:\n try:\n if optimizer is not None:\n optimizer.load_state_dict(sd['optimizer'])\n if lr_scheduler is not None:\n lr_scheduler.load_state_dict(sd['lr_scheduler'])\n except KeyError:\n print_rank_0('Unable to load optimizer from checkpoint {}, exiting. 
'\n 'Specify --no-load-optim or --finetune to prevent '\n 'attempting to load the optimizer '\n 'state.'.format(checkpoint_name))\n exit()\n\n # Iterations.\n if args.finetune or release:\n iteration = 0\n else:\n try:\n iteration = sd['iteration']\n except KeyError:\n try: # Backward compatible with older checkpoints\n iteration = sd['total_iters']\n except KeyError:\n print_rank_0('A metadata file exists but Unable to load iteration '\n ' from checkpoint {}, exiting'.format(checkpoint_name))\n exit()\n\n # rng states.\n if not release and not args.finetune and not args.no_load_rng:\n try:\n random.setstate(sd['random_rng_state'])\n np.random.set_state(sd['np_rng_state'])\n torch.set_rng_state(sd['torch_rng_state'])\n torch.cuda.set_rng_state(sd['cuda_rng_state'])\n mpu.get_cuda_rng_tracker().set_states(sd['rng_tracker_states'])\n except KeyError:\n print_rank_0('Unable to load optimizer from checkpoint {}, exiting. '\n 'Specify --no-load-optim or --finetune to prevent '\n 'attempting to load the optimizer '\n 'state.'.format(checkpoint_name))\n exit()\n\n torch.distributed.barrier()\n if mpu.get_data_parallel_rank() == 0:\n print(' successfully loaded {}'.format(checkpoint_name))\n\n return iteration\n\n\ndef load_weights(src, dst, dst2src=False):\n \"\"\"\n Loads weights from src to dst via in place copy.\n src is a huggingface gpt2model, while dst is one of our models.\n dst2src=True loads parameters from our models into huggingface's.\n ^dst2src is still untested\n \"\"\"\n conv_layer = 'Conv1D' in str(type(src))\n for n, p in src.named_parameters():\n if dst2src:\n data = dst._parameters[n].data\n load = p.data\n else:\n data = p.data\n load = dst._parameters[n].data\n if conv_layer and 'weight' in n:\n data = data.t().contiguous()\n load.copy_(data)\n\n\n# dst._parameters[n].data.copy_(data)\n\ndef load_mlp(our, oai, dst2src=False):\n load_weights(oai.c_fc, our.dense_h_to_4h, dst2src)\n load_weights(oai.c_proj, our.dense_4h_to_h, dst2src)\n\n\ndef load_attention(our, oai, dst2src=False):\n load_weights(oai.c_attn, our.query_key_value, dst2src)\n load_weights(oai.c_proj, our.dense, dst2src)\n\n\ndef load_transformer_layer(our, oai, dst2src=False):\n load_weights(oai.ln_1, our.input_layernorm, dst2src)\n load_weights(oai.ln_2, our.post_attention_layernorm, dst2src)\n load_mlp(our.mlp, oai.mlp, dst2src)\n load_attention(our.attention, oai.attn, dst2src)\n\n\ndef move_weights(our, oai, dst2src=False):\n \"\"\"\n Loads weights from `oai` to `our` via in place copy.\n `oai` is a huggingface gpt2model, while `our` is one of our models.\n dst2src=True loads parameters from our models into huggingface's.\n ^dst2src=True is still untested\n \"\"\"\n # while isinstance(our, (torchDDP, model.distributed.DistributedDataParallel, FP16_Module)):\n # our=our.module\n transformer_model = oai.transformer\n load_weights(transformer_model.ln_f, our.transformer.final_layernorm, dst2src)\n load_weights(transformer_model.wte, our.word_embeddings, dst2src)\n load_weights(transformer_model.wpe, our.position_embeddings, dst2src)\n\n for our_layer, oai_layer in zip(our.transformer.layers, oai.transformer.h):\n load_transformer_layer(our_layer, oai_layer, dst2src)\n"
] | [
[
"numpy.random.set_state",
"torch.get_rng_state",
"torch.distributed.get_rank",
"torch.load",
"numpy.random.get_state",
"torch.cuda.memory_cached",
"torch.cuda.get_rng_state",
"torch.cuda.set_rng_state",
"torch.save",
"torch.cuda.synchronize",
"torch.cuda.current_device",
"torch.distributed.is_initialized",
"torch.distributed.barrier",
"torch.set_rng_state",
"torch.cuda.max_memory_allocated",
"torch.cuda.memory_allocated",
"torch.cuda.max_memory_cached"
]
] |
jesbu1/h-baselines | [
"f6f775bb18de22527f2d01d73bd733ed2e435ba3"
] | [
"SocialRobotCustom/python/social_bot/gazebo_agent.py"
] | [
"# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport os\nimport time\nimport random\nimport json\nimport gin\nimport numpy as np\nimport PIL.Image\nfrom collections import OrderedDict\nimport gym\nfrom absl import logging\nimport social_bot\nimport social_bot.pygazebo as gazebo\n\n\[email protected]\nclass GazeboAgent():\n \"\"\" Class for the agent of gazebo-based SocialRobot enviroments\n \"\"\"\n\n def __init__(self,\n world,\n agent_type,\n name=None,\n config=None,\n use_image_observation=True,\n resized_image_size=None,\n image_with_internal_states=False,\n with_language=False,\n with_agent_language=False,\n vocab_sequence_length=20,\n action_wrapper=None):\n \"\"\"\n Args:\n world (pygazebo.World): the world\n agent_type (str): the agent_type, supporting pr2_noplugin,\n pioneer2dx_noplugin, turtlebot, youbot_noplugin and icub_with_hands for now\n note that 'agent_type' should be exactly the same string as the model's\n name at the beginning of model's sdf file\n name (str): the name of the agent in world\n if None it will be set the same as agent_type\n config (dict): the configuarations for the agent\n see `agent_cfg.jason` for details\n use_image_observation (bool): Use image or not\n resized_image_size (None|tuple): If None, use the original image size\n from the camera. Otherwise, the original image will be resized\n to (width, height)\n image_with_internal_states (bool): If true, the agent's self internal states\n i.e., joint position and velocities would be available together with image.\n Only affect if use_image_observation is true\n with_language (bool): The observation will be a dict with an extra sentence\n with_agent_language (bool): Include language in agent's action space\n vocab_sequence_length (int): the length of encoded sequence if with_language\n action_wrapper (None|class): Some times primitive joints is not wanted, e.g., has\n redundant dimensions or offset. If not None, this is used to transform the agent\n actions. 
See ActionWrapper of gazebo_agent.py for example.\n \"\"\"\n self._world = world\n self.type = agent_type\n self._use_image_observation = use_image_observation\n self._resized_image_size = resized_image_size\n self._image_with_internal_states = image_with_internal_states\n self._with_language = with_language\n self._with_agent_language = with_agent_language\n self._vocab_sequence_length = vocab_sequence_length\n self._sentence_space = None\n\n if config == None:\n # Load agent configurations\n with open(\n os.path.join(social_bot.get_model_dir(), \"agent_cfg.json\"),\n 'r') as cfg_file:\n agent_cfgs = json.load(cfg_file)\n config = agent_cfgs[agent_type]\n self.config = config\n joints = config['control_joints']\n if action_wrapper is not None:\n self._action_wrapper = action_wrapper()\n self._action_dim = self._action_wrapper.get_actions_dim()\n else:\n self._action_wrapper = None\n self._action_dim = len(joints)\n\n if name:\n # the agent is wrapped by a new name in world\n self.name = name\n self.joints = []\n for joint in joints:\n self.joints.append(name + '::' + joint)\n else:\n self.name = agent_type\n self.joints = joints\n self._agent = self._world.get_agent(self.name)\n\n # Set the funtions from pygazebo.agent to Agent\n self.get_pose = self._agent.get_pose\n self.set_pose = self._agent.set_pose\n self.get_link_pose = self._agent.get_link_pose\n self.set_link_pose = self._agent.set_link_pose\n self.get_joint_state = self._agent.get_joint_state\n self.set_joint_state = self._agent.set_joint_state\n self.set_pid_controller = self._agent.set_pid_controller\n self.get_collisions = self._agent.get_collisions\n self.get_velocities = self._agent.get_velocities\n\n # Setup joints and sensors\n self._camera = config['camera_sensor']\n self.action_range = self.setup_joints(self._agent, self.joints, config)\n logging.debug(\"joints to control: %s\" % self.joints)\n\n def reset(self):\n \"\"\" Reset the agent. \"\"\"\n self._agent.reset()\n\n def take_action(self, action):\n \"\"\" Take actions.\n \n Args:\n the actions to be taken.\n \"\"\"\n if self._action_wrapper is not None:\n action = self._action_wrapper.wrap_actions(action)\n controls = np.clip(action, -1.0, 1.0) * self.action_range\n controls_dict = dict(zip(self.joints, controls))\n self._agent.take_action(controls_dict)\n\n def get_observation(self, teacher, sentence_raw=\"hello\"):\n \"\"\" Get the observation of agent.\n\n Args:\n teacher (social_bot.Teacher): the teacher, used to get the task specific\n observations from teacher's taskgroups.\n sentence_raw (string): the sentence intened to sent to the Agent. This can\n be ignored if with_language is False.\n Returns:\n obs (dict |numpy.array): the return depends on the configurations: with\n language or not, use image or not, and image_with_internal_states or not.\n Possible situations:\n low-dimensional full states\n low-dimensional full states with language sentence\n image from the camera of agent\n image with internal states\n image with language sentence\n image with both internal states and language sentence\n Note that low-dimensional full states is defined in\n \"Task.task_specific_observation()\", which has all the infomation need\n for the task. While the internal states that used as a supplementary\n to image is form \"Agent.get_internal_states()\", which only contains\n self joint positions and velocities. 
Joint positions are wrapped with\n sin() and cos() to avoid the discontinuous point at 0 to 2*pi.\n \"\"\"\n if self._image_with_internal_states or self._with_language:\n # observation is an OrderedDict\n obs = self._create_observation_dict(teacher, sentence_raw)\n elif self._use_image_observation: # observation is pure image\n obs = self.get_camera_observation()\n else: # observation is pure low-dimentional states\n obs = teacher.get_task_specific_observation(self)\n return obs\n\n def get_camera_observation(self):\n \"\"\" Get the camera image.\n\n Returns:\n a numpy.array of the image.\n \"\"\"\n image = np.array(\n self._agent.get_camera_observation(self._camera), copy=False)\n if self._resized_image_size:\n image = PIL.Image.fromarray(image).resize(self._resized_image_size,\n PIL.Image.ANTIALIAS)\n image = np.array(image, copy=False)\n return image\n\n def get_internal_states(self):\n \"\"\" Get the internal joint states of the agent.\n\n Returns:\n a numpy.array including joint positions and velocities\n \"\"\"\n joint_pos = []\n joint_vel = []\n for joint_id in range(len(self.joints)):\n joint_name = self.joints[joint_id]\n joint_state = self._agent.get_joint_state(joint_name)\n joint_pos.append(joint_state.get_positions())\n joint_vel.append(joint_state.get_velocities())\n joint_pos = np.array(joint_pos).flatten()\n joint_vel = np.array(joint_vel).flatten()\n # pos of continous joint could be huge, wrap the range with sin and cos.\n joint_pos_sin = np.sin(joint_pos)\n joint_pos_cos = np.cos(joint_pos)\n internal_states = np.concatenate(\n (joint_pos_sin, joint_pos_cos, joint_vel), axis=0)\n return internal_states\n\n def get_control_space(self):\n \"\"\" Get the pure controlling space without language. \"\"\"\n control_space = gym.spaces.Box(\n low=-1.0, high=1.0, shape=[self._action_dim], dtype=np.float32)\n return control_space\n\n def get_action_space(self):\n \"\"\" Get the action space with optional language. 
\"\"\"\n control_space = self.get_control_space()\n if self._with_agent_language and self._with_language:\n action_space = gym.spaces.Dict(\n control=control_space, sentence=self._sentence_space)\n else:\n action_space = control_space\n return action_space\n\n def get_observation_space(self, teacher):\n \"\"\"\n Get the observation space with optional language.\n\n Args:\n teacher (social_bot.Teacher): the teacher, used to get the task specific\n observations from teacher's taskgroups as a sample.\n \"\"\"\n obs_sample = self.get_observation(teacher)\n if self._with_language or self._image_with_internal_states:\n # observation is a dictionary\n observation_space = self._construct_dict_space(obs_sample)\n elif self._use_image_observation:\n # observation is image\n observation_space = gym.spaces.Box(\n low=0, high=255, shape=obs_sample.shape, dtype=np.uint8)\n else:\n # observation is spare states\n observation_space = gym.spaces.Box(\n low=-np.inf,\n high=np.inf,\n shape=obs_sample.shape,\n dtype=np.float32)\n return observation_space\n\n def set_sentence_space(self, sentence_space):\n \"\"\" Set the sentence if with_languange is enabled.\n\n Args:\n sentence_space (gym.spaces): the space for sentence sequence\n \"\"\"\n self._sentence_space = sentence_space\n\n def _create_observation_dict(self, teacher, sentence_raw):\n obs = OrderedDict()\n if self._use_image_observation:\n obs['image'] = self.get_camera_observation()\n if self._image_with_internal_states:\n obs['states'] = self.get_internal_states()\n else:\n obs['states'] = teacher.get_task_specific_observation(self)\n if self._with_language:\n obs['sentence'] = teacher.sentence_to_sequence(\n sentence_raw, self._vocab_sequence_length)\n return obs\n\n def _construct_dict_space(self, obs_sample):\n \"\"\" A helper function when gym.spaces.Dict is used as observation.\n\n Args:\n obs_sample (numpy.array|dict) : a sample observation\n Returns:\n Return a gym.spaces.Dict with keys 'image', 'states', 'sentence'\n Possible situation:\n image with internal states\n image with language sentence\n image with both internal states and language sentence\n pure low-dimensional states with language sentence\n \"\"\"\n ob_space_dict = dict()\n if 'image' in obs_sample.keys():\n ob_space_dict['image'] = gym.spaces.Box(\n low=0,\n high=255,\n shape=obs_sample['image'].shape,\n dtype=np.uint8)\n if 'states' in obs_sample.keys():\n ob_space_dict['states'] = gym.spaces.Box(\n low=-np.inf,\n high=np.inf,\n shape=obs_sample['states'].shape,\n dtype=np.float32)\n if 'sentence' in obs_sample.keys():\n ob_space_dict['sentence'] = self._sentence_space\n ob_space = gym.spaces.Dict(ob_space_dict)\n return ob_space\n\n def setup_joints(self, agent, joints, agent_cfg):\n \"\"\" Setup the joints acrroding to agent configuration.\n\n Args:\n agent (pygazebo.Agent): the agent\n joints (list of string): the name of joints\n agent_cfg (dict): the configuration\n \"\"\"\n joint_states = list(map(lambda s: agent.get_joint_state(s), joints))\n joints_limits = list(\n map(lambda s: s.get_effort_limits()[0], joint_states))\n print(\"JOINT LIMITS: %s\" % joints_limits)\n print(\"USE PID: %s\" % str(agent_cfg['use_pid']))\n if agent_cfg['use_pid']:\n for joint_index in range(len(joints)):\n agent.set_pid_controller(\n joint_name=joints[joint_index],\n pid_control_type=agent_cfg['pid_type'][joint_index],\n p=agent_cfg['pid'][joint_index][0],\n i=agent_cfg['pid'][joint_index][1],\n d=agent_cfg['pid'][joint_index][2],\n max_force=joints_limits[joint_index])\n control_range = 
agent_cfg['pid_control_limit']\n else:\n control_range = np.array(joints_limits)\n return control_range\n\n def get_egocentric_cord_2d(self, x, y, agent_yaw):\n \"\"\" Get the egocentric coordinate from a global 2D x-y plane coordinate.\n\n This is achieved by rotating the global coordinates x, y by -agent_yaw.\n\n Args:\n x (float): x of global x-y plane coordinate\n y (float): y of global x-y plane coordinate\n agent_yaw (float): agent yaw (rotation in z-axis), in radian\n Returns:\n tuple of float, the position in the transformed coordinate\n \"\"\"\n rotate = -agent_yaw\n rotated_x = x * np.cos(rotate) - y * np.sin(rotate)\n rotated_y = x * np.sin(rotate) + y * np.cos(rotate)\n return (rotated_x, rotated_y)\n\n def get_contacts(self, contacts_sensor, contact_collision):\n \"\"\" Get contacts to the link.\n\n Args:\n contacts_sensor(string): the name of contacts_sensor\n contact_collision(string): the collision to check contacts\n Returns:\n bool, there is contact or not\n \"\"\"\n contacts = self.get_collisions(contacts_sensor)\n for collision in contacts:\n if collision[0] == contact_collision or collision[\n 1] == contact_collision:\n return True\n return False\n\n\nclass ActionWrapper():\n \"\"\" The action wrapper transform a new actions to primitive actions.\n\n The primitive actions (like the force/velocity/position of joints) may have redundant\n dimensions or offsets. By the action wrapper, we can transform the action to more\n efficency one. The sub class should define the new action space in _NEW_ACTION_LIST.\n \"\"\"\n\n _NEW_ACTION_LIST = []\n\n def get_actions_dim(self):\n \"\"\" Get the dimension of the new action space\n \"\"\"\n return len(self._NEW_ACTION_LIST)\n\n def wrap_actions(self, action):\n \"\"\" Wrap transformed actions to primitive actions.\n\n Args:\n action (nparray): the new action from policy network\n Returns:\n np.array, the primitive actions send to simulator\n \"\"\"\n raise NotImplementedError(\"wrap_actions not implemented!\")\n\n\[email protected]\nclass YoubotActionWrapper(ActionWrapper):\n \"\"\" This action wrapper transform a new actions to primitive actions.\n\n The new action space is the same as keyboard demostration interface, defined in _NEW_ACTION_LIST\n The primitive actions (the joints) please refer to social_bot/models/agent_cfg.json.\n \"\"\"\n\n _NEW_ACTION_LIST = [\n 'arm_joint_yaw', 'arm_joint_pitch', 'arm_joint_pitch_2', 'palm_joint',\n 'gripper_finger_joint', 'wheel_speed', 'wheel_turning'\n ]\n\n def wrap_actions(self, action):\n \"\"\" Wrap transformed actions to primitive actions.\n\n Args:\n action (nparray): the new action from policy network\n Returns:\n np.array, the primitive actions send to simulator\n \"\"\"\n action = dict(zip(self._NEW_ACTION_LIST, action))\n primitive_actions = [\n # arm joints\n action['arm_joint_yaw'],\n 0.25 + action['arm_joint_pitch'] / 2, # add pi/4 offset\n 0.25 + action['arm_joint_pitch'] / 2,\n 0.25 + action['arm_joint_pitch_2'],\n action['palm_joint'],\n # gripper joints\n action['gripper_finger_joint'],\n action['gripper_finger_joint'],\n # wheel joints\n action['wheel_speed'] + action['wheel_turning'],\n action['wheel_speed'] - action['wheel_turning']\n ]\n return primitive_actions\n"
] | [
[
"numpy.cos",
"numpy.clip",
"numpy.array",
"numpy.sin",
"numpy.concatenate"
]
] |
Tgordon523/ps5_analysis | [
"019b8f956ef552624dc823583e19bcf384dcfc08"
] | [
"ps5_shortage.py"
] | [
"### Script to pull and update data tracking\n### import packages\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport requests\nimport io\nimport pathlib\nfrom bs4 import BeautifulSoup\n\n\ndef dataset_load() -> pd.DataFrame():\n \"\"\"\n Function to load and save data regarding ps5 availability history\n \"\"\"\n ### get site\n url = \"https://www.nowinstock.net/videogaming/consoles/sonyps5/full_history.php\"\n\n if isinstance(url, str):\n try:\n res = requests.get(url)\n res.raise_for_status()\n except requests.exceptions.HTTPError as e:\n print(e.response.text)\n else:\n return None\n\n if res.status_code == 200:\n r = res.text\n soup = BeautifulSoup(r)\n ### get table and load to df\n table = soup.find_all(\"table\")\n df = pd.read_html(str(table))[0]\n\n return df\n\n\nif __name__ == \"__main__\":\n data_raw = dataset_load()\n save_dataset = (\n pathlib.Path(r\"C:\\Users\\tgord\\MyPyScripts\\PS5_EDA\")\n / \"ps5_analysis\"\n / \"data\"\n / \"dataset_raw.csv\"\n )\n print(save_dataset)\n data_raw.to_csv(\n save_dataset,\n index=False,\n )\n"
] | [
[
"pandas.DataFrame"
]
] |
jakartaresearch/receipt-ocr | [
"003e067eb7d80495226ad15235fa1d626a09103e"
] | [
"src/text_detector/load_model.py"
] | [
"import torch\nimport torch.backends.cudnn as cudnn\n\nfrom collections import OrderedDict\nfrom .modules.utils import yaml_loader, create_model_for_provider\nfrom .modules.craft import CRAFT\n\n\ndef copy_state_dict(state_dict):\n if list(state_dict.keys())[0].startswith(\"module\"):\n start_idx = 1\n else:\n start_idx = 0\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = \".\".join(k.split(\".\")[start_idx:])\n new_state_dict[name] = v\n return new_state_dict\n\n\ndef load_craft(config_file, model_pth):\n cfg = yaml_loader(config_file)\n net = CRAFT()\n\n print(\"Loading weights from checkpoint (\" + model_pth + \")\")\n if cfg[\"cuda\"]:\n net.load_state_dict(copy_state_dict(torch.load(model_pth)))\n else:\n net.load_state_dict(copy_state_dict(torch.load(model_pth, map_location=\"cpu\")))\n\n if cfg[\"cuda\"]:\n net = net.cuda()\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = False\n\n net.eval()\n return cfg, net\n\n\ndef load_craft_onnx(config_file, model_pth):\n cfg = yaml_loader(config_file)\n device = \"CUDAExecutionProvider\" if torch.cuda.is_available() else \"CPUExecutionProvider\"\n print(\"Loading weights from checkpoint (\" + model_pth + \")\")\n net = create_model_for_provider(model_pth, device)\n return cfg, net\n"
] | [
[
"torch.nn.DataParallel",
"torch.cuda.is_available",
"torch.load"
]
] |
MannyKayy/DeepSpeed | [
"67821f95e4ee04f65965eac4ecc1ffacab4302e6"
] | [
"deepspeed/pt/deepspeed_timer.py"
] | [
"'''\nCopyright 2019 The Microsoft DeepSpeed Team\n'''\n\nimport time\nimport psutil\nimport torch\n\nfrom deepspeed.pt.log_utils import logger\n\n\ndef print_rank_0(message):\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n logger.info(message)\n else:\n logger.info(message)\n\n\nclass SynchronizedWallClockTimer:\n \"\"\"Group of timers. Borrowed from Nvidia Megatron code\"\"\"\n class Timer:\n \"\"\"Timer.\"\"\"\n def __init__(self, name):\n self.name_ = name\n self.elapsed_ = 0.0\n self.started_ = False\n self.start_time = time.time()\n\n def start(self):\n \"\"\"Start the timer.\"\"\"\n assert not self.started_, 'timer has already been started'\n torch.cuda.synchronize()\n self.start_time = time.time()\n self.started_ = True\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n assert self.started_, 'timer is not started'\n torch.cuda.synchronize()\n self.elapsed_ += (time.time() - self.start_time)\n self.started_ = False\n\n def reset(self):\n \"\"\"Reset timer.\"\"\"\n self.elapsed_ = 0.0\n self.started_ = False\n\n def elapsed(self, reset=True):\n \"\"\"Calculate the elapsed time.\"\"\"\n started_ = self.started_\n # If the timing in progress, end it first.\n if self.started_:\n self.stop()\n # Get the elapsed time.\n elapsed_ = self.elapsed_\n # Reset the elapsed time\n if reset:\n self.reset()\n # If timing was in progress, set it back.\n if started_:\n self.start()\n return elapsed_\n\n def __init__(self):\n self.timers = {}\n\n def __call__(self, name):\n if name not in self.timers:\n self.timers[name] = self.Timer(name)\n return self.timers[name]\n\n @staticmethod\n def memory_usage():\n alloc = \"mem_allocated: {:.4f} GB\".format(torch.cuda.memory_allocated() /\n (1024 * 1024 * 1024))\n max_alloc = \"max_mem_allocated: {:.4f} GB\".format(\n torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024))\n cache = \"cache_allocated: {:.4f} GB\".format(torch.cuda.memory_cached() /\n (1024 * 1024 * 1024))\n max_cache = \"max_cache_allocated: {:.4f} GB\".format(\n torch.cuda.max_memory_cached() / (1024 * 1024 * 1024))\n return \" | {} | {} | {} | {}\".format(alloc, max_alloc, cache, max_cache)\n\n def log(self, names, normalizer=1.0, reset=True, memory_breakdown=False):\n \"\"\"Log a group of timers.\"\"\"\n assert normalizer > 0.0\n string = 'time (ms)'\n for name in names:\n elapsed_time = self.timers[name].elapsed(reset=reset) * 1000.0 / normalizer\n string += ' | {}: {:.2f}'.format(name, elapsed_time)\n if memory_breakdown:\n string += self.memory_usage()\n print_rank_0(string)\n\n\nclass ThroughputTimer():\n def __init__(self,\n batch_size,\n num_workers,\n start_step=2,\n steps_per_output=50,\n monitor_memory=True,\n logging_fn=None):\n self.start_time = 0\n self.end_time = 0\n self.started = False\n self.batch_size = batch_size\n if batch_size is None:\n self.batch_size = 1\n self.num_workers = num_workers\n self.start_step = start_step\n self.epoch_count = 0\n self.local_step_count = 0\n self.total_step_count = 0\n self.total_elapsed_time = 0\n self.steps_per_output = steps_per_output\n self.monitor_memory = monitor_memory\n self.logging = logging_fn\n if self.logging is None:\n self.logging = logger.info\n self.initialized = False\n\n def update_epoch_count(self):\n self.epoch_count += 1\n self.local_step_count = 0\n\n def _init_timer(self):\n self.initialized = True\n\n def start(self):\n self._init_timer()\n self.started = True\n if self.total_step_count >= self.start_step:\n torch.cuda.synchronize()\n self.start_time = time.time()\n\n def 
stop(self, report_speed=True):\n if not self.started:\n return\n self.started = False\n self.total_step_count += 1\n self.local_step_count += 1\n if self.total_step_count > self.start_step:\n torch.cuda.synchronize()\n self.end_time = time.time()\n duration = self.end_time - self.start_time\n self.total_elapsed_time += duration\n if self.local_step_count % self.steps_per_output == 0:\n if report_speed:\n self.logging(\"{}/{}, SamplesPerSec={}\".format(\n self.epoch_count,\n self.local_step_count,\n self.avg_samples_per_sec()))\n if self.monitor_memory:\n virt_mem = psutil.virtual_memory()\n swap = psutil.swap_memory()\n self.logging(\"{}/{}, vm percent: {}, swap percent: {}\".format(\n self.epoch_count,\n self.local_step_count,\n virt_mem.percent,\n swap.percent))\n\n def avg_samples_per_sec(self):\n if self.total_step_count > 0:\n samples_per_step = self.batch_size * self.num_workers\n total_step_offset = self.total_step_count - self.start_step\n avg_time_per_step = self.total_elapsed_time / total_step_offset\n # training samples per second\n return samples_per_step / avg_time_per_step\n return float(\"-inf\")\n"
] | [
[
"torch.distributed.get_rank",
"torch.cuda.memory_cached",
"torch.cuda.synchronize",
"torch.distributed.is_initialized",
"torch.cuda.max_memory_allocated",
"torch.cuda.memory_allocated",
"torch.cuda.max_memory_cached"
]
] |
lccatala/tfg_ros | [
"d8da2bc6b1e0036e34460d174e708764a3c6f4ca"
] | [
"tfg/src/pytorch_segmentation/utils/helpers.py"
] | [
"import os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport math\nimport PIL\nimport matplotlib.pyplot as plt\n\ndef show_images(images, in_row=True):\n '''\n Helper function to show 3 images\n '''\n total_images = len(images)\n\n rc_tuple = (1, total_images)\n if not in_row:\n rc_tuple = (total_images, 1)\n \n\t#figure = plt.figure(figsize=(20, 10))\n for ii in range(len(images)):\n plt.subplot(*rc_tuple, ii+1)\n plt.title(images[ii][0])\n plt.axis('off')\n plt.imshow(images[ii][1])\n plt.show()\n\ndef dir_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef initialize_weights(*models):\n for model in models:\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1.)\n m.bias.data.fill_(1e-4)\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0.0, 0.0001)\n m.bias.data.zero_()\n\ndef get_upsampling_weight(in_channels, out_channels, kernel_size):\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)\n weight[list(range(in_channels)), list(range(out_channels)), :, :] = filt\n return torch.from_numpy(weight).float()\n\ndef colorize_mask(mask, palette):\n zero_pad = 256 * 3 - len(palette)\n for i in range(zero_pad):\n palette.append(0)\n new_mask = PIL.Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n return new_mask\n\ndef set_trainable_attr(m,b):\n m.trainable = b\n for p in m.parameters(): p.requires_grad = b\n\ndef apply_leaf(m, f):\n c = m if isinstance(m, (list, tuple)) else list(m.children())\n if isinstance(m, nn.Module):\n f(m)\n if len(c)>0:\n for l in c:\n apply_leaf(l,f)\n\ndef set_trainable(l, b):\n apply_leaf(l, lambda m: set_trainable_attr(m,b))"
] | [
[
"torch.nn.init.kaiming_normal_",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"torch.from_numpy"
]
] |
Ray-Young/Machine_Learning_Study | [
"f665d4b9fe7c1c1f722c76e91a5b3f99b2b8a19c"
] | [
"K-Nearest-Neighbor/bk/process.py"
] | [
"import numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom decimal import Decimal\nimport csv\n\ndef processFile(inFile):\n lst = []\n with open(inFile) as f:\n for line in f:\n tmp = line.strip().split(\",\")\n lst.append(tmp)\n # sums = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n # count = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n \n arr = []\n for j in range(len(lst[0])):\n tmp = []\n for i in range(len(lst)): #get list without \"?\"\n if lst[i][j]!='?':\n tmp.append(lst[i][j])\n arr.append(tmp)\n #print(arr)\n \n \n median = []\n for l in arr:\n l.sort() #find median, then assign to \"?\" value\n m = l[int(len(l)/2)]\n median.append(m)\n #print(median)\n \n newlst = []\n for i in range(len(lst)):\n tmp = []\n for j in range(len(lst[0])):\n if lst[i][j]!='?':\n tmp.append(lst[i][j])\n else:\n tmp.append(median[j])\n newlst.append(tmp)\n #print(newlst)\n \n #newlst2 = []\n #std = []\n #for j in range(len(lst[0])):\n # temp = []\n # for i in range(len(lst)):\n # temp.append(newlst[i][j])\n # newlst2.append(temp)\n #std.append(np.std(temp))\n #print(newlst2)\n #print(std)\n \n #for l in newlst2:\n # np.mean(l)\n \n #vectorizer = TfidfVectorizer(stop_words='english', min_df=10,max_df=0.8)\n #dtm = vectorizer.fit_transform(newlst) \n #print(dtm)\n cat = []\n for i in range(len(newlst[0])):\n tmp = []\n cat.append(tmp)\n #print(cat)\n notDigital = [0,3,4,5,6,8,9,11,12]\n for i in range(len(newlst)):\n for j in range(len(newlst[0])):\n x = newlst[i][j]\n if j in notDigital:\n if x not in cat[j]:\n cat[j].append(x)\n \n \n # newlst2 will make all attributes become digital numbers\n newlst2=[]\n for i in range(len(newlst)):\n tmp = []\n for j in range(len(newlst[0])):\n x = newlst[i][j]\n if j in notDigital:\n tmp.append(cat[j].index(x))\n else:\n tmp.append(x)\n newlst2.append(tmp)\n #print(newlst2)\n \n std = []\n average = []\n \n for j in range (len(newlst2[0])-1):\n tmp = []\n for i in range (len(newlst2)):\n tmp.append(float(newlst2[i][j]))\n std.append(np.std(tmp))\n average.append(np.average(tmp))\n #print(std)\n #print(average)\n \n normalize = []\n for i in range(len(newlst2)):\n tmp = []\n for j in range(len(newlst2[0])):\n if(j == len(newlst2[0])-1):\n if(newlst2[i][j] == '+'):\n tmp.append(1)\n else:\n tmp.append(2)\n else:\n x = float(newlst2[i][j])\n z = (x-average[j])/std[j]\n tmp.append(z)\n normalize.append(tmp)\n #print(normalize)\n \n # int_normalize = []\n # for i in range(len(normalize)):\n # tmp = []\n # for j in range(len(normalize[0])):\n # s = normalize[i][j]\n # x = int(s*100)\n # tmp.append(x)\n # int_normalize.append(tmp)\n\n\n\n if(inFile == 'crx.data.training'):\n with open(\"crx.training.processed\",'a') as f:\n datawriter = csv.writer(f, delimiter= ',')\n for line in normalize:\n datawriter.writerow(line)\n if(inFile == 'crx.data.testing'):\n with open(\"crx.testing.processed\",'a') as f:\n datawriter = csv.writer(f, delimiter= ',')\n for line in normalize:\n datawriter.writerow(line)\n\n\n # test = [0,1,2,3]\n # std = np.std(test)\n # average = np.average(test)\n # print((test[3]-average)/std)\n\ndef run(infile1, infile2):\n processFile(infile1)\n \n processFile(infile2)\n "
] | [
[
"numpy.std",
"numpy.average"
]
] |
franklili3/pyfolio | [
"a63245b768e9b90154f31ca1a7a1a2472caafbfd"
] | [
"pyfolio/interesting_periods.py"
] | [
"#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generates a list of historical event dates that may have had\nsignificant impact on markets. See extract_interesting_date_ranges.\"\"\"\n\nimport pandas as pd\n\nfrom collections import OrderedDict\n\nPERIODS = OrderedDict()\n# Dotcom bubble\nPERIODS['Dotcom'] = (pd.Timestamp('20000310'), pd.Timestamp('20000910'))\n\n# 9/11\nPERIODS['9/11'] = (pd.Timestamp('20010911'), pd.Timestamp('20011011'))\n\n# 01/08/03 US Housing Bubble 2003\nPERIODS['US Housing'] = (\n pd.Timestamp('20030108'), pd.Timestamp('20030208'))\n\n# Market regimes\nPERIODS['Low Volatility Bull Market'] = (pd.Timestamp('20050101'),\n pd.Timestamp('20070801'))\n\n# August 2007, March and September of 2008, Q1 & Q2 2009,\nPERIODS['Fall2007'] = (pd.Timestamp('20071001'), pd.Timestamp('20081031'))\nPERIODS['Mar2008'] = (pd.Timestamp('20080301'), pd.Timestamp('20080401'))\n\n# Lehmann Brothers\nPERIODS['June2008'] = (pd.Timestamp('20080601'), pd.Timestamp('20080630'))\n\nPERIODS['Fall2009'] = (pd.Timestamp('20090801'), pd.Timestamp('20090831'))\n\nPERIODS['Fall2010'] = (\n pd.Timestamp('20100401'), pd.Timestamp('20100630'))\n\nPERIODS['2011年下跌期'] = (pd.Timestamp('20110901'),\n pd.Timestamp('20111230'))\n\nPERIODS['2012年下跌期'] = (\n pd.Timestamp('20120601'), pd.Timestamp('20121130'))\n\n\n# Market down-turn in August/Sept 2015\nPERIODS['2015年下跌期'] = (pd.Timestamp('20150601'), pd.Timestamp('20150930'))\n\nPERIODS['2016年下跌期'] = (pd.Timestamp('20160101'), pd.Timestamp('20160129'))\n\nPERIODS['2018年下跌期'] = (pd.Timestamp('20180201'), pd.Timestamp('20181228'))"
] | [
[
"pandas.Timestamp"
]
] |
gqkc/CLOSURE | [
"a0204396822ae70d91e44ecb12ae05e2e02e69d7"
] | [
"scripts/preprocess_questions.py"
] | [
"#!/usr/bin/env python3\n\n# This code is released under the MIT License in association with the following paper:\n#\n# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).\n#\n# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).\n\nimport sys\nimport os\nsys.path.insert(0, os.path.abspath('.'))\n\nimport argparse\n\nimport json\nimport os\n\nimport h5py\nimport numpy as np\n\nimport vr.programs\nfrom vr.preprocess import tokenize, encode, build_vocab\n\n\n\"\"\"\nPreprocessing script for CLEVR question files.\n\"\"\"\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mode', default='prefix',\n choices=['chain', 'prefix', 'postfix'])\nparser.add_argument('--input_questions_json', required=True, action='append')\nparser.add_argument('--q_family_shift', type=int, action='append')\nparser.add_argument('--input_vocab_json', default='')\nparser.add_argument('--expand_vocab', default=0, type=int)\nparser.add_argument('--unk_threshold', default=1, type=int)\nparser.add_argument('--encode_unk', default=0, type=int)\n\nparser.add_argument('--output_h5_file', required=True)\nparser.add_argument('--output_vocab_json', default='')\n\n\ndef program_to_str(program, mode):\n converter = vr.programs.ProgramConverter()\n if mode == 'chain':\n if not converter.is_chain(program):\n return None\n return vr.programs.list_to_str(program)\n elif mode == 'prefix':\n program_prefix = converter.list_to_prefix(program)\n return vr.programs.list_to_str(program_prefix)\n elif mode == 'postfix':\n program_postfix = converter.list_to_postfix(program)\n return vr.programs.list_to_str(program_postfix)\n return None\n\n\ndef main(args):\n if (args.input_vocab_json == '') and (args.output_vocab_json == ''):\n print('Must give one of --input_vocab_json or --output_vocab_json')\n return\n\n print('Loading data from', args.input_questions_json)\n if args.q_family_shift and len(args.q_family_shift):\n if len(args.q_family_shift) != len(args.input_questions_json):\n raise ValueError(\"shift must be provided for each question file\")\n q_family_shifts = args.q_family_shift\n else:\n q_family_shifts = [0] * len(args.input_questions_json)\n questions = []\n for q_file, shift in zip(args.input_questions_json, q_family_shifts):\n print(q_file)\n with open(q_file, 'r') as f:\n more_questions = json.load(f)['questions']\n for q in more_questions:\n q['question_family_index'] += shift\n questions.extend(more_questions)\n\n # Either create the vocab or load it from disk\n if args.input_vocab_json == '' or args.expand_vocab == 1:\n print('Building vocab')\n if 'answer' in questions[0]:\n answer_token_to_idx = build_vocab(\n (q['answer'] for q in questions)\n )\n question_token_to_idx = build_vocab(\n (q['question'] for q in questions),\n min_token_count=args.unk_threshold,\n punct_to_keep=[';', ','], punct_to_remove=['?', '.']\n )\n all_program_strs = []\n for q in questions:\n if 'program' not in q:\n continue\n program_str = program_to_str(q['program'], args.mode)\n if program_str is not None:\n all_program_strs.append(program_str)\n program_token_to_idx = build_vocab(all_program_strs)\n vocab = {\n 'question_token_to_idx': question_token_to_idx,\n 'program_token_to_idx': program_token_to_idx,\n 'answer_token_to_idx': answer_token_to_idx,\n }\n def arity(name):\n if name == 'scene':\n return 0\n if 'equal' in name or name in ['union', 'intersect', 'less_than', 'greater_than']:\n return 2\n return 
1\n vocab['program_token_arity'] = {name: arity(name) for name in program_token_to_idx}\n if args.input_vocab_json != '':\n print('Loading vocab')\n if args.expand_vocab == 1:\n new_vocab = vocab\n with open(args.input_vocab_json, 'r') as f:\n vocab = json.load(f)\n if args.expand_vocab == 1:\n num_new_words = 0\n for word in new_vocab['question_token_to_idx']:\n if word not in vocab['question_token_to_idx']:\n print('Found new word %s' % word)\n idx = len(vocab['question_token_to_idx'])\n vocab['question_token_to_idx'][word] = idx\n num_new_words += 1\n print('Found %d new words' % num_new_words)\n\n if args.output_vocab_json != '':\n with open(args.output_vocab_json, 'w') as f:\n json.dump(vocab, f)\n\n # Encode all questions and programs\n print('Encoding data')\n questions_encoded = []\n programs_encoded = []\n question_families = []\n orig_idxs = []\n image_idxs = []\n answers = []\n types = []\n for orig_idx, q in enumerate(questions):\n question = q['question']\n if 'program' in q:\n types += [q['program'][-1]['function']]\n\n orig_idxs.append(orig_idx)\n image_idxs.append(q['image_index'])\n if 'question_family_index' in q:\n question_families.append(q['question_family_index'])\n question_tokens = tokenize(question,\n punct_to_keep=[';', ','],\n punct_to_remove=['?', '.'])\n question_encoded = encode(question_tokens,\n vocab['question_token_to_idx'],\n allow_unk=args.encode_unk == 1)\n questions_encoded.append(question_encoded)\n\n if 'program' in q:\n program = q['program']\n program_str = program_to_str(program, args.mode)\n program_tokens = tokenize(program_str)\n program_encoded = encode(program_tokens, vocab['program_token_to_idx'])\n programs_encoded.append(program_encoded)\n\n if 'answer' in q:\n answers.append(vocab['answer_token_to_idx'][q['answer']])\n\n # Pad encoded questions and programs\n max_question_length = max(len(x) for x in questions_encoded)\n for qe in questions_encoded:\n while len(qe) < max_question_length:\n qe.append(vocab['question_token_to_idx']['<NULL>'])\n\n if len(programs_encoded) > 0:\n max_program_length = max(len(x) for x in programs_encoded)\n for pe in programs_encoded:\n while len(pe) < max_program_length:\n pe.append(vocab['program_token_to_idx']['<NULL>'])\n\n # Create h5 file\n print('Writing output')\n questions_encoded = np.asarray(questions_encoded, dtype=np.int32)\n programs_encoded = np.asarray(programs_encoded, dtype=np.int32)\n print(questions_encoded.shape)\n print(programs_encoded.shape)\n\n mapping = {}\n for i, t in enumerate(set(types)):\n mapping[t] = i\n\n print(mapping)\n\n types_coded = []\n for t in types:\n types_coded += [mapping[t]]\n\n with h5py.File(args.output_h5_file, 'w') as f:\n f.create_dataset('questions', data=questions_encoded)\n f.create_dataset('image_idxs', data=np.asarray(image_idxs))\n f.create_dataset('orig_idxs', data=np.asarray(orig_idxs))\n\n if len(programs_encoded) > 0:\n f.create_dataset('programs', data=programs_encoded)\n if len(question_families) > 0:\n f.create_dataset('question_families', data=np.asarray(question_families))\n if len(answers) > 0:\n f.create_dataset('answers', data=np.asarray(answers))\n if len(types) > 0:\n f.create_dataset('types', data=np.asarray(types_coded))\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n main(args)\n"
] | [
[
"numpy.asarray"
]
] |
LeDuySon/Vehicle-tracking-deepsort | [
"ab03375d11d83def0452260d7071e9c1cc7406c2"
] | [
"yolov3_deepsort.py"
] | [
"import os\nimport cv2\nimport time\nimport argparse\nimport torch\nimport warnings\nimport numpy as np\n\nfrom detector import build_detector\nfrom deep_sort import build_tracker\nfrom utils.draw import draw_boxes\nfrom utils.parser import get_config\nfrom utils.log import get_logger\nfrom utils.io import write_results\n\n\nclass VideoTracker(object):\n def __init__(self, cfg, args, video_path):\n self.cfg = cfg\n self.args = args\n self.video_path = video_path\n self.logger = get_logger(\"root\")\n self.video_name = video_path.split(\"/\")[-1].split(\".\")[0]\n use_cuda = args.use_cuda and torch.cuda.is_available()\n if not use_cuda:\n warnings.warn(\"Running in cpu mode which maybe very slow!\", UserWarning)\n\n if args.display:\n cv2.namedWindow(\"test\", cv2.WINDOW_NORMAL)\n cv2.resizeWindow(\"test\", args.display_width, args.display_height)\n\n if args.cam != -1:\n print(\"Using webcam \" + str(args.cam))\n self.vdo = cv2.VideoCapture(args.cam)\n else:\n self.vdo = cv2.VideoCapture()\n self.detector = build_detector(cfg, use_cuda=use_cuda)\n self.deepsort = build_tracker(cfg, use_cuda=use_cuda)\n self.class_names = self.detector.class_names\n print(\"Class name: \", self.class_names)\n\n def __enter__(self):\n if self.args.cam != -1:\n ret, frame = self.vdo.read()\n assert ret, \"Error: Camera error\"\n self.im_width = frame.shape[0]\n self.im_height = frame.shape[1]\n\n else:\n assert os.path.isfile(self.video_path), \"Path error\"\n self.vdo.open(self.video_path)\n self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))\n assert self.vdo.isOpened()\n\n if self.args.save_path:\n os.makedirs(self.args.save_path, exist_ok=True)\n\n # path of saved video and results\n self.save_video_path = os.path.join(self.args.save_path, self.video_name + \"_results.avi\")\n self.save_results_path = os.path.join(self.args.save_path, self.video_name + \"_results.txt\")\n\n # create video writer\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n self.writer = cv2.VideoWriter(self.save_video_path, fourcc, 20, (self.im_width, self.im_height))\n\n # logging\n self.logger.info(\"Save results to {}\".format(self.args.save_path))\n\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n if exc_type:\n print(exc_type, exc_value, exc_traceback)\n\n def run(self):\n results = []\n idx_frame = 0\n while self.vdo.grab():\n idx_frame += 1\n if idx_frame % self.args.frame_interval:\n continue\n\n start = time.time()\n _, ori_im = self.vdo.retrieve()\n im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)\n\n # do detection\n bbox_xywh, cls_conf, cls_ids = self.detector(im)\n\n # select person class\n mask = cls_ids < 7\n\n bbox_xywh = bbox_xywh[mask]\n # bbox dilation just in case bbox too small, delete this line if using a better pedestrian detector\n bbox_xywh[:, 3:] *= 1.2\n cls_conf = cls_conf[mask]\n\n # do tracking\n outputs = self.deepsort.update(bbox_xywh, cls_conf, im)\n\n # draw boxes for visualization\n if len(outputs) > 0:\n bbox_tlwh = []\n bbox_xyxy = outputs[:, :4]\n identities = outputs[:, -1]\n ori_im = draw_boxes(ori_im, bbox_xyxy, identities)\n\n for bb_xyxy in bbox_xyxy:\n bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(bb_xyxy))\n\n results.append((idx_frame - 1, bbox_tlwh, identities))\n\n end = time.time()\n\n if self.args.display:\n cv2.imshow(\"test\", ori_im)\n cv2.waitKey(1)\n\n if self.args.save_path:\n self.writer.write(ori_im)\n\n # save results\n write_results(self.save_results_path, results, 'mot')\n\n # logging\n 
self.logger.info(\"time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}\" \\\n .format(end - start, 1 / (end - start), bbox_xywh.shape[0], len(outputs)))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"VIDEO_PATH\", type=str)\n parser.add_argument(\"--config_detection\", type=str, default=\"./configs/yolov3.yaml\")\n parser.add_argument(\"--config_deepsort\", type=str, default=\"./configs/deep_sort.yaml\")\n # parser.add_argument(\"--ignore_display\", dest=\"display\", action=\"store_false\", default=True)\n parser.add_argument(\"--display\", action=\"store_true\")\n parser.add_argument(\"--frame_interval\", type=int, default=1)\n parser.add_argument(\"--display_width\", type=int, default=800)\n parser.add_argument(\"--display_height\", type=int, default=600)\n parser.add_argument(\"--save_path\", type=str, default=\"./output/\")\n parser.add_argument(\"--cpu\", dest=\"use_cuda\", action=\"store_false\", default=True)\n parser.add_argument(\"--camera\", action=\"store\", dest=\"cam\", type=int, default=\"-1\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n cfg = get_config()\n cfg.merge_from_file(args.config_detection)\n cfg.merge_from_file(args.config_deepsort)\n\n with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:\n vdo_trk.run()\n"
] | [
[
"torch.cuda.is_available"
]
] |
GandalfSaxe/letomes | [
"5f73a4066fcf69260cb538c105acf898b22e756d"
] | [
"code/cudasim/cuda_rocketry.py"
] | [
"from orbsim.r3b_2d.simulators import run_sim\nfrom multiprocessing import Pool\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# import pygmo as pg\n# from pygmo import algorithm\nimport os\nimport sys\nfrom orbsim.r3b_2d.simulators import run_sim\n\n# from orbsim.plotting import orbitplot2d, orbitplot_non_inertial\nfrom orbsim.r3b_2d.analyticals import (\n ensure_bounds,\n random_disjoint_intervals,\n collapse_intervals,\n)\nimport time\nfrom numba import jit, njit\nimport math\nfrom math import pi\nfrom scipy.stats import rankdata\n\n# from ctypes import cdll\nfrom ctypes import *\n\ncudasim = cdll.LoadLibrary(\"./libcudasim.so\")\n\npi8 = pi / 8\npi4 = pi / 4\npi2 = pi / 2\ntau = 2 * pi\n\n\ndef evolve(psis, bounds, nIterations, nIndividuals, nJitter, maxDuration, maxSteps):\n init_sigma = 0.2 # spread\n init_alpha = 0.3 # learningrate\n sigma, alpha = init_sigma, init_alpha\n # sigma = np.ones(nIndividuals) * init_sigma\n # alpha = np.ones(nIndividuals) * init_alpha\n allscores=[]\n winners = []\n intermediate_winners = []\n bounds_list = bounds.values()\n np.random.seed(0)\n for _ in range(nIterations):\n\n \"\"\"\n make list of all paths to integrate\n \"\"\"\n jitter = []\n for _ in range(nIndividuals):\n noise = np.random.randn(nJitter, 3)\n halfway = int(noise.shape[0]/2)\n for i in range(halfway):\n noise[halfway+i] = -1*noise[i]\n jitter.append(noise)\n jitter = np.array(jitter)\n jitter = np.array([sigma * jitt for idx, jitt in enumerate(jitter)])\n jitter = jitter.reshape(nJitter, nIndividuals, 3)\n jitter[0] *= 0 # Make sure all set individuals are evaluated without jitter\n points = jitter + psis\n points = points.reshape(nIndividuals * nJitter, 3)\n for i, pt in enumerate(points):\n points[i] = ensure_bounds(pt, bounds_list)\n points = points.reshape(nJitter, nIndividuals, 3)\n successes = np.zeros(nIndividuals * nJitter, dtype=bool)\n scores = np.zeros(nIndividuals * nJitter)\n\n \"\"\"\n cudasim.integrate\n \n Input:\n nIndividuals Number of individuals (size of population)\n nJitter Number of random jitter points\n maxSteps Maximum number of steps of integration algorithm\n maxDuration Maximum t (in days) of integration algorithm\n inArray 1D input array of doubles; size is 3 x nIndividuals \n\n Output:\n successArray 1D ouput array of bools; size is 1 x nIndividuals\n scoreArray 1D ouput array of doubles; size is 1 x nIndividuals\n \n \"\"\"\n cudasim.integrate.restype = None\n cudasim.integrate.argtypes = [\n c_int,\n c_int,\n c_double,\n c_int,\n POINTER(c_double),\n POINTER(c_bool),\n POINTER(c_double),\n ]\n inArray = points.ctypes.data_as(POINTER(c_double))\n successArray = successes.ctypes.data_as(POINTER(c_bool))\n scoreArray = scores.ctypes.data_as(POINTER(c_double))\n cudasim.integrate(\n nIndividuals,\n nJitter,\n maxDuration,\n int(maxSteps),\n inArray,\n successArray,\n scoreArray,\n )\n\n print(\"successes=\", successes.sum())\n points = points.reshape(nIndividuals * nJitter, 3)\n for i, _ in enumerate(scores):\n scores[i] += points[i][2] # add burn dv\n if not successes[i]:\n scores[i] += 1\n scores[i] *= 10\n\n \"\"\"transform scores -- ranking\"\"\"\n scores = scores.reshape(nIndividuals, nJitter)\n ranked_scores = np.array(\n [rankdata(-1 * sig_eps, method=\"ordinal\") for sig_eps in scores]\n )\n for rscores in ranked_scores:\n rsum = rscores.sum()\n rscores = [\n rscore / rsum for rscore in rscores\n ] # make scores sum to 1\n # ranked_scores = -1 * ranked_scores\n\n steps = np.zeros([nIndividuals, 3])\n jitter = jitter.transpose(1, 0, 2)\n 
steps = np.array(\n [\n np.dot(ranked_scores[idx], jitter[idx]) * sigma**2 * alpha\n for idx in range(len(steps))\n ]\n )\n\n \"\"\"report winners\"\"\"\n points = points.reshape(nIndividuals, nJitter, 3)\n scores = scores.reshape(nIndividuals, nJitter)\n successes = successes.reshape(nIndividuals, nJitter)\n for idx, psi in enumerate(psis):\n allscores.append(f\"{scores[idx][0]} \")\n if successes[idx][0]:\n winners.append(str([idx, psi, scores[idx][0]]) + \"\\n\")\n for jdx, succ in enumerate(\n successes[idx][1:]\n ): # all but the first value, since the first value is the individual itself\n if succ:\n intermediate_winners.append(\n \" -- \"\n + str([idx, points[idx][jdx + 1], scores[idx][jdx + 1]])\n + \"\\n\"\n )\n allscores.append(\"\\n\")\n psis += steps\n\n scoresfile = open('cuda_moon_scores.txt', 'w')\n scoresfile.writelines(allscores)\n scoresfile.close()\n logfile = open(f\"cudaES.log\", \"w\")\n logfile.writelines(winners)\n logfile.writelines(intermediate_winners)\n logfile.close()\n\n\ndef initialize_psis(n, bounds):\n psis = [[random_disjoint_intervals(bound) for bound in bounds] for _ in range(n)]\n return psis\n\n\nif __name__ == \"__main__\":\n nIterations = 300\n nIndividuals = 1024\n nJitter = 32\n maxDuration = 100\n maxSteps = 1e7\n bounds = {\n \"pos\": np.array([[0, 1 * tau]]),\n \"ang\": np.array([[0, 1 * tau / 16], [tau / 2 - tau / 16, tau / 2]]),\n \"burn\": np.array([[3.1, 3.15]]),\n }\n psis = initialize_psis(nIndividuals, bounds.values())\n # pop.set_x(0, [-2.277654673852600, 0.047996554429844, 3.810000000000000])\n # pop.set_x(1, [-0.138042744751570, -0.144259374836607, 3.127288444444444])\n # pop.set_x(2, [-2.086814820119193, -0.000122173047640, 3.111181716545691])\n # print(pop)\n psis[0] = [4.005_530_633_326_986, 0.047_996_554_429_844, 3.810_000_000_000_000]\n evolve(psis, bounds, nIterations, nIndividuals, nJitter, maxDuration, maxSteps)\n"
] | [
[
"scipy.stats.rankdata",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.randn",
"numpy.array",
"numpy.dot"
]
] |
frankstratton/eps | [
"37e8b2f739df68db9d49e66852e294c110b8bf8a"
] | [
"test/support/python/naive_bayes.py"
] | [
"import pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn2pmml import sklearn2pmml\nfrom sklearn2pmml.decoration import ContinuousDomain, CategoricalDomain\nfrom sklearn2pmml.pipeline import PMMLPipeline\nfrom sklearn2pmml.feature_extraction.text import Splitter\nfrom sklearn_pandas import DataFrameMapper\n\ndata = pd.read_csv(\"test/support/mpg.csv\")\n\nnumeric_features = [\"displ\", \"year\", \"cyl\"]\ncategorical_features = [\"class\"]\ntext_features = []\n\nmapper = DataFrameMapper(\n [(numeric_features, [ContinuousDomain()])] +\n [([f], [CategoricalDomain(), OneHotEncoder()]) for f in categorical_features] +\n [(f, [CategoricalDomain(), CountVectorizer(tokenizer=Splitter())]) for f in text_features]\n)\n\npipeline = PMMLPipeline([\n (\"mapper\", mapper),\n (\"model\", GaussianNB())\n])\npipeline.fit(data, data[\"drv\"])\n\nsklearn2pmml(pipeline, \"test/support/python/naive_bayes.pmml\")\n\nprint(pipeline.predict(data[:10]))\n"
] | [
[
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.naive_bayes.GaussianNB"
]
] |
EM-AutoML/AutoDL-Projects | [
"8ff416fe5d6cb1b310b885fe376e6f2790fbda14"
] | [
"exps/algos/R_EA.py"
] | [
"##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #\n##################################################################\n# Regularized Evolution for Image Classifier Architecture Search #\n##################################################################\nimport os, sys, time, glob, random, argparse\nimport numpy as np, collections\nfrom copy import deepcopy\nimport torch\nimport torch.nn as nn\nfrom pathlib import Path\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom config_utils import load_config, dict2config, configure2str\nfrom datasets import get_datasets, SearchDataset\nfrom procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler\nfrom utils import get_model_infos, obtain_accuracy\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom nas_201_api import NASBench201API as API\nfrom models import CellStructure, get_search_spaces\n\n\nclass Model(object):\n\n def __init__(self):\n self.arch = None\n self.accuracy = None\n \n def __str__(self):\n \"\"\"Prints a readable version of this bitstring.\"\"\"\n return '{:}'.format(self.arch)\n \n\n# This function is to mimic the training and evaluatinig procedure for a single architecture `arch`.\n# The time_cost is calculated as the total training time for a few (e.g., 12 epochs) plus the evaluation time for one epoch.\n# For use_converged_LR = True, the architecture is trained for 12 epochs, with LR being decaded from 0.1 to 0.\n# In this case, the LR schedular is converged.\n# For use_converged_LR = False, the architecture is planed to be trained for 200 epochs, but we early stop its procedure.\n# \ndef train_and_eval(arch, nas_bench, extra_info, dataname='cifar10-valid', use_converged_LR=True):\n if use_converged_LR and nas_bench is not None:\n arch_index = nas_bench.query_index_by_arch( arch )\n assert arch_index >= 0, 'can not find this arch : {:}'.format(arch)\n info = nas_bench.get_more_info(arch_index, dataname, None, True)\n valid_acc, time_cost = info['valid-accuracy'], info['train-all-time'] + info['valid-per-time']\n #_, valid_acc = info.get_metrics('cifar10-valid', 'x-valid' , 25, True) # use the validation accuracy after 25 training epochs\n elif not use_converged_LR and nas_bench is not None:\n # Please use `use_converged_LR=False` for cifar10 only.\n # It did return values for cifar100 and ImageNet16-120, but it has some potential issues. 
(Please email me for more details)\n arch_index, nepoch = nas_bench.query_index_by_arch( arch ), 25\n assert arch_index >= 0, 'can not find this arch : {:}'.format(arch)\n xoinfo = nas_bench.get_more_info(arch_index, 'cifar10-valid', None, True)\n xocost = nas_bench.get_cost_info(arch_index, 'cifar10-valid', False)\n info = nas_bench.get_more_info(arch_index, dataname, nepoch, False, True) # use the validation accuracy after 25 training epochs, which is used in our ICLR submission (not the camera ready).\n cost = nas_bench.get_cost_info(arch_index, dataname, False)\n # The following codes are used to estimate the time cost.\n # When we build NAS-Bench-201, architectures are trained on different machines and we can not use that time record.\n # When we create checkpoints for converged_LR, we run all experiments on 1080Ti, and thus the time for each architecture can be fairly compared.\n nums = {'ImageNet16-120-train': 151700, 'ImageNet16-120-valid': 3000,\n 'cifar10-valid-train' : 25000, 'cifar10-valid-valid' : 25000,\n 'cifar100-train' : 50000, 'cifar100-valid' : 5000}\n estimated_train_cost = xoinfo['train-per-time'] / nums['cifar10-valid-train'] * nums['{:}-train'.format(dataname)] / xocost['latency'] * cost['latency'] * nepoch\n estimated_valid_cost = xoinfo['valid-per-time'] / nums['cifar10-valid-valid'] * nums['{:}-valid'.format(dataname)] / xocost['latency'] * cost['latency']\n try:\n valid_acc, time_cost = info['valid-accuracy'], estimated_train_cost + estimated_valid_cost\n except:\n valid_acc, time_cost = info['est-valid-accuracy'], estimated_train_cost + estimated_valid_cost\n else:\n # train a model from scratch.\n raise ValueError('NOT IMPLEMENT YET')\n return valid_acc, time_cost\n\n\ndef random_architecture_func(max_nodes, op_names):\n # return a random architecture\n def random_architecture():\n genotypes = []\n for i in range(1, max_nodes):\n xlist = []\n for j in range(i):\n node_str = '{:}<-{:}'.format(i, j)\n op_name = random.choice( op_names )\n xlist.append((op_name, j))\n genotypes.append( tuple(xlist) )\n return CellStructure( genotypes )\n return random_architecture\n\n\ndef mutate_arch_func(op_names):\n \"\"\"Computes the architecture for a child of the given parent architecture.\n The parent architecture is cloned and mutated to produce the child architecture. The child architecture is mutated by randomly switch one operation to another.\n \"\"\"\n def mutate_arch_func(parent_arch):\n child_arch = deepcopy( parent_arch )\n node_id = random.randint(0, len(child_arch.nodes)-1)\n node_info = list( child_arch.nodes[node_id] )\n snode_id = random.randint(0, len(node_info)-1)\n xop = random.choice( op_names )\n while xop == node_info[snode_id][0]:\n xop = random.choice( op_names )\n node_info[snode_id] = (xop, node_info[snode_id][1])\n child_arch.nodes[node_id] = tuple( node_info )\n return child_arch\n return mutate_arch_func\n\n\ndef regularized_evolution(cycles, population_size, sample_size, time_budget, random_arch, mutate_arch, nas_bench, extra_info, dataname):\n \"\"\"Algorithm for regularized evolution (i.e. aging evolution).\n \n Follows \"Algorithm 1\" in Real et al. 
\"Regularized Evolution for Image\n Classifier Architecture Search\".\n \n Args:\n cycles: the number of cycles the algorithm should run for.\n population_size: the number of individuals to keep in the population.\n sample_size: the number of individuals that should participate in each tournament.\n time_budget: the upper bound of searching cost\n\n Returns:\n history: a list of `Model` instances, representing all the models computed\n during the evolution experiment.\n \"\"\"\n population = collections.deque()\n history, total_time_cost = [], 0 # Not used by the algorithm, only used to report results.\n\n # Initialize the population with random models.\n while len(population) < population_size:\n model = Model()\n model.arch = random_arch()\n model.accuracy, time_cost = train_and_eval(model.arch, nas_bench, extra_info)\n population.append(model)\n history.append(model)\n total_time_cost += time_cost\n\n # Carry out evolution in cycles. Each cycle produces a model and removes\n # another.\n #while len(history) < cycles:\n while total_time_cost < time_budget:\n # Sample randomly chosen models from the current population.\n start_time, sample = time.time(), []\n while len(sample) < sample_size:\n # Inefficient, but written this way for clarity. In the case of neural\n # nets, the efficiency of this line is irrelevant because training neural\n # nets is the rate-determining step.\n candidate = random.choice(list(population))\n sample.append(candidate)\n\n # The parent is the best model in the sample.\n parent = max(sample, key=lambda i: i.accuracy)\n\n # Create the child model and store it.\n child = Model()\n child.arch = mutate_arch(parent.arch)\n total_time_cost += time.time() - start_time\n child.accuracy, time_cost = train_and_eval(child.arch, nas_bench, extra_info)\n if total_time_cost + time_cost > time_budget: # return\n return history, total_time_cost\n else:\n total_time_cost += time_cost\n population.append(child)\n history.append(child)\n\n # Remove the oldest model.\n population.popleft()\n return history, total_time_cost\n\n\ndef main(xargs, nas_bench):\n assert torch.cuda.is_available(), 'CUDA is not available.'\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.set_num_threads( xargs.workers )\n prepare_seed(xargs.rand_seed)\n logger = prepare_logger(args)\n\n assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'\n if xargs.dataset == 'cifar10':\n dataname = 'cifar10-valid'\n else:\n dataname = xargs.dataset\n if xargs.data_path is not None:\n train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)\n split_Fpath = 'configs/nas-benchmark/cifar-split.txt'\n cifar_split = load_config(split_Fpath, None, None)\n train_split, valid_split = cifar_split.train, cifar_split.valid\n logger.log('Load split file from {:}'.format(split_Fpath))\n config_path = 'configs/nas-benchmark/algos/R-EA.config'\n config = load_config(config_path, {'class_num': class_num, 'xshape': xshape}, logger)\n # To split data\n train_data_v2 = deepcopy(train_data)\n train_data_v2.transform = valid_data.transform\n valid_data = train_data_v2\n search_data = SearchDataset(xargs.dataset, train_data, train_split, valid_split)\n # data loader\n train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split) , num_workers=xargs.workers, pin_memory=True)\n valid_loader = 
torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split), num_workers=xargs.workers, pin_memory=True)\n logger.log('||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(train_loader), len(valid_loader), config.batch_size))\n logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))\n extra_info = {'config': config, 'train_loader': train_loader, 'valid_loader': valid_loader}\n else:\n config_path = 'configs/nas-benchmark/algos/R-EA.config'\n config = load_config(config_path, None, logger)\n logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))\n extra_info = {'config': config, 'train_loader': None, 'valid_loader': None}\n\n search_space = get_search_spaces('cell', xargs.search_space_name)\n random_arch = random_architecture_func(xargs.max_nodes, search_space)\n mutate_arch = mutate_arch_func(search_space)\n #x =random_arch() ; y = mutate_arch(x)\n x_start_time = time.time()\n logger.log('{:} use nas_bench : {:}'.format(time_string(), nas_bench))\n logger.log('-'*30 + ' start searching with the time budget of {:} s'.format(xargs.time_budget))\n history, total_cost = regularized_evolution(xargs.ea_cycles, xargs.ea_population, xargs.ea_sample_size, xargs.time_budget, random_arch, mutate_arch, nas_bench if args.ea_fast_by_api else None, extra_info, dataname)\n logger.log('{:} regularized_evolution finish with history of {:} arch with {:.1f} s (real-cost={:.2f} s).'.format(time_string(), len(history), total_cost, time.time()-x_start_time))\n best_arch = max(history, key=lambda i: i.accuracy)\n best_arch = best_arch.arch\n logger.log('{:} best arch is {:}'.format(time_string(), best_arch))\n \n info = nas_bench.query_by_arch( best_arch )\n if info is None: logger.log('Did not find this architecture : {:}.'.format(best_arch))\n else : logger.log('{:}'.format(info))\n logger.log('-'*100)\n logger.close()\n return logger.log_dir, nas_bench.query_index_by_arch( best_arch )\n \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"Regularized Evolution Algorithm\")\n parser.add_argument('--data_path', type=str, help='Path to dataset')\n parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')\n # channels and number-of-cells\n parser.add_argument('--search_space_name', type=str, help='The search space name.')\n parser.add_argument('--max_nodes', type=int, help='The maximum number of nodes.')\n parser.add_argument('--channel', type=int, help='The number of channels.')\n parser.add_argument('--num_cells', type=int, help='The number of cells in one stage.')\n parser.add_argument('--ea_cycles', type=int, help='The number of cycles in EA.')\n parser.add_argument('--ea_population', type=int, help='The population size in EA.')\n parser.add_argument('--ea_sample_size', type=int, help='The sample size in EA.')\n parser.add_argument('--ea_fast_by_api', type=int, help='Use our API to speed up the experiments or not.')\n parser.add_argument('--time_budget', type=int, help='The total time cost budge for searching (in seconds).')\n # log\n parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\n parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.')\n parser.add_argument('--arch_nas_dataset', type=str, help='The path to load the architecture dataset 
(tiny-nas-benchmark).')\n parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')\n parser.add_argument('--rand_seed', type=int, default=-1, help='manual seed')\n args = parser.parse_args()\n #if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)\n args.ea_fast_by_api = args.ea_fast_by_api > 0\n\n if args.arch_nas_dataset is None or not os.path.isfile(args.arch_nas_dataset):\n nas_bench = None\n else:\n print ('{:} build NAS-Benchmark-API from {:}'.format(time_string(), args.arch_nas_dataset))\n nas_bench = API(args.arch_nas_dataset)\n if args.rand_seed < 0:\n save_dir, all_indexes, num = None, [], 500\n for i in range(num):\n print ('{:} : {:03d}/{:03d}'.format(time_string(), i, num))\n args.rand_seed = random.randint(1, 100000)\n save_dir, index = main(args, nas_bench)\n all_indexes.append( index )\n torch.save(all_indexes, save_dir / 'results.pth')\n else:\n main(args, nas_bench)\n"
] | [
[
"torch.set_num_threads",
"torch.save",
"torch.cuda.is_available",
"torch.utils.data.sampler.SubsetRandomSampler"
]
] |
VuongLong/DANCE_W | [
"8a7dc39a16908bb4726ed57049c6a7d6698a76bc"
] | [
"models/algorithms.py"
] | [
"import torch\nfrom torch.autograd import Function\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef op_copy(optimizer):\n for param_group in optimizer.param_groups:\n param_group['lr0'] = param_group['lr']\n return optimizer\n\ndef lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):\n decay = (1 + gamma * iter_num / max_iter) ** (-power)\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr0'] * decay\n param_group['weight_decay'] = 1e-3\n param_group['momentum'] = 0.9\n param_group['nesterov'] = True\n return optimizer\n \n\ndef init_weights(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:\n nn.init.kaiming_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.zeros_(m.bias)\n elif classname.find('Linear') != -1:\n nn.init.xavier_normal_(m.weight)\n nn.init.zeros_(m.bias)\n\n\nclass KanNet(nn.Module):\n def __init__(self, output=1, bottleneck_dim=256, type=\"linear\"):\n super(KanNet, self).__init__()\n self.type = type\n if type == 'wn':\n self.fc = weightNorm(nn.Linear(bottleneck_dim, output), name=\"weight\")\n self.fc.apply(init_weights)\n else:\n self.fc = nn.Linear(bottleneck_dim, output)\n self.fc.apply(init_weights)\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\n def get_weight(self):\n return self.fc.weight\n\n def get_bias(self):\n return self.fc.bias"
] | [
[
"torch.nn.Linear",
"torch.nn.init.xavier_normal_",
"torch.nn.init.normal_",
"torch.nn.init.zeros_",
"torch.nn.init.kaiming_uniform_"
]
] |
hoxmark/Deep_reinforcement_active_learning | [
"7458916d6f75c7fbfcfd4bc81763ab5ba16208ad"
] | [
"selection_strategies/download_graphs/cnn/cnn_sim_umich_n_del.py"
] | [
"from pprint import pprint\nfrom visdom import Visdom\nimport pathlib\nimport json\nimport sys\nimport matplotlib.pyplot as plt\n\ndef download_env(env):\n vis = Visdom('http://logserver.duckdns.org', port=5010)\n data = vis.get_window_data(env=env)\n d = json.loads(data)\n \n n_deleted = []\n test_acc_avg = []\n \n for key in d:\n try:\n #1 for MR 0 for UMICH\n x = list(d[key][\"content\"][\"data\"][1][\"x\"])\n y = list(d[key][\"content\"][\"data\"][1][\"y\"]) \n if 'n-deleted' in key:\n n_deleted = (x,y)\n\n #1 for MR 0 for UMICH\n x = list(d[key][\"content\"][\"data\"][1][\"x\"])\n y = list(d[key][\"content\"][\"data\"][1][\"y\"]) \n if 'test-acc-avg' in key:\n test_acc_avg = (x,y)\n except:\n pass\n\n\n return n_deleted, test_acc_avg\n\nif __name__ == \"__main__\":\n\n source = [ \"SS_bjornhox_11-07-18_14:22_UMICH_cnn_sim_0.08_28ef\",\n \"SS_bjornhox_11-07-18_14:34_UMICH_cnn_sim_0.12_2366\",\n \"SS_bjornhox_11-07-18_14:34_UMICH_cnn_sim_0.14_2f39\"]\n\n legend = [\"0.08\", \"0.12\", \"0.14\"]\n path = './results/'\n\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n\n n_deleted = []\n test_acc_avg = []\n \n \n # for i in sys.argv[1:]:\n # legend.append(i.split(\"_\")[7])\n # legend.append(i.split(\"_\")[6])\n # legend.append(i.split(\"_\")[8])\n\n for i in range(0, len(source)): \n env = source[i]\n res1, res2 = download_env(env)\n n_deleted.append(res1)\n test_acc_avg.append(res2)\n\n plt.figure(1)\n plt.axis([0,250,0,1100])\n plt.subplot(111)\n\n plt.xlabel(\"Amount of labeled data\")\n plt.ylabel(\"Number of deleted samples\")\n\n new_plot = []\n\n for i in range(0,len(n_deleted)):\n # print(test_acc_avg[i])\n # print(n_deleted[i])\n # # new = (test_acc_avg[i][0][0:8], n_deleted[i][1][0:8])\n new = (test_acc_avg[i][0][0:15], n_deleted[i][1][0:15])\n\n new[0].insert(0,0) \n new[1].insert(0,0)\n new_plot.append(new)\n # print(new)\n # print(\"---\")\n # quit()\n \n \n\n plt.plot(*new_plot[0], dashes=[4, 2], color='#9467bd')\n plt.plot(*new_plot[1], color='#1f77b4')\n plt.plot(*new_plot[2], dashes=[6, 2], color='#17becf')\n\n plt.legend(legend,\n loc='center right')\n plt.savefig('results/CNN_UMICH_N_DEL.png' , dpi=600)\n plt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
lauritowal/plainFDM_public | [
"3d47352e5aad15b0c632488b048b470d0528c652"
] | [
"plain_fdm/lfz_controlFDM/pid_regler.py"
] | [
"import numpy as np\nimport logging\n\nfrom plain_fdm.servicesFDM.utils import Utils\n\nlogging.basicConfig(filename='pid_regler.log', filemode='w', level=logging.DEBUG)\n\n\n# siehe auch Drohne-Code Unity\nclass PidRegler(object):\n\n def __init__(self):\n self.utils = Utils()\n self.kpaElevator = 110\n self.kdaElevator = 4.5\n self.kpaAileron = 100.0\n self.kdaAileron = 10\n\n \n # Headline: Aileron -->\n def get_aileron_command(self, heading_reference, heading, roll_angle, roll_angle_rate, delta_aileron):\n roll_angle_reference = self._outerLoopAileron(heading_reference, heading)\n return self._innerLoopAileron(roll_angle_reference, roll_angle, roll_angle_rate, delta_aileron)\n\n def _outerLoopAileron(self, heading_reference, heading):\n\n logging.debug(\"heading_reference: %s\", heading_reference)\n logging.debug(\"heading: %s\", heading)\n\n heading_difference = np.deg2rad(self.utils.normalize_angle(heading_reference - heading))\n logging.debug(\"heading_difference: %s (rad)\", heading_difference)\n\n logging.debug(\"heading_reference: %s (degree)\", np.rad2deg(heading_difference))\n\n heading_roll_angle_reference = heading_difference * 1.0 #Vorsteuerung als P-Regler\n return heading_roll_angle_reference\n\n # innerLoop: heading_roll->Aileron\n def _innerLoopAileron(self, roll_angle_reference, roll_angle, roll_angle_rate, delta_aileron):\n\n logging.debug(\"roll_angle_reference: %s\", roll_angle_reference)\n logging.debug(\"roll_angle: %s\", roll_angle)\n\n diff_rollAngle = roll_angle_reference - roll_angle\n\n logging.debug(\"diff_rollAngle: %s\", diff_rollAngle)\n\n #if np.rad2deg(rollAngle_Current) < -2:\n # print(\"jetzt\")\n AileronCommand = (diff_rollAngle * self.kpaAileron - roll_angle_rate * self.kdaAileron)\n AileronCommand = AileronCommand + delta_aileron\n AileronCommand = np.deg2rad(np.clip(AileronCommand, -1, 1) * (-15))\n\n logging.debug(\"AileronCommand: %s (in degrees)\", AileronCommand)\n\n return AileronCommand\n \n # Headline: Elevator\n def getElevatorCommand(self, TASReference, TASCurrent, pitchAngleCurrent, pitchAngleRateCurrent, elevatorCurrent):\n pitchAngleReference = self._outerLoopElevator(TASReference, TASCurrent)\n elevatorCommand = self._innerLoopElevator(pitchAngleReference, pitchAngleCurrent, pitchAngleRateCurrent, elevatorCurrent)\n return elevatorCommand\n\n def _outerLoopElevator(self, TASReference, TASCurrent):\n pitchAngleReference = (TASCurrent - TASReference) * 1.0 #Vorsteuerung als P-Regler\n return pitchAngleReference\n\n # innerLoop: Pitch->Elevator\n def _innerLoopElevator(self, pitchAngleReference, pitchAngleCurrent, pitchAngleRateCurrent, elevatorCurrent):\n diffPitchAngle = pitchAngleReference - pitchAngleCurrent\n elevatorCommand = np.clip(diffPitchAngle * self.kpaElevator - pitchAngleRateCurrent * self.kdaElevator, -1, 1)\n elevatorCommand = elevatorCommand + elevatorCurrent\n elevatorCommand = np.deg2rad(np.clip(elevatorCommand, -1, 1) * (-20))\n return elevatorCommand\n\n def __difference_yaw_angle(self, heading_reference, heading_current):\n # keep values between 0 and 360 degrees\n heading_reference = heading_reference % 360\n heading_current = heading_current % 360\n\n logging.debug(\"heading_reference mod 360: %s\", heading_reference)\n logging.debug(\"heading_current mod 360: %s\", heading_current)\n\n heading_difference = heading_reference - heading_current\n\n logging.debug(\"heading_difference: %s\", heading_difference)\n\n normalized = self.utils.normalize_angle(heading_difference)\n\n logging.debug(\"normalized: %s\", 
normalized)\n\n return normalized\n\n\ndef main():\n pid = PidRegler()\n print(pid.getElevatorCommand(70,60,0,0,0))\n\nif __name__ == '__main__':\n main()"
] | [
[
"numpy.clip",
"numpy.rad2deg"
]
] |
xiaoyili/elasticdl | [
"93e58c42eb5e2ef14661469777d0224884d7bf1d"
] | [
"elasticdl/python/common/model_handler.py"
] | [
"import abc\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom elasticdl.python.common.constants import DistributionStrategy\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.common.save_utils import CheckpointSaver\nfrom elasticdl.python.elasticdl.layers.embedding import Embedding\nfrom elasticdl.python.keras.layers import SparseEmbedding\nfrom elasticdl.python.ps.embedding_table import EmbeddingTable\n\n\ndef _get_trained_params_from_checkpoint(checkpoint_dir):\n \"\"\"Get parameters from a checkpoint directory saved by ElasticDL\"\"\"\n parameters = CheckpointSaver.restore_params_from_checkpoint(\n checkpoint_dir, 0, 1\n )\n\n trained_params = parameters.non_embedding_params\n for name, table in parameters.embedding_params.items():\n # The name of variable in a tf.keras.layers.Embedding layer is\n # \"{layer_name}/embeddings:0\"\n var_name = name + \"/embeddings:0\"\n trained_params[var_name] = table\n return trained_params\n\n\ndef _convert_embedding_table_to_numpy_array(embedding_table, embedding_shape):\n \"\"\"Convert an embedding table to a np.ndarray which can be assigned\n to trainable weights in keras embedding layers.\n\n Args:\n embedding_table: A `EmbeddingTable` instance.\n embedding_shape: a tuple with two elements\n\n Returns:\n A np.ndarray\n \"\"\"\n embedding_ids = list(embedding_table.embedding_vectors.keys())\n embedding_values = list(embedding_table.embedding_vectors.values())\n embedding_weights = np.zeros(embedding_shape)\n embedding_weights[embedding_ids] = embedding_values\n return embedding_weights\n\n\ndef _need_partition_embedding(layer):\n \"\"\"The embedding layer will be partitioned on multiple\n PS instances if the memory of the layer.train_weights is\n bigger than 2MB.\n \"\"\"\n EMBEDDING_SIZE_THRESHOLD_FOR_PARTITION = 2 * 1024 * 1024 # 2MB\n FLOAT32_BYTES = 4\n weights_memory = layer.input_dim * layer.output_dim * FLOAT32_BYTES\n return weights_memory > EMBEDDING_SIZE_THRESHOLD_FOR_PARTITION\n\n\nclass ModelHandler(metaclass=abc.ABCMeta):\n \"\"\"Generate the model to train in ElasticDL for different distributed\n strategies and export trained model in ElasticDL to SavedModel.\n \"\"\"\n\n @abc.abstractmethod\n def get_model_to_train(self, model):\n \"\"\"Generate a model to train in ElasticDL.\n\n Args:\n model: A native keras model instance.\n\n Returns:\n A keras model instance for ElasticDL training.\n \"\"\"\n\n @abc.abstractmethod\n def get_model_to_export(self, model, dataset):\n \"\"\"Get the model which can be exported a SavedModel\n by tf.saved_model.save.\n\n Args:\n model: A keras model instance trained by ElasticDL and\n it may contains `elasticdl.layers.Embedding` layers.\n dataset: A `tf.data.Dataset` instance which has the same outputs as\n the training dataset.\n\n Returns:\n A keras model instance trained by ElasticDL.\n \"\"\"\n\n @classmethod\n def get_model_handler(\n cls, distribution_strategy=None, checkpoint_dir=None\n ):\n \"\"\"Create a model handler to process the model for the\n distributed strategy.\n\n Args:\n distribution_strategy (string): distribution strategy name\n checkpoint_dir: Checkpoint directory to save model parametes\n during training.\n\n Return:\n ModelHandler subclass instance.\n \"\"\"\n if distribution_strategy == DistributionStrategy.PARAMETER_SERVER:\n return ParameterServerModelHandler(checkpoint_dir=checkpoint_dir)\n elif distribution_strategy == DistributionStrategy.ALLREDUCE:\n logger.warning(\n \"Allreduce distribution strategy is not supported yet. 
\"\n \"Switching to use the default distribution strategy.\"\n )\n return DefaultModelHandler()\n\n\nclass DefaultModelHandler(ModelHandler):\n \"\"\"Return the origin model to train and export.\"\"\"\n\n def get_model_to_train(self, model):\n return model\n\n def get_model_to_export(self, model, dataset):\n \"\"\"\n Get model with inputs and trained parameters to export.\n \"\"\"\n if not model.inputs:\n model._build_model_with_inputs(inputs=dataset, targets=None)\n return model\n\n\nclass ParameterServerModelHandler(ModelHandler):\n \"\"\"Model handler for parameter server strategy.\n For training, The handler will replace `tf.keras.layers.Embedding`\n layers with`elasticdl.layers.Embedding` for training.\n For saving model, the handler will restore Keras model definition and\n pull trained parameters from parameter server(s) for the model.\n \"\"\"\n\n def __init__(self, checkpoint_dir=None):\n \"\"\"\n Arguments:\n checkpoint_dir: A checkpoint directory to save all model\n parameters during training.\n \"\"\"\n self._checkpoint_dir = checkpoint_dir\n\n def get_model_to_train(self, model):\n \"\"\"Replace the tf.keras.layers.Embedding layer in the model with\n an elasticdl.layers.Embedding layer in ParameterServerStrategy.\n \"\"\"\n if type(model) == tf.keras.Sequential or model._is_graph_network:\n model = self._clone_model_with_edl_embedding(model)\n else:\n model = self._replace_attr_with_edl_embedding(model)\n return model\n\n def get_model_to_export(self, model, dataset):\n \"\"\"Get the model which can be exported to a SavedModel by\n `tf.saved_model.save`.\n \"\"\"\n model = self._restore_keras_model_def(model)\n if not model.inputs:\n # build model to add inputs and outputs that\n # can be consumed by tf-serving\n model._build_model_with_inputs(inputs=dataset, targets=None)\n\n checkpoint_dir = CheckpointSaver.get_valid_lastest_version_dir(\n self._checkpoint_dir\n )\n if checkpoint_dir is None:\n logger.warning(\"No available checkpoint to export model\")\n return model\n\n trained_params = _get_trained_params_from_checkpoint(checkpoint_dir)\n for var in model.trainable_variables:\n if isinstance(trained_params[var.name], EmbeddingTable):\n embedding_params = _convert_embedding_table_to_numpy_array(\n trained_params[var.name], var.shape\n )\n var.assign(embedding_params)\n else:\n var.assign(trained_params[var.name].numpy())\n return model\n\n def _restore_keras_model_def(self, model):\n \"\"\"Restore Keras model definition by replacing\n `elasticdl.layers.Embedding` layers with\n `tf.keras.layers.Embedding` layers.\n \"\"\"\n # clear keras model session to avoid clutter from old models/layers.\n tf.keras.backend.clear_session()\n if (\n isinstance(model, tf.keras.models.Model)\n and not model._is_graph_network\n ):\n model = self._replace_attr_with_keras_embedding(model)\n else:\n model = self._clone_model_with_keras_embedding(model)\n return model\n\n @staticmethod\n def _clone_model_with_edl_embedding(model):\n \"\"\"Clone a new model and replace keras embedding layers including\n `tf.keras.layers.Embedding` and `SparseEmbedding` with\n `elasticdl.layers.Embedding`\n \"\"\"\n\n def _clone_function(layer):\n if type(layer) in [\n tf.keras.layers.Embedding,\n SparseEmbedding,\n ] and _need_partition_embedding(layer):\n logger.debug(\n \"Replace {} with {}\".format(layer.name, Embedding)\n )\n # ElasticDL embedding only accept a string type initializer\n init = tf.keras.initializers.serialize(\n layer.embeddings_initializer\n )[\"class_name\"]\n\n if type(layer) == 
tf.keras.layers.Embedding:\n embedding_layer = Embedding(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=init,\n mask_zero=layer.mask_zero,\n input_length=layer.input_length,\n name=layer.name,\n )\n else:\n embedding_layer = Embedding(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=init,\n name=layer.name,\n combiner=layer.combiner,\n )\n return embedding_layer\n return layer\n\n return tf.keras.models.clone_model(\n model, clone_function=_clone_function\n )\n\n @staticmethod\n def _clone_model_with_keras_embedding(model):\n \"\"\"Clone a new model and replace the `elasticdl.layers.Embedding`\n layers with `tf.keras.layers.Embedding` or `SparseEmbedding` layers\n \"\"\"\n\n def _clone_function(layer):\n if type(layer) == Embedding:\n logger.info(\n \"Replace embedding layer with \"\n \"elasticdl.layers.Embedding\"\n )\n # The combiner is not None only for SparseEmbedding,\n if layer.combiner is not None:\n embedding_layer = SparseEmbedding(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=layer.embeddings_initializer,\n name=layer.name,\n combiner=layer.combiner,\n )\n else:\n embedding_layer = tf.keras.layers.Embedding(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=layer.embeddings_initializer,\n mask_zero=layer.mask_zero,\n input_length=layer.input_length,\n name=layer.name,\n )\n return embedding_layer\n return layer\n\n return tf.keras.models.clone_model(\n model, clone_function=_clone_function\n )\n\n @staticmethod\n def _replace_attr_with_edl_embedding(model):\n \"\"\"Replace the keras embedding attributes in the model with\n `elasticdl.layers.Embedding` layers.\n \"\"\"\n for name, value in model.__dict__.items():\n if type(\n value\n ) == tf.keras.layers.Embedding and _need_partition_embedding(\n value\n ):\n logger.info(\n \"Replace {} layer with \"\n \"elasticdl.layers.Embedding\".format(value)\n )\n initializer_name = tf.keras.initializers.serialize(\n value.embeddings_initializer\n )[\"class_name\"]\n embedding_layer = Embedding(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n embeddings_initializer=initializer_name,\n mask_zero=value.mask_zero,\n input_length=value.input_length,\n )\n setattr(model, name, embedding_layer)\n elif type(value) == SparseEmbedding and _need_partition_embedding(\n value\n ):\n logger.info(\n \"Replace {} layer with \"\n \"elasticdl.layers.Embedding\".format(value)\n )\n embedding_layer = Embedding(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n embeddings_initializer=initializer_name,\n combiner=value.combiner,\n )\n setattr(model, name, embedding_layer)\n return model\n\n @staticmethod\n def _replace_attr_with_keras_embedding(model):\n \"\"\"Replace the elasticdl.layers.Embedding attributes in the model\n with `tf.keras.layers.Embedding` or `SparseEmbedding` layers.\n \"\"\"\n for name, value in model.__dict__.items():\n if type(value) == Embedding:\n # The combiner is not None only for SparseEmbedding,\n if value.combiner is not None:\n logger.info(\"Replace elasticdl with SparseEmbedding\")\n embedding_layer = SparseEmbedding(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n embeddings_initializer=value.embeddings_initializer,\n combiner=value.combiner,\n )\n else:\n logger.info(\n \"Replace elasticdl with \", \"tf.kerasl.layers.Embedding\"\n )\n embedding_layer = tf.keras.layers.Embedding(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n 
embeddings_initializer=value.embeddings_initializer,\n mask_zero=value.mask_zero,\n input_length=value.input_length,\n )\n setattr(model, name, embedding_layer)\n return model\n"
] | [
[
"tensorflow.keras.layers.Embedding",
"numpy.zeros",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.models.clone_model",
"tensorflow.keras.initializers.serialize"
]
] |
BitBottleneck/cvprreview | [
"5beff8aa6948cfb1665301f4ece1769175fd546f"
] | [
"transfer_learning.py"
] | [
"# -*-coding:utf-8-*-\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.python import pywrap_tensorflow\nimport os\n\n# The save address of the weight of baseline quantization.\nFILE_PATH_old = \"/home/zxc/Liu/Bit-Bottleneck-ResNet/logs_Bit_Bottleneck/old/model.ckpt\"\n# The new address used to save the transfer weight for new model.\nFILE_PATH_new = \"/home/zxc/Liu/Bit-Bottleneck-ResNet/logs_Bit_Bottleneck/model.ckpt\"\n# The save address of the weight of new model which is inserted Bit Bottleneck layers.\nOUTPUT_FILE = \"/home/zxc/Bit-Bottleneck-ResNet/logs_Bit_Bottleneck/new/\"\n\nold_data = []\nold_name = []\n\nnew_data = []\nnew_name = []\n\n# Read the baseline quantization weights.\nfor var_name_old, _ in tf.contrib.framework.list_variables(FILE_PATH_old):\n var_old = tf.contrib.framework.load_variable(FILE_PATH_old, var_name_old)\n old_data.append(var_old)\n old_name.append(var_name_old)\n\n# Read the weights of new model.\nfor var_name_new, _ in tf.contrib.framework.list_variables(FILE_PATH_new):\n var_new = tf.contrib.framework.load_variable(FILE_PATH_new, var_name_new)\n new_data.append(var_new)\n new_name.append(var_name_new)\n\n\ntransform_variable_list = []\n# If the name of variable is same , then use the old value to replace the new value.\nfor i in range(0, len(new_name)):\n for j in range(0, len(old_name)):\n if new_name[i] == old_name[j]:\n new_data[i] = old_data[j]\n print(new_name[i])\n rename = new_name[i]\n redata = new_data[i]\n # the variable of Variable_1 and Variable are int32 type, Others are float32 type\n if rename.find('Variable_1') != -1 or rename.find('Variable') != -1:\n renamed_var = tf.Variable(redata, name=rename, dtype=tf.int32)\n else:\n renamed_var = tf.Variable(redata, name=rename, dtype=tf.float32)\n transform_variable_list.append(renamed_var)\n\n\ndef save(saver, sess, logdir):\n model_name = 'model.ckpt'\n checkpoint_path = os.path.join(logdir, model_name)\n\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n saver.save(sess, checkpoint_path, write_meta_graph=False)\n print('The weights have been converted to {}.'.format(checkpoint_path))\n\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(var_list=transform_variable_list, write_version=1)\n save(saver, sess, OUTPUT_FILE)\nprint(\"It's finished!\")\n\n\n\n\n\n\n"
] | [
[
"tensorflow.global_variables_initializer",
"tensorflow.contrib.framework.load_variable",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.Variable",
"tensorflow.contrib.framework.list_variables"
]
] |
tefavidal/py-pde | [
"427be3f2f4b096775f46111cd5a5d05af50e94bc"
] | [
"pde/solvers/scipy.py"
] | [
"\"\"\"\nDefines a solver using :mod:`scipy.integrate`\n \n.. codeauthor:: David Zwicker <[email protected]> \n\"\"\"\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom ..fields.base import FieldBase\nfrom ..pdes.base import PDEBase\nfrom .base import SolverBase\n\n\nclass ScipySolver(SolverBase):\n \"\"\"class for solving partial differential equations using scipy\n\n This class is a thin wrapper around :func:`scipy.integrate.solve_ivp`. In\n particular, it supports all the methods implemented by this function.\n \"\"\"\n\n name = \"scipy\"\n\n def __init__(self, pde: PDEBase, backend: str = \"auto\", **kwargs):\n r\"\"\"\n Args:\n pde (:class:`~pde.pdes.base.PDEBase`):\n The instance describing the pde that needs to be solved\n backend (str):\n Determines how the function is created. Accepted values are\n 'numpy` and 'numba'. Alternatively, 'auto' lets the code decide\n for the most optimal backend.\n **kwargs:\n All extra arguments are forwarded to\n :func:`scipy.integrate.solve_ivp`.\n \"\"\"\n super().__init__(pde)\n self.backend = backend\n self.solver_params = kwargs\n\n def make_stepper(\n self, state: FieldBase, dt: float = None\n ) -> Callable[[FieldBase, float, float], float]:\n \"\"\"return a stepper function\n\n Args:\n state (:class:`~pde.fields.FieldBase`):\n An example for the state from which the grid and other information can\n be extracted.\n dt (float):\n Initial time step for the simulation. If `None`, the solver will choose\n a suitable initial value.\n\n Returns:\n Function that can be called to advance the `state` from time\n `t_start` to time `t_end`.\n \"\"\"\n if self.pde.is_sde:\n raise RuntimeError(\"Cannot use scipy stepper for a stochastic equation\")\n\n from scipy import integrate\n\n shape = state.data.shape\n self.info[\"dt\"] = dt\n self.info[\"steps\"] = 0\n self.info[\"stochastic\"] = False\n\n # obtain function for evaluating the right hand side\n rhs = self._make_pde_rhs(state, backend=self.backend)\n\n def rhs_helper(t: float, state_flat: np.ndarray) -> np.ndarray:\n \"\"\"helper function to provide the correct call convention\"\"\"\n return rhs(state_flat.reshape(shape), t).flat # type: ignore\n\n def stepper(state: FieldBase, t_start: float, t_end: float) -> float:\n \"\"\"use scipy.integrate.odeint to advance `state` from `t_start` to\n `t_end`\"\"\"\n if dt is not None:\n self.solver_params[\"first_step\"] = min(t_end - t_start, dt)\n\n sol = integrate.solve_ivp(\n rhs_helper,\n t_span=(t_start, t_end),\n y0=np.ravel(state.data),\n t_eval=[t_end], # only store necessary\n **self.solver_params,\n )\n self.info[\"steps\"] += sol.nfev\n state.data[:] = sol.y.reshape(shape)\n return sol.t[0] # type: ignore\n\n if dt:\n self._logger.info(\n f\"Initialized {self.__class__.__name__} stepper with dt=%g\", dt\n )\n else:\n self._logger.info(f\"Initialized {self.__class__.__name__} stepper\")\n return stepper\n"
] | [
[
"numpy.ravel"
]
] |
bintulab/storm-analysis | [
"71ae493cbd17ddb97938d0ae2032d97a0eaa76b2"
] | [
"storm_analysis/spliner/measure_psf_utils.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nVarious utility functions for PSF measurement. Basically\ntrying to consolidate/improve what is common between the \nseveral different scripts that do this.\n\nHazen 03/18\n\"\"\"\nimport numpy\nimport scipy\nimport scipy.ndimage\n\nimport storm_analysis.sa_library.imagecorrelation as imgCorr\n\n\nclass ZScaler(object):\n \"\"\"\n Used in PSF measurement to convert a floating point z value into\n a z index.\n \"\"\"\n def __init__(self, z_range, z_step):\n super(ZScaler, self).__init__()\n\n assert(z_range > 0.0), \"The z range must be positive.\"\n assert(z_step > 0.0), \"The z step must be positive.\"\n assert(z_range >= z_step), \"The z range must be greater than or equal to the step size.\"\n \n # Assert that the z_step size is a multiple of the z_range.\n assert ((int(z_range*1.0e+3) % int(z_step*1.0e+3)) == 0), \"The z range must be a multiple of the z step.\"\n\n self.z_mid = int(round(z_range/z_step))\n self.z_max = 2 * self.z_mid + 1\n self.z_step = z_step\n\n def convert(self, z):\n return int(round(z/self.z_step) + self.z_mid)\n\n def getMaxZ(self):\n return self.z_max\n \n def inRange(self, zi):\n return ((zi > -1) and (zi < self.z_max))\n\n\ndef alignPSFs(psfs, max_xy = 2, max_z = 2, max_reps = 10, verbose = True):\n \"\"\"\n Align multiple PSFs in x,y,z.\n\n psfs - A list of PSFs, each of these has shape (nz, nxy, nxy).\n max_xy - The maximum expected alignment error xy in pixels.\n max_z - The maximum expected alignment error in z in z steps.\n max_reps - Maximum number of cycles of refinement.\n verbose - Verbose, or not.\n\n Returns the average PSF after alignment.\n \"\"\"\n\n # Create working list for aligned PSFs.\n aligned_psfs = []\n for i in range(len(psfs)):\n aligned_psfs.append(psfs[i])\n\n starting_score = psfCorrelation(aligned_psfs)\n \n # Repeat aligning a PSF to the average of all the other PSFs.\n for i in range(max_reps):\n moving = False\n for j in range(len(psfs)):\n\n # Compute average of all the PSFs except the current PSF.\n sum_psf = averagePSF(aligned_psfs, skip = j)\n\n # Align the current PSF to the average PSF and update\n # the list of aligned PSFs.\n #\n psf_aligner = imgCorr.Align3DProductNewtonCG(sum_psf,\n xy_margin = max_xy,\n z_margin = max_z)\n\n psf_aligner.setOtherImage(aligned_psfs[j])\n\n [aligned_psfs[j], q_score, disp] = psf_aligner.align()\n\n # Check if the PSF was translated.\n if not numpy.allclose(numpy.zeros(disp.size), disp, atol = 1.0e-3):\n moving = True\n \n if verbose:\n print(i, j, q_score, disp)\n\n current_score = psfCorrelation(aligned_psfs)\n \n # Print current score.\n if verbose:\n print(\"Quality score: {0:.6f}\".format(current_score/starting_score))\n print()\n \n # Stop if the PSFs are no longer being adjusted.\n if not moving:\n break\n \n i += 1\n\n # Compute average of aligned PSFs.\n return [averagePSF(aligned_psfs), current_score/starting_score]\n\n\ndef averagePSF(psfs, skip = -1):\n \"\"\"\n Compute average of a list of PSFs.\n \"\"\"\n n_psfs = 0\n average_psf = numpy.zeros_like(psfs[0])\n for i in range(len(psfs)):\n if (i == skip):\n continue\n average_psf += psfs[i]\n n_psfs += 1\n\n return average_psf/float(n_psfs)\n\n\ndef extractAOI(frame, aoi_size, xf, yf, zoom = 1):\n \"\"\"\n Extract AOI for PSF measurements.\n\n frame - An image.\n aoi_size - 1/2 the AOI size in pixels.\n xf - AOI x offset in pixels.\n yf - AOI y offset in pixels.\n zoom - Zoom factor, default is 2.0.\n \"\"\"\n xi = int(xf)\n yi = int(yf)\n\n sx = xi - aoi_size\n ex = xi + aoi_size\n sy = yi - 
aoi_size\n ey = yi + aoi_size\n \n # Check that the slice is inside the image.\n assert (sx >= 0), \"X position is too small ({0:d}).\".format(sx)\n assert (sy >= 0), \"Y position is too small ({0:d}).\".format(sy)\n assert (ex <= frame.shape[0]), \"X position is too large ({0:d}).\".format(ex)\n assert (ey <= frame.shape[1]), \"Y position is too large ({0:d}).\".format(ey)\n\n # Slice.\n im_slice = frame[sx:ex,sy:ey]\n\n # Zoom and center.\n if(zoom != 1):\n im_slice_up = scipy.ndimage.interpolation.zoom(im_slice, zoom)\n else:\n im_slice_up = im_slice\n \n im_slice_up = scipy.ndimage.interpolation.shift(im_slice_up, (-zoom*(xf-xi), -zoom*(yf-yi)), mode='nearest')\n\n return im_slice_up\n\n \ndef makeZIndexArray(z_offsets, z_range, z_step):\n \"\"\"\n Create the array that specifies which slice the image at\n a particular z offset should be added to. If the image \n should not be added to any slice then z_index will have\n the value of -1.\n\n Note: The bins are centered on the z_step.\n\n All units are in microns.\n\n z_offsets - The different z offsets, an array of shape\n (N,2) as contained for example in z_offsets.txt\n file.\n z_range - The range the PSF will cover (+- z_range).\n z_step - The z step size.\n \"\"\"\n assert(len(z_offsets.shape) == 2), \"Z offsets must have shape (N,2).\"\n assert(z_offsets.shape[1] == 2), \"Z offsets must have shape (N,2).\"\n\n z_sclr = ZScaler(z_range, z_step)\n z_index = numpy.zeros(z_offsets.shape[0], dtype = numpy.int) - 1\n for i in range(z_offsets.shape[0]):\n if (z_offsets[i][0] < 1.0e-6):\n continue\n zi = z_sclr.convert(z_offsets[i][1])\n if z_sclr.inRange(zi):\n z_index[i] = zi\n \n #if (z_offsets[i][1] > (-z_range - 0.5*z_step)) and (z_offsets[i][1] < (z_range + 0.5*z_step)):\n\n assert(numpy.max(z_index) > -0.5), \"No valid frames for PSF measurement.\"\n \n return z_index\n\n\ndef meanEdge(psf_slice):\n \"\"\"\n Return the mean of the boundary pixels of a PSF slice.\n \"\"\"\n edge = numpy.concatenate((psf_slice[0,:],\n psf_slice[-1,:],\n psf_slice[:,0],\n psf_slice[:,-1]))\n return numpy.mean(edge)\n\n\ndef measureSinglePSFBeads(frame_reader, z_index, aoi_size, x, y, drift_xy = None, zoom = 1):\n \"\"\"\n Measures a single PSF from a PSF z stack movie that you\n might take using beads.\n\n frame_reader - A sa_library.analysis_io.FrameReader like object.\n z_index - Z slice in the PSF for each frame, as returned for\n example by makeZIndexArray().\n aoi_size - Size of the PSF AOI.\n x - Bead center position in x.\n y - Bead center position in y.\n drift_xy - An array containing x,y drift information. This should\n have a shape of (N,2). The x drift is the first entry and\n the y drift is the second entry.\n zoom - Amount to magnify the final PSF by. 
Must be an integer.\n\n Returns - [psf, samples per z section]\n \"\"\"\n if drift_xy is not None:\n assert(drift_xy.shape[0] == z_index.size), \"XY drift must have the same number of points a z_index.\"\n assert(drift_xy.shape[1] == 2), \"XY drift can only have an x and a y offset for each frame.\"\n\n assert(isinstance(aoi_size, int)), \"PSF AOI must be an integer.\"\n assert(isinstance(zoom, int)), \"Zoom must be an integer.\"\n\n z_size = numpy.max(z_index) + 1\n\n psf = numpy.zeros((z_size, 2*aoi_size*zoom, 2*aoi_size*zoom))\n samples = numpy.zeros(z_size, dtype = numpy.int)\n for i in range(z_index.size):\n\n\n # Ignore frames with 'bad' z index.\n if(z_index[i] < 0):\n continue\n\n # Load the frame.\n frame = frame_reader.loadAFrame(i)\n\n # Figure out where to slice.\n xf = x\n yf = y\n\n # Apply drift correction (if specified).\n if drift_xy is not None:\n xf += drift_xy[i,0]\n yf += drift_xy[i,1]\n\n # Extract AOI.\n im_slice_up = extractAOI(frame, aoi_size, xf, yf, zoom = zoom)\n\n # Update accumulators.\n zi = z_index[i]\n psf[zi,:,:] += im_slice_up\n samples[zi] += 1\n\n return [psf, samples]\n\n\ndef psfCorrelation(psfs):\n \"\"\"\n Calculate the correlation score of the PSFs, this is just the\n sum of the product of all the PSFs.\n \"\"\"\n product = numpy.copy(psfs[0])\n for i in range(1,len(psfs)):\n product = product * psfs[i]\n product = product/float(len(psfs))\n return numpy.sum(product)\n\n\ndef psfSharpness(psf):\n \"\"\"\n Calculates how 'sharp' the PSF is as defined here by how large \n the mean frequency component is. The idea is that a better average\n PSF will be less blurred out, so it will have more power in\n the larger frequencies.\n \"\"\"\n psd = numpy.abs(numpy.fft.fftn(psf))**2\n\n k1 = numpy.abs(numpy.fft.fftfreq(psf.shape[0]))\n k2 = numpy.abs(numpy.fft.fftfreq(psf.shape[1]))\n k3 = numpy.abs(numpy.fft.fftfreq(psf.shape[2]))\n\n # Ignore the highest frequencies as these are mostly pixel noise.\n k1[(k1 > 0.4)] = 0\n k2[(k2 > 0.4)] = 0\n k2[(k3 > 0.4)] = 0\n\n [m_k1, m_k2, m_k3] = numpy.meshgrid(k1, k2, k3, indexing = 'ij')\n return numpy.mean(psd * m_k1 * m_k2 * m_k3)\n\n\ndef smoothPSF(psf, xy_sigma = 0.5, z_sigma = 0.5):\n \"\"\"\n Apply gaussian smoothing to a PSF.\n \"\"\"\n return scipy.ndimage.filters.gaussian_filter(psf,\n [z_sigma, xy_sigma, xy_sigma],\n mode = \"nearest\")\n\n \ndef sumPSF(psfs):\n \"\"\"\n Compute sum of a list of PSFs.\n \"\"\"\n sum_psf = numpy.zeros_like(psfs[0])\n for psf in psfs:\n sum_psf += psf\n\n return sum_psf\n"
] | [
[
"numpy.zeros_like",
"numpy.sum",
"numpy.fft.fftn",
"numpy.zeros",
"scipy.ndimage.interpolation.zoom",
"numpy.copy",
"numpy.max",
"scipy.ndimage.filters.gaussian_filter",
"numpy.concatenate",
"numpy.meshgrid",
"numpy.fft.fftfreq",
"numpy.mean",
"scipy.ndimage.interpolation.shift"
]
] |
javabrett/pandas | [
"7b92e7ee47fa3024aa5bc4fb3518717157c88dcc"
] | [
"pandas/core/base.py"
] | [
"\"\"\"\nBase and utility classes for pandas objects.\n\"\"\"\nimport builtins\nfrom collections import OrderedDict\nimport textwrap\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.lib as lib\nfrom pandas.compat import PYPY\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype,\n is_datetimelike, is_extension_array_dtype, is_extension_type, is_list_like,\n is_object_dtype, is_scalar, is_timedelta64_ns_dtype)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import algorithms, common as com\nfrom pandas.core.accessor import DirNamesMixin\nfrom pandas.core.arrays import ExtensionArray\nimport pandas.core.nanops as nanops\n\n_shared_docs = dict()\n_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',\n unique='IndexOpsMixin', duplicated='IndexOpsMixin')\n\n\nclass StringMixin:\n \"\"\"\n Implements string methods so long as object defines a `__str__` method.\n \"\"\"\n # side note - this could be made into a metaclass if more than one\n # object needs\n\n # ----------------------------------------------------------------------\n # Formatting\n\n def __str__(self):\n \"\"\"\n Return a string representation for a particular Object\n \"\"\"\n raise AbstractMethodError(self)\n\n def __repr__(self):\n \"\"\"\n Return a string representation for a particular object.\n \"\"\"\n return str(self)\n\n\nclass PandasObject(StringMixin, DirNamesMixin):\n\n \"\"\"baseclass for various pandas objects\"\"\"\n\n @property\n def _constructor(self):\n \"\"\"class constructor (for this class it's just `__class__`\"\"\"\n return self.__class__\n\n def __str__(self):\n \"\"\"\n Return a string representation for a particular object.\n \"\"\"\n # Should be overwritten by base classes\n return object.__repr__(self)\n\n def _reset_cache(self, key=None):\n \"\"\"\n Reset cached properties. If ``key`` is passed, only clears that key.\n \"\"\"\n if getattr(self, '_cache', None) is None:\n return\n if key is None:\n self._cache.clear()\n else:\n self._cache.pop(key, None)\n\n def __sizeof__(self):\n \"\"\"\n Generates the total memory usage for an object that returns\n either a value or Series of values\n \"\"\"\n if hasattr(self, 'memory_usage'):\n mem = self.memory_usage(deep=True)\n if not is_scalar(mem):\n mem = mem.sum()\n return int(mem)\n\n # no memory_usage attribute, so fall back to\n # object's 'sizeof'\n return super().__sizeof__()\n\n\nclass NoNewAttributesMixin:\n \"\"\"Mixin which prevents adding new attributes.\n\n Prevents additional attributes via xxx.attribute = \"something\" after a\n call to `self.__freeze()`. Mainly used to prevent the user from using\n wrong attributes on a accessor (`Series.cat/.str/.dt`).\n\n If you really want to add a new attribute at a later time, you need to use\n `object.__setattr__(self, key, value)`.\n \"\"\"\n\n def _freeze(self):\n \"\"\"Prevents setting additional attributes\"\"\"\n object.__setattr__(self, \"__frozen\", True)\n\n # prevent adding any attribute via s.xxx.new_attribute = ...\n def __setattr__(self, key, value):\n # _cache is used by a decorator\n # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)\n # because\n # 1.) 
getattr is false for attributes that raise errors\n # 2.) cls.__dict__ doesn't traverse into base classes\n if (getattr(self, \"__frozen\", False) and not\n (key == \"_cache\" or\n key in type(self).__dict__ or\n getattr(self, key, None) is not None)):\n raise AttributeError(\"You cannot add any new attribute '{key}'\".\n format(key=key))\n object.__setattr__(self, key, value)\n\n\nclass GroupByError(Exception):\n pass\n\n\nclass DataError(GroupByError):\n pass\n\n\nclass SpecificationError(GroupByError):\n pass\n\n\nclass SelectionMixin:\n \"\"\"\n mixin implementing the selection & aggregation interface on a group-like\n object sub-classes need to define: obj, exclusions\n \"\"\"\n _selection = None\n _internal_names = ['_cache', '__setstate__']\n _internal_names_set = set(_internal_names)\n\n _builtin_table = OrderedDict((\n (builtins.sum, np.sum),\n (builtins.max, np.max),\n (builtins.min, np.min),\n ))\n\n _cython_table = OrderedDict((\n (builtins.sum, 'sum'),\n (builtins.max, 'max'),\n (builtins.min, 'min'),\n (np.all, 'all'),\n (np.any, 'any'),\n (np.sum, 'sum'),\n (np.nansum, 'sum'),\n (np.mean, 'mean'),\n (np.nanmean, 'mean'),\n (np.prod, 'prod'),\n (np.nanprod, 'prod'),\n (np.std, 'std'),\n (np.nanstd, 'std'),\n (np.var, 'var'),\n (np.nanvar, 'var'),\n (np.median, 'median'),\n (np.nanmedian, 'median'),\n (np.max, 'max'),\n (np.nanmax, 'max'),\n (np.min, 'min'),\n (np.nanmin, 'min'),\n (np.cumprod, 'cumprod'),\n (np.nancumprod, 'cumprod'),\n (np.cumsum, 'cumsum'),\n (np.nancumsum, 'cumsum'),\n ))\n\n @property\n def _selection_name(self):\n \"\"\"\n return a name for myself; this would ideally be called\n the 'name' property, but we cannot conflict with the\n Series.name property which can be set\n \"\"\"\n if self._selection is None:\n return None # 'result'\n else:\n return self._selection\n\n @property\n def _selection_list(self):\n if not isinstance(self._selection, (list, tuple, ABCSeries,\n ABCIndexClass, np.ndarray)):\n return [self._selection]\n return self._selection\n\n @cache_readonly\n def _selected_obj(self):\n\n if self._selection is None or isinstance(self.obj, ABCSeries):\n return self.obj\n else:\n return self.obj[self._selection]\n\n @cache_readonly\n def ndim(self):\n return self._selected_obj.ndim\n\n @cache_readonly\n def _obj_with_exclusions(self):\n if self._selection is not None and isinstance(self.obj,\n ABCDataFrame):\n return self.obj.reindex(columns=self._selection_list)\n\n if len(self.exclusions) > 0:\n return self.obj.drop(self.exclusions, axis=1)\n else:\n return self.obj\n\n def __getitem__(self, key):\n if self._selection is not None:\n raise IndexError('Column(s) {selection} already selected'\n .format(selection=self._selection))\n\n if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,\n np.ndarray)):\n if len(self.obj.columns.intersection(key)) != len(key):\n bad_keys = list(set(key).difference(self.obj.columns))\n raise KeyError(\"Columns not found: {missing}\"\n .format(missing=str(bad_keys)[1:-1]))\n return self._gotitem(list(key), ndim=2)\n\n elif not getattr(self, 'as_index', False):\n if key not in self.obj.columns:\n raise KeyError(\"Column not found: {key}\".format(key=key))\n return self._gotitem(key, ndim=2)\n\n else:\n if key not in self.obj:\n raise KeyError(\"Column not found: {key}\".format(key=key))\n return self._gotitem(key, ndim=1)\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n 
requested ndim of result\n subset : object, default None\n subset to act on\n\n \"\"\"\n raise AbstractMethodError(self)\n\n def aggregate(self, func, *args, **kwargs):\n raise AbstractMethodError(self)\n\n agg = aggregate\n\n def _try_aggregate_string_function(self, arg, *args, **kwargs):\n \"\"\"\n if arg is a string, then try to operate on it:\n - try to find a function (or attribute) on ourselves\n - try to find a numpy function\n - raise\n\n \"\"\"\n assert isinstance(arg, str)\n\n f = getattr(self, arg, None)\n if f is not None:\n if callable(f):\n return f(*args, **kwargs)\n\n # people may try to aggregate on a non-callable attribute\n # but don't let them think they can pass args to it\n assert len(args) == 0\n assert len([kwarg for kwarg in kwargs\n if kwarg not in ['axis', '_level']]) == 0\n return f\n\n f = getattr(np, arg, None)\n if f is not None:\n return f(self, *args, **kwargs)\n\n raise ValueError(\"{arg} is an unknown string function\".format(arg=arg))\n\n def _aggregate(self, arg, *args, **kwargs):\n \"\"\"\n provide an implementation for the aggregators\n\n Parameters\n ----------\n arg : string, dict, function\n *args : args to pass on to the function\n **kwargs : kwargs to pass on to the function\n\n Returns\n -------\n tuple of result, how\n\n Notes\n -----\n how can be a string describe the required post-processing, or\n None if not required\n \"\"\"\n is_aggregator = lambda x: isinstance(x, (list, tuple, dict))\n is_nested_renamer = False\n\n _axis = kwargs.pop('_axis', None)\n if _axis is None:\n _axis = getattr(self, 'axis', 0)\n _level = kwargs.pop('_level', None)\n\n if isinstance(arg, str):\n return self._try_aggregate_string_function(arg, *args,\n **kwargs), None\n\n if isinstance(arg, dict):\n\n # aggregate based on the passed dict\n if _axis != 0: # pragma: no cover\n raise ValueError('Can only pass dict with axis=0')\n\n obj = self._selected_obj\n\n def nested_renaming_depr(level=4):\n # deprecation of nested renaming\n # GH 15931\n warnings.warn(\n (\"using a dict with renaming \"\n \"is deprecated and will be removed in a future \"\n \"version\"),\n FutureWarning, stacklevel=level)\n\n # if we have a dict of any non-scalars\n # eg. 
{'A' : ['mean']}, normalize all to\n # be list-likes\n if any(is_aggregator(x) for x in arg.values()):\n new_arg = OrderedDict()\n for k, v in arg.items():\n if not isinstance(v, (tuple, list, dict)):\n new_arg[k] = [v]\n else:\n new_arg[k] = v\n\n # the keys must be in the columns\n # for ndim=2, or renamers for ndim=1\n\n # ok for now, but deprecated\n # {'A': { 'ra': 'mean' }}\n # {'A': { 'ra': ['mean'] }}\n # {'ra': ['mean']}\n\n # not ok\n # {'ra' : { 'A' : 'mean' }}\n if isinstance(v, dict):\n is_nested_renamer = True\n\n if k not in obj.columns:\n msg = ('cannot perform renaming for {key} with a '\n 'nested dictionary').format(key=k)\n raise SpecificationError(msg)\n nested_renaming_depr(4 + (_level or 0))\n\n elif isinstance(obj, ABCSeries):\n nested_renaming_depr()\n elif (isinstance(obj, ABCDataFrame) and\n k not in obj.columns):\n raise KeyError(\n \"Column '{col}' does not exist!\".format(col=k))\n\n arg = new_arg\n\n else:\n # deprecation of renaming keys\n # GH 15931\n keys = list(arg.keys())\n if (isinstance(obj, ABCDataFrame) and\n len(obj.columns.intersection(keys)) != len(keys)):\n nested_renaming_depr()\n\n from pandas.core.reshape.concat import concat\n\n def _agg_1dim(name, how, subset=None):\n \"\"\"\n aggregate a 1-dim with how\n \"\"\"\n colg = self._gotitem(name, ndim=1, subset=subset)\n if colg.ndim != 1:\n raise SpecificationError(\"nested dictionary is ambiguous \"\n \"in aggregation\")\n return colg.aggregate(how, _level=(_level or 0) + 1)\n\n def _agg_2dim(name, how):\n \"\"\"\n aggregate a 2-dim with how\n \"\"\"\n colg = self._gotitem(self._selection, ndim=2,\n subset=obj)\n return colg.aggregate(how, _level=None)\n\n def _agg(arg, func):\n \"\"\"\n run the aggregations over the arg with func\n return an OrderedDict\n \"\"\"\n result = OrderedDict()\n for fname, agg_how in arg.items():\n result[fname] = func(fname, agg_how)\n return result\n\n # set the final keys\n keys = list(arg.keys())\n result = OrderedDict()\n\n # nested renamer\n if is_nested_renamer:\n result = list(_agg(arg, _agg_1dim).values())\n\n if all(isinstance(r, dict) for r in result):\n\n result, results = OrderedDict(), result\n for r in results:\n result.update(r)\n keys = list(result.keys())\n\n else:\n\n if self._selection is not None:\n keys = None\n\n # some selection on the object\n elif self._selection is not None:\n\n sl = set(self._selection_list)\n\n # we are a Series like object,\n # but may have multiple aggregations\n if len(sl) == 1:\n\n result = _agg(arg, lambda fname,\n agg_how: _agg_1dim(self._selection, agg_how))\n\n # we are selecting the same set as we are aggregating\n elif not len(sl - set(keys)):\n\n result = _agg(arg, _agg_1dim)\n\n # we are a DataFrame, with possibly multiple aggregations\n else:\n\n result = _agg(arg, _agg_2dim)\n\n # no selection\n else:\n\n try:\n result = _agg(arg, _agg_1dim)\n except SpecificationError:\n\n # we are aggregating expecting all 1d-returns\n # but we have 2d\n result = _agg(arg, _agg_2dim)\n\n # combine results\n\n def is_any_series():\n # return a boolean if we have *any* nested series\n return any(isinstance(r, ABCSeries) for r in result.values())\n\n def is_any_frame():\n # return a boolean if we have *any* nested series\n return any(isinstance(r, ABCDataFrame)\n for r in result.values())\n\n if isinstance(result, list):\n return concat(result, keys=keys, axis=1, sort=True), True\n\n elif is_any_frame():\n # we have a dict of DataFrames\n # return a MI DataFrame\n\n return concat([result[k] for k in keys],\n keys=keys, axis=1), 
True\n\n elif isinstance(self, ABCSeries) and is_any_series():\n\n # we have a dict of Series\n # return a MI Series\n try:\n result = concat(result)\n except TypeError:\n # we want to give a nice error here if\n # we have non-same sized objects, so\n # we don't automatically broadcast\n\n raise ValueError(\"cannot perform both aggregation \"\n \"and transformation operations \"\n \"simultaneously\")\n\n return result, True\n\n # fall thru\n from pandas import DataFrame, Series\n try:\n result = DataFrame(result)\n except ValueError:\n\n # we have a dict of scalars\n result = Series(result,\n name=getattr(self, 'name', None))\n\n return result, True\n elif is_list_like(arg):\n # we require a list, but not an 'str'\n return self._aggregate_multiple_funcs(arg,\n _level=_level,\n _axis=_axis), None\n else:\n result = None\n\n f = self._is_cython_func(arg)\n if f and not args and not kwargs:\n return getattr(self, f)(), None\n\n # caller can react\n return result, True\n\n def _aggregate_multiple_funcs(self, arg, _level, _axis):\n from pandas.core.reshape.concat import concat\n\n if _axis != 0:\n raise NotImplementedError(\"axis other than 0 is not supported\")\n\n if self._selected_obj.ndim == 1:\n obj = self._selected_obj\n else:\n obj = self._obj_with_exclusions\n\n results = []\n keys = []\n\n # degenerate case\n if obj.ndim == 1:\n for a in arg:\n try:\n colg = self._gotitem(obj.name, ndim=1, subset=obj)\n results.append(colg.aggregate(a))\n\n # make sure we find a good name\n name = com.get_callable_name(a) or a\n keys.append(name)\n except (TypeError, DataError):\n pass\n except SpecificationError:\n raise\n\n # multiples\n else:\n for index, col in enumerate(obj):\n try:\n colg = self._gotitem(col, ndim=1,\n subset=obj.iloc[:, index])\n results.append(colg.aggregate(arg))\n keys.append(col)\n except (TypeError, DataError):\n pass\n except ValueError:\n # cannot aggregate\n continue\n except SpecificationError:\n raise\n\n # if we are empty\n if not len(results):\n raise ValueError(\"no results\")\n\n try:\n return concat(results, keys=keys, axis=1, sort=False)\n except TypeError:\n\n # we are concatting non-NDFrame objects,\n # e.g. 
a list of scalars\n\n from pandas.core.dtypes.cast import is_nested_object\n from pandas import Series\n result = Series(results, index=keys, name=self.name)\n if is_nested_object(result):\n raise ValueError(\"cannot combine transform and \"\n \"aggregation operations\")\n return result\n\n def _shallow_copy(self, obj=None, obj_type=None, **kwargs):\n \"\"\"\n return a new object with the replacement attributes\n \"\"\"\n if obj is None:\n obj = self._selected_obj.copy()\n if obj_type is None:\n obj_type = self._constructor\n if isinstance(obj, obj_type):\n obj = obj.obj\n for attr in self._attributes:\n if attr not in kwargs:\n kwargs[attr] = getattr(self, attr)\n return obj_type(obj, **kwargs)\n\n def _is_cython_func(self, arg):\n \"\"\"\n if we define an internal function for this argument, return it\n \"\"\"\n return self._cython_table.get(arg)\n\n def _is_builtin_func(self, arg):\n \"\"\"\n if we define an builtin function for this argument, return it,\n otherwise return the arg\n \"\"\"\n return self._builtin_table.get(arg, arg)\n\n\nclass IndexOpsMixin:\n \"\"\" common ops mixin to support a unified interface / docs for Series /\n Index\n \"\"\"\n\n # ndarray compatibility\n __array_priority__ = 1000\n\n def transpose(self, *args, **kwargs):\n \"\"\"\n Return the transpose, which is by definition self.\n\n Returns\n -------\n %(klass)s\n \"\"\"\n nv.validate_transpose(args, kwargs)\n return self\n\n T = property(transpose, doc=\"Return the transpose, which is by \"\n \"definition self.\")\n\n @property\n def _is_homogeneous_type(self):\n \"\"\"\n Whether the object has a single dtype.\n\n By definition, Series and Index are always considered homogeneous.\n A MultiIndex may or may not be homogeneous, depending on the\n dtypes of the levels.\n\n See Also\n --------\n DataFrame._is_homogeneous_type\n MultiIndex._is_homogeneous_type\n \"\"\"\n return True\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple of the shape of the underlying data.\n \"\"\"\n return self._values.shape\n\n @property\n def ndim(self):\n \"\"\"\n Number of dimensions of the underlying data, by definition 1.\n \"\"\"\n return 1\n\n def item(self):\n \"\"\"\n Return the first element of the underlying data as a python scalar.\n\n Returns\n -------\n scalar\n The first element of %(klass)s.\n \"\"\"\n return self.values.item()\n\n @property\n def data(self):\n \"\"\"\n Return the data pointer of the underlying data.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.data is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self.values.data\n\n @property\n def itemsize(self):\n \"\"\"\n Return the size of the dtype of the item of the underlying data.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.itemsize is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self._ndarray_values.itemsize\n\n @property\n def nbytes(self):\n \"\"\"\n Return the number of bytes in the underlying data.\n \"\"\"\n return self._values.nbytes\n\n @property\n def strides(self):\n \"\"\"\n Return the strides of the underlying data.\n\n .. 
deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.strides is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self._ndarray_values.strides\n\n @property\n def size(self):\n \"\"\"\n Return the number of elements in the underlying data.\n \"\"\"\n return len(self._values)\n\n @property\n def flags(self):\n \"\"\"\n Return the ndarray.flags for the underlying data.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.flags is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self.values.flags\n\n @property\n def base(self):\n \"\"\"\n Return the base object if the memory of the underlying data is shared.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"{obj}.base is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning, stacklevel=2)\n return self.values.base\n\n @property\n def array(self) -> ExtensionArray:\n \"\"\"\n The ExtensionArray of the data backing this Series or Index.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n ExtensionArray\n An ExtensionArray of the values stored within. For extension\n types, this is the actual array. For NumPy native types, this\n is a thin (no copy) wrapper around :class:`numpy.ndarray`.\n\n ``.array`` differs ``.values`` which may require converting the\n data to a different form.\n\n See Also\n --------\n Index.to_numpy : Similar method that always returns a NumPy array.\n Series.to_numpy : Similar method that always returns a NumPy array.\n\n Notes\n -----\n This table lays out the different array types for each extension\n dtype within pandas.\n\n ================== =============================\n dtype array type\n ================== =============================\n category Categorical\n period PeriodArray\n interval IntervalArray\n IntegerNA IntegerArray\n datetime64[ns, tz] DatetimeArray\n ================== =============================\n\n For any 3rd-party extension types, the array type will be an\n ExtensionArray.\n\n For all remaining dtypes ``.array`` will be a\n :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray\n stored within. If you absolutely need a NumPy array (possibly with\n copying / coercing data), then use :meth:`Series.to_numpy` instead.\n\n Examples\n --------\n\n For regular NumPy types like int, and float, a PandasArray\n is returned.\n\n >>> pd.Series([1, 2, 3]).array\n <PandasArray>\n [1, 2, 3]\n Length: 3, dtype: int64\n\n For extension types, like Categorical, the actual ExtensionArray\n is returned\n\n >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))\n >>> ser.array\n [a, b, a]\n Categories (2, object): [a, b]\n \"\"\"\n # As a mixin, we depend on the mixing class having _values.\n # Special mixin syntax may be developed in the future:\n # https://github.com/python/typing/issues/246\n result = self._values # type: ignore\n\n if is_datetime64_ns_dtype(result.dtype):\n from pandas.arrays import DatetimeArray\n result = DatetimeArray(result)\n elif is_timedelta64_ns_dtype(result.dtype):\n from pandas.arrays import TimedeltaArray\n result = TimedeltaArray(result)\n\n elif not is_extension_array_dtype(result.dtype):\n from pandas.core.arrays.numpy_ import PandasArray\n result = PandasArray(result)\n\n return result\n\n def to_numpy(self, dtype=None, copy=False):\n \"\"\"\n A NumPy ndarray representing the values in this Series or Index.\n\n .. 
versionadded:: 0.24.0\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`\n copy : bool, default False\n Whether to ensure that the returned value is a not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.array : Get the actual data stored within.\n Index.array : Get the actual data stored within.\n DataFrame.to_numpy : Similar method for DataFrame.\n\n Notes\n -----\n The returned array will be the same up to equality (values equal\n in `self` will be equal in the returned array; likewise for values\n that are not equal). When `self` contains an ExtensionArray, the\n dtype may be different. For example, for a category-dtype Series,\n ``to_numpy()`` will return a NumPy array and the categorical dtype\n will be lost.\n\n For NumPy dtypes, this will be a reference to the actual data stored\n in this Series or Index (assuming ``copy=False``). Modifying the result\n in place will modify the data stored in the Series or Index (not that\n we recommend doing that).\n\n For extension types, ``to_numpy()`` *may* require copying data and\n coercing the result to a NumPy type (possibly object), which may be\n expensive. When you need a no-copy reference to the underlying data,\n :attr:`Series.array` should be used instead.\n\n This table lays out the different dtypes and default return types of\n ``to_numpy()`` for various dtypes within pandas.\n\n ================== ================================\n dtype array type\n ================== ================================\n category[T] ndarray[T] (same dtype as input)\n period ndarray[object] (Periods)\n interval ndarray[object] (Intervals)\n IntegerNA ndarray[object]\n datetime64[ns] datetime64[ns]\n datetime64[ns, tz] ndarray[object] (Timestamps)\n ================== ================================\n\n Examples\n --------\n >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))\n >>> ser.to_numpy()\n array(['a', 'b', 'a'], dtype=object)\n\n Specify the `dtype` to control how datetime-aware data is represented.\n Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`\n objects, each with the correct ``tz``.\n\n >>> ser = pd.Series(pd.date_range('2000', periods=2, tz=\"CET\"))\n >>> ser.to_numpy(dtype=object)\n array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),\n Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],\n dtype=object)\n\n Or ``dtype='datetime64[ns]'`` to return an ndarray of native\n datetime64 values. The values are converted to UTC and the timezone\n info is dropped.\n\n >>> ser.to_numpy(dtype=\"datetime64[ns]\")\n ... 
# doctest: +ELLIPSIS\n array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],\n dtype='datetime64[ns]')\n \"\"\"\n if is_datetime64tz_dtype(self.dtype) and dtype is None:\n # note: this is going to change very soon.\n # I have a WIP PR making this unnecessary, but it's\n # a bit out of scope for the DatetimeArray PR.\n dtype = \"object\"\n\n result = np.asarray(self._values, dtype=dtype)\n # TODO(GH-24345): Avoid potential double copy\n if copy:\n result = result.copy()\n return result\n\n @property\n def _ndarray_values(self) -> np.ndarray:\n \"\"\"\n The data as an ndarray, possibly losing information.\n\n The expectation is that this is cheap to compute, and is primarily\n used for interacting with our indexers.\n\n - categorical -> codes\n \"\"\"\n if is_extension_array_dtype(self):\n return self.array._ndarray_values\n # As a mixin, we depend on the mixing class having values.\n # Special mixin syntax may be developed in the future:\n # https://github.com/python/typing/issues/246\n return self.values # type: ignore\n\n @property\n def empty(self):\n return not self.size\n\n def max(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the maximum value of the Index.\n\n Parameters\n ----------\n axis : int, optional\n For compatibility with NumPy. Only 0 or None are allowed.\n skipna : bool, default True\n\n Returns\n -------\n scalar\n Maximum value.\n\n See Also\n --------\n Index.min : Return the minimum value in an Index.\n Series.max : Return the maximum value in a Series.\n DataFrame.max : Return the maximum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.max()\n 3\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.max()\n 'c'\n\n For a MultiIndex, the maximum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.max()\n ('b', 2)\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_max(args, kwargs)\n return nanops.nanmax(self._values, skipna=skipna)\n\n def argmax(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return an ndarray of the maximum argument indexer.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series\n skipna : bool, default True\n\n Returns\n -------\n numpy.ndarray\n Indices of the maximum values.\n\n See Also\n --------\n numpy.ndarray.argmax\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_argmax_with_skipna(skipna, args, kwargs)\n return nanops.nanargmax(self._values, skipna=skipna)\n\n def min(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the minimum value of the Index.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series\n skipna : bool, default True\n\n Returns\n -------\n scalar\n Minimum value.\n\n See Also\n --------\n Index.max : Return the maximum value of the object.\n Series.min : Return the minimum value in a Series.\n DataFrame.min : Return the minimum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.min()\n 1\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.min()\n 'a'\n\n For a MultiIndex, the minimum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.min()\n ('a', 1)\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_min(args, kwargs)\n return nanops.nanmin(self._values, skipna=skipna)\n\n def argmin(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return a ndarray of the minimum argument indexer.\n\n Parameters\n 
----------\n axis : {None}\n Dummy argument for consistency with Series\n skipna : bool, default True\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n numpy.ndarray.argmin\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_argmax_with_skipna(skipna, args, kwargs)\n return nanops.nanargmin(self._values, skipna=skipna)\n\n def tolist(self):\n \"\"\"\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n list\n\n See Also\n --------\n numpy.ndarray.tolist\n \"\"\"\n if is_datetimelike(self._values):\n return [com.maybe_box_datetimelike(x) for x in self._values]\n elif is_extension_array_dtype(self._values):\n return list(self._values)\n else:\n return self._values.tolist()\n\n to_list = tolist\n\n def __iter__(self):\n \"\"\"\n Return an iterator of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n iterator\n \"\"\"\n # We are explicity making element iterators.\n if is_datetimelike(self._values):\n return map(com.maybe_box_datetimelike, self._values)\n elif is_extension_array_dtype(self._values):\n return iter(self._values)\n else:\n return map(self._values.item, range(self._values.size))\n\n @cache_readonly\n def hasnans(self):\n \"\"\"\n Return if I have any nans; enables various perf speedups.\n \"\"\"\n return bool(isna(self).any())\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n \"\"\" perform the reduction type operation if we can \"\"\"\n func = getattr(self, name, None)\n if func is None:\n raise TypeError(\"{klass} cannot perform the operation {op}\".format(\n klass=self.__class__.__name__, op=name))\n return func(skipna=skipna, **kwds)\n\n def _map_values(self, mapper, na_action=None):\n \"\"\"\n An internal function that maps values using the input\n correspondence (which can be a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n The input correspondence object\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping function\n\n Returns\n -------\n Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n\n \"\"\"\n\n # we can fastpath dict/Series to an efficient map\n # as we know that we are not going to have to yield\n # python types\n if isinstance(mapper, dict):\n if hasattr(mapper, '__missing__'):\n # If a dictionary subclass defines a default value method,\n # convert mapper to a lookup function (GH #15999).\n dict_with_default = mapper\n mapper = lambda x: dict_with_default[x]\n else:\n # Dictionary does not have a default. 
Thus it's safe to\n # convert to an Series for efficiency.\n # we specify the keys here to handle the\n # possibility that they are tuples\n from pandas import Series\n mapper = Series(mapper)\n\n if isinstance(mapper, ABCSeries):\n # Since values were input this means we came from either\n # a dict or a series and mapper should be an index\n if is_categorical_dtype(self._values):\n # use the built in categorical series mapper which saves\n # time by mapping the categories instead of all values\n return self._values.map(mapper)\n if is_extension_type(self.dtype):\n values = self._values\n else:\n values = self.values\n\n indexer = mapper.index.get_indexer(values)\n new_values = algorithms.take_1d(mapper._values, indexer)\n\n return new_values\n\n # we must convert to python types\n if is_extension_type(self.dtype):\n values = self._values\n if na_action is not None:\n raise NotImplementedError\n map_f = lambda values, f: values.map(f)\n else:\n values = self.astype(object)\n values = getattr(values, 'values', values)\n if na_action == 'ignore':\n def map_f(values, f):\n return lib.map_infer_mask(values, f,\n isna(values).view(np.uint8))\n else:\n map_f = lib.map_infer\n\n # mapper is a function\n new_values = map_f(values, mapper)\n\n return new_values\n\n def value_counts(self, normalize=False, sort=True, ascending=False,\n bins=None, dropna=True):\n \"\"\"\n Return a Series containing counts of unique values.\n\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : boolean, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : boolean, default True\n Sort by frequencies.\n ascending : boolean, default False\n Sort in ascending order.\n bins : integer, optional\n Rather than count values, group them into half-open bins,\n a convenience for ``pd.cut``, only works with numeric data.\n dropna : boolean, default True\n Don't include counts of NaN.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.count: Number of non-NA elements in a DataFrame.\n\n Examples\n --------\n >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])\n >>> index.value_counts()\n 3.0 2\n 4.0 1\n 2.0 1\n 1.0 1\n dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])\n >>> s.value_counts(normalize=True)\n 3.0 0.4\n 4.0 0.2\n 2.0 0.2\n 1.0 0.2\n dtype: float64\n\n **bins**\n\n Bins can be useful for going from a continuous variable to a\n categorical variable; instead of counting unique\n apparitions of values, divide the index in the specified\n number of half-open bins.\n\n >>> s.value_counts(bins=3)\n (2.0, 3.0] 2\n (0.996, 2.0] 2\n (3.0, 4.0] 1\n dtype: int64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> s.value_counts(dropna=False)\n 3.0 2\n NaN 1\n 4.0 1\n 2.0 1\n 1.0 1\n dtype: int64\n \"\"\"\n from pandas.core.algorithms import value_counts\n result = value_counts(self, sort=sort, ascending=ascending,\n normalize=normalize, bins=bins, dropna=dropna)\n return result\n\n def unique(self):\n values = self._values\n\n if hasattr(values, 'unique'):\n\n result = values.unique()\n else:\n from pandas.core.algorithms import unique1d\n result = unique1d(values)\n\n return result\n\n def 
nunique(self, dropna=True):\n \"\"\"\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NaN in the count.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 3, 5, 7, 7])\n >>> s\n 0 1\n 1 3\n 2 5\n 3 7\n 4 7\n dtype: int64\n\n >>> s.nunique()\n 4\n \"\"\"\n uniqs = self.unique()\n n = len(uniqs)\n if dropna and isna(uniqs).any():\n n -= 1\n return n\n\n @property\n def is_unique(self):\n \"\"\"\n Return boolean if values in the object are unique.\n\n Returns\n -------\n bool\n \"\"\"\n return self.nunique(dropna=False) == len(self)\n\n @property\n def is_monotonic(self):\n \"\"\"\n Return boolean if values in the object are\n monotonic_increasing.\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n bool\n \"\"\"\n from pandas import Index\n return Index(self).is_monotonic\n\n is_monotonic_increasing = is_monotonic\n\n @property\n def is_monotonic_decreasing(self):\n \"\"\"\n Return boolean if values in the object are\n monotonic_decreasing.\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n bool\n \"\"\"\n from pandas import Index\n return Index(self).is_monotonic_decreasing\n\n def memory_usage(self, deep=False):\n \"\"\"\n Memory usage of the values\n\n Parameters\n ----------\n deep : bool\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption\n\n Returns\n -------\n bytes used\n\n See Also\n --------\n numpy.ndarray.nbytes\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False or if used on PyPy\n \"\"\"\n if hasattr(self.array, 'memory_usage'):\n return self.array.memory_usage(deep=deep)\n\n v = self.array.nbytes\n if deep and is_object_dtype(self) and not PYPY:\n v += lib.memory_usage_of_objects(self.array)\n return v\n\n @Substitution(\n values='', order='', size_hint='',\n sort=textwrap.dedent(\"\"\"\\\n sort : boolean, default False\n Sort `uniques` and shuffle `labels` to maintain the\n relationship.\n \"\"\"))\n @Appender(algorithms._shared_docs['factorize'])\n def factorize(self, sort=False, na_sentinel=-1):\n return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)\n\n _shared_docs['searchsorted'] = (\n \"\"\"\n Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted %(klass)s `self` such that, if the\n corresponding elements in `value` were inserted before the indices,\n the order of `self` would be preserved.\n\n Parameters\n ----------\n value : array_like\n Values to insert into `self`.\n side : {'left', 'right'}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `self`).\n sorter : 1-D array_like, optional\n Optional array of integer indices that sort `self` into ascending\n order. They are typically the result of ``np.argsort``.\n\n Returns\n -------\n int or array of int\n A scalar or array of insertion points with the\n same shape as `value`.\n\n .. 
versionchanged :: 0.24.0\n If `value` is a scalar, an int is now always returned.\n Previously, scalar inputs returned an 1-item array for\n :class:`Series` and :class:`Categorical`.\n\n See Also\n --------\n numpy.searchsorted\n\n Notes\n -----\n Binary search is used to find the required insertion points.\n\n Examples\n --------\n\n >>> x = pd.Series([1, 2, 3])\n >>> x\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> x.searchsorted(4)\n 3\n\n >>> x.searchsorted([0, 4])\n array([0, 3])\n\n >>> x.searchsorted([1, 3], side='left')\n array([0, 2])\n\n >>> x.searchsorted([1, 3], side='right')\n array([1, 3])\n\n >>> x = pd.Categorical(['apple', 'bread', 'bread',\n 'cheese', 'milk'], ordered=True)\n [apple, bread, bread, cheese, milk]\n Categories (4, object): [apple < bread < cheese < milk]\n\n >>> x.searchsorted('bread')\n 1\n\n >>> x.searchsorted(['bread'], side='right')\n array([3])\n \"\"\")\n\n @Substitution(klass='Index')\n @Appender(_shared_docs['searchsorted'])\n def searchsorted(self, value, side='left', sorter=None):\n return algorithms.searchsorted(self._values, value,\n side=side, sorter=sorter)\n\n def drop_duplicates(self, keep='first', inplace=False):\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if isinstance(self, ABCIndexClass):\n if self.is_unique:\n return self._shallow_copy()\n\n duplicated = self.duplicated(keep=keep)\n result = self[np.logical_not(duplicated)]\n if inplace:\n return self._update_inplace(result)\n else:\n return result\n\n def duplicated(self, keep='first'):\n from pandas.core.algorithms import duplicated\n if isinstance(self, ABCIndexClass):\n if self.is_unique:\n return np.zeros(len(self), dtype=np.bool)\n return duplicated(self, keep=keep)\n else:\n return self._constructor(duplicated(self, keep=keep),\n index=self.index).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # abstracts\n\n def _update_inplace(self, result, verify_is_copy=True, **kwargs):\n raise AbstractMethodError(self)\n"
] | [
[
"pandas.compat.numpy.function.validate_min",
"pandas.Series",
"pandas.core.algorithms.value_counts",
"pandas.core.dtypes.cast.is_nested_object",
"pandas.compat.numpy.function.validate_argmax_with_skipna",
"pandas.core.reshape.concat.concat",
"numpy.asarray",
"pandas.core.nanops.nanargmax",
"pandas.core.dtypes.common.is_extension_type",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.nanops.nanmax",
"pandas.core.dtypes.common.is_datetime64_ns_dtype",
"pandas.core.algorithms.take_1d",
"numpy.logical_not",
"pandas.compat.numpy.function.validate_max",
"pandas.core.common.maybe_box_datetimelike",
"pandas.core.nanops.nanargmin",
"pandas.compat.numpy.function.validate_transpose",
"pandas.core.arrays.numpy_.PandasArray",
"pandas.core.algorithms.unique1d",
"pandas.core.algorithms.factorize",
"pandas.core.common.get_callable_name",
"pandas.compat.numpy.function.validate_minmax_axis",
"pandas.Index",
"pandas.core.dtypes.common.is_scalar",
"pandas.arrays.DatetimeArray",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.missing.isna",
"pandas.util._decorators.Substitution",
"pandas.util._decorators.Appender",
"pandas.core.nanops.nanmin",
"pandas.core.algorithms.searchsorted",
"pandas.core.dtypes.common.is_datetimelike",
"pandas.arrays.TimedeltaArray",
"pandas.DataFrame",
"pandas.core.dtypes.common.is_timedelta64_ns_dtype",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.errors.AbstractMethodError",
"pandas.util._validators.validate_bool_kwarg",
"pandas._libs.lib.memory_usage_of_objects"
]
] |
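The pandas/core/base.py entry above defines IndexOpsMixin, whose methods sit behind the public Series/Index API and delegate to the internal helpers enumerated in its apis field (pandas.core.algorithms, pandas.core.nanops, numpy.asarray). The snippet below is a minimal illustrative sketch, not part of the dataset row: it calls the public entry points that route through those internals, with made-up example values.

import numpy as np
import pandas as pd

s = pd.Series([3, 1, 2, 3, 4, np.nan])
print(s.value_counts(dropna=False))    # routed through pandas.core.algorithms.value_counts
print(s.nunique(), s.is_unique)        # IndexOpsMixin.nunique / is_unique (unique + isna)
print(s.to_numpy(dtype="float64"))     # numpy.asarray path in IndexOpsMixin.to_numpy

idx = pd.Index([3, 2, 1])
print(idx.max(), idx.min())            # routed through pandas.core.nanops.nanmax / nanmin
print(pd.Series([1, 2, 3]).searchsorted([0, 4]))  # pandas.core.algorithms.searchsorted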
arangoml/dgl | [
"d135058f9986fadcbdf6aa1011a00c3ad45a8ce3"
] | [
"tutorials/basics/2_basics.py"
] | [
"\"\"\"\r\n.. currentmodule:: dgl\r\n\r\nDGLGraph and Node/edge Features\r\n===============================\r\n\r\n**Author**: `Minjie Wang <https://jermainewang.github.io/>`_, Quan Gan, Yu Gai,\r\nZheng Zhang\r\n\r\nIn this tutorial, you learn how to create a graph and how to read and write node and edge representations.\r\n\"\"\"\r\n\r\n###############################################################################\r\n# Creating a graph\r\n# ----------------\r\n# The design of :class:`DGLGraph` was influenced by other graph libraries. You \r\n# can create a graph from networkx and convert it into a :class:`DGLGraph` and \r\n# vice versa.\r\n\r\nimport networkx as nx\r\nimport dgl\r\n\r\ng_nx = nx.petersen_graph()\r\ng_dgl = dgl.DGLGraph(g_nx)\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.subplot(121)\r\nnx.draw(g_nx, with_labels=True)\r\nplt.subplot(122)\r\nnx.draw(g_dgl.to_networkx(), with_labels=True)\r\n\r\nplt.show()\r\n\r\n\r\n###############################################################################\r\n# The examples here show the same graph, except that :class:`DGLGraph` is always directional.\r\n#\r\n# You can also create a graph by calling the DGL interface.\r\n# \r\n# In the next example, you build a star graph. :class:`DGLGraph` nodes are a consecutive range of\r\n# integers between 0 and :func:`number_of_nodes() <DGLGraph.number_of_nodes>`\r\n# and can grow by calling :func:`add_nodes <DGLGraph.add_nodes>`.\r\n# :class:`DGLGraph` edges are in order of their additions. Note that\r\n# edges are accessed in much the same way as nodes, with one extra feature: *edge broadcasting*.\r\n\r\nimport dgl\r\nimport torch as th\r\n\r\ng = dgl.DGLGraph()\r\ng.add_nodes(10)\r\n# A couple edges one-by-one\r\nfor i in range(1, 4):\r\n g.add_edge(i, 0)\r\n# A few more with a paired list\r\nsrc = list(range(5, 8)); dst = [0]*3\r\ng.add_edges(src, dst)\r\n# finish with a pair of tensors\r\nsrc = th.tensor([8, 9]); dst = th.tensor([0, 0])\r\ng.add_edges(src, dst)\r\n\r\n# Edge broadcasting will do star graph in one go!\r\ng.clear(); g.add_nodes(10)\r\nsrc = th.tensor(list(range(1, 10)));\r\ng.add_edges(src, 0)\r\n\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\nnx.draw(g.to_networkx(), with_labels=True)\r\nplt.show()\r\n\r\n\r\n###############################################################################\r\n# Assigning a feature\r\n# -------------------\r\n# You can also assign features to nodes and edges of a :class:`DGLGraph`. The\r\n# features are represented as dictionary of names (strings) and tensors,\r\n# called **fields**.\r\n#\r\n# The following code snippet assigns each node a vector (len=3).\r\n#\r\n# .. note::\r\n#\r\n# DGL aims to be framework-agnostic, and currently it supports PyTorch and\r\n# MXNet tensors. The following examples use PyTorch only.\r\n\r\nimport dgl\r\nimport torch as th\r\n\r\nx = th.randn(10, 3)\r\ng.ndata['x'] = x\r\n\r\n\r\n###############################################################################\r\n# :func:`ndata <DGLGraph.ndata>` is a syntax sugar to access the state of all nodes. 
\r\n# States are stored\r\n# in a container ``data`` that hosts a user-defined dictionary.\r\n\r\nprint(g.ndata['x'] == g.nodes[:].data['x'])\r\n\r\n# Access node set with integer, list, or integer tensor\r\ng.nodes[0].data['x'] = th.zeros(1, 3)\r\ng.nodes[[0, 1, 2]].data['x'] = th.zeros(3, 3)\r\ng.nodes[th.tensor([0, 1, 2])].data['x'] = th.zeros(3, 3)\r\n\r\n\r\n###############################################################################\r\n# Assigning edge features is similar to that of node features,\r\n# except that you can also do it by specifying endpoints of the edges.\r\n\r\ng.edata['w'] = th.randn(9, 2)\r\n\r\n# Access edge set with IDs in integer, list, or integer tensor\r\ng.edges[1].data['w'] = th.randn(1, 2)\r\ng.edges[[0, 1, 2]].data['w'] = th.zeros(3, 2)\r\ng.edges[th.tensor([0, 1, 2])].data['w'] = th.zeros(3, 2)\r\n\r\n# You can also access the edges by giving endpoints\r\ng.edges[1, 0].data['w'] = th.ones(1, 2) # edge 1 -> 0\r\ng.edges[[1, 2, 3], [0, 0, 0]].data['w'] = th.ones(3, 2) # edges [1, 2, 3] -> 0\r\n\r\n\r\n###############################################################################\r\n# After assignments, each node or edge field will be associated with a scheme\r\n# containing the shape and data type (dtype) of its field value.\r\n\r\nprint(g.node_attr_schemes())\r\ng.ndata['x'] = th.zeros((10, 4))\r\nprint(g.node_attr_schemes())\r\n\r\n\r\n###############################################################################\r\n# You can also remove node or edge states from the graph. This is particularly\r\n# useful to save memory during inference.\r\n\r\ng.ndata.pop('x')\r\ng.edata.pop('w')\r\n\r\n\r\n###############################################################################\r\n# Working with multigraphs\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~\r\n# Many graph applications need parallel edges. To enable this, construct :class:`DGLGraph`\r\n# with ``multigraph=True``.\r\n\r\ng_multi = dgl.DGLGraph(multigraph=True)\r\ng_multi.add_nodes(10)\r\ng_multi.ndata['x'] = th.randn(10, 2)\r\n\r\ng_multi.add_edges(list(range(1, 10)), 0)\r\ng_multi.add_edge(1, 0) # two edges on 1->0\r\n\r\ng_multi.edata['w'] = th.randn(10, 2)\r\ng_multi.edges[1].data['w'] = th.zeros(1, 2)\r\nprint(g_multi.edges())\r\n\r\n\r\n###############################################################################\r\n# An edge in multigraph cannot be uniquely identified by using its incident nodes\r\n# :math:`u` and :math:`v`; query their edge IDs use ``edge_id`` interface.\r\n\r\neid_10 = g_multi.edge_id(1, 0)\r\ng_multi.edges[eid_10].data['w'] = th.ones(len(eid_10), 2)\r\nprint(g_multi.edata['w'])\r\n\r\n\r\n###############################################################################\r\n# .. note::\r\n#\r\n# * Nodes and edges can be added but not removed.\r\n# * Updating a feature of different schemes raises the risk of error on individual nodes (or\r\n# node subset).\r\n\r\n\r\n###############################################################################\r\n# Next steps\r\n# ----------\r\n# In the :doc:`next tutorial <3_pagerank>` you learn the\r\n# DGL message passing interface by implementing PageRank.\r\n"
] | [
[
"torch.ones",
"torch.randn",
"torch.tensor",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"torch.zeros"
]
] |
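The 2_basics.py tutorial above builds a 10-node star graph and attaches node and edge features; its apis field lists only the torch and matplotlib calls. The sketch below is an illustrative rough equivalent, not part of the dataset row: it reproduces those tensor and plotting calls but builds the graph with networkx (star_graph) so it runs without pinning a particular DGL version; feature shapes follow the tutorial.

import matplotlib.pyplot as plt
import networkx as nx
import torch as th

g = nx.star_graph(9)                          # node 0 is the hub, as in the tutorial
node_feat = th.randn(10, 3)                   # plays the role of g.ndata['x']
edge_feat = th.zeros(g.number_of_edges(), 2)  # plays the role of g.edata['w']
edge_feat[0] = th.ones(2)                     # overwrite one edge, as edges[...] = th.ones(...)
src = th.tensor(list(range(1, 10)))           # endpoints passed to add_edges(src, 0)

plt.subplot(111)
nx.draw(g, with_labels=True)
plt.show()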
Bertinus/causal_cell_embedding | [
"417b55749130fc7b7832fd3ee4c49feff4a04593"
] | [
"ai/causalcell/training.py"
] | [
"import ai.causalcell.utils.configuration as configuration\nimport ai.causalcell.datasets.synthetic_dataset as sd\nimport logging\nimport numpy as np\nimport torch\nimport random\nimport os\nimport copy\nimport dill as pickle\nimport skopt\nfrom collections import OrderedDict\n\n# from ai.causalcell.datasets.synthetic_dataset import global_graph\n\n_LOG = logging.getLogger(__name__)\n\n\ndef set_seed(seed, cuda=False):\n \"\"\"\n Fix the seed for numpy, python random, and pytorch.\n \"\"\"\n print('pytorch/random seed: {}'.format(seed))\n\n # Numpy, python, pytorch (cpu), pytorch (gpu).\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n if cuda:\n torch.cuda.manual_seed_all(seed)\n\n\ndef save_results(results, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n # Save best model\n output_name = \"best_model_{}.pth.tar\".format(results[\"exp_id\"])\n torch.save(results[\"best_model\"].state_dict(), os.path.join(output_dir, output_name))\n\n # Save last model\n output_name = \"last_model_{}.pth.tar\".format(results[\"exp_id\"])\n torch.save(results[\"last_model\"].state_dict(), os.path.join(output_dir, output_name))\n\n # Save the rest of the results dictionary\n del results[\"best_model\"]\n del results[\"last_model\"]\n output_name = \"results_{}.pkl\".format(results[\"exp_id\"])\n with open(os.path.join(output_dir, output_name), 'wb') as f:\n pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef train_epoch(model, device, train_loader, epoch):\n\n model.train()\n\n all_loss, all_losses = [], None\n\n for batch_idx, data in enumerate(train_loader):\n\n x, fingerprint, compound, line = data\n x = x.to(device)\n fingerprint = fingerprint.to(device)\n\n # Expected to return a dictionary of outputs.\n loss, losses = model.forward_backward_update(x, fingerprint, compound, line, device=device)\n\n if all_losses is None:\n all_losses = {i: [losses[i].detach().cpu().item()] for i in losses.keys()}\n else:\n for i in losses.keys():\n all_losses[i].append(losses[i].detach().cpu().item())\n\n all_loss.append(loss.detach())\n\n all_loss = float(torch.mean(torch.tensor(all_loss)).detach().numpy())\n print('epoch {} Mean train loss: {:.4f}'.format(epoch, all_loss))\n\n return all_loss, all_losses\n\n\ndef evaluate_epoch(model, device, data_loader, epoch):\n \"\"\"Evaluates a given model on given data.\"\"\"\n model.eval()\n all_loss, all_losses = [], None\n\n with torch.no_grad():\n for batch_idx, data in enumerate(data_loader):\n\n x, fingerprint, compound, line = data\n x = x.to(device)\n fingerprint = fingerprint.to(device)\n\n # Expected to return a dictionary of outputs.\n loss, losses = model.forward_loss(x, fingerprint, compound, line, device=device)\n\n if all_losses is None:\n all_losses = {i: [losses[i].detach().cpu().item()] for i in losses.keys()}\n else:\n for i in losses.keys():\n all_losses[i].append(losses[i].detach().cpu().item())\n\n # Sum up batch loss.\n loss = sum(losses.values())\n all_loss.append(loss)\n\n all_loss = float(torch.mean(torch.tensor(all_loss)).detach().numpy())\n print('epoch {} Mean valid loss: {:.4f}'.format(epoch, all_loss))\n\n return all_loss, all_losses\n\n\ndef train(cfg):\n \"\"\"\n Trains a model on a dataset given the supplied configuration.\n save is by default True and will result in the model's performance being\n saved to a handy pickle file, as well as the best-performing model being\n saved. 
Set this to False when doing an outer loop of hyperparameter\n optimization.\n \"\"\"\n exp_name = cfg['experiment_name']\n exp_id = cfg['exp_id']\n n_epochs = cfg['n_epochs']\n seed = cfg['seed']\n output_dir = os.path.join('results', cfg['experiment_name'])\n early_stopping = cfg['early_stopping']\n patience_max = cfg['patience_max']\n patience = 0\n\n set_seed(seed)\n\n # dataloader\n valid_loader = configuration.setup_dataloader(cfg, 'valid')\n train_loader = configuration.setup_dataloader(cfg, 'train')\n\n device = 'cuda' if cfg['cuda'] else 'cpu'\n model = configuration.setup_model(cfg).to(device)\n\n print('model: \\n{}'.format(model))\n\n best_valid_loss = np.inf\n best_model, best_epoch = None, None\n all_train_losses, all_valid_losses = [], []\n\n for epoch in range(n_epochs):\n\n train_loss, train_losses = train_epoch(model=model, device=device, train_loader=train_loader,\n epoch=epoch)\n\n valid_loss, valid_losses = evaluate_epoch(model=model, device=device, data_loader=valid_loader, epoch=epoch)\n\n all_train_losses.append(train_losses)\n all_valid_losses.append(valid_losses)\n\n if valid_loss < best_valid_loss:\n best_model = copy.deepcopy(model)\n best_epoch = epoch\n best_valid_loss = valid_loss\n else:\n patience += 1\n if early_stopping and patience > patience_max:\n break\n\n results = {\"exp_name\": exp_name,\n \"config\": cfg,\n \"data_graph\": sd.global_graph,\n \"seed\": seed,\n \"exp_id\": exp_id,\n \"n_envs_in_split\": {\"train\": train_loader.batch_sampler.n_envs_in_split,\n \"valid\": valid_loader.batch_sampler.n_envs_in_split},\n \"n_samples_in_split\": {\"train\": train_loader.batch_sampler.n_samples,\n \"valid\": valid_loader.batch_sampler.n_samples},\n \"losses\": {\"train\": all_train_losses, \"valid\": all_valid_losses},\n \"best_epoch\": best_epoch,\n \"best_model\": best_model.to('cpu'),\n \"last_model\": model.to('cpu')}\n\n save_results(results, output_dir)\n"
] | [
[
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.no_grad",
"numpy.random.seed",
"torch.tensor"
]
] |
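training.py above seeds every RNG before training and averages per-batch losses inside torch.no_grad() during evaluation. The sketch below is a minimal standalone illustration of that pattern using the listed calls; torch.randn here is only a stand-in for the model's forward_loss output.

import random
import numpy as np
import torch


def set_seed(seed, cuda=False):
    # Same trio as the stored set_seed: numpy, python random, torch (+ all GPUs).
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)


set_seed(0)
batch_losses = []
with torch.no_grad():                         # evaluation: no autograd graph is built
    for _ in range(3):
        batch_losses.append(torch.randn(()).abs().item())  # stand-in for a batch loss
mean_loss = float(torch.mean(torch.tensor(batch_losses)))
print(mean_loss)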
ahstarwab/nn-gev_torch | [
"f7849e55230322fd5bfb4da81efc72875e2e76da"
] | [
"nn_models.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nclass BLSTMMaskEstimator(nn.Module):\n def __init__(self, input_dim=513, hidden_dim=512, num_layers=1, dropout=0.3, bidirectional=True):\n super(BLSTMMaskEstimator, self).__init__()\n self.dropout = dropout\n # blstm_layer = SequenceBLSTM(513, 256, normalized=True)\n self.blstm_layer = nn.LSTM(input_dim, 256, num_layers, dropout=dropout, bidirectional=bidirectional)\n # relu_1 = SequenceLinear(256, 513, normalized=True)\n self.relu_1 = nn.Linear(hidden_dim, input_dim)\n # relu_2 = SequenceLinear(513, 513, normalized=True)\n self.relu_2 = nn.Linear(input_dim, input_dim)\n # noise_mask_estimate = SequenceLinear(513, 513, normalized=True)\n self.noise_mask_estimate = nn.Linear(input_dim, input_dim)\n # speech_mask_estimate = SequenceLinear(513, 513, normalized=True)\n self.speech_mask_estimate = nn.Linear(input_dim, input_dim)\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, Y):\n \n Y = Y.reshape(-1, 1, Y.shape[-1]) #[seq_len X 1 X input_dim]\n blstm, _ = self.blstm_layer(Y)\n \n relu_1 = self.relu_1(blstm)#, dropout=self.dropout)\n #TODO\n #Need torch.clamp(relu_1, min=0, max=1)?\n relu_2 = self.relu_2(relu_1)#, dropout=self.dropout)\n #TODO\n #Need torch.clamp(relu_2, min=0, max=1)\n X_mask = self.sigmoid(self.speech_mask_estimate(relu_2))\n N_mask = self.sigmoid(self.noise_mask_estimate(relu_2))\n \n\n return X_mask, N_mask\n\nclass SimpleFWMaskEstimator(nn.Module):\n def __init__(self, input_dim=513, hidden_dim=1024, output_dim = 513):\n super(SimpleFWMaskEstimator, self).__init__()\n self.relu_1 = nn.Linear(input_dim, hidden_dim)\n self.noise_mask_estimate = nn.Linear(hidden_dim, output_dim)\n self.speech_mask_estimate = nn.Linear(hidden_dim, output_dim)\n\n def forward(self, Y):\n relu_1 = self.relu_1(Y)\n #TODO\n #Need torch.clamp(relu_1, min=0, max=1)\n X_mask = nn.Sigmoid(self.speech_mask_estimate(relu_1))\n N_mask = nn.Sigmoid(self.noise_mask_estimate(relu_1))\n \n\n return X_mask, N_mask,"
] | [
[
"torch.nn.Sigmoid",
"torch.nn.LSTM",
"torch.nn.Linear"
]
] |
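nn_models.py above wires torch.nn.LSTM and torch.nn.Linear into two mask estimators; note that, as stored, SimpleFWMaskEstimator passes a tensor to the nn.Sigmoid constructor inside forward(), which raises a TypeError at runtime. The sketch below is an illustrative minimal variant (layer sizes are arbitrary) showing the usual pattern with the same three modules: instantiate nn.Sigmoid once, then call the instance.

import torch
import torch.nn as nn


class TinyMaskEstimator(nn.Module):
    def __init__(self, input_dim=513, hidden_dim=256):
        super().__init__()
        self.blstm = nn.LSTM(input_dim, hidden_dim, bidirectional=True)
        self.speech_mask = nn.Linear(2 * hidden_dim, input_dim)
        self.noise_mask = nn.Linear(2 * hidden_dim, input_dim)
        self.sigmoid = nn.Sigmoid()           # instantiate once, call in forward

    def forward(self, y):                     # y: [seq_len, batch, input_dim]
        h, _ = self.blstm(y)                  # h: [seq_len, batch, 2 * hidden_dim]
        return self.sigmoid(self.speech_mask(h)), self.sigmoid(self.noise_mask(h))


x_mask, n_mask = TinyMaskEstimator()(torch.randn(100, 1, 513))
print(x_mask.shape, n_mask.shape)             # torch.Size([100, 1, 513]) each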
tomsnail/opencv_tf_py | [
"cf9aa7fa250546564cff56aa33b5a39991b0d8f1"
] | [
"learn/C01/01/Convolution.py"
] | [
"import cv2\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread('images/1.jpg',cv2.IMREAD_GRAYSCALE)\nkernel = [[1,1,1],[0,0,0],[-1,-1,-1]]\ndest = signal.convolve2d(img,kernel)\nplt.imshow(dest,cmap='gray')\nplt.show()"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"scipy.signal.convolve2d"
]
] |
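Convolution.py above filters a grayscale image with a horizontal edge kernel via scipy.signal.convolve2d and displays it with matplotlib. The sketch below is an illustrative standalone version: it swaps the cv2.imread('images/1.jpg') call for a synthetic array so it runs without the image file, and uses mode='same' so the output keeps the input size.

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

img = np.zeros((64, 64), dtype=float)
img[16:48, 16:48] = 1.0                                  # white square on black background
kernel = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])  # same horizontal edge kernel

dest = signal.convolve2d(img, kernel, mode='same')       # 'same' keeps the 64x64 shape
plt.imshow(dest, cmap='gray')
plt.show()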
fire-suppression-abm/mesa | [
"8498eea3e5d4a739aee3b003107a0e7de59c5026"
] | [
"mesa/datacollection.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nMesa Data Collection Module\n===========================\n\nDataCollector is meant to provide a simple, standard way to collect data\ngenerated by a Mesa model. It collects three types of data: model-level data,\nagent-level data, and tables.\n\nA DataCollector is instantiated with two dictionaries of reporter names and\nassociated variable names or functions for each, one for model-level data and\none for agent-level data; a third dictionary provides table names and columns.\nVariable names are converted into functions which retrieve attributes of that\nname.\n\nWhen the collect() method is called, each model-level function is called, with\nthe model as the argument, and the results associated with the relevant\nvariable. Then the agent-level functions are called on each agent in the model\nscheduler.\n\nAdditionally, other objects can write directly to tables by passing in an\nappropriate dictionary object for a table row.\n\nThe DataCollector then stores the data it collects in dictionaries:\n * model_vars maps each reporter to a list of its values\n * tables maps each table to a dictionary, with each column as a key with a\n list as its value.\n * _agent_records maps each model step to a list of each agents id\n and its values.\n\nFinally, DataCollector can create a pandas DataFrame from each collection.\n\nThe default DataCollector here makes several assumptions:\n * The model has a schedule object called 'schedule'\n * The schedule has an agent list called agents\n * For collecting agent-level variables, agents must have a unique_id\n\n\"\"\"\nfrom functools import partial\nimport itertools\nfrom operator import attrgetter\nimport pandas as pd\n\n\nclass DataCollector:\n \"\"\" Class for collecting data generated by a Mesa model.\n\n A DataCollector is instantiated with dictionaries of names of model- and\n agent-level variables to collect, associated with attribute names or\n functions which actually collect them. When the collect(...) method is\n called, it collects these attributes and executes these functions one by\n one and stores the results.\n\n \"\"\"\n\n model = None\n\n def __init__(self, model_reporters=None, agent_reporters=None, tables=None):\n \"\"\" Instantiate a DataCollector with lists of model and agent reporters.\n\n Both model_reporters and agent_reporters accept a dictionary mapping a\n variable name to either an attribute name, or a method.\n For example, if there was only one model-level reporter for number of\n agents, it might look like:\n {\"agent_count\": lambda m: m.schedule.get_agent_count() }\n If there was only one agent-level reporter (e.g. the agent's energy),\n it might look like this:\n {\"energy\": \"energy\"}\n or like this:\n {\"energy\": lambda a: a.energy}\n\n The tables arg accepts a dictionary mapping names of tables to lists of\n columns. 
For example, if we want to allow agents to write their age\n when they are destroyed (to keep track of lifespans), it might look\n like:\n {\"Lifespan\": [\"unique_id\", \"age\"]}\n\n Args:\n model_reporters: Dictionary of reporter names and attributes/funcs\n agent_reporters: Dictionary of reporter names and attributes/funcs.\n tables: Dictionary of table names to lists of column names.\n\n Notes:\n If you want to pickle your model you must not use lambda functions.\n If your model includes a large number of agents, you should *only*\n use attribute names for the agent reporter, it will be much faster.\n \"\"\"\n self.model_reporters = {}\n self.agent_reporters = {}\n\n self.model_vars = {}\n self._agent_records = {}\n self.tables = {}\n\n if model_reporters is not None:\n for name, reporter in model_reporters.items():\n self._new_model_reporter(name, reporter)\n\n if agent_reporters is not None:\n for name, reporter in agent_reporters.items():\n self._new_agent_reporter(name, reporter)\n\n if tables is not None:\n for name, columns in tables.items():\n self._new_table(name, columns)\n\n def _new_model_reporter(self, name, reporter):\n \"\"\" Add a new model-level reporter to collect.\n\n Args:\n name: Name of the model-level variable to collect.\n reporter: Attribute string, or function object that returns the\n variable when given a model instance.\n \"\"\"\n if type(reporter) is str:\n reporter = partial(self._getattr, reporter)\n self.model_reporters[name] = reporter\n self.model_vars[name] = []\n\n def _new_agent_reporter(self, name, reporter):\n \"\"\" Add a new agent-level reporter to collect.\n\n Args:\n name: Name of the agent-level variable to collect.\n reporter: Attribute string, or function object that returns the\n variable when given a model instance.\n\n \"\"\"\n if type(reporter) is str:\n attribute_name = reporter\n reporter = partial(self._getattr, reporter)\n reporter.attribute_name = attribute_name\n self.agent_reporters[name] = reporter\n\n def _new_table(self, table_name, table_columns):\n \"\"\" Add a new table that objects can write to.\n\n Args:\n table_name: Name of the new table.\n table_columns: List of columns to add to the table.\n\n \"\"\"\n new_table = {column: [] for column in table_columns}\n self.tables[table_name] = new_table\n\n def _record_agents(self, model):\n \"\"\" Record agents data in a mapping of functions and agents. \"\"\"\n rep_funcs = self.agent_reporters.values()\n if all([hasattr(rep, 'attribute_name') for rep in rep_funcs]):\n prefix = ['model.schedule.steps', 'unique_id']\n attributes = [func.attribute_name for func in rep_funcs]\n get_reports = attrgetter(*prefix + attributes)\n else:\n def get_reports(agent):\n prefix = (agent.model.schedule.steps, agent.unique_id)\n reports = tuple(rep(agent) for rep in rep_funcs)\n return prefix + reports\n agent_records = map(get_reports, model.schedule.agents)\n return agent_records\n\n def collect(self, model):\n \"\"\" Collect all the data for the given model object. 
\"\"\"\n if self.model_reporters:\n for var, reporter in self.model_reporters.items():\n self.model_vars[var].append(reporter(model))\n\n if self.agent_reporters:\n agent_records = self._record_agents(model)\n self._agent_records[model.schedule.steps] = list(agent_records)\n\n def add_table_row(self, table_name, row, ignore_missing=False):\n \"\"\" Add a row dictionary to a specific table.\n\n Args:\n table_name: Name of the table to append a row to.\n row: A dictionary of the form {column_name: value...}\n ignore_missing: If True, fill any missing columns with Nones;\n if False, throw an error if any columns are missing\n\n \"\"\"\n if table_name not in self.tables:\n raise Exception(\"Table does not exist.\")\n\n for column in self.tables[table_name]:\n if column in row:\n self.tables[table_name][column].append(row[column])\n elif ignore_missing:\n self.tables[table_name][column].append(None)\n else:\n raise Exception(\"Could not insert row with missing column\")\n\n @staticmethod\n def _getattr(name, object):\n \"\"\" Turn around arguments of getattr to make it partially callable.\"\"\"\n return getattr(object, name, None)\n\n def get_model_vars_dataframe(self):\n \"\"\" Create a pandas DataFrame from the model variables.\n\n The DataFrame has one column for each model variable, and the index is\n (implicitly) the model tick.\n\n \"\"\"\n return pd.DataFrame(self.model_vars)\n\n def get_agent_vars_dataframe(self):\n \"\"\" Create a pandas DataFrame from the agent variables.\n\n The DataFrame has one column for each variable, with two additional\n columns for tick and agent_id.\n\n \"\"\"\n all_records = itertools.chain.from_iterable(\n self._agent_records.values())\n rep_names = [rep_name for rep_name in self.agent_reporters]\n\n df = pd.DataFrame.from_records(\n data=all_records,\n columns=[\"Step\", \"AgentID\"] + rep_names,\n )\n df = df.set_index([\"Step\", \"AgentID\"])\n return df\n\n def get_table_dataframe(self, table_name):\n \"\"\" Create a pandas DataFrame from a particular table.\n\n Args:\n table_name: The name of the table to convert.\n\n \"\"\"\n if table_name not in self.tables:\n raise Exception(\"No such table.\")\n return pd.DataFrame(self.tables[table_name])\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.DataFrame"
]
] |
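The record above is Mesa's DataCollector module. As a quick illustration of the collection pattern its docstring describes, here is a minimal usage sketch; it assumes a classic Mesa release where Agent, Model and mesa.time.RandomActivation are importable, and the MoneyAgent/MoneyModel names are made up for the example.

# Hypothetical usage sketch of the DataCollector documented above.
# Assumes classic Mesa (Agent/Model/RandomActivation at these import paths).
from mesa import Agent, Model
from mesa.time import RandomActivation
from mesa.datacollection import DataCollector


class MoneyAgent(Agent):
    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.wealth = 1          # agent-level attribute to record

    def step(self):
        self.wealth += 1


class MoneyModel(Model):
    def __init__(self, n):
        super().__init__()
        self.schedule = RandomActivation(self)
        for i in range(n):
            self.schedule.add(MoneyAgent(i, self))
        # Attribute-name agent reporters are the fast path noted in the docstring.
        self.datacollector = DataCollector(
            model_reporters={"agent_count": lambda m: m.schedule.get_agent_count()},
            agent_reporters={"wealth": "wealth"},
        )

    def step(self):
        self.datacollector.collect(self)
        self.schedule.step()


model = MoneyModel(5)
for _ in range(3):
    model.step()
print(model.datacollector.get_agent_vars_dataframe().head())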
google-research/growneuron | [
"2ab6fe19f920b9f3b9cc9cf8ac39c8965967a5fe"
] | [
"growneuron/layers_test.py"
] | [
"# coding=utf-8\n# Copyright 2022 GradMax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for growneuron.layers.\"\"\"\nimport absl.testing.parameterized as parameterized\nimport growneuron.layers as glayers\nimport tensorflow as tf\n\n\nclass LayerTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.named_parameters(\n ('dense', tf.keras.layers.Dense(3), (3, 4)),\n ('batchnorm', tf.keras.layers.BatchNormalization(), (2, 4)),\n ('conv2d', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4))\n )\n def test_consistency(self, layer, input_shape):\n wrapped_layer = glayers.GrowLayer(layer)\n x = tf.random.uniform(input_shape)\n original_out = layer(x)\n new_out = wrapped_layer(x)\n self.assertAllEqual(original_out, new_out)\n\n @parameterized.named_parameters(\n ('dense', tf.keras.layers.Dense(3), (3, 4), 1),\n ('dense_5neuron', tf.keras.layers.Dense(3), (3, 4), 5),\n ('conv2d', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 1),\n ('conv2d_5neuron', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 5),\n )\n def test_add_neurons_incoming_zeros(self, layer, input_shape, n_new):\n wrapped_layer = glayers.GrowLayer(layer)\n x = tf.random.uniform(input_shape)\n original_out = wrapped_layer(x)\n old_output_shape = original_out.get_shape()\n n_neurons_old = old_output_shape[-1]\n wrapped_layer.add_neurons(n_new, new_weights='zeros', is_outgoing=False)\n new_out = wrapped_layer(x)\n # Check the output has the expected shape\n new_shape = old_output_shape[:-1] + [n_neurons_old+n_new]\n self.assertAllEqual(new_shape, new_out.get_shape())\n # Check the old neurons create same output\n self.assertAllClose(original_out, new_out[Ellipsis, :n_neurons_old])\n # Check the new neurons create zero output\n self.assertEqual(0, tf.math.count_nonzero(new_out[Ellipsis, n_neurons_old:]))\n new_weights, new_biases = wrapped_layer.get_weights()\n # Check the new weights are zero\n added_weights = new_weights[Ellipsis, n_neurons_old:]\n self.assertAllEqual(added_weights, tf.zeros_like(added_weights))\n # Check the new biases are zero\n added_biases = new_biases[n_neurons_old:]\n self.assertAllEqual(added_biases, tf.zeros_like(added_biases))\n\n @parameterized.named_parameters(\n ('dense', tf.keras.layers.Dense(3), (3, 4), 1),\n ('dense_5neuron', tf.keras.layers.Dense(3), (3, 4), 5),\n ('conv2d', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 1),\n ('conv2d_5neuron', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 5),\n )\n def test_add_neurons_outgoing_zeros(self, layer, input_shape, n_new):\n wrapped_layer = glayers.GrowLayer(layer)\n n_features = input_shape[-1]\n x = tf.random.uniform(input_shape)\n # New input after growing would have more features\n new_input_shape = input_shape[:-1] + (n_new,)\n new_x = tf.concat([x, tf.random.uniform(new_input_shape)], axis=-1)\n original_out = layer(x)\n old_weights, old_biases = wrapped_layer.get_weights()\n wrapped_layer.add_neurons(n_new, new_weights='zeros', is_outgoing=True)\n new_out = wrapped_layer(new_x)\n new_weights, new_biases = 
wrapped_layer.get_weights()\n print(new_weights, new_biases)\n # Output of the layer shouldn't change.\n self.assertAllClose(original_out, new_out)\n # Check biases are unchanged\n self.assertAllEqual(old_biases, new_biases)\n # Check the new weights are zero\n added_weights = new_weights[Ellipsis, n_features:, :]\n self.assertAllEqual(added_weights, tf.zeros_like(added_weights))\n # Check the old weights are same\n kept_weights = new_weights[Ellipsis, :n_features, :]\n self.assertAllEqual(old_weights, kept_weights)\n\n @parameterized.named_parameters(\n ('dense_kernel', 'dense', ('kernel',)),\n ('dense_bias', 'dense', ('bias',)),\n ('dense_activity', 'dense', ('activity',)),\n ('dense_all', 'dense', ('kernel', 'bias', 'activity')),\n ('conv2d_kernel', 'conv2d', ('kernel',)),\n ('conv2d_bias', 'conv2d', ('bias',)),\n ('conv2d_activity', 'conv2d', ('activity',)),\n ('conv2d_all', 'conv2d', ('kernel', 'bias', 'activity')),\n )\n def test_regularizer_incoming(self, layer_type, regularizer_types):\n reg_kwargs = {f'{r_type}_regularizer': tf.keras.regularizers.L2(0.1)\n for r_type in regularizer_types}\n print(reg_kwargs)\n if layer_type == 'dense':\n layer = tf.keras.layers.Dense(3, **reg_kwargs)\n input_shape = (3, 4)\n elif layer_type == 'conv2d':\n layer = tf.keras.layers.Conv2D(3, 3, **reg_kwargs)\n input_shape = (3, 5, 5, 4)\n else:\n raise ValueError('not supported')\n wrapped_layer = glayers.GrowLayer(layer)\n x = tf.random.uniform(input_shape)\n _ = wrapped_layer(x)\n old_losses = wrapped_layer.losses\n wrapped_layer.add_neurons(1, new_weights='zeros', is_outgoing=False)\n _ = wrapped_layer(x)\n new_losses = wrapped_layer.losses\n for old_loss, new_loss in zip(old_losses, new_losses):\n self.assertAllClose(old_loss, new_loss)\n\n @parameterized.named_parameters(\n ('dense_kernel', 'dense', ('kernel',)),\n ('dense_bias', 'dense', ('bias',)),\n ('dense_activity', 'dense', ('activity',)),\n ('dense_all', 'dense', ('kernel', 'bias', 'activity')),\n ('conv2d_kernel', 'conv2d', ('kernel',)),\n ('conv2d_bias', 'conv2d', ('bias',)),\n ('conv2d_activity', 'conv2d', ('activity',)),\n ('conv2d_all', 'conv2d', ('kernel', 'bias', 'activity')),\n ('bn_beta', 'bn', ('beta',)),\n )\n def test_regularizer_outgoing(self, layer_type, regularizer_types):\n reg_kwargs = {f'{r_type}_regularizer': tf.keras.regularizers.L2(0.1)\n for r_type in regularizer_types}\n print(reg_kwargs)\n if layer_type == 'dense':\n layer = tf.keras.layers.Dense(3, **reg_kwargs)\n input_shape = (3, 4)\n elif layer_type == 'conv2d':\n layer = tf.keras.layers.Conv2D(3, 3, **reg_kwargs)\n input_shape = (3, 5, 5, 4)\n elif layer_type == 'bn':\n layer = tf.keras.layers.BatchNormalization(**reg_kwargs)\n input_shape = (3, 4)\n else:\n raise ValueError('not supported')\n wrapped_layer = glayers.GrowLayer(layer)\n x = tf.random.uniform(input_shape)\n _ = wrapped_layer(x)\n old_losses = wrapped_layer.losses\n if layer_type == 'bn':\n wrapped_layer.add_neurons_identity(1)\n else:\n wrapped_layer.add_neurons(1, new_weights='zeros', is_outgoing=True)\n new_input_shape = input_shape[:-1] + (1,)\n new_x = tf.concat([x, tf.random.uniform(new_input_shape)], axis=-1)\n _ = wrapped_layer(new_x)\n new_losses = wrapped_layer.losses\n for old_loss, new_loss in zip(old_losses, new_losses):\n self.assertAllClose(old_loss, new_loss)\n\n @parameterized.named_parameters(\n ('2d_axis1', (4, 5), -1),\n ('3d_axis1', (3, 3, 1), -1),\n ('4d_axis1', (3, 3, 4, 5), -1),\n ('2d_axis2', (4, 5), -2),\n ('3d_axis2', (3, 3, 1), -2),\n ('4d_axis2', (3, 3, 4, 5), -2),\n 
)\n def test_norm_l2(self, shape, axis):\n tensor = tf.reshape(tf.range(tf.math.reduce_prod(shape),\n dtype=tf.float32), shape)\n calculated_norm = glayers.norm_l2(tensor, axis)\n if axis == -2:\n tensor = tf.einsum('...ij->...ji', tensor)\n # L2 norm should be 1 over axis 1\n flat_tensor = tf.reshape(tensor,\n [-1, tensor.shape[-1]])\n expected_norms = tf.norm(flat_tensor, axis=-2)\n self.assertAllClose(expected_norms, calculated_norm)\n pass\n\n @parameterized.named_parameters(\n ('2d_axis1', (4, 5), -1),\n ('3d_axis1', (3, 3, 1), -1),\n ('4d_axis1', (3, 3, 4, 5), -1),\n ('2d_axis2', (4, 5), -2),\n ('3d_axis2', (3, 3, 1), -2),\n ('4d_axis2', (3, 3, 4, 5), -2),\n )\n def test_normalize_l2(self, shape, axis):\n tensor = tf.reshape(tf.range(tf.math.reduce_prod(shape),\n dtype=tf.float32), shape)\n normalized_tensor = glayers.normalize_l2(tensor, axis)\n if axis == -2:\n normalized_tensor = tf.einsum('...ij->...ji', normalized_tensor)\n # L2 norm should be 1 over axis 1\n flat_tensor = tf.reshape(normalized_tensor,\n [-1, normalized_tensor.shape[-1]])\n norms = tf.norm(flat_tensor, axis=-2)\n self.assertAllClose(norms, tf.ones_like(norms))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.norm",
"tensorflow.math.reduce_prod",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.zeros_like",
"tensorflow.math.count_nonzero",
"tensorflow.keras.regularizers.L2",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.random.uniform",
"tensorflow.keras.layers.Conv2D",
"tensorflow.einsum",
"tensorflow.test.main"
]
] |
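The tests above exercise growneuron's GrowLayer wrapper. The sketch below is inferred purely from those tests (the growneuron package and this exact add_neurons signature are assumed to be available); it wraps a Dense layer and grows two zero-initialised incoming neurons.

import growneuron.layers as glayers
import tensorflow as tf

layer = tf.keras.layers.Dense(3)
wrapped = glayers.GrowLayer(layer)

x = tf.random.uniform((2, 4))
before = wrapped(x)                                   # shape (2, 3)

# Grow two incoming neurons with zero weights; per the tests, old outputs are preserved.
wrapped.add_neurons(2, new_weights='zeros', is_outgoing=False)
after = wrapped(x)                                    # shape (2, 5)

print(before.shape, after.shape)
print(tf.reduce_max(tf.abs(after[:, :3] - before)).numpy())  # close to 0.0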
mpeychev/disentangled-autoencoders | [
"2d1f18fe198486f29c74ba5606ffcadaff7055cf"
] | [
"src/base_autoencoder.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport draw_util\nimport os\n\nclass Autoencoder(object):\n\n def partial_fit(self, targets):\n assert (self.is_training)\n if not self.is_denoising:\n self.sess.run(self.train_op, feed_dict={self.input_layer: targets,\n self.batch_size: [len(targets)]})\n else:\n inputs = draw_util.add_noise(targets) if self.is_denoising == 1 else \\\n draw_util.erase(targets)\n self.sess.run(self.train_op, feed_dict={self.input_layer: inputs,\n self.target_layer: targets, self.batch_size: [len(targets)]})\n\n def calc_reconstruction_loss(self, targets):\n if len(targets) == 40000:\n A = self.calc_reconstruction_loss(targets[:20000])\n B = self.calc_reconstruction_loss(targets[20000:])\n return (A + B) / 2.0\n if not self.is_denoising:\n return self.sess.run(self.reconstruction_loss, feed_dict={self.input_layer: targets,\n self.batch_size: [len(targets)]})\n else:\n inputs = draw_util.add_noise(targets) if self.is_denoising == 1 else \\\n draw_util.erase(targets)\n return self.sess.run(self.reconstruction_loss, feed_dict={self.input_layer: inputs,\n self.target_layer: targets, self.batch_size: [len(targets)]})\n\n def calc_kl_divergence(self, inputs):\n if len(inputs) == 40000:\n A = self.calc_kl_divergence(inputs[:20000])\n B = self.calc_kl_divergence(inputs[20000:])\n return (A + B) / 2.0\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n return self.sess.run(self.kl_divergence, feed_dict={self.input_layer: inputs,\n self.batch_size: [len(inputs)]})\n\n def calc_cost(self, targets):\n if len(targets) == 40000:\n A = self.calc_cost(targets[:20000])\n B = self.calc_cost(targets[20000:])\n return (A + B) / 2.0\n if not self.is_denoising:\n return self.sess.run(self.cost, feed_dict={self.input_layer: targets,\n self.batch_size: [len(targets)]})\n else:\n inputs = draw_util.add_noise(targets) if self.is_denoising == 1 else \\\n draw_util.erase(targets)\n return self.sess.run(self.cost, feed_dict={self.input_layer: inputs,\n self.target_layer: targets, self.batch_size: [len(targets)]})\n\n def get_code_dimension(self):\n return self.code_dimension\n\n def get_beta(self):\n return self.beta\n\n def get_output_layer(self, inputs, ignore_noise=False):\n if not ignore_noise:\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n return self.sess.run(self.output_layer, feed_dict={self.input_layer: inputs,\n self.batch_size: [len(inputs)]})\n\n def get_output_layer_from_code(self, code):\n return self.sess.run(self.output_layer, feed_dict={self.code: code})\n\n def get_code(self, inputs, ignore_noise=False):\n if not ignore_noise:\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n return self.sess.run(self.code, feed_dict={self.input_layer: inputs,\n self.batch_size: [len(inputs)]})\n\n def get_code_mean(self, inputs):\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n return self.sess.run(self.code_mean, feed_dict={self.input_layer: inputs,\n self.batch_size: [len(inputs)]})\n\n def get_code_variance(self, inputs):\n if self.is_denoising == 1:\n inputs = draw_util.add_noise(inputs)\n elif self.is_denoising == 2:\n inputs = draw_util.erase(inputs)\n code_log_sigma_sq = self.sess.run(self.code_log_sigma_sq, feed_dict = {\n self.input_layer: 
inputs, self.batch_size: [len(inputs)]})\n return np.exp(code_log_sigma_sq)\n\n def calc_reconstruction_accuracy(self, targets):\n inputs = targets if not self.is_denoising else (draw_util.add_noise(targets) if \\\n self.is_denoising == 1 else draw_util.erase(targets))\n predicted_images = self.get_output_layer(inputs)\n return np.mean(np.sqrt(np.sum(np.square(predicted_images - targets), axis=1)))\n\n def save_model(self):\n self.saver.save(self.sess, os.path.join(self.logs_dir, 'model'))\n\n def restore_model(self):\n self.saver.restore(self.sess, tf.train.latest_checkpoint(self.logs_dir))\n\n def close_session(self):\n self.sess.close()\n tf.reset_default_graph()\n"
] | [
[
"tensorflow.train.latest_checkpoint",
"numpy.square",
"tensorflow.reset_default_graph",
"numpy.exp"
]
] |
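The base Autoencoder record above reports reconstruction accuracy as the mean per-example Euclidean distance. Below is a small NumPy-only restatement of that metric on synthetic arrays (the shapes are placeholders).

import numpy as np

rng = np.random.default_rng(0)
targets = rng.random((8, 784))
predicted = targets + 0.01 * rng.standard_normal((8, 784))

# Same formula as calc_reconstruction_accuracy() in the record above.
accuracy = np.mean(np.sqrt(np.sum(np.square(predicted - targets), axis=1)))
print(accuracy)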
lelloman/python-languagedetector | [
"2abfa582b2f2100399e6cf8f92bc65d68ba20dad"
] | [
"to_tensorflow_lite.py"
] | [
"#!/usr/bin/python\nfrom __future__ import print_function\nfrom common import *\nimport tensorflow.contrib.lite as tflite\n\nimport keras\n\na = keras.models.Sequential()\n\nmodel = load_model()\n\nfull_model_file_name = 'full_model.h5'\nmodel.save(full_model_file_name)\nconverter = tflite.TFLiteConverter.from_keras_model_file(full_model_file_name)\ntflite_model = converter.convert()\nopen(\"converted_model.tflite\", \"wb\").write(tflite_model)"
] | [
[
"tensorflow.contrib.lite.TFLiteConverter.from_keras_model_file"
]
] |
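The record above converts a saved Keras model with the TF1-era tensorflow.contrib.lite converter. For comparison, a hedged sketch of the same conversion under the TensorFlow 2.x API follows; the tiny Sequential model here stands in for the loaded language-detector model.

import tensorflow as tf

# Stand-in model; in the record this is loaded from full_model.h5.
model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])

converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()

with open("converted_model.tflite", "wb") as f:
    f.write(tflite_model)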
Con-Mi/lambda-packs | [
"b23a8464abdd88050b83310e1d0e99c54dac28ab"
] | [
"Skimage_numpy/source/scipy/special/__init__.py"
] | [
"\"\"\"\n========================================\nSpecial functions (:mod:`scipy.special`)\n========================================\n\n.. module:: scipy.special\n\nNearly all of the functions below are universal functions and follow\nbroadcasting and automatic array-looping rules. Exceptions are noted.\n\nError handling\n==============\n\nErrors are handled by returning nans, or other appropriate values.\nSome of the special function routines will emit warnings when an error\noccurs. By default this is disabled. To enable such messages use\n``errprint(1)``, and to disable such messages use ``errprint(0)``.\n\nExample:\n\n >>> print scipy.special.bdtr(-1,10,0.3)\n >>> scipy.special.errprint(1)\n >>> print scipy.special.bdtr(-1,10,0.3)\n\n.. autosummary::\n :toctree: generated/\n\n errprint\n SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``\n\nAvailable functions\n===================\n\nAiry functions\n--------------\n\n.. autosummary::\n :toctree: generated/\n\n airy -- Airy functions and their derivatives.\n airye -- Exponentially scaled Airy functions\n ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)\n bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)\n itairy --\n \n\nElliptic Functions and Integrals\n--------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n ellipj -- Jacobian elliptic functions\n ellipk -- Complete elliptic integral of the first kind.\n ellipkm1 -- ellipkm1(x) == ellipk(1 - x)\n ellipkinc -- Incomplete elliptic integral of the first kind.\n ellipe -- Complete elliptic integral of the second kind.\n ellipeinc -- Incomplete elliptic integral of the second kind.\n\nBessel Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n jv -- Bessel function of real-valued order and complex argument.\n jn -- Alias for jv\n jve -- Exponentially scaled Bessel function.\n yn -- Bessel function of second kind (integer order).\n yv -- Bessel function of the second kind (real-valued order).\n yve -- Exponentially scaled Bessel function of the second kind.\n kn -- Modified Bessel function of the second kind (integer order).\n kv -- Modified Bessel function of the second kind (real order).\n kve -- Exponentially scaled modified Bessel function of the second kind.\n iv -- Modified Bessel function.\n ive -- Exponentially scaled modified Bessel function.\n hankel1 -- Hankel function of the first kind.\n hankel1e -- Exponentially scaled Hankel function of the first kind.\n hankel2 -- Hankel function of the second kind.\n hankel2e -- Exponentially scaled Hankel function of the second kind.\n\nThe following is not an universal function:\n\n.. autosummary::\n :toctree: generated/\n\n lmbda -- [+]Sequence of lambda functions with arbitrary order v.\n\nZeros of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.\n jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.\n jn_zeros -- [+]Zeros of Jn(x)\n jnp_zeros -- [+]Zeros of Jn'(x)\n yn_zeros -- [+]Zeros of Yn(x)\n ynp_zeros -- [+]Zeros of Yn'(x)\n y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)\n y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)\n y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')\n\nFaster versions of common Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. 
autosummary::\n :toctree: generated/\n\n j0 -- Bessel function of order 0.\n j1 -- Bessel function of order 1.\n y0 -- Bessel function of second kind of order 0.\n y1 -- Bessel function of second kind of order 1.\n i0 -- Modified Bessel function of order 0.\n i0e -- Exponentially scaled modified Bessel function of order 0.\n i1 -- Modified Bessel function of order 1.\n i1e -- Exponentially scaled modified Bessel function of order 1.\n k0 -- Modified Bessel function of the second kind of order 0.\n k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.\n k1 -- Modified Bessel function of the second kind of order 1.\n k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.\n\nIntegrals of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n itj0y0 -- Basic integrals of j0 and y0 from 0 to x.\n it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.\n iti0k0 -- Basic integrals of i0 and k0 from 0 to x.\n it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.\n besselpoly -- Integral of a Bessel function: Jv(2* a* x) * x[+]lambda from x=0 to 1.\n\nDerivatives of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n jvp -- Nth derivative of Jv(v,z)\n yvp -- Nth derivative of Yv(v,z)\n kvp -- Nth derivative of Kv(v,z)\n ivp -- Nth derivative of Iv(v,z)\n h1vp -- Nth derivative of H1v(v,z)\n h2vp -- Nth derivative of H2v(v,z)\n\nSpherical Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n spherical_jn -- Spherical Bessel function of the first kind, jn(z)\n spherical_yn -- Spherical Bessel function of the second kind, yn(z)\n spherical_in -- Modified spherical Bessel function of the first kind, in(z)\n spherical_kn -- Modified spherical Bessel function of the second kind, kn(z)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)\n sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)\n sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)\n sph_in -- [+]Sequence of spherical Bessel functions, in(z)\n sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)\n sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)\n\nRiccati-Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.\n riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.\n\nStruve Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n struve -- Struve function --- Hv(x)\n modstruve -- Modified Struve function --- Lv(x)\n itstruve0 -- Integral of H0(t) from 0 to x\n it2struve0 -- Integral of H0(t)/t from x to Inf.\n itmodstruve0 -- Integral of L0(t) from 0 to x.\n\n\nRaw Statistical Functions\n-------------------------\n\n.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.\n\n.. 
autosummary::\n :toctree: generated/\n\n bdtr -- Sum of terms 0 through k of the binomial pdf.\n bdtrc -- Sum of terms k+1 through n of the binomial pdf.\n bdtri -- Inverse of bdtr\n bdtrik --\n bdtrin --\n btdtr -- Integral from 0 to x of beta pdf.\n btdtri -- Quantiles of beta distribution\n btdtria --\n btdtrib --\n fdtr -- Integral from 0 to x of F pdf.\n fdtrc -- Integral from x to infinity under F pdf.\n fdtri -- Inverse of fdtrc\n fdtridfd -- \n gdtr -- Integral from 0 to x of gamma pdf.\n gdtrc -- Integral from x to infinity under gamma pdf.\n gdtria -- Inverse with respect to `a` of gdtr.\n gdtrib -- Inverse with respect to `b` of gdtr.\n gdtrix -- Inverse with respect to `x` of gdtr.\n nbdtr -- Sum of terms 0 through k of the negative binomial pdf.\n nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.\n nbdtri -- Inverse of nbdtr\n nbdtrik --\n nbdtrin --\n ncfdtr -- CDF of non-central t distribution.\n ncfdtridfd -- Find degrees of freedom (denominator) of noncentral F distribution.\n ncfdtridfn -- Find degrees of freedom (numerator) of noncentral F distribution.\n ncfdtri -- Inverse CDF of noncentral F distribution.\n ncfdtrinc -- Find noncentrality parameter of noncentral F distribution.\n nctdtr -- CDF of noncentral t distribution.\n nctdtridf -- Find degrees of freedom of noncentral t distribution.\n nctdtrit -- Inverse CDF of noncentral t distribution.\n nctdtrinc -- Find noncentrality parameter of noncentral t distribution.\n nrdtrimn -- Find mean of normal distribution from cdf and std.\n nrdtrisd -- Find std of normal distribution from cdf and mean.\n pdtr -- Sum of terms 0 through k of the Poisson pdf.\n pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.\n pdtri -- Inverse of pdtr\n pdtrik --\n stdtr -- Integral from -infinity to t of the Student-t pdf.\n stdtridf --\n stdtrit --\n chdtr -- Integral from 0 to x of the Chi-square pdf.\n chdtrc -- Integral from x to infnity of Chi-square pdf.\n chdtri -- Inverse of chdtrc.\n chdtriv --\n ndtr -- Integral from -infinity to x of standard normal pdf\n log_ndtr -- Logarithm of integral from -infinity to x of standard normal pdf\n ndtri -- Inverse of ndtr (quantiles)\n chndtr --\n chndtridf --\n chndtrinc --\n chndtrix --\n smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)\n smirnovi -- Inverse of smirnov.\n kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.\n kolmogi -- Inverse of kolmogorov\n tklmbda -- Tukey-Lambda CDF\n logit --\n expit --\n boxcox -- Compute the Box-Cox transformation.\n boxcox1p -- Compute the Box-Cox transformation of 1 + x.\n inv_boxcox -- Compute the inverse of the Box-Cox tranformation.\n inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + x.\n\n\nInformation Theory Functions\n----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n entr -- entr(x) = -x*log(x)\n rel_entr -- rel_entr(x, y) = x*log(x/y)\n kl_div -- kl_div(x, y) = x*log(x/y) - x + y\n huber -- Huber loss function.\n pseudo_huber -- Pseudo-Huber loss function.\n\n\nGamma and Related Functions\n---------------------------\n\n.. 
autosummary::\n :toctree: generated/\n\n gamma -- Gamma function.\n gammaln -- Log of the absolute value of the Gamma function.\n loggamma -- Principal branch of the logarithm of the Gamma function.\n gammasgn -- Sign of the gamma function.\n gammainc -- Incomplete gamma integral.\n gammaincinv -- Inverse of gammainc.\n gammaincc -- Complemented incomplete gamma integral.\n gammainccinv -- Inverse of gammaincc.\n beta -- Beta function.\n betaln -- Log of the absolute value of the beta function.\n betainc -- Incomplete beta integral.\n betaincinv -- Inverse of betainc.\n psi -- Logarithmic derivative of the gamma function.\n rgamma -- One divided by the gamma function.\n polygamma -- Nth derivative of psi function.\n multigammaln -- Log of the multivariate gamma.\n digamma -- Digamma function (derivative of the logarithm of gamma).\n poch -- The Pochhammer symbol (rising factorial).\n\n\nError Function and Fresnel Integrals\n------------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n erf -- Error function.\n erfc -- Complemented error function (1- erf(x))\n erfcx -- Scaled complemented error function exp(x**2)*erfc(x)\n erfi -- Imaginary error function, -i erf(i x)\n erfinv -- Inverse of error function\n erfcinv -- Inverse of erfc\n wofz -- Fadeeva function.\n dawsn -- Dawson's integral.\n fresnel -- Fresnel sine and cosine integrals.\n fresnel_zeros -- Complex zeros of both Fresnel integrals\n modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)\n modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n erf_zeros -- [+]Complex zeros of erf(z)\n fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals\n fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals\n\nLegendre Functions\n------------------\n\n.. autosummary::\n :toctree: generated/\n\n lpmv -- Associated Legendre Function of arbitrary non-negative degree v.\n sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n clpmn -- [+]Associated Legendre Function of the first kind for complex arguments.\n lpn -- [+]Legendre Functions (polynomials) of the first kind\n lqn -- [+]Legendre Functions of the second kind.\n lpmn -- [+]Associated Legendre Function of the first kind for real arguments.\n lqmn -- [+]Associated Legendre Function of the second kind.\n\nEllipsoidal Harmonics\n---------------------\n\n.. autosummary::\n :toctree: generated/\n\n ellip_harm -- Ellipsoidal harmonic E\n ellip_harm_2 -- Ellipsoidal harmonic F\n ellip_normal -- Ellipsoidal normalization constant\n\nOrthogonal polynomials\n----------------------\n\nThe following functions evaluate values of orthogonal polynomials:\n\n.. autosummary::\n :toctree: generated/\n\n assoc_laguerre\n eval_legendre\n eval_chebyt\n eval_chebyu\n eval_chebyc\n eval_chebys\n eval_jacobi\n eval_laguerre\n eval_genlaguerre\n eval_hermite\n eval_hermitenorm\n eval_gegenbauer\n eval_sh_legendre\n eval_sh_chebyt\n eval_sh_chebyu\n eval_sh_jacobi\n\nThe functions below, in turn, return the polynomial coefficients in\n:class:`~.orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`.\nThe :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns\nthe roots, weights, and total weights for the appropriate form of Gaussian\nquadrature. 
These are returned in an ``n x 3`` array with roots in the first\ncolumn, weights in the second column, and total weights in the final column.\nNote that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing\narithmetic, and lose information of the original orthogonal polynomial.\n\n.. autosummary::\n :toctree: generated/\n\n legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).\n chebyt -- [+]Chebyshev polynomial T_n(x)\n chebyu -- [+]Chebyshev polynomial U_n(x)\n chebyc -- [+]Chebyshev polynomial C_n(x)\n chebys -- [+]Chebyshev polynomial S_n(x)\n jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)\n laguerre -- [+]Laguerre polynomial, L_n(x)\n genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)\n hermite -- [+]Hermite polynomial H_n(x)\n hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)\n gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)\n sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)\n sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)\n sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)\n sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)\n\n.. warning::\n\n Computing values of high-order polynomials (around ``order > 20``) using\n polynomial coefficients is numerically unstable. To evaluate polynomial\n values, the ``eval_*`` functions should be used instead.\n\nRoots and weights for orthogonal polynomials\n\n.. autosummary::\n :toctree: generated/\n\n c_roots\n cg_roots\n h_roots\n he_roots\n j_roots\n js_roots\n l_roots\n la_roots\n p_roots\n ps_roots\n s_roots\n t_roots\n ts_roots\n u_roots\n us_roots\n\n\nHypergeometric Functions\n------------------------\n\n.. autosummary::\n :toctree: generated/\n\n hyp2f1 -- Gauss hypergeometric function (2F1)\n hyp1f1 -- Confluent hypergeometric function (1F1)\n hyperu -- Confluent hypergeometric function (U)\n hyp0f1 -- Confluent hypergeometric limit function (0F1)\n hyp2f0 -- Hypergeometric function (2F0)\n hyp1f2 -- Hypergeometric function (1F2)\n hyp3f0 -- Hypergeometric function (3F0)\n\n\nParabolic Cylinder Functions\n----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n pbdv -- Parabolic cylinder function Dv(x) and derivative.\n pbvv -- Parabolic cylinder function Vv(x) and derivative.\n pbwa -- Parabolic cylinder function W(a,x) and derivative.\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)\n pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)\n pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z\n\nMathieu and Related Functions\n-----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n mathieu_a -- Characteristic values for even solution (ce_m)\n mathieu_b -- Characteristic values for odd solution (se_m)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n mathieu_even_coef -- [+]sequence of expansion coefficients for even solution\n mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution\n\nThe following return both function and first derivative:\n\n.. 
autosummary::\n :toctree: generated/\n\n mathieu_cem -- Even Mathieu function\n mathieu_sem -- Odd Mathieu function\n mathieu_modcem1 -- Even modified Mathieu function of the first kind\n mathieu_modcem2 -- Even modified Mathieu function of the second kind\n mathieu_modsem1 -- Odd modified Mathieu function of the first kind\n mathieu_modsem2 -- Odd modified Mathieu function of the second kind\n\nSpheroidal Wave Functions\n-------------------------\n\n.. autosummary::\n :toctree: generated/\n\n pro_ang1 -- Prolate spheroidal angular function of the first kind\n pro_rad1 -- Prolate spheroidal radial function of the first kind\n pro_rad2 -- Prolate spheroidal radial function of the second kind\n obl_ang1 -- Oblate spheroidal angular function of the first kind\n obl_rad1 -- Oblate spheroidal radial function of the first kind\n obl_rad2 -- Oblate spheroidal radial function of the second kind\n pro_cv -- Compute characteristic value for prolate functions\n obl_cv -- Compute characteristic value for oblate functions\n pro_cv_seq -- Compute sequence of prolate characteristic values\n obl_cv_seq -- Compute sequence of oblate characteristic values\n\nThe following functions require pre-computed characteristic value:\n\n.. autosummary::\n :toctree: generated/\n\n pro_ang1_cv -- Prolate spheroidal angular function of the first kind\n pro_rad1_cv -- Prolate spheroidal radial function of the first kind\n pro_rad2_cv -- Prolate spheroidal radial function of the second kind\n obl_ang1_cv -- Oblate spheroidal angular function of the first kind\n obl_rad1_cv -- Oblate spheroidal radial function of the first kind\n obl_rad2_cv -- Oblate spheroidal radial function of the second kind\n\nKelvin Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n kelvin -- All Kelvin functions (order 0) and derivatives.\n kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives\n ber -- Kelvin function ber x\n bei -- Kelvin function bei x\n berp -- Derivative of Kelvin function ber x\n beip -- Derivative of Kelvin function bei x\n ker -- Kelvin function ker x\n kei -- Kelvin function kei x\n kerp -- Derivative of Kelvin function ker x\n keip -- Derivative of Kelvin function kei x\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n ber_zeros -- [+]Zeros of Kelvin function bei x\n bei_zeros -- [+]Zeros of Kelvin function ber x\n berp_zeros -- [+]Zeros of derivative of Kelvin function ber x\n beip_zeros -- [+]Zeros of derivative of Kelvin function bei x\n ker_zeros -- [+]Zeros of Kelvin function kei x\n kei_zeros -- [+]Zeros of Kelvin function ker x\n kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x\n keip_zeros -- [+]Zeros of derivative of Kelvin function kei x\n\nCombinatorics\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n comb -- [+]Combinations of N things taken k at a time, \"N choose k\"\n perm -- [+]Permutations of N things taken k at a time, \"k-permutations of N\"\n\nOther Special Functions\n-----------------------\n\n.. autosummary::\n :toctree: generated/\n\n agm -- Arithmetic-Geometric Mean\n bernoulli -- Bernoulli numbers\n binom -- Binomial coefficient.\n diric -- Dirichlet function (periodic sinc)\n euler -- Euler numbers\n expn -- Exponential integral.\n exp1 -- Exponential integral of order 1 (for complex argument)\n expi -- Another exponential integral -- Ei(x)\n factorial -- The factorial function, n! = special.gamma(n+1)\n factorial2 -- Double factorial, (n!)!\n factorialk -- [+](...((n!)!)!...)! 
where there are k '!'\n shichi -- Hyperbolic sine and cosine integrals.\n sici -- Integral of the sinc and \"cosinc\" functions.\n spence -- Spence's function, also known as the dilogarithm.\n lambertw -- Lambert W function\n zeta -- Riemann zeta function of two arguments.\n zetac -- Standard Riemann zeta function minus 1.\n\nConvenience Functions\n---------------------\n\n.. autosummary::\n :toctree: generated/\n\n cbrt -- Cube root.\n exp10 -- 10 raised to the x power.\n exp2 -- 2 raised to the x power.\n radian -- radian angle given degrees, minutes, and seconds.\n cosdg -- cosine of the angle given in degrees.\n sindg -- sine of the angle given in degrees.\n tandg -- tangent of the angle given in degrees.\n cotdg -- cotangent of the angle given in degrees.\n log1p -- log(1+x)\n expm1 -- exp(x)-1\n cosm1 -- cos(x)-1\n round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.\n xlogy -- x*log(y)\n xlog1py -- x*log1p(y)\n exprel -- (exp(x)-1)/x\n sinc -- sin(x)/x\n\n.. [+] in the description indicates a function which is not a universal\n.. function and does not follow broadcasting and automatic\n.. array-looping rules.\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom ._ufuncs import *\n\nfrom .basic import *\nfrom . import specfun\nfrom . import orthogonal\nfrom .orthogonal import *\nfrom .spfun_stats import multigammaln\nfrom ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal\nfrom .lambertw import lambertw\nfrom ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in,\n spherical_kn)\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n\nfrom numpy.dual import register_func\nregister_func('i0',i0)\ndel register_func\n\nfrom numpy.testing import Tester\ntest = Tester().test\n"
] | [
[
"numpy.dual.register_func",
"numpy.testing.Tester"
]
] |
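A few of the universal functions catalogued in the scipy.special docstring above, evaluated on small inputs (SciPy required):

import numpy as np
from scipy import special

print(special.jv(0, np.array([0.0, 1.0, 2.0])))  # Bessel function of the first kind, order 0
print(special.gamma(5))                          # gamma(5) = 4! = 24.0
print(special.erf(1.0))                          # error function, ~0.8427
print(special.ellipk(0.5))                       # complete elliptic integral of the first kind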
desikan95/ludwig | [
"c6b5594db98b6eaadc7bcd32e38983fe3cb4c3f2"
] | [
"tests/integration_tests/test_combiners.py"
] | [
"import logging\n\nimport pytest\nimport tensorflow as tf\n\nfrom ludwig.combiners.combiners import (\n ConcatCombiner,\n SequenceConcatCombiner,\n SequenceCombiner,\n ComparatorCombiner,\n sequence_encoder_registry,\n)\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nlogging.getLogger(\"ludwig\").setLevel(logging.INFO)\n\nBATCH_SIZE = 16\nSEQ_SIZE = 12\nHIDDEN_SIZE = 128\nOTHER_HIDDEN_SIZE = 32\nFC_SIZE = 64\nBASE_FC_SIZE = 256\n\n\n# set up simulated encoder outputs\[email protected]\ndef encoder_outputs():\n # generates simulated encoder outputs dictionary:\n # feature_1: shape [b, h1] tensor\n # feature_2: shape [b, h2] tensor\n # feature_3: shape [b, s, h1] tensor\n # feature_4: shape [b, sh, h2] tensor\n\n encoder_outputs = {}\n shapes_list = [\n [BATCH_SIZE, HIDDEN_SIZE],\n [BATCH_SIZE, OTHER_HIDDEN_SIZE],\n [BATCH_SIZE, SEQ_SIZE, HIDDEN_SIZE],\n [BATCH_SIZE, SEQ_SIZE, OTHER_HIDDEN_SIZE],\n ]\n feature_names = [\"feature_\" + str(i + 1) for i in range(len(shapes_list))]\n\n for feature_name, batch_shape in zip(feature_names, shapes_list):\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(batch_shape, dtype=tf.float32)\n }\n if len(batch_shape) > 2:\n encoder_outputs[feature_name][\n \"encoder_output_state\"] = tf.random.normal(\n [batch_shape[0], batch_shape[2]], dtype=tf.float32\n )\n\n return encoder_outputs\n\n\n# setup encoder outputs for ComparatorCombiner\[email protected]\ndef encoder_comparator_outputs():\n # generates simulated encoder outputs dictionary:\n # feature_1: shape [b, h1] tensor\n # feature_2: shape [b, h2] tensor\n # feature_3: shape [b, s, h1] tensor\n # feature_4: shape [b, sh, h2] tensor\n\n encoder_outputs = {}\n shapes_list = [\n [BATCH_SIZE, HIDDEN_SIZE],\n [BATCH_SIZE, OTHER_HIDDEN_SIZE],\n [BATCH_SIZE, SEQ_SIZE, HIDDEN_SIZE],\n [BATCH_SIZE, SEQ_SIZE, OTHER_HIDDEN_SIZE],\n ]\n text_feature_names = [\"text_feature_\" + str(i + 1) for i in\n range(len(shapes_list))]\n image_feature_names = [\n \"image_feature_\" + str(i + 1) for i in range(len(shapes_list))\n ]\n for i, (feature_name, batch_shape) in enumerate(\n zip(text_feature_names, shapes_list)\n ):\n # is there a better way to do this?\n if i == 0 or i == 3:\n dot_product_shape = [batch_shape[0], BASE_FC_SIZE]\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(dot_product_shape,\n dtype=tf.float32)\n }\n else:\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(batch_shape,\n dtype=tf.float32)\n }\n\n for i, (feature_name, batch_shape) in enumerate(\n zip(image_feature_names, shapes_list)\n ):\n if i == 0 or i == 3:\n dot_product_shape = [batch_shape[0], BASE_FC_SIZE]\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(dot_product_shape,\n dtype=tf.float32)\n }\n else:\n encoder_outputs[feature_name] = {\n \"encoder_output\": tf.random.normal(batch_shape,\n dtype=tf.float32)\n }\n\n return encoder_outputs\n\n\n# test for simple concatenation combiner\[email protected](\"fc_layer\",\n [None, [{\"fc_size\": 64}, {\"fc_size\": 64}]])\ndef test_concat_combiner(encoder_outputs, fc_layer):\n # clean out unneeded encoder outputs\n del encoder_outputs[\"feature_3\"]\n del encoder_outputs[\"feature_4\"]\n\n # setup combiner to test\n combiner = ConcatCombiner(fc_layers=fc_layer)\n\n # concatenate encoder outputs\n results = combiner(encoder_outputs)\n\n # required key present\n assert \"combiner_output\" in results\n\n # confirm correct output shapes\n if fc_layer:\n assert 
results[\"combiner_output\"].shape.as_list() == [BATCH_SIZE,\n FC_SIZE]\n else:\n # calculate expected hidden size for concatenated tensors\n hidden_size = 0\n for k in encoder_outputs:\n hidden_size += encoder_outputs[k][\"encoder_output\"].shape[1]\n\n assert results[\"combiner_output\"].shape.as_list() == [BATCH_SIZE,\n hidden_size]\n\n\n# test for sequence concatenation combiner\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"main_sequence_feature\", [None, \"feature_3\"])\ndef test_sequence_concat_combiner(\n encoder_outputs, main_sequence_feature, reduce_output\n):\n combiner = SequenceConcatCombiner(\n main_sequence_feature=main_sequence_feature,\n reduce_output=reduce_output\n )\n\n # calculate expected hidden size for concatenated tensors\n hidden_size = 0\n for k in encoder_outputs:\n hidden_size += encoder_outputs[k][\"encoder_output\"].shape[-1]\n\n # concatenate encoder outputs\n results = combiner(encoder_outputs)\n\n # required key present\n assert \"combiner_output\" in results\n\n # confirm correct shape\n if reduce_output is None:\n assert results[\"combiner_output\"].shape.as_list() == [\n BATCH_SIZE,\n SEQ_SIZE,\n hidden_size,\n ]\n else:\n assert results[\"combiner_output\"].shape.as_list() == [BATCH_SIZE,\n hidden_size]\n\n\n# test for sequence combiner\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"encoder\", sequence_encoder_registry)\[email protected](\"main_sequence_feature\", [None, \"feature_3\"])\ndef test_sequence_combiner(\n encoder_outputs, main_sequence_feature, encoder, reduce_output\n):\n combiner = SequenceCombiner(\n main_sequence_feature=main_sequence_feature,\n encoder=encoder,\n reduce_output=reduce_output,\n )\n\n # calculate expected hidden size for concatenated tensors\n hidden_size = 0\n for k in encoder_outputs:\n hidden_size += encoder_outputs[k][\"encoder_output\"].shape[-1]\n\n # concatenate encoder outputs\n results = combiner(encoder_outputs)\n\n # required key present\n assert \"combiner_output\" in results\n\n combiner_shape = results[\"combiner_output\"].shape\n # test for correct dimension\n if reduce_output:\n assert len(combiner_shape) == 2\n else:\n assert len(combiner_shape) == 3\n\n # Shape test assumes on Ludwig sequence encoder defaults\n # parallel encoders: # layers = 4, fc_size=256\n # non-parallel encoders: fc_size=256\n # if defaults change, then this test has to be updated\n default_layer = 4\n default_fc_size = 256\n\n if \"parallel\" in encoder:\n combiner_shape[-1] == default_layer * default_fc_size\n else:\n combiner_shape[-1] == default_fc_size\n\n\[email protected](\"fc_layer\",\n [None, [{\"fc_size\": 64}, {\"fc_size\": 64}]])\[email protected](\"entity_1\", [[\"text_feature_1\", \"text_feature_2\"]])\[email protected](\"entity_2\", [[\"image_feature_1\", \"image_feature_2\"]])\ndef test_comparator_combiner(encoder_comparator_outputs, fc_layer, entity_1,\n entity_2):\n # clean out unneeded encoder outputs since we only have 2 layers\n del encoder_comparator_outputs[\"text_feature_3\"]\n del encoder_comparator_outputs[\"image_feature_3\"]\n del encoder_comparator_outputs[\"text_feature_4\"]\n del encoder_comparator_outputs[\"image_feature_4\"]\n\n # setup combiner to test set to 256 for case when none as it's the default size\n fc_size = fc_layer[0][\"fc_size\"] if fc_layer else 256\n combiner = ComparatorCombiner(\n entity_1, entity_2, fc_layers=fc_layer, fc_size=fc_size\n )\n\n # concatenate encoder outputs\n results = combiner(encoder_comparator_outputs)\n\n # 
required key present\n assert \"combiner_output\" in results\n\n # confirm correct output shapes\n # concat on axis=1\n # because of dot products, 2 of the shapes added will be the fc_size\n # other 2 will be of shape BATCH_SIZE\n # this assumes dimensionality = 2\n size = BATCH_SIZE * 2 + fc_size * 2\n assert results[\"combiner_output\"].shape.as_list() == [BATCH_SIZE, size]\n"
] | [
[
"tensorflow.random.normal"
]
] |
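The combiner tests above drive Ludwig's ConcatCombiner with simulated encoder outputs. The sketch below reproduces that call pattern outside pytest; it is inferred from the tests and assumes the same Ludwig/TensorFlow versions the test file targets.

import tensorflow as tf
from ludwig.combiners.combiners import ConcatCombiner

encoder_outputs = {
    "feature_1": {"encoder_output": tf.random.normal([16, 128])},
    "feature_2": {"encoder_output": tf.random.normal([16, 32])},
}

combiner = ConcatCombiner(fc_layers=None)
result = combiner(encoder_outputs)
print(result["combiner_output"].shape)  # (16, 160): hidden sizes concatenated when no fc layers are set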
taijihagino/tensorflow-hangul-recognition | [
"8c39444ea8accd8b3d935f277e34fa2797564114"
] | [
"tools/classify-hangul.py"
] | [
"#!/usr/bin/env python\n\nimport argparse\nimport io\nimport os\nimport sys\n\nimport tensorflow as tf\n\nSCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Default paths.\nDEFAULT_LABEL_FILE = os.path.join(\n SCRIPT_PATH, '../labels/2350-common-hangul.txt'\n)\nDEFAULT_GRAPH_FILE = os.path.join(\n SCRIPT_PATH, '../saved-model/optimized_hangul_tensorflow.pb'\n)\n\n\ndef read_image(file):\n \"\"\"Read an image file and convert it into a 1-D floating point array.\"\"\"\n file_content = tf.read_file(file)\n image = tf.image.decode_jpeg(file_content, channels=1)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image = tf.reshape(image, (1, 64*64))\n return image\n\n\ndef classify(args):\n \"\"\"Classify a character.\n\n This method will import the saved model from the given graph file, and will\n pass in the given image pixels as input for the classification. The top\n five predictions will be printed.\n \"\"\"\n labels = io.open(args.label_file,\n 'r', encoding='utf-8').read().splitlines()\n\n if not os.path.isfile(args.image):\n print('Error: Image %s not found.' % args.image)\n sys.exit(1)\n\n # Load graph and parse file.\n with tf.gfile.GFile(args.graph_file, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(\n graph_def,\n input_map=None,\n return_elements=None,\n name='hangul-model',\n producer_op_list=None\n )\n\n # Get relevant nodes.\n x = graph.get_tensor_by_name('hangul-model/input:0')\n y = graph.get_tensor_by_name('hangul-model/output:0')\n keep_prob = graph.get_tensor_by_name('hangul-model/keep_prob:0')\n\n image = read_image(args.image)\n sess = tf.InteractiveSession()\n image_array = sess.run(image)\n sess.close()\n with tf.Session(graph=graph) as graph_sess:\n predictions = graph_sess.run(y, feed_dict={x: image_array,\n keep_prob: 1.0})\n prediction = predictions[0]\n\n # Get the indices that would sort the array, then only get the indices that\n # correspond to the top 5 predictions.\n sorted_indices = prediction.argsort()[::-1][:5]\n for index in sorted_indices:\n label = labels[index]\n confidence = prediction[index]\n print('%s (confidence = %.5f)' % (label, confidence))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('image', type=str,\n help='Image to pass to model for classification.')\n parser.add_argument('--label-file', type=str, dest='label_file',\n default=DEFAULT_LABEL_FILE,\n help='File containing newline delimited labels.')\n parser.add_argument('--graph-file', type=str, dest='graph_file',\n default=DEFAULT_GRAPH_FILE,\n help='The saved model graph file to use for '\n 'classification.')\n classify(parser.parse_args())\n"
] | [
[
"tensorflow.reshape",
"tensorflow.gfile.GFile",
"tensorflow.read_file",
"tensorflow.InteractiveSession",
"tensorflow.Graph",
"tensorflow.image.convert_image_dtype",
"tensorflow.image.decode_jpeg",
"tensorflow.import_graph_def",
"tensorflow.Session",
"tensorflow.GraphDef"
]
] |
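The classifier above picks its top five predictions with an argsort-reverse-slice. That step, isolated as a runnable NumPy snippet:

import numpy as np

prediction = np.array([0.01, 0.40, 0.05, 0.30, 0.10, 0.14])
top5 = prediction.argsort()[::-1][:5]   # indices of the five largest scores, descending
print(top5)
print(prediction[top5])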
neuml/txtai | [
"b25173a8650a73dbcae43caa278f32020fef0236"
] | [
"src/python/txtai/vectors/words.py"
] | [
"\"\"\"\nWord Vectors module\n\"\"\"\n\nimport os\nimport pickle\nimport tempfile\n\nfrom errno import ENOENT\nfrom multiprocessing import Pool\n\nimport numpy as np\n\n# Conditionally import Word Vector libraries as they aren't installed by default\ntry:\n import fasttext\n from pymagnitude import converter, Magnitude\n\n WORDS = True\nexcept ImportError:\n WORDS = False\n\nfrom .base import Vectors\nfrom ..pipeline import Tokenizer\n\n# Multiprocessing helper methods\n# pylint: disable=W0603\nVECTORS = None\n\n\ndef create(config, scoring):\n \"\"\"\n Multiprocessing helper method. Creates a global embeddings object to be accessed in a new subprocess.\n\n Args:\n config: vector configuration\n scoring: scoring instance\n \"\"\"\n\n global VECTORS\n\n # Create a global embedding object using configuration and saved\n VECTORS = WordVectors(config, scoring)\n\n\ndef transform(document):\n \"\"\"\n Multiprocessing helper method. Transforms document into an embeddings vector.\n\n Args:\n document: (id, data, tags)\n\n Returns:\n (id, embedding)\n \"\"\"\n\n return (document[0], VECTORS.transform(document))\n\n\nclass SerialPool:\n \"\"\"\n Custom pool to execute vector transforms serially.\n \"\"\"\n\n def __init__(self, vectors):\n global VECTORS\n VECTORS = vectors\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n def imap(self, func, iterable):\n \"\"\"\n Single process version of imap.\n\n Args:\n func: function to run\n iterable: iterable to use\n \"\"\"\n\n for x in iterable:\n yield func(x)\n\n\nclass WordVectors(Vectors):\n \"\"\"\n Builds sentence embeddings/vectors using weighted word embeddings.\n \"\"\"\n\n def load(self, path):\n # Ensure that vector path exists\n if not path or not os.path.isfile(path):\n raise IOError(ENOENT, \"Vector model file not found\", path)\n\n # Load magnitude model. If this is a training run (uninitialized config), block until vectors are fully loaded\n return Magnitude(path, case_insensitive=True, blocking=not self.initialized)\n\n def index(self, documents):\n ids, dimensions, stream = [], None, None\n\n # Shared objects with Pool\n args = (self.config, self.scoring)\n\n # Convert all documents to embedding arrays, stream embeddings to disk to control memory usage\n with SerialPool(self) if \"parallel\" in self.config and not self.config[\"parallel\"] else Pool(\n os.cpu_count(), initializer=create, initargs=args\n ) as pool:\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".npy\", delete=False) as output:\n stream = output.name\n for uid, embedding in pool.imap(transform, documents):\n if not dimensions:\n # Set number of dimensions for embeddings\n dimensions = embedding.shape[0]\n\n ids.append(uid)\n pickle.dump(embedding, output, protocol=4)\n\n return (ids, dimensions, len(ids), stream)\n\n def transform(self, document):\n # Convert to tokens if necessary\n if isinstance(document[1], str):\n document = (document[0], Tokenizer.tokenize(document[1]), document[2])\n\n # Generate weights for each vector using a scoring method\n weights = self.scoring.weights(document) if self.scoring else None\n\n # pylint: disable=E1133\n if weights and [x for x in weights if x > 0]:\n # Build weighted average embeddings vector. 
Create weights array os float32 to match embeddings precision.\n embedding = np.average(self.lookup(document[1]), weights=np.array(weights, dtype=np.float32), axis=0)\n else:\n # If no weights, use mean\n embedding = np.mean(self.lookup(document[1]), axis=0)\n\n return embedding\n\n def lookup(self, tokens):\n \"\"\"\n Queries word vectors for given list of input tokens.\n\n Args:\n tokens: list of tokens to query\n\n Returns:\n word vectors array\n \"\"\"\n\n return self.model.query(tokens)\n\n @staticmethod\n def isdatabase(path):\n \"\"\"\n Checks if this is a SQLite database file which is the file format used for word vectors databases.\n\n Args:\n path: path to check\n\n Returns:\n True if this is a SQLite database\n \"\"\"\n\n if isinstance(path, str) and os.path.isfile(path) and os.path.getsize(path) >= 100:\n # Read 100 byte SQLite header\n with open(path, \"rb\") as f:\n header = f.read(100)\n\n # Check for SQLite header\n return header.startswith(b\"SQLite format 3\\000\")\n\n return False\n\n @staticmethod\n def build(data, size, mincount, path):\n \"\"\"\n Builds fastText vectors from a file.\n\n Args:\n data: path to input data file\n size: number of vector dimensions\n mincount: minimum number of occurrences required to register a token\n path: path to output file\n \"\"\"\n\n # Train on data file using largest dimension size\n model = fasttext.train_unsupervised(data, dim=size, minCount=mincount)\n\n # Output file path\n print(f\"Building {size} dimension model\")\n\n # Output vectors in vec/txt format\n with open(path + \".txt\", \"w\", encoding=\"utf-8\") as output:\n words = model.get_words()\n output.write(f\"{len(words)} {model.get_dimension()}\\n\")\n\n for word in words:\n # Skip end of line token\n if word != \"</s>\":\n vector = model.get_word_vector(word)\n data = \"\"\n for v in vector:\n data += \" \" + str(v)\n\n output.write(word + data + \"\\n\")\n\n # Build magnitude vectors database\n print(\"Converting vectors to magnitude format\")\n converter.convert(path + \".txt\", path + \".magnitude\", subword=True)\n"
] | [
[
"numpy.array"
]
] |
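WordVectors.transform() above pools token vectors with np.average when scoring weights are available and np.mean otherwise. The same step with synthetic vectors and weights (NumPy only):

import numpy as np

token_vectors = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=np.float32)
weights = np.array([0.5, 0.3, 0.2], dtype=np.float32)

weighted = np.average(token_vectors, weights=weights, axis=0)   # scoring-weighted embedding
unweighted = np.mean(token_vectors, axis=0)                     # fallback when all weights are zero
print(weighted, unweighted)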
rok20/flood-warning-rok20 | [
"660b0e87de60732df2ee85bf8bdd83c9806168b7"
] | [
"floodsystem/plot.py"
] | [
"\nfrom .analysis import polyfit\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\n\ndef plot_water_levels(station, dates, levels):\n #plot the dates and water levels\n plt.plot(dates, levels, label = 'Water level')\n\n # Add axis labels, rotate date labels and add plot title\n plt.xlabel('date')\n plt.ylabel('water level (m)')\n plt.xticks(rotation=45)\n plt.title(\"Station \" + station.name)\n\n #add typical high and low\n low, high = station.typical_range[0], station.typical_range[1]\n plt.axhline(y = low, color = 'b', label = 'Typical low')\n plt.axhline(y = high, color = 'r', label = 'Typical High')\n\n\n # Display plot\n plt.tight_layout() \n # This makes sure plot does not cut off date labels\n\n plt.show()\n\ndef plot_water_level_with_fit(station, dates, levels, p):\n list = polyfit(dates, levels, p)\n \n dates2 = matplotlib.dates.date2num(dates) \n\n #adjust dates so values aren't so high\n dates2 = dates2 - dates2[0]\n \n #provide points at set intervals for the polynomial\n points = np.linspace(dates2[0], dates2[-1], 30)\n \n #plot data in hours for each curve, label the axis and provide a title.\n plt.plot(24*dates2, levels, label = \"Actual data\")\n plt.plot(24*points, list[0](points), label = \"Best fit polynomial\")\n plt.xlabel(\"Hours in the past\")\n plt.ylabel(\"Water Level\")\n plt.title(station)\n \n\n plt.show\n\n\n"
] | [
[
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel",
"matplotlib.dates.date2num"
]
] |
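plot_water_level_with_fit() above shifts matplotlib date numbers to start at zero before fitting and evaluating a polynomial (note that the trailing plt.show in the record is referenced without being called, so the second figure may never render). A minimal sketch of the date-shift and fit, with made-up level values and np.polyfit standing in for the module's polyfit helper:

import numpy as np
import matplotlib.dates as mdates
from datetime import datetime, timedelta

dates = [datetime(2020, 1, 1) + timedelta(hours=6 * i) for i in range(8)]
levels = [0.5, 0.6, 0.8, 0.7, 0.9, 1.1, 1.0, 1.2]

x = mdates.date2num(dates)
x = x - x[0]                                  # shift so the fit is numerically well conditioned
poly = np.poly1d(np.polyfit(x, levels, 2))    # best-fit quadratic

points = np.linspace(x[0], x[-1], 30)
print(poly(points)[:5])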
jokteur/ASMA | [
"25ac8a0455c680232d56c18d31de62c3188b7153"
] | [
"plots/exploration/finite_size_fluct.py"
] | [
"import time\nimport copy\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.animation as animation\n\nimport flowrect\nfrom flowrect.simulations.util import calculate_age, calculate_mt, eta_SRM\nfrom flowrect.simulations import particle_population, flow_rectification, FR_finite_fluctuations\n\n\n# Plot saving parameters\nsave = False\nsave_path = \"\"\nsave_name = \"\"\n\n\ndef moving_average(x, w):\n return np.convolve(x, np.ones(w), \"valid\") / w\n\n\n# Simulation parameters\nN = 500\ndt = 1e-2\nI_ext = 5\nparams = dict(\n time_end=10,\n dt=dt,\n Lambda=[1.0, 2.5],\n Gamma=[-5.5, 1.0],\n # Lambda=[67.0, 7.0, 3.0],\n # Gamma=[-3.0, 4.0, -3.0],\n c=10,\n lambda_kappa=1,\n I_ext=5,\n I_ext_time=5,\n interaction=0.3,\n)\n\na_cutoff = 7\n\nprint(f\"FR with finite size fluctuations\")\nt = time.time()\nts, a_grid, rho_tN, m_t_exact, x_t, en_cons, A_orig, A1, Abar, S = FR_finite_fluctuations(\n a_cutoff=a_cutoff, **params\n)\nprint(f\"{time.time() - t:.2f}\")\n\nprint(f\"Particle simulation\")\nt = time.time()\nts, M, spikes, A, X = particle_population(**params, N=N, Gamma_ext=True)\nm_t = calculate_mt(M, spikes)\n# m_ts = np.zeros(m_t.T.shape)\n# w = 50\n# m_ts[: -w + 1, 0] = moving_average(m_t.T[:, 0], w)\n# m_ts[: -w + 1, 1] = moving_average(m_t.T[:, 1], w)\n# m_ts[-w + 1 :, :] = m_ts[-w, :]\nprint(f\"{time.time() - t:.2f}\")\n\nprint(f\"Flow rectification approximation\")\nt = time.time()\nts, a_grid, rho_t, m_t_exact, x_t, en_cons, A_t = flow_rectification(a_cutoff=a_cutoff, **params)\nprint(f\"{time.time() - t:.2f}s\")\n\n\nI_ext_vec = np.concatenate((np.zeros(int(len(ts) / 2)), I_ext * np.ones(int(len(ts) / 2))))\nages = calculate_age(spikes.T) * params[\"dt\"]\nA_av = moving_average(A, 100)\n\n# Animated plots\n\n\nclass AnimatedPlot:\n def __init__(self, xlim=10, ylim=10):\n self.fig = plt.figure(figsize=(5.5, 9))\n gs = gridspec.GridSpec(2, 1, height_ratios=[5, 1])\n self.fig.suptitle(fr\"PDE vs particle simulation $N=${N}\")\n\n self.ax1 = plt.subplot(gs[0])\n self.ax2 = plt.subplot(gs[1])\n self.xlim, self.ylim = xlim, ylim\n self.plots = {}\n\n def init_plot(self):\n\n self.plots[\"title\"] = self.ax1.text(\n 0.5,\n 0.85,\n \"\",\n bbox={\"facecolor\": \"w\", \"alpha\": 0.5, \"pad\": 5},\n transform=self.ax1.transAxes,\n ha=\"center\",\n )\n\n # density plot (PDE)\n self.plots[\"p_rho\"] = self.ax1.plot([], [], \"-k\", label=\"Particle\")[0]\n self.plots[\"rho\"] = self.ax1.plot(a_grid, rho_t[0], \"--r\", linewidth=1, label=\"PDE\")[0]\n self.plots[\"rhoN\"] = self.ax1.plot(a_grid, rho_tN[0], \"-b\", linewidth=1, label=\"Finite\")[0]\n self.plots[\"S\"] = self.ax1.plot(a_grid, S[0], \"g\", linewidth=1)[0]\n self.ax1.set_ylim(0, 4)\n self.ax1.set_title(\"Probability density distribution\")\n self.ax1.legend(handles=[self.plots[\"rho\"], self.plots[\"p_rho\"], self.plots[\"rhoN\"]])\n self.ax1.set_xlabel(\"Age a (s)\")\n self.ax1.set_ylabel(r\"$\\rho_t$\")\n\n self.ax2.plot()\n self.ax2.set_title(\"External input\")\n self.plots[\"vline\"] = self.ax2.plot([], [], \"-r\", linewidth=1)[0]\n self.ax2.set_ylim(0, 6)\n self.ax2.plot(ts, I_ext_vec, \"-k\")\n self.ax2.set_ylabel(r\"$I^{ext}$ (a.u.)\")\n self.ax2.set_xlabel(r\"$t$ (s)\")\n\n return tuple(self.plots.values())\n\n def calculate_hist(self, i):\n hist, bins = np.histogram(ages[:, i], bins=50, density=True)\n bins = (bins[1:] + bins[:-1]) / 2\n # w = 2\n return bins, hist # moving_average(hist, w)\n\n def 
animate(self, i):\n t = dt * i\n # Scatter\n self.plots[\"title\"].set_text(fr\"Time $t=${t:.2f}s\")\n # Particule rho\n bins, hist = self.calculate_hist(i)\n self.plots[\"p_rho\"].set_data(bins, hist)\n self.plots[\"vline\"].set_data(np.array([t, t]), np.array([0, 6]))\n\n # PDE rho\n self.plots[\"rho\"].set_data(a_grid, rho_t[i])\n self.plots[\"rhoN\"].set_data(a_grid, rho_tN[i])\n self.plots[\"S\"].set_data(a_grid, S[i])\n return tuple(self.plots.values())\n\n\n# Scatter plot\nlim = 20\npl = AnimatedPlot(xlim=lim, ylim=lim)\nanim_int = 4 # Want every 10ms\nprint(anim_int)\n\nani = animation.FuncAnimation(\n pl.fig,\n func=pl.animate,\n frames=range(0, len(M), anim_int),\n init_func=pl.init_plot,\n interval=5,\n blit=True,\n)\n\nif save:\n ani.save(os.path.join(save_path, save_name))\n\nplt.figure()\nA_av = moving_average(A, 50)\nplt.plot(ts, A, \"--k\", label=\"Particle\")\nplt.plot(ts[: len(A_av)], A_av, \"--r\", label=\"P. rolling av.\")\nplt.plot(ts, A_t, \"-b\", linewidth=1.5, label=\"PDE\")\nplt.plot(ts, A1, \"-c\", label=\"A1\")\nplt.ylim(0, 10)\nplt.plot(ts, Abar, \"-.g\", label=\"Abar\")\nplt.legend()\nplt.show()"
] | [
[
"numpy.ones",
"matplotlib.pyplot.legend",
"numpy.histogram",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.array",
"matplotlib.pyplot.plot",
"matplotlib.gridspec.GridSpec"
]
] |
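A minimal standalone sketch (illustrative, with made-up data; not from the flowrect package) of the convolution-based moving average the plotting script above uses to smooth the particle activity before comparing it against the PDE solution:

```python
import numpy as np

def moving_average(x, w):
    # "valid" mode keeps only full windows: len(x) - w + 1 points, each the mean of w samples
    return np.convolve(x, np.ones(w), "valid") / w

x = np.arange(10, dtype=float)
smoothed = moving_average(x, 3)
expected = np.array([x[i:i + 3].mean() for i in range(len(x) - 2)])
assert np.allclose(smoothed, expected)
print(smoothed)  # [1. 2. 3. 4. 5. 6. 7. 8.]
```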
igor-krawczuk/pytorch-lightning | [
"7de51f78ac2ec09b230e1cb8a786f872de3b861f"
] | [
"tests/models/test_gpu.py"
] | [
"import os\n\nimport pytest\nimport torch\n\nimport tests.base.utils as tutils\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.core import memory\nfrom pytorch_lightning.trainer.distrib_parts import (\n parse_gpu_ids,\n determine_root_gpu_device,\n)\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.base import LightningTestModel\n\nPRETEND_N_OF_GPUS = 16\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_model_ddp2(tmpdir):\n \"\"\"Make sure DDP2 works.\"\"\"\n\n tutils.reset_seed()\n tutils.set_random_master_port()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(\n default_save_path=tmpdir,\n show_progress_bar=True,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=2,\n weights_summary=None,\n distributed_backend='ddp2'\n )\n\n tutils.run_model_test(trainer_options, model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_model_ddp(tmpdir):\n \"\"\"Make sure DDP works.\"\"\"\n\n tutils.reset_seed()\n tutils.set_random_master_port()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(\n default_save_path=tmpdir,\n show_progress_bar=False,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=[0, 1],\n distributed_backend='ddp'\n )\n\n tutils.run_model_test(trainer_options, model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_ddp_all_dataloaders_passed_to_fit(tmpdir):\n \"\"\"Make sure DDP works with dataloaders passed to fit()\"\"\"\n\n tutils.reset_seed()\n tutils.set_random_master_port()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(default_save_path=tmpdir,\n show_progress_bar=False,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=[0, 1],\n distributed_backend='ddp')\n\n fit_options = dict(train_dataloader=model.train_dataloader(),\n val_dataloaders=model.val_dataloader())\n\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model, **fit_options)\n assert result == 1, \"DDP doesn't work with dataloaders passed to fit().\"\n\n\ndef test_optimizer_return_options():\n tutils.reset_seed()\n\n trainer = Trainer()\n model, hparams = tutils.get_default_model()\n\n # single optimizer\n opt_a = torch.optim.Adam(model.parameters(), lr=0.002)\n opt_b = torch.optim.SGD(model.parameters(), lr=0.002)\n scheduler_a = torch.optim.lr_scheduler.StepLR(opt_a, 10)\n scheduler_b = torch.optim.lr_scheduler.StepLR(opt_b, 10)\n\n # single optimizer\n optim, lr_sched, freq = trainer.init_optimizers(opt_a)\n assert len(optim) == 1 and len(lr_sched) == 0 and len(freq) == 0\n\n # opt tuple\n opts = (opt_a, opt_b)\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 2 and optim[0] == opts[0] and optim[1] == opts[1]\n assert len(lr_sched) == 0 and len(freq) == 0\n\n # opt list\n opts = [opt_a, opt_b]\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 2 and optim[0] == opts[0] and optim[1] == opts[1]\n assert len(lr_sched) == 0 and len(freq) == 0\n\n # opt tuple of 2 lists\n opts = ([opt_a], [scheduler_a])\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 1 and len(lr_sched) == 1 and len(freq) == 0\n assert optim[0] == opts[0][0]\n assert lr_sched[0] == dict(scheduler=scheduler_a, 
interval='epoch',\n frequency=1, reduce_on_plateau=False, monitor='val_loss')\n\n # opt single dictionary\n opts = {\"optimizer\": opt_a, \"lr_scheduler\": scheduler_a}\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 1 and len(lr_sched) == 1 and len(freq) == 0\n assert optim[0] == opt_a\n assert lr_sched[0] == dict(scheduler=scheduler_a, interval='epoch',\n frequency=1, reduce_on_plateau=False, monitor='val_loss')\n\n # opt multiple dictionaries with frequencies\n opts = (\n {\"optimizer\": opt_a, \"lr_scheduler\": scheduler_a, \"frequency\": 1},\n {\"optimizer\": opt_b, \"lr_scheduler\": scheduler_b, \"frequency\": 5},\n )\n optim, lr_sched, freq = trainer.init_optimizers(opts)\n assert len(optim) == 2 and len(lr_sched) == 2 and len(freq) == 2\n assert optim[0] == opt_a\n assert lr_sched[0] == dict(scheduler=scheduler_a, interval='epoch',\n frequency=1, reduce_on_plateau=False, monitor='val_loss')\n assert freq == [1, 5]\n\n\ndef test_cpu_slurm_save_load(tmpdir):\n \"\"\"Verify model save/load/checkpoint on CPU.\"\"\"\n tutils.reset_seed()\n\n hparams = tutils.get_default_hparams()\n model = LightningTestModel(hparams)\n\n # logger file to get meta\n logger = tutils.get_default_testtube_logger(tmpdir, False)\n version = logger.version\n\n trainer_options = dict(\n max_epochs=1,\n logger=logger,\n checkpoint_callback=ModelCheckpoint(tmpdir)\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n real_global_step = trainer.global_step\n\n # traning complete\n assert result == 1, 'amp + ddp model failed to complete'\n\n # predict with trained model before saving\n # make a prediction\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n for batch in dataloader:\n break\n\n x, y = batch\n x = x.view(x.size(0), -1)\n\n model.eval()\n pred_before_saving = model(x)\n\n # test HPC saving\n # simulate snapshot on slurm\n saved_filepath = trainer.hpc_save(tmpdir, logger)\n assert os.path.exists(saved_filepath)\n\n # new logger file to get meta\n logger = tutils.get_default_testtube_logger(tmpdir, False, version=version)\n\n trainer_options = dict(\n max_epochs=1,\n logger=logger,\n checkpoint_callback=ModelCheckpoint(tmpdir),\n )\n trainer = Trainer(**trainer_options)\n model = LightningTestModel(hparams)\n\n # set the epoch start hook so we can predict before the model does the full training\n def assert_pred_same():\n assert trainer.global_step == real_global_step and trainer.global_step > 0\n\n # predict with loaded model to make sure answers are the same\n trainer.model.eval()\n new_pred = trainer.model(x)\n assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1\n\n model.on_epoch_start = assert_pred_same\n\n # by calling fit again, we trigger training, loading weights from the cluster\n # and our hook to predict using current model before any more weight updates\n trainer.fit(model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_none_backend(tmpdir):\n \"\"\"Make sure when using multiple GPUs the user can't use `distributed_backend = None`.\"\"\"\n tutils.reset_seed()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(\n default_save_path=tmpdir,\n show_progress_bar=False,\n max_epochs=1,\n train_percent_check=0.1,\n val_percent_check=0.1,\n gpus='-1'\n )\n\n with pytest.warns(UserWarning):\n tutils.run_model_test(trainer_options, 
model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_model_dp(tmpdir):\n \"\"\"Make sure DP works.\"\"\"\n tutils.reset_seed()\n\n model, hparams = tutils.get_default_model()\n trainer_options = dict(\n default_save_path=tmpdir,\n show_progress_bar=False,\n distributed_backend='dp',\n max_epochs=1,\n train_percent_check=0.1,\n val_percent_check=0.1,\n gpus='-1'\n )\n\n tutils.run_model_test(trainer_options, model)\n\n # test memory helper functions\n memory.get_memory_profile('min_max')\n\n\[email protected]\ndef mocked_device_count(monkeypatch):\n def device_count():\n return PRETEND_N_OF_GPUS\n\n monkeypatch.setattr(torch.cuda, 'device_count', device_count)\n\n\[email protected]\ndef mocked_device_count_0(monkeypatch):\n def device_count():\n return 0\n\n monkeypatch.setattr(torch.cuda, 'device_count', device_count)\n\n\[email protected]_param_tests\[email protected]([\"gpus\", \"expected_num_gpus\", \"distributed_backend\"], [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(0, 0, None, id=\"Oth gpu, expect 1 gpu to use.\"),\n pytest.param(1, 1, None, id=\"1st gpu, expect 1 gpu to use.\"),\n pytest.param(-1, PRETEND_N_OF_GPUS, \"ddp\", id=\"-1 - use all gpus\"),\n pytest.param('-1', PRETEND_N_OF_GPUS, \"ddp\", id=\"'-1' - use all gpus\"),\n pytest.param(3, 3, \"ddp\", id=\"3rd gpu - 1 gpu to use (backend:ddp)\")\n])\ndef test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus\n\n\[email protected]_param_tests\[email protected]([\"gpus\", \"expected_num_gpus\", \"distributed_backend\"], [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(None, 0, \"ddp\", id=\"None - expect 0 gpu to use.\"),\n])\ndef test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"O gpus, expect gpu root device to be None.\"),\n pytest.param(1, 0, \"ddp\", id=\"1 gpu, expect gpu root device to be 0.\"),\n pytest.param(-1, 0, \"ddp\", id=\"-1 - use all gpus, expect gpu root device to be 0.\"),\n pytest.param('-1', 0, \"ddp\", id=\"'-1' - use all gpus, expect gpu root device to be 0.\"),\n pytest.param(3, 0, \"ddp\", id=\"3 gpus, expect gpu root device to be 0.(backend:ddp)\")\n])\ndef test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu\n\n\[email protected]_param_tests\[email protected]([\n 'gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(None, None, None, id=\"None is None\"),\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"None is None\"),\n])\ndef test_root_gpu_property_0_passing(\n mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu\n\n\n# Asking for a gpu when non are available will result in a MisconfigurationException\[email protected]_param_tests\[email protected]([\n 'gpus', 
'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(1, None, \"ddp\"),\n pytest.param(3, None, \"ddp\"),\n pytest.param(3, None, \"ddp\"),\n pytest.param([1, 2], None, \"ddp\"),\n pytest.param([0, 1], None, \"ddp\"),\n pytest.param(-1, None, \"ddp\"),\n pytest.param('-1', None, \"ddp\")\n])\ndef test_root_gpu_property_0_raising(\n mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):\n with pytest.raises(MisconfigurationException):\n Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu'], [\n pytest.param(None, None, id=\"No gpus, expect gpu root device to be None\"),\n pytest.param([0], 0, id=\"Oth gpu, expect gpu root device to be 0.\"),\n pytest.param([1], 1, id=\"1st gpu, expect gpu root device to be 1.\"),\n pytest.param([3], 3, id=\"3rd gpu, expect gpu root device to be 3.\"),\n pytest.param([1, 2], 1, id=\"[1, 2] gpus, expect gpu root device to be 1.\"),\n])\ndef test_determine_root_gpu_device(gpus, expected_root_gpu):\n assert determine_root_gpu_device(gpus) == expected_root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_gpu_ids'], [\n pytest.param(None, None),\n pytest.param(0, None),\n pytest.param(1, [0]),\n pytest.param(3, [0, 1, 2]),\n pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id=\"-1 - use all gpus\"),\n pytest.param([0], [0]),\n pytest.param([1, 3], [1, 3]),\n pytest.param('0', [0]),\n pytest.param('3', [3]),\n pytest.param('1, 3', [1, 3]),\n pytest.param('-1', list(range(PRETEND_N_OF_GPUS)), id=\"'-1' - use all gpus\"),\n])\ndef test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):\n assert parse_gpu_ids(gpus) == expected_gpu_ids\n\n\[email protected]_param_tests\[email protected](['gpus'], [\n pytest.param(0.1),\n pytest.param(-2),\n pytest.param(False),\n pytest.param([]),\n pytest.param([-1]),\n pytest.param([None]),\n pytest.param(['0']),\n pytest.param((0, 1)),\n])\ndef test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\[email protected](\"gpus\", [''])\ndef test_parse_gpu_fail_on_empty_string(mocked_device_count, gpus):\n # This currently results in a ValueError instead of MisconfigurationException\n with pytest.raises(ValueError):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\[email protected](\"gpus\", [[1, 2, 19], -1, '-1'])\ndef test_parse_gpu_fail_on_non_existant_id(mocked_device_count_0, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\ndef test_parse_gpu_fail_on_non_existant_id_2(mocked_device_count):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids([1, 2, 19])\n\n\[email protected]_param_tests\[email protected](\"gpus\", [-1, '-1'])\ndef test_parse_gpu_returns_None_when_no_devices_are_available(mocked_device_count_0, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\n# if __name__ == '__main__':\n# pytest.main([__file__])\n"
] | [
[
"torch.optim.lr_scheduler.StepLR",
"torch.eq",
"torch.cuda.device_count"
]
] |
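A minimal sketch of the testing pattern the GPU tests above rely on: a pytest fixture monkeypatches `torch.cuda.device_count` so parametrized cases can pretend a fixed number of GPUs. `toy_num_gpus` is a hypothetical stand-in for illustration only, not Lightning's `parse_gpu_ids`:

```python
import pytest
import torch

PRETEND_N_OF_GPUS = 16

def toy_num_gpus(gpus):
    # hypothetical helper: -1 / '-1' means "all visible GPUs", an int means "use that many"
    if gpus in (-1, "-1"):
        return torch.cuda.device_count()
    return int(gpus)

@pytest.fixture
def mocked_device_count(monkeypatch):
    # pretend the machine has PRETEND_N_OF_GPUS GPUs regardless of real hardware
    monkeypatch.setattr(torch.cuda, "device_count", lambda: PRETEND_N_OF_GPUS)

@pytest.mark.parametrize(["gpus", "expected"], [
    pytest.param(0, 0),
    pytest.param(3, 3),
    pytest.param(-1, PRETEND_N_OF_GPUS),
    pytest.param("-1", PRETEND_N_OF_GPUS),
])
def test_toy_num_gpus(mocked_device_count, gpus, expected):
    assert toy_num_gpus(gpus) == expected
```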
friggog/trimesh | [
"8ba65f6f19664b15bec1d13115af5040365254da"
] | [
"trimesh/visual/texture.py"
] | [
"import copy\n\nimport numpy as np\n\nfrom .base import Visuals\nfrom . import color\n\nfrom .. import util\nfrom .. import caching\nfrom .. import grouping\n\nfrom .material import SimpleMaterial, PBRMaterial, empty_material # NOQA\n\n\nclass TextureVisuals(Visuals):\n def __init__(self,\n uv=None,\n material=None,\n image=None,\n face_materials=None):\n \"\"\"\n Store a single material and per-vertex UV coordinates\n for a mesh.\n\n If passed UV coordinates and a single image it will\n create a SimpleMaterial for the image.\n\n Parameters\n --------------\n uv : (n, 2) float\n UV coordinates for the mesh\n material : Material\n Store images and properties\n image : PIL.Image\n Can be passed to automatically create material\n \"\"\"\n\n # store values we care about enough to hash\n self.vertex_attributes = caching.DataStore()\n # cache calculated values\n self._cache = caching.Cache(self.vertex_attributes.fast_hash)\n\n # should be (n, 2) float\n self.uv = uv\n\n if material is None:\n if image is None:\n self.material = empty_material()\n else:\n # if an image is passed create a SimpleMaterial\n self.material = SimpleMaterial(image=image)\n else:\n # if passed assign\n self.material = material\n\n self.face_materials = face_materials\n\n def _verify_crc(self):\n \"\"\"\n Dump the cache if anything in self.vertex_attributes has changed.\n \"\"\"\n self._cache.verify()\n\n @property\n def kind(self):\n \"\"\"\n Return the type of visual data stored\n\n Returns\n ----------\n kind : str\n What type of visuals are defined\n \"\"\"\n return 'texture'\n\n @property\n def defined(self):\n \"\"\"\n Check if any data is stored\n\n Returns\n ----------\n defined : bool\n Are UV coordinates and images set?\n \"\"\"\n ok = self.material is not None\n return ok\n\n def crc(self):\n \"\"\"\n Get a CRC of the stored data.\n\n Returns\n --------------\n crc : int\n Hash of items in self.vertex_attributes\n \"\"\"\n return self.vertex_attributes.crc()\n\n @property\n def uv(self):\n \"\"\"\n Get the stored UV coordinates.\n\n Returns\n ------------\n uv : (n, 2) float\n Pixel position per- vertex\n \"\"\"\n return self.vertex_attributes.get('uv', None)\n\n @uv.setter\n def uv(self, values):\n \"\"\"\n Set the UV coordinates.\n\n Parameters\n --------------\n values : (n, 2) float\n Pixel locations on a texture per- vertex\n \"\"\"\n if values is None:\n self.vertex_attributes['uv'] = None\n else:\n self.vertex_attributes['uv'] = np.asanyarray(\n values, dtype=np.float64)\n\n def copy(self, uv=None):\n \"\"\"\n Return a copy of the current TextureVisuals object.\n\n Returns\n ----------\n copied : TextureVisuals\n Contains the same information in a new object\n \"\"\"\n if uv is None:\n uv = self.uv\n if uv is not None:\n uv = uv.copy()\n copied = TextureVisuals(\n uv=uv,\n material=self.material.copy(),\n face_materials=copy.copy(self.face_materials))\n\n return copied\n\n def to_color(self):\n \"\"\"\n Convert textured visuals to a ColorVisuals with vertex\n color calculated from texture.\n\n Returns\n -----------\n vis : trimesh.visuals.ColorVisuals\n Contains vertex color from texture\n \"\"\"\n # find the color at each UV coordinate\n colors = self.material.to_color(self.uv)\n # create ColorVisuals from result\n vis = color.ColorVisuals(vertex_colors=colors)\n return vis\n\n def face_subset(self, face_index):\n \"\"\"\n Get a copy of\n \"\"\"\n if self.uv is not None:\n indices = np.unique(self.mesh.faces[face_index].flatten())\n return self.copy(self.uv[indices])\n else:\n return self.copy()\n\n def 
update_vertices(self, mask):\n \"\"\"\n Apply a mask to remove or duplicate vertex properties.\n\n Parameters\n ------------\n mask : (len(vertices),) bool or (n,) int\n Mask which can be used like: `vertex_attribute[mask]`\n \"\"\"\n # collect updated masked values\n updates = {}\n for key, value in self.vertex_attributes.items():\n # DataStore will convert None to zero-length array\n if len(value) == 0:\n continue\n try:\n # store the update\n updates[key] = value[mask]\n except BaseException:\n # usual reason is an incorrect size or index\n util.log.warning('failed to update visual: `{}`'.format(key))\n # clear all values from the vertex attributes\n self.vertex_attributes.clear()\n # apply the updated values\n self.vertex_attributes.update(updates)\n\n def update_faces(self, mask):\n \"\"\"\n Apply a mask to remove or duplicate face properties,\n not applicable to texture visuals.\n \"\"\"\n pass\n\n def concatenate(self, others):\n \"\"\"\n Concatenate this TextureVisuals object with others\n and return the result without modifying this visual.\n\n Parameters\n -----------\n others : (n,) Visuals\n Other visual objects to concatenate\n\n Returns\n -----------\n concatenated : TextureVisuals\n Concatenated visual objects\n \"\"\"\n util.log.warning('concatenating texture: may result in visual artifacts')\n from .objects import concatenate\n return concatenate(self, others)\n\n\ndef unmerge_faces(faces, *args, **kwargs):\n \"\"\"\n Textured meshes can come with faces referencing vertex\n indices (`v`) and an array the same shape which references\n vertex texture indices (`vt`) and sometimes even normal (`vn`).\n\n Vertex locations with different values of any of these can't\n be considered the \"same\" vertex, and for our simple data\n model we need to not combine these vertices.\n\n Parameters\n -------------\n faces : (n, d) int\n References vertex indices\n *args : (n, d) int\n Various references of corresponding values\n This is usually UV coordinates or normal indexes\n maintain_faces : bool\n Do not alter original faces and return no-op masks.\n\n Returns\n -------------\n new_faces : (m, d) int\n New faces for masked vertices\n mask_v : (p,) int\n A mask to apply to vertices\n mask_* : (p,) int\n A mask to apply to vt array to get matching UV coordinates\n Returns as many of these as args were passed\n \"\"\"\n # unfortunately Python2 doesn't let us put named kwargs\n # after an `*args` sequence so we have to do this ugly get\n maintain_faces = kwargs.get('maintain_faces', False)\n\n # don't alter faces\n if maintain_faces:\n # start with not altering faces at all\n result = [faces]\n # find the maximum index referenced by faces\n max_idx = faces.max()\n # add a vertex mask which is just ordered\n result.append(np.arange(max_idx + 1))\n\n # now given the order is fixed do our best on the rest of the order\n for arg in args:\n # create a mask of the attribute-vertex mapping\n # note that these might conflict since we're not unmerging\n masks = np.zeros((3, max_idx + 1), dtype=np.int64)\n # set the mask using the unmodified face indexes\n for i, f, a in zip(range(3), faces.T, arg.T):\n masks[i][f] = a\n # find the most commonly occurring attribute (i.e. 
UV coordinate)\n # and use that index note that this is doing a float conversion\n # and then median before converting back to int: could also do this as\n # a column diff and sort but this seemed easier and is fast enough\n result.append(np.median(masks, axis=0).astype(np.int64))\n\n return result\n\n # stack into pairs of (vertex index, texture index)\n stackable = [np.asanyarray(faces).reshape(-1)]\n # append multiple args to the correlated stack\n # this is usually UV coordinates (vt) and normals (vn)\n for arg in args:\n stackable.append(np.asanyarray(arg).reshape(-1))\n\n # unify them into rows of a numpy array\n stack = np.column_stack(stackable)\n # find unique pairs: we're trying to avoid merging\n # vertices that have the same position but different\n # texture coordinates\n unique, inverse = grouping.unique_rows(stack)\n\n # only take the unique pairs\n pairs = stack[unique]\n # try to maintain original vertex order\n order = pairs[:, 0].argsort()\n # apply the order to the pairs\n pairs = pairs[order]\n\n # we re-ordered the vertices to try to maintain\n # the original vertex order as much as possible\n # so to reconstruct the faces we need to remap\n remap = np.zeros(len(order), dtype=np.int64)\n remap[order] = np.arange(len(order))\n\n # the faces are just the inverse with the new order\n new_faces = remap[inverse].reshape((-1, 3))\n\n # the mask for vertices and masks for other args\n result = [new_faces]\n result.extend(pairs.T)\n\n return result\n\n\ndef power_resize(image, resample=1, square=False):\n \"\"\"\n Resize a PIL image so every dimension is a power of two.\n\n Parameters\n ------------\n image : PIL.Image\n Input image\n resample : int\n Passed to Image.resize\n square : bool\n If True, upsize to a square image\n\n Returns\n -------------\n resized : PIL.Image\n Input image resized\n \"\"\"\n # what is the current resolution of the image in pixels\n size = np.array(image.size, dtype=np.int64)\n # what is the resolution of the image upsized to the nearest\n # power of two on each axis: allow rectangular textures\n new_size = (2 ** np.ceil(np.log2(size))).astype(np.int64)\n\n # make every dimension the largest\n if square:\n new_size = np.ones(2, dtype=np.int64) * new_size.max()\n\n # if we're not powers of two upsize\n if (size != new_size).any():\n return image.resize(new_size, resample=resample)\n\n return image.copy()\n"
] | [
[
"numpy.ones",
"numpy.log2",
"numpy.zeros",
"numpy.column_stack",
"numpy.asanyarray",
"numpy.median",
"numpy.arange",
"numpy.array"
]
] |
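A minimal sketch (illustrative, without the PIL resize step) of the power-of-two rounding that `power_resize` above applies to each image dimension:

```python
import numpy as np

def next_power_of_two(size):
    # round each dimension up to the nearest power of two; exact powers are left unchanged
    size = np.array(size, dtype=np.int64)
    return (2 ** np.ceil(np.log2(size))).astype(np.int64)

print(next_power_of_two([640, 480]))  # [1024  512]
print(next_power_of_two([256, 256]))  # [256 256]
```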
itsameercat/polychrom | [
"a3a39290857bc889627f9d437faa050248bbdc13"
] | [
"polychrom/polymer_analyses.py"
] | [
"# Code written by: Maksim Imakaev ([email protected])\n\"\"\"\nAnalyses of polymer conformations\n=================================\n\n\nThis module presents a collection of utils to work with polymer conformations.\n\n\nTools for calculating contacts\n------------------------------\n\nThe main function calculating contacts is: :py:func:`polychrom.polymer_analyses.calculate_contacts`\nRight now it is a simple wrapper around scipy.cKDTree. \n\nAnother function :py:func:`polychrom.polymer_analyses.smart_contacts` was added recently\nto help build contact maps with a large contact radius. \nIt randomly sub-samples the monomers; by default selecting N/cutoff monomers. It then \ncalculates contacts from sub-sampled monomers only. It is especially helpful when the same code \nneeds to calculate contacts at large and small contact radii.Because of sub-sampling at large\ncontact radius, it avoids the problem of having way-too-many-contacts at a large contact radius. \nFor ordinary contacts, the number of contacts scales as contact_radius^3; however, with smart_contacts \nit would only scale linearly with contact radius, which leads to significant speedsups. \n\n\nTools to calculate P(s) and R(s) \n----------------------------------\n\nWe provide functions to calculate P(s), Rg^2(s) and R^2(s) for polymers. \nBy default, they use log-spaced bins on the X axis, with about 10 bins per order of magnitude, \nbut aligned such that the last bins ends exactly at (N-1). They output (bin, scaling) \nfor Rg^2 and R^2, and (bin_mid, scaling) for contacts. In either case, the \nreturned values are ready to plot. The difference is that Rg and R^2 are evaluated\nat a given value of s, while contacts are aggregated for (bins[0].. bins[1]), (bins[1]..bins[2]). \nTherefore, we have to return bin mids for contacts. \n\n\"\"\"\n\nfrom math import sqrt\n\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.spatial import ckdtree\n\ntry:\n from . import _polymer_math\nexcept:\n pass\n\n\ndef calculate_contacts(data, cutoff=1.7):\n \"\"\"Calculates contacts between points give the contact radius (cutoff)\n\n Parameters\n ----------\n data : Nx3 array\n Coordinates of points\n cutoff : float , optional\n Cutoff distance (contact radius)\n\n Returns\n -------\n k by 2 array of contacts. Each row corresponds to a contact.\n \"\"\"\n if data.shape[1] != 3:\n raise ValueError(\"Incorrect polymer data shape. Must be Nx3.\")\n\n if np.isnan(data).any():\n raise RuntimeError(\"Data contains NANs\")\n\n tree = ckdtree.cKDTree(data)\n pairs = tree.query_pairs(cutoff, output_type=\"ndarray\")\n return pairs\n\n\ndef smart_contacts(data, cutoff=1.7, min_cutoff=2.1, percent_func=lambda x: 1 / x):\n \"\"\"Calculates contacts for a polymer, give the contact radius (cutoff)\n This method takes a random fraction of the monomers that is equal to (\n 1/cutoff).\n\n This is done to make contact finding faster, and because if cutoff radius\n is R, and monomer (i,j) are in contact, then monomers (i+a), and (j+b)\n are likely in contact if |a| + |b| <~ R (the polymer could not run away\n by more than R in R steps)\n\n This method will have # of contacts grow approximately linearly with\n contact radius, not cubically, which should drastically speed up\n computations of contacts for large (5+) contact radii. 
This should allow\n using the same code both for small and large contact radius, without the\n need to reduce the # of conformations, subsample the data, or both at\n very large contact radii.\n\n\n Parameters\n ----------\n data : Nx3 array\n Polymer coordinates\n cutoff : float , optional\n Cutoff distance that defines contact\n min_cutoff : float, optional\n Apply the \"smart\" reduction of contacts only when cutoff\n is less than this value\n percent_func : callable, optional \n Function that calculates fraction of monomers to use, as a function of cutoff\n Default is 1/cutoff \n\n Returns\n -------\n k by 2 array of contacts. Each row corresponds to a contact.\n \"\"\"\n if data.shape[1] != 3:\n raise ValueError(\"Incorrect polymer data shape. Must be Nx3.\")\n\n if np.isnan(data).any():\n raise RuntimeError(\"Data contains NANs\")\n\n if cutoff > min_cutoff:\n frac = percent_func(cutoff)\n inds = np.nonzero(np.random.random(len(data)) < frac)[0]\n\n conts = calculate_contacts(data[inds], cutoff)\n conts[:, 0] = inds[conts[:, 0]]\n conts[:, 1] = inds[conts[:, 1]]\n return conts\n\n else:\n return calculate_contacts(data, cutoff)\n\n\ndef generate_bins(N, start=4, bins_per_order_magn=10):\n lstart = np.log10(start)\n lend = np.log10(N - 1) + 1e-6\n num = int(np.ceil((lend - lstart) * bins_per_order_magn))\n bins = np.unique(np.logspace(lstart, lend, dtype=int, num=max(num, 0)))\n if len(bins) > 0:\n assert bins[-1] == N - 1\n return bins\n\n\ndef contact_scaling(data, bins0=None, cutoff=1.1, integrate=False, ring=False):\n \"\"\"\n Returns contact probability scaling for a given polymer conformation\n Contact between monomers X and X+1 is counted as s=1 \n \n\n Parameters\n ----------\n data : Nx3 array of ints/floats\n Input polymer conformation\n bins0 : list or None\n Bins to calculate scaling.\n Bins should probably be log-spaced; log-spaced bins can be quickly\n calculated using mirnylib.numtuis.logbinsnew.\n If None, bins will be calculated automatically\n cutoff : float, optional\n Cutoff to calculate scaling\n integrate : bool, optional\n if True, will return cumulative probability\n ring : bool, optional\n If True, will calculate contacts for the ring\n intContacts : bool, optional\n If True, will speed up calculation of contacts for a cubit lattice case.\n verbose : bool, optional\n If True, print some information.\n\n Returns\n -------\n (mids, contact probabilities) where \"mids\" contains\n geometric means of bin start/end\n \n\n \"\"\"\n data = np.asarray(data)\n N = data.shape[0]\n assert data.shape[1] == 3\n\n if bins0 is None:\n bins0 = generate_bins(N)\n\n bins0 = np.array(bins0)\n bins = [(bins0[i], bins0[i + 1]) for i in range(len(bins0) - 1)]\n contacts = np.array(calculate_contacts(data, cutoff))\n\n contacts = contacts[:, 1] - contacts[:, 0] # contact lengthes\n\n if ring:\n mask = contacts > N // 2\n contacts[mask] = N - contacts[mask]\n\n scontacts = np.sort(contacts) # sorted contact lengthes\n # count of contacts\n connumbers = np.diff(np.searchsorted(scontacts, bins0, side=\"left\"))\n\n if ring:\n possible = np.diff(N * bins0)\n else:\n possible = np.diff(N * bins0 + 0.5 * bins0 - 0.5 * (bins0 ** 2))\n\n connumbers = connumbers / possible\n\n a = [sqrt(i[0] * (i[1] - 1)) for i in bins]\n return a, connumbers\n\n\n\ndef slope_contact_scaling(mids, cp, sigma=2):\n \n smooth=lambda x: gaussian_filter1d(x, sigma)\n \n # P(s) has to be smoothed in logspace, and both P and s have to be smoothed. 
\n # It is discussed in detail here\n # https://gist.github.com/mimakaev/4becf1310ba6ee07f6b91e511c531e73\n \n # Values sigma=1.5-2 look reasonable for reasonable simulations\n \n slope = np.diff(smooth(np.log(cp))) / np.diff(\n smooth(np.log(mids)))\n \n return mids[1:], slope\n\n\ndef Rg2_scaling(data, bins=None, ring=False):\n \"\"\"Calculates average gyration radius of subchains a function of s\n \n Parameters\n ----------\n \n data: Nx3 array\n bins: subchain lengths at which to calculate Rg\n ring: treat polymer as a ring (default: False) \n \"\"\"\n\n data = np.asarray(data, float)\n N = data.shape[0]\n assert data.shape[1] == 3\n\n data = np.concatenate([[[0, 0, 0]], data])\n\n if bins is None:\n bins = generate_bins(N)\n\n coms = np.cumsum(data, 0) # cumulative sum of locations to calculate COM\n coms2 = np.cumsum(data ** 2, 0) # cumulative sum of locations^2 to calculate RG\n\n def radius_gyration(len2):\n data\n if ring:\n comsadd = coms[1:len2, :].copy()\n coms2add = coms2[1:len2, :].copy()\n comsadd += coms[-1, :][None, :]\n coms2add += coms2[-1, :][None, :]\n comsw = np.concatenate([coms, comsadd], axis=0)\n coms2w = np.concatenate([coms2, coms2add], axis=0)\n else:\n comsw = coms\n coms2w = coms2\n\n coms2d = (-coms2w[:-len2, :] + coms2w[len2:, :]) / len2\n comsd = ((comsw[:-len2, :] - comsw[len2:, :]) / len2) ** 2\n diffs = coms2d - comsd\n sums = np.sum(diffs, 1)\n return np.mean(sums)\n\n rads = [0.0 for i in range(len(bins))]\n for i in range(len(bins)):\n rads[i] = radius_gyration(int(bins[i]))\n return np.array(bins), rads\n\n\ndef R2_scaling(data, bins=None, ring=False):\n \"\"\"\n Returns end-to-end distance scaling of a given polymer conformation.\n ..warning:: This method averages end-to-end scaling over all possible\n subchains of given length\n\n Parameters\n ----------\n\n data: Nx3 array\n bins: the same as in giveCpScaling\n\n \"\"\"\n data = np.asarray(data, float)\n N = data.shape[0]\n assert data.shape[1] == 3\n data = data.T\n\n if bins is None:\n bins = generate_bins(N)\n if ring:\n data = np.concatenate([data, data], axis=1)\n\n rads = [0.0 for i in range(len(bins))]\n for i in range(len(bins)):\n length = bins[i]\n if ring:\n rads[i] = np.mean(\n (np.sum((data[:, :N] - data[:, length : length + N]) ** 2, 0))\n )\n else:\n rads[i] = np.mean((np.sum((data[:, :-length] - data[:, length:]) ** 2, 0)))\n return np.array(bins), rads\n\n\ndef Rg2(data):\n \"\"\"\n Simply calculates gyration radius of a polymer chain.\n \"\"\"\n data = np.asarray(data)\n assert data.shape[1] == 3\n return np.mean((data - np.mean(data, axis=0)) ** 2) * 3\n\n\ndef Rg2_matrix(data):\n \"\"\"\n Uses dynamic programming and vectorizing to calculate Rg for each subchain of the polymer. 
\n Returns a matrix for which an element [i,j] is Rg of a subchain from i to j including i and j\n \"\"\"\n\n data = np.asarray(data, float)\n assert data.shape[1] == 3\n N = data.shape[0]\n data = np.concatenate([[[0, 0, 0]], data])\n\n coms = np.cumsum(data, 0) # cumulative sum of locations to calculate COM\n coms2 = np.cumsum(data ** 2, 0) # cumulative sum of locations^2 to calculate RG\n\n dists = np.abs(np.arange(N)[:, None] - np.arange(N)[None, :]) + 1\n coms2d = (-coms2[:-1, None, :] + coms2[None, 1::, :]) / dists[:, :, None]\n comsd = ((coms[:-1, None, :] - coms[None, 1:, :]) / dists[:, :, None]) ** 2\n sums = np.sum(coms2d - comsd, 2)\n np.fill_diagonal(sums, 0)\n mask = np.arange(N)[:, None] > np.arange(N)[None, :]\n sums[mask] = sums.T[mask]\n return sums\n\n\ndef ndarray_groupby_aggregate(\n df,\n ndarray_cols,\n aggregate_cols,\n value_cols=[],\n sample_cols=[],\n preset=\"sum\",\n ndarray_agg=lambda x: np.sum(x, axis=0),\n value_agg=lambda x: x.sum(),\n):\n \"\"\"\n A version of pd.groupby that is aware of numpy arrays as values of columns \n \n * aggregates columns ndarray_cols using ndarray_agg aggregator,\n * aggregates value_cols using value_agg aggregator,\n * takes the first element in sample_cols,\n * aggregates over aggregate_cols\n \n It has presets for sum, mean and nanmean. \n \"\"\"\n\n if preset == \"sum\":\n ndarray_agg = lambda x: np.sum(x, axis=0)\n value_agg = lambda x: x.sum()\n elif preset == \"mean\":\n ndarray_agg = lambda x: np.mean(x, axis=0)\n value_agg = lambda x: x.mean()\n elif preset == \"nanmean\":\n ndarray_agg = lambda x: np.nanmean(x, axis=0)\n value_agg = lambda x: x.mean()\n\n def combine_values(in_df):\n \"\"\"\n splits into ndarrays, 'normal' values, and samples;\n performs aggregation, and returns a Series\n \"\"\"\n average_arrs = pd.Series(\n index=ndarray_cols,\n data=[\n ndarray_agg([np.asarray(j) for j in in_df[i].values])\n for i in ndarray_cols\n ],\n )\n average_values = value_agg(in_df[value_cols])\n sample_values = in_df[sample_cols].iloc[0]\n agg_series = pd.concat([average_arrs, average_values, sample_values])\n return agg_series\n\n return df.groupby(aggregate_cols).apply(combine_values)\n\n\ndef streaming_ndarray_agg(\n in_stream,\n ndarray_cols,\n aggregate_cols,\n value_cols=[],\n sample_cols=[],\n chunksize=30000,\n add_count_col=False,\n divide_by_count=False,\n):\n \"\"\"\n Takes in_stream of dataframes\n \n Applies ndarray-aware groupby-sum or groupby-mean: treats ndarray_cols as numpy arrays, \n value_cols as normal values, for sample_cols takes the first element. \n \n Does groupby over aggregate_cols \n \n if add_count_col is True, adds column \"count\", if it's a string - adds column with add_count_col name \n\n if divide_by_counts is True, divides result by column \"count\". \n If it's a string, divides by divide_by_count column\n \n This function can be used for automatically aggregating P(s), R(s) etc. \n for a set of conformations that is so large that all P(s) won't fit in RAM,\n and when averaging needs to be done over so many parameters \n that for-loops are not an issue. Examples may include simulations in which sweep\n over many parameters has been performed. 
\n \n \"\"\"\n value_cols_orig = [i for i in value_cols]\n ndarray_cols, value_cols = list(ndarray_cols), list(value_cols)\n aggregate_cols, sample_cols = list(aggregate_cols), list(sample_cols)\n if add_count_col is not False:\n if add_count_col is True:\n add_count_col = \"count\"\n value_cols.append(add_count_col)\n\n def agg_one(dfs, aggregate):\n \"\"\"takes a list of DataFrames and old aggregate\n performs groupby and aggregation and returns new aggregate\"\"\"\n if add_count_col is not False:\n for i in dfs:\n i[add_count_col] = 1\n\n df = pd.concat(dfs + ([aggregate] if aggregate is not None else []), sort=False)\n aggregate = ndarray_groupby_aggregate(\n df,\n ndarray_cols=ndarray_cols,\n aggregate_cols=aggregate_cols,\n value_cols=value_cols,\n sample_cols=sample_cols,\n preset=\"sum\",\n )\n return aggregate.reset_index()\n\n aggregate = None\n cur = []\n count = 0\n for i in in_stream:\n cur.append(i)\n count += len(i)\n if count > chunksize:\n aggregate = agg_one(cur, aggregate)\n cur = []\n count = 0\n if len(cur) > 0:\n aggregate = agg_one(cur, aggregate)\n\n if divide_by_count is not False:\n if divide_by_count is True:\n divide_by_count = \"count\"\n for i in ndarray_cols + value_cols_orig:\n aggregate[i] = aggregate[i] / aggregate[divide_by_count]\n\n return aggregate\n\n\ndef kabsch_msd(P, Q):\n \"\"\"\n Calculates MSD between two vectors using Kabash alcorithm \n Borrowed from https://github.com/charnley/rmsd with some changes \n \n rmsd is licenced with a 2-clause BSD licence \n \n Copyright (c) 2013, Jimmy Charnley Kromann <[email protected]> & Lars Bratholm\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n 2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n \n \"\"\"\n P = P - np.mean(P, axis=0)\n Q = Q - np.mean(Q, axis=0)\n C = np.dot(np.transpose(P), Q)\n\n V, S, W = np.linalg.svd(C)\n d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0\n if d:\n S[-1] = -S[-1]\n V[:, -1] = -V[:, -1]\n\n # Create Rotation matrix U\n U = np.dot(V, W)\n dist = np.mean((np.dot(P, U) - Q) ** 2) * 3\n return dist\n\n\nkabsch_rmsd = kabsch_msd\n\n\ndef mutualSimplify(a, b, verbose=False):\n \"\"\"\n Ported here from openmmlib.\n\n Given two polymer rings, it attempts to reduce the number of monomers in each of\n them while preserving the linking between them. It does so by trying to remove\n monomers one-by-one. 
If no other bonds pass through the triangle formed by the 2\n old bonds and 1 new bond, it accepts removal of the monomer. It does so until no\n monomers in either of the rings can be removed.\n\n \"\"\"\n if verbose:\n print(\"Starting mutual simplification of polymers\")\n while True:\n la, lb = len(a), len(b)\n if verbose:\n print(len(a), len(b), \"before; \", end=\" \")\n a, b = _polymer_math.mutualSimplify(a, b)\n if verbose:\n print(len(a), len(b), \"after one; \", end=\" \")\n b, a = _polymer_math.mutualSimplify(b, a)\n if verbose:\n print(len(a), len(b), \"after two; \")\n\n if (len(a) == la) and (len(b) == lb):\n if verbose:\n print(\"Mutual simplification finished\")\n return a, b\n\n\ndef getLinkingNumber(data1, data2, simplify=True, randomOffset=True, verbose=False):\n \"\"\"\n Ported here from openmmlib as well.\n\n \"\"\"\n if simplify:\n data1, data2 = mutualSimplify(a=data1, b=data2, verbose=verbose)\n return _polymer_math.getLinkingNumber(data1, data2, randomOffset=randomOffset)\n\n\ndef calculate_cistrans(data, chains, chain_id=0, cutoff=5, pbc_box=False, box_size=None):\n \n \"\"\"\n Analysis of the territoriality of polymer chains from simulations, using the cis/trans ratio.\n Cis signal is computed for the marked chain ('chain_id') as amount of contacts of the chain with itself\n Trans signal is the total amount of trans contacts for the marked chain with other chains from 'chains' \n (and with all the replicas for 'pbc_box'=True)\n \n \"\"\"\n if data.shape[1] != 3:\n raise ValueError(\"Incorrect polymer data shape. Must be Nx3.\")\n\n if np.isnan(data).any():\n raise RuntimeError(\"Data contains NANs\")\n \n N = len(data)\n \n if pbc_box == True:\n if box_size is None:\n raise ValueError(\"Box size is not given\")\n else:\n data_scaled = np.mod(data, box_size)\n \n else:\n box_size = None\n data_scaled = np.copy(data)\n \n if chains is None:\n chains = [[0, N]]\n chain_id = 0\n\n chain_start = chains[chain_id][0]\n chain_end = chains[chain_id][1]\n \n # all contact pairs available in the scaled data\n tree = ckdtree.cKDTree(data_scaled, boxsize=box_size)\n pairs = tree.query_pairs(cutoff, output_type=\"ndarray\")\n \n # total number of contacts of the marked chain:\n # each contact is counted twice if both monomers belong to the marked chain and \n # only once if just one of the monomers in the pair belong to the marked chain\n all_signal = len(pairs[pairs<chain_end])-len(pairs[pairs<chain_start])\n \n # contact pairs of the marked chain with itself\n tree = ckdtree.cKDTree(data[chain_start:chain_end], boxsize=None)\n pairs = tree.query_pairs(cutoff, output_type=\"ndarray\")\n \n # doubled number of contacts of the marked chain with itself (i.e. cis signal)\n cis_signal = 2*len(pairs)\n \n assert all_signal >= cis_signal\n \n trans_signal = all_signal - cis_signal\n \n return cis_signal, trans_signal\n"
] | [
[
"numpy.sum",
"numpy.diff",
"numpy.asarray",
"numpy.copy",
"numpy.log",
"numpy.fill_diagonal",
"numpy.transpose",
"numpy.nanmean",
"numpy.concatenate",
"numpy.log10",
"numpy.isnan",
"numpy.mean",
"numpy.ceil",
"numpy.searchsorted",
"scipy.spatial.ckdtree.cKDTree",
"numpy.linalg.det",
"numpy.mod",
"numpy.arange",
"pandas.concat",
"numpy.sort",
"numpy.cumsum",
"numpy.linalg.svd",
"numpy.array",
"numpy.dot"
]
] |
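A minimal sketch (illustrative, on a synthetic random walk rather than real conformations) of the k-d-tree contact query that `calculate_contacts` above wraps:

```python
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
data = np.cumsum(rng.normal(size=(1000, 3)), axis=0)  # a 1000-monomer random-walk "polymer"

tree = cKDTree(data)
pairs = tree.query_pairs(r=1.7, output_type="ndarray")  # (k, 2) array, each row a contact (i, j) with i < j
s = pairs[:, 1] - pairs[:, 0]                           # genomic separation of each contact

print(len(pairs), "contacts, median separation", int(np.median(s)))
```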
lemoner20/tensorlayer | [
"69bd591f247b4a67f8968bd29c3660b22dbffae4"
] | [
"example/tutorial_frozenlake_dqn.py"
] | [
"import gym, random, time\nimport numpy as np\nimport tensorflow as tf\nimport tensorlayer as tl\nfrom tensorlayer.layers import *\nimport matplotlib.pyplot as plt\n\n\"\"\" Q-Network Q(a, s) - TD Learning, Off-Policy, e-Greedy Exploration (GLIE)\n\nQ(S, A) <- Q(S, A) + alpha * (R + lambda * Q(newS, newA) - Q(S, A))\ndelta_w = R + lambda * Q(newS, newA)\n\nSee David Silver RL Tutorial Lecture 5 - Q-Learning for more details.\n\nEN: https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0#.5m3361vlw\nCN: https://zhuanlan.zhihu.com/p/25710327\n\nNote: Policy Network has been proved to be better than Q-Learning, see tutorial_atari_pong.py\n\"\"\"\n## The FrozenLake v0 environment\n# https://gym.openai.com/envs/FrozenLake-v0\n# The agent controls the movement of a character in a grid world. Some tiles of\n# the grid are walkable, and others lead to the agent falling into the water.\n# Additionally, the movement direction of the agent is uncertain and only partially\n# depends on the chosen direction. The agent is rewarded for finding a walkable\n# path to a goal tile.\n# SFFF (S: starting point, safe)\n# FHFH (F: frozen surface, safe)\n# FFFH (H: hole, fall to your doom)\n# HFFG (G: goal, where the frisbee is located)\n# The episode ends when you reach the goal or fall in a hole. You receive a reward\n# of 1 if you reach the goal, and zero otherwise.\nenv = gym.make('FrozenLake-v0')\n\ndef to_one_hot(i, n_classes=None):\n a = np.zeros(n_classes, 'uint8')\n a[i] = 1\n return a\n\nrender = False # display the game environment\nrunning_reward = None\n\ntf.reset_default_graph()\n## Define Q-network q(a,s) that ouput the rewards of 4 actions by given state, i.e. Action-Value Function.\n# 4x4 grid can be represented by one-hot vector with 16 integers.\ninputs = tf.placeholder(shape=[1, 16], dtype=tf.float32)\nnet = InputLayer(inputs, name='observation')\nnet = DenseLayer(net, n_units=4, act=tf.identity,\n W_init=tf.random_uniform_initializer(0, 0.01), b_init=None, name='q_a_s')\ny = net.outputs # action-value / rewards of 4 actions\npredict = tf.argmax(y, 1) # chose action greedily with reward. in Q-Learning, policy is greedy, so we use \"max\" to select the next action.\n\n## Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.\nnextQ = tf.placeholder(shape=[1, 4], dtype=tf.float32)\nloss = tl.cost.mean_squared_error(nextQ, y, is_mean=False) # tf.reduce_sum(tf.square(nextQ - y))\ntrain_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)\n\n## Set learning parameters\nlambd = .99 # decay factor\ne = 0.1 # e-Greedy Exploration, the larger the more random\nnum_episodes = 10000\nwith tf.Session() as sess:\n tl.layers.initialize_global_variables(sess)\n for i in range(num_episodes):\n ## Reset environment and get first new observation\n episode_time = time.time()\n s = env.reset() # observation is state, integer 0 ~ 15\n rAll = 0\n for j in range(99): # step index, maximum step is 99\n if render: env.render()\n ## Choose an action by greedily (with e chance of random action) from the Q-network\n a, allQ = sess.run([predict, y], feed_dict={inputs : [to_one_hot(s, 16)]})\n ## e-Greedy Exploration !!! 
sample random action\n if np.random.rand(1) < e:\n a[0] = env.action_space.sample()\n ## Get new state and reward from environment\n s1, r, d, _ = env.step(a[0])\n ## Obtain the Q' values by feeding the new state through our network\n Q1 = sess.run(y, feed_dict={inputs : [to_one_hot(s1, 16)]})\n ## Obtain maxQ' and set our target value for chosen action.\n maxQ1 = np.max(Q1) # in Q-Learning, policy is greedy, so we use \"max\" to select the next action.\n targetQ = allQ\n targetQ[0, a[0]] = r + lambd * maxQ1\n ## Train network using target and predicted Q values\n # it is not real target Q value, it is just an estimation,\n # but check the Q-Learning update formula:\n # Q'(s,a) <- Q(s,a) + alpha(r + lambd * maxQ(s',a') - Q(s, a))\n # minimizing |r + lambd * maxQ(s',a') - Q(s, a)|^2 equal to force\n # Q'(s,a) ≈ Q(s,a)\n _ = sess.run(train_op, {inputs : [to_one_hot(s, 16)], nextQ : targetQ})\n rAll += r\n s = s1\n ## Reduce chance of random action if an episode is done.\n if d == True:\n e = 1./((i/50) + 10) # reduce e, GLIE: Greey in the limit with infinite Exploration\n break\n\n ## Note that, the rewards here with random action\n running_reward = rAll if running_reward is None else running_reward * 0.99 + rAll * 0.01\n print(\"Episode [%d/%d] sum reward:%f running reward:%f took:%.5fs %s\" %\n (i, num_episodes, rAll, running_reward, time.time()-episode_time, '' if rAll == 0 else ' !!!!!!!!'))\n"
] | [
[
"tensorflow.placeholder",
"numpy.zeros",
"numpy.max",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Session",
"tensorflow.argmax",
"numpy.random.rand",
"tensorflow.reset_default_graph",
"tensorflow.random_uniform_initializer"
]
] |
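A minimal sketch (illustrative) of the same Q-learning target in its tabular form, matching the update formula spelled out in the tutorial's comments; the terminal-state handling here is a common refinement, not something the tutorial itself does:

```python
import numpy as np

n_states, n_actions = 16, 4
Q = np.zeros((n_states, n_actions))
alpha, lambd = 0.1, 0.99  # learning rate and decay factor, as in the tutorial

def q_update(s, a, r, s1, done):
    # Q(s,a) <- Q(s,a) + alpha * (r + lambd * max_a' Q(s',a') - Q(s,a))
    target = r if done else r + lambd * np.max(Q[s1])
    Q[s, a] += alpha * (target - Q[s, a])

q_update(s=0, a=2, r=0.0, s1=1, done=False)   # no reward and zero neighbouring Q: no change
q_update(s=14, a=1, r=1.0, s1=15, done=True)  # reaching the goal moves Q[14, 1] toward 1
print(Q[14, 1])  # 0.1
```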
oasislabs/ready-layer-2 | [
"dcfc5edd2a645ab7a0dfc54933a71a7dca923fff"
] | [
"demo/train_models.py"
] | [
"#!/usr/bin/env python3\n\nfrom contextlib import contextmanager\nfrom os import path as osp\n\nimport joblib\nimport pandas as pd\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.svm import SVC\n\n\nDEMO_DIR = osp.abspath(osp.dirname(__file__))\nDATA_DIR = osp.join(DEMO_DIR, \"data\")\nMODELS_DIR = osp.join(DEMO_DIR, \"models\")\n\n\n@contextmanager\ndef load_data(train=True):\n df = pd.read_csv(osp.join(DATA_DIR, f'iris_{\"train\" if train else \"test\"}.csv'), header=None)\n df.columns = [\"sepal length\", \"sepal width\", \"petal length\", \"petal width\", \"label\"]\n\n X = df.drop([\"label\"], axis=1)\n y = pd.factorize(df[\"label\"], sort=True)[0]\n\n yield X, y\n\n\ndef main():\n with load_data(train=True) as (X, y):\n model_a = SVC(gamma=\"scale\")\n model_a.fit(X, y)\n\n model_b = AdaBoostClassifier()\n model_b.fit(X, y)\n\n print(\"train\")\n print(f\"├─ model A score: {model_a.score(X, y):.3f}\")\n print(f\"└─ model B score: {model_b.score(X, y):.3f}\")\n\n with load_data(train=False) as (X, y):\n print(\"\\ntest (debugging only. you wouldn't see these irl)\")\n print(f\"├─ model A score: {model_a.score(X, y):.3f}\")\n print(f\"└─ model B score: {model_b.score(X, y):.3f}\")\n\n joblib.dump(model_a, osp.join(MODELS_DIR, \"model_a.joblib\"))\n joblib.dump(model_b, osp.join(MODELS_DIR, \"model_b.joblib\"))\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.svm.SVC",
"pandas.factorize"
]
] |
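A minimal sketch (illustrative, with toy data and a throwaway temp directory) of the joblib dump/load round trip the demo script above ends with:

```python
import tempfile
from os import path as osp

import joblib
import numpy as np
from sklearn.svm import SVC

X = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0], [1.0, 0.0]])
y = np.array([0, 1, 1, 0])
model = SVC(gamma="scale").fit(X, y)

with tempfile.TemporaryDirectory() as tmp:
    model_path = osp.join(tmp, "model_a.joblib")
    joblib.dump(model, model_path)
    restored = joblib.load(model_path)
    # the restored estimator predicts exactly like the in-memory one
    print((restored.predict(X) == model.predict(X)).all())  # True
```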
ag88/Test-stock-prediction-algorithms | [
"b2d12dc71acd0ed201976549cab7d2076db4721c"
] | [
"StockMarketTimeSeriesAnomalies/FindInflectionPoints.py"
] | [
"\n\n# http://github.com/timestocome\n\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\n# use data leveled, log'd and otherwise smoothed in \n# https://github.com/timestocome/StockMarketData\n# to do some analysis\n\n# http://www.mdpi.com/1999-4893/5/4/588\n# after I started working through the algorithm\n# it became clear it's not so different than convolution\n# and convolution might be easier and faster so shifted \n# to using the built in scipy.signal library\n# The signal still needs to be stationary (rotated to x axis) in time\n# and for stocks because of inflation you'll need a log index or the \n# older ones will be too small to catch \n#\n# to find the bottoms of the Nasdad flip signal around the x axis and \n# repeat\n\n\n# import data that we've rotated to x axis to make stationary in time (see section 1 of above paper)\n# and scaled by taking the log\ndata = pd.read_csv('LeveledLogStockData.csv', index_col=0, parse_dates=True)\n\nfeatures = ['Nasdaq', 'S&P', 'Russell', 'DJIA', 'Gold', '1yr T', '10yr Bond']\ndata.columns = ['Nasdaq', 'S&P', 'Russell', 'DJIA', 'Gold', '1yr T', '10yr Bond']\n\n\n\nfor f in features:\n inverted_name = 'Flipped_' + f\n peaks_name = 'Peaks_' + f\n floors_name = 'Floors_' + f \n\n inverted_signal = data[f] * -1.\n\n peaks_ix = signal.find_peaks_cwt(data[f], np.arange(1, 253))\n peaks = np.zeros(len(data))\n for i in peaks_ix: peaks[i] = 1\n data[peaks_name] = peaks \n\n floor_ix = signal.find_peaks_cwt(inverted_signal, np.arange(1, 253))\n floors = np.zeros(len(data))\n for i in floor_ix: floors[i] = 1 \n data[floors_name] = floors \n\n\n\ninflection_dates = ['Peaks_Nasdaq', 'Floors_Nasdaq','Peaks_S&P', 'Floors_S&P', 'Peaks_Russell', 'Floors_Russell', 'Peaks_DJIA', \n 'Floors_DJIA', 'Peaks_Gold', 'Floors_Gold', 'Peaks_1yr T', 'Floors_1yr T', 'Peaks_10yr Bond', 'Floors_10yr Bond']\n\n\ndata[inflection_dates].to_csv(\"inflectionDates.csv\") \n\n\n\n\n\nplt.figure(figsize=(16,16))\nplt.plot(data['Nasdaq'])\nplt.plot(data['Peaks_Nasdaq'], c='green')\nplt.plot(data['Floors_Nasdaq'], c='red')\nplt.savefig('Inflection_dates_nasdaq.png')\nplt.show()\n\n\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
]
] |
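A minimal sketch (illustrative, on a synthetic sine rather than index data) of the flip-the-signal trick used above to get floors as well as peaks; it uses `scipy.signal.find_peaks`, the simpler non-wavelet cousin of the `find_peaks_cwt` call in the script:

```python
import numpy as np
from scipy import signal

t = np.linspace(0, 4 * np.pi, 500)
x = np.sin(t)

peak_ix, _ = signal.find_peaks(x)    # local maxima of the series
floor_ix, _ = signal.find_peaks(-x)  # maxima of the negated series are the minima of the original

print(np.round(t[peak_ix], 2))   # ~ [pi/2, 5*pi/2]
print(np.round(t[floor_ix], 2))  # ~ [3*pi/2, 7*pi/2]
```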
codezakh/ALBEF | [
"16aee1da1b7682afcd5a5f1ded74fc8dc199a8cf"
] | [
"models/unified/contrastive.py"
] | [
"'''\n * Copyright (c) 2021, salesforce.com, inc.\n * All rights reserved.\n * SPDX-License-Identifier: BSD-3-Clause\n * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n'''\n\nfrom functools import partial\nfrom multiprocessing.sharedctypes import Value\nfrom models.vit import VisionTransformer, interpolate_pos_embed\nfrom models.med import BertConfig, BertForMaskedLM\n# from models.modality_wise_ln_med import BertConfig, BertForMaskedLM\nfrom enum import Enum\nfrom omegaconf import OmegaConf\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nimport numpy as np\nimport random\n\n\nclass VisionLanguageLearner(nn.Module):\n def __init__(self, \n text_encoder = None,\n tokenizer = None,\n config = None, \n temp = 0.07,\n init_deit = True\n ):\n super().__init__()\n \n self.tokenizer = tokenizer \n self.mlm_probability = config['mlm_probability']\n embed_dim = config['embed_dim']\n \n self.visual_encoder = VisionTransformer(\n img_size=config['image_res'], patch_size=16, embed_dim=768, depth=1, num_heads=12, \n mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) \n \n if init_deit:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth\",\n map_location=\"cpu\", check_hash=True)\n state_dict = checkpoint[\"model\"]\n pos_embed_reshaped = interpolate_pos_embed(state_dict['pos_embed'], self.visual_encoder)\n state_dict['pos_embed'] = pos_embed_reshaped\n msg = self.visual_encoder.load_state_dict(state_dict,strict=False)\n print(f'missing_keys={msg.missing_keys}\\tunexpected_keys={msg.unexpected_keys}') \n \n vision_width = config['vision_width'] \n bert_config = BertConfig.from_dict(OmegaConf.to_container(config.bert_config))\n \n self.text_encoder = BertForMaskedLM.from_pretrained(text_encoder, config=bert_config) \n\n text_width = self.text_encoder.config.hidden_size\n self.vision_proj = nn.Linear(vision_width, embed_dim)\n self.text_proj = nn.Linear(text_width, embed_dim) \n\n self.temp = nn.Parameter(torch.ones([]) * config['temp']) \n self.queue_size = config['queue_size']\n self.momentum = config['momentum'] \n\n # Hardcoded from DALL-E's D-VAE.\n vocab_size = 8192\n self.mim_head = nn.Linear(self.visual_encoder.embed_dim, vocab_size)\n\n # create momentum models\n self.text_encoder_m = BertForMaskedLM.from_pretrained(text_encoder, config=bert_config) \n self.vision_proj_m = nn.Linear(vision_width, embed_dim)\n self.text_proj_m = nn.Linear(text_width, embed_dim) \n self.visual_encoder_m = VisionTransformer(\n img_size=config['image_res'], patch_size=16, embed_dim=768, depth=1, num_heads=12, \n mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) \n \n self.model_pairs = [\n (self.visual_encoder, self.visual_encoder_m),\n (self.text_encoder, self.text_encoder_m),\n (self.text_proj, self.text_proj_m),\n (self.vision_proj, self.vision_proj_m)\n ]\n \n self.copy_params()\n\n # create the queue\n self.register_buffer(\"image_queue\", torch.randn(embed_dim, self.queue_size))\n self.register_buffer(\"text_queue\", torch.randn(embed_dim, self.queue_size))\n self.register_buffer(\"queue_ptr\", torch.zeros(1, dtype=torch.long)) \n \n self.image_queue = nn.functional.normalize(self.image_queue, dim=0)\n self.text_queue = nn.functional.normalize(self.text_queue, dim=0)\n\n\n def forward(self, image, text, visual_token_ids, masked_visual_token_pos, masked_visual_tok_labels, alpha=0, return_dict=False):\n 
with torch.no_grad():\n self.temp.clamp_(0.001,0.5)\n\n ## ================ ITA ====================== ##\n image_embeds = self.visual_encoder(image) \n image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)\n image_embeds = self.text_encoder.bert(\n inputs_embeds=image_embeds, \n attention_mask=image_atts,\n return_dict=True,\n mode='image'\n )\n image_embeds = image_embeds.last_hidden_state\n image_feat = F.normalize(self.vision_proj(image_embeds[:,0,:]),dim=-1) \n\n text_output = self.text_encoder.bert(text.input_ids, attention_mask = text.attention_mask, \n return_dict = True, mode = 'text') \n text_embeds = text_output.last_hidden_state\n text_feat = F.normalize(self.text_proj(text_embeds[:,0,:]),dim=-1) \n\n \n with torch.no_grad():\n self._momentum_update()\n\n image_embeds_m = self.visual_encoder_m(image) \n image_embeds_m = self.text_encoder.bert(\n inputs_embeds=image_embeds_m, \n attention_mask=image_atts,\n return_dict=True,\n mode='image'\n )\n image_embeds_m = image_embeds_m.last_hidden_state\n image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:,0,:]),dim=-1) \n image_feat_all = torch.cat([image_feat_m.t(),self.image_queue.clone().detach()],dim=1) \n\n text_output_m = self.text_encoder_m.bert(text.input_ids, attention_mask = text.attention_mask, \n return_dict = True, mode = 'text') \n text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:,0,:]),dim=-1) \n text_feat_all = torch.cat([text_feat_m.t(),self.text_queue.clone().detach()],dim=1)\n\n sim_i2t_m = image_feat_m @ text_feat_all / self.temp\n sim_t2i_m = text_feat_m @ image_feat_all / self.temp\n\n sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)\n sim_targets.fill_diagonal_(1)\n\n sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets\n sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets \n\n sim_i2t = image_feat @ text_feat_all / self.temp \n sim_t2i = text_feat @ image_feat_all / self.temp \n\n loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1)*sim_i2t_targets,dim=1).mean()\n loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1)*sim_t2i_targets,dim=1).mean() \n\n loss_ita = (loss_i2t+loss_t2i)/2\n self._dequeue_and_enqueue(image_feat_m, text_feat_m)\n\n \n ##================= MLM ========================## \n input_ids = text.input_ids.clone()\n labels = input_ids.clone()\n\n probability_matrix = torch.full(labels.shape, self.mlm_probability) \n input_ids, labels = self.mask(input_ids, self.text_encoder.config.vocab_size, image.device, targets=labels,\n probability_matrix = probability_matrix) \n \n with torch.no_grad():\n logits_m = self.text_encoder_m(input_ids, \n attention_mask = text.attention_mask,\n return_dict = True,\n return_logits = True,\n mode='text'\n ) \n mlm_output = self.text_encoder(input_ids, \n attention_mask = text.attention_mask,\n return_dict = True,\n labels = labels, \n soft_labels = F.softmax(logits_m,dim=-1),\n alpha = alpha,\n mode='text'\n ) \n loss_mlm = mlm_output.loss \n\n\n ##================= MIM ========================##\n post_mask_image_embeds = self.visual_encoder(image, masked_visual_token_pos)\n image_atts = torch.ones(post_mask_image_embeds.size()[:-1],dtype=torch.long).to(image.device)\n post_mask_cross_embeds = self.text_encoder.bert(\n inputs_embeds=post_mask_image_embeds, \n attention_mask=image_atts,\n return_dict=True,\n mode='image'\n )\n # Drop the CLS token, because we don't mask it.\n post_mask_cross_embeds = 
post_mask_cross_embeds.last_hidden_state[:, 1:]\n predicted_visual_tokens = self.mim_head(post_mask_cross_embeds)\n loss_mim = F.cross_entropy(\n input=predicted_visual_tokens[masked_visual_token_pos], \n target=masked_visual_tok_labels\n )\n\n if return_dict:\n return {\n 'losses': {\n 'loss_ita': loss_ita,\n 'loss_mlm': loss_mlm,\n 'loss_mim': loss_mim\n }\n }\n\n return loss_mlm, loss_mim, loss_ita\n\n \n\n @torch.no_grad() \n def copy_params(self):\n for model_pair in self.model_pairs: \n for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):\n param_m.data.copy_(param.data) # initialize\n param_m.requires_grad = False # not update by gradient \n\n \n @torch.no_grad() \n def _momentum_update(self):\n for model_pair in self.model_pairs: \n for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):\n param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)\n \n \n \n @torch.no_grad()\n def _dequeue_and_enqueue(self, image_feat, text_feat):\n # gather keys before updating queue\n image_feats = concat_all_gather(image_feat)\n text_feats = concat_all_gather(text_feat)\n\n batch_size = image_feats.shape[0]\n\n ptr = int(self.queue_ptr)\n assert self.queue_size % batch_size == 0 # for simplicity\n\n # replace the keys at ptr (dequeue and enqueue)\n self.image_queue[:, ptr:ptr + batch_size] = image_feats.T\n self.text_queue[:, ptr:ptr + batch_size] = text_feats.T\n ptr = (ptr + batch_size) % self.queue_size # move pointer\n\n self.queue_ptr[0] = ptr \n \n \n def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None):\n if masked_indices is None: \n masked_indices = torch.bernoulli(probability_matrix).bool()\n \n masked_indices[input_ids == self.tokenizer.pad_token_id] = False\n masked_indices[input_ids == self.tokenizer.cls_token_id] = False\n \n if targets is not None:\n targets[~masked_indices] = -100 # We only compute loss on masked tokens \n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices\n input_ids[indices_replaced] = self.tokenizer.mask_token_id\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device)\n input_ids[indices_random] = random_words[indices_random] \n # The rest of the time (10% of the time) we keep the masked input tokens unchanged \n \n if targets is not None:\n return input_ids, targets\n else:\n return input_ids\n \n\[email protected]_grad()\ndef concat_all_gather(tensor):\n \"\"\"\n Performs all_gather operation on the provided tensors.\n *** Warning ***: torch.distributed.all_gather has no gradient.\n \"\"\"\n tensors_gather = [torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output\n\n"
] | [
[
"torch.ones_like",
"torch.distributed.all_gather",
"torch.ones",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.hub.load_state_dict_from_url",
"torch.randint",
"torch.distributed.get_world_size",
"torch.randn",
"torch.nn.functional.normalize",
"torch.nn.functional.softmax",
"torch.no_grad",
"torch.full",
"torch.nn.functional.cross_entropy",
"torch.zeros",
"torch.cat",
"torch.bernoulli"
]
] |
jbrockmendel/sm2 | [
"c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf"
] | [
"sm2/regression/tests/test_cov.py"
] | [
"\"\"\"Example: minimal OLS\n\n\"\"\"\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_allclose\n\nimport sm2.api as sm\n\n\[email protected]_vetted\ndef test_HC_use():\n np.random.seed(0)\n nsample = 100\n x = np.linspace(0, 10, 100)\n X = sm.add_constant(np.column_stack((x, x**2)), prepend=False)\n beta = np.array([1, 0.1, 10])\n y = np.dot(X, beta) + np.random.normal(size=nsample)\n\n results = sm.OLS(y, X).fit()\n\n # test cov_params\n idx = np.array([1, 2])\n # need to call HC0_se to have cov_HC0 available\n results.HC0_se\n cov12 = results.cov_params(column=[1, 2], cov_p=results.cov_HC0)\n assert_almost_equal(cov12,\n results.cov_HC0[idx[:, None], idx],\n decimal=15)\n\n # test t_test\n tvals = results.params / results.HC0_se\n ttest = results.t_test(np.eye(3), cov_p=results.cov_HC0)\n assert_almost_equal(ttest.tvalue,\n tvals,\n decimal=14)\n\n assert_almost_equal(ttest.sd,\n results.HC0_se,\n decimal=14)\n\n # test f_test\n ftest = results.f_test(np.eye(3)[:-1], cov_p=results.cov_HC0)\n slopes = results.params[:-1]\n idx = np.array([0, 1])\n cov_slopes = results.cov_HC0[idx[:, None], idx]\n fval = np.dot(slopes, np.dot(np.linalg.inv(cov_slopes), slopes)) / len(idx)\n assert_allclose(ftest.fvalue, fval, rtol=12)\n"
] | [
[
"numpy.testing.assert_almost_equal",
"numpy.eye",
"numpy.linalg.inv",
"numpy.random.seed",
"numpy.column_stack",
"numpy.random.normal",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.dot",
"numpy.linspace"
]
] |
wguanicedew/qiskit-terra | [
"f8e4fcb53e328b8b17762fc8df0a8d0a44da8d9a"
] | [
"qiskit/extensions/standard/u1.py"
] | [
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nDiagonal single qubit gate.\n\"\"\"\nimport numpy\nfrom qiskit.circuit import ControlledGate\nfrom qiskit.circuit import Gate\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit import QuantumRegister\n\n\n# pylint: disable=cyclic-import\nclass U1Gate(Gate):\n \"\"\"Diagonal single-qubit gate.\"\"\"\n\n def __init__(self, theta, label=None):\n \"\"\"Create new diagonal single-qubit gate.\"\"\"\n super().__init__(\"u1\", 1, [theta], label=label)\n\n def _define(self):\n from qiskit.extensions.standard.u3 import U3Gate\n definition = []\n q = QuantumRegister(1, \"q\")\n rule = [\n (U3Gate(0, 0, self.params[0]), [q[0]], [])\n ]\n for inst in rule:\n definition.append(inst)\n self.definition = definition\n\n def control(self, num_ctrl_qubits=1, label=None):\n \"\"\"Controlled version of this gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n if num_ctrl_qubits == 1:\n return Cu1Gate(*self.params)\n return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label)\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n return U1Gate(-self.params[0])\n\n def to_matrix(self):\n \"\"\"Return a Numpy.array for the U3 gate.\"\"\"\n lam = self.params[0]\n lam = float(lam)\n return numpy.array([[1, 0], [0, numpy.exp(1j * lam)]], dtype=complex)\n\n\ndef u1(self, theta, q): # pylint: disable=invalid-name\n \"\"\"Apply u1 with angle theta to q.\"\"\"\n return self.append(U1Gate(theta), [q], [])\n\n\nQuantumCircuit.u1 = u1\n\n\nclass Cu1Gate(ControlledGate):\n \"\"\"controlled-u1 gate.\"\"\"\n\n def __init__(self, theta):\n \"\"\"Create new cu1 gate.\"\"\"\n super().__init__(\"cu1\", 2, [theta], num_ctrl_qubits=1)\n self.base_gate = U1Gate(theta)\n\n def _define(self):\n \"\"\"\n gate cu1(lambda) a,b\n { u1(lambda/2) a; cx a,b;\n u1(-lambda/2) b; cx a,b;\n u1(lambda/2) b;\n }\n \"\"\"\n from qiskit.extensions.standard.x import CnotGate\n definition = []\n q = QuantumRegister(2, \"q\")\n rule = [\n (U1Gate(self.params[0] / 2), [q[0]], []),\n (CnotGate(), [q[0], q[1]], []),\n (U1Gate(-self.params[0] / 2), [q[1]], []),\n (CnotGate(), [q[0], q[1]], []),\n (U1Gate(self.params[0] / 2), [q[1]], [])\n ]\n for inst in rule:\n definition.append(inst)\n self.definition = definition\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n return Cu1Gate(-self.params[0])\n\n\ndef cu1(self, theta, ctl, tgt):\n \"\"\"Apply cu1 from ctl to tgt with angle theta.\"\"\"\n return self.append(Cu1Gate(theta), [ctl, tgt], [])\n\n\nQuantumCircuit.cu1 = cu1\n"
] | [
[
"numpy.exp"
]
] |
ptigwe/treex | [
"c46687376ccc50c8fea6cb8617e22e4b4dd1924a"
] | [
"treex/metrics/metrics.py"
] | [
"import typing as tp\n\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport treeo as to\n\nfrom treex import types, utils\nfrom treex.metrics.metric import Metric\n\n\nclass Metrics(Metric):\n metrics: tp.Dict[str, Metric]\n\n def __init__(\n self,\n metrics: tp.Any,\n on: tp.Optional[types.IndexLike] = None,\n name: tp.Optional[str] = None,\n dtype: tp.Optional[jnp.dtype] = None,\n ):\n super().__init__(on=on, name=name, dtype=dtype)\n\n names: tp.Set[str] = set()\n\n def get_name(path, metric):\n name = utils._get_name(metric)\n return f\"{path}/{name}\" if path else name\n\n self.metrics = {\n utils._unique_name(names, get_name(path, metric)): metric\n for path, metric in utils._flatten_names(metrics)\n }\n\n def update(self, **kwargs) -> None:\n for name, metric in self.metrics.items():\n arg_names = utils._function_argument_names(metric.update)\n\n if arg_names is None:\n metric_kwargs = kwargs\n else:\n metric_kwargs = {arg: kwargs[arg] for arg in arg_names if arg in kwargs}\n\n metric.update(**metric_kwargs)\n\n def compute(self) -> tp.Dict[str, jnp.ndarray]:\n outputs = {}\n names = set()\n\n for name, metric in self.metrics.items():\n\n value = metric.compute()\n\n for path, value in utils._flatten_names(value):\n name = f\"{name}/{path}\" if path else name\n name = utils._unique_name(names, name)\n\n outputs[name] = value\n\n return outputs\n\n def __call__(self, **kwargs) -> tp.Dict[str, jnp.ndarray]:\n return super().__call__(**kwargs)\n\n\nclass AuxMetrics(Metric):\n totals: tp.Dict[str, jnp.ndarray] = types.MetricState.node()\n counts: tp.Dict[str, jnp.ndarray] = types.MetricState.node()\n\n def __init__(\n self,\n aux_metrics: tp.Any,\n on: tp.Optional[types.IndexLike] = None,\n name: tp.Optional[str] = None,\n dtype: tp.Optional[jnp.dtype] = None,\n ):\n super().__init__(on=on, name=name, dtype=dtype)\n logs = self.as_logs(aux_metrics)\n self.totals = {name: jnp.array(0.0, dtype=jnp.float32) for name in logs}\n self.counts = {name: jnp.array(0, dtype=jnp.uint32) for name in logs}\n\n def update(self, aux_metrics: tp.Any) -> None:\n logs = self.as_logs(aux_metrics)\n\n self.totals = {\n name: (self.totals[name] + logs[name]).astype(self.totals[name].dtype)\n for name in self.totals\n }\n self.counts = {\n name: (self.counts[name] + np.prod(logs[name].shape)).astype(\n self.counts[name].dtype\n )\n for name in self.counts\n }\n\n def compute(self) -> tp.Dict[str, jnp.ndarray]:\n return {name: self.totals[name] / self.counts[name] for name in self.totals}\n\n def __call__(self, aux_metrics: tp.Any) -> tp.Dict[str, jnp.ndarray]:\n return super().__call__(aux_metrics=aux_metrics)\n\n @staticmethod\n def metric_name(field_info: to.FieldInfo) -> str:\n return (\n field_info.value.name\n if isinstance(field_info.value, types.Named)\n else field_info.name\n if field_info.name is not None\n else \"aux_metric\"\n )\n\n def as_logs(self, tree: tp.Any) -> tp.Dict[str, jnp.ndarray]:\n\n names: tp.Set[str] = set()\n\n with to.add_field_info():\n fields_info: tp.List[to.FieldInfo] = jax.tree_flatten(\n tree,\n is_leaf=lambda x: isinstance(x, types.Named)\n and not isinstance(x.value, to.Nothing),\n )[0]\n\n # pretend Named values are leaves\n for i, x in enumerate(fields_info):\n if isinstance(x, types.Named):\n field_info = x.value\n field_info.value = types.Named(x.name, field_info.value)\n fields_info[i] = field_info\n\n metrics = {\n self.metric_name(field_info): field_info.value.value\n if isinstance(field_info.value, types.Named)\n else field_info.value\n for field_info 
in fields_info\n }\n metrics = {\n utils._unique_name(names, name): value for name, value in metrics.items()\n }\n\n return metrics\n"
] | [
[
"numpy.prod"
]
] |
danhartfiction/LedFx | [
"eaf40ab180ef7e8f4f769193b35b3ffd5fe2a340"
] | [
"ledfx/effects/drano(Reactive).py"
] | [
"from ledfx.effects.audio import AudioReactiveEffect, FREQUENCY_RANGES\nfrom ledfx.effects.colorrainbow import ColorRainbowEffect \nimport voluptuous as vol\nimport numpy as np\nimport time\nimport statistics\nimport requests\nimport threading\n\nclass DranoBeatAudioEffect(AudioReactiveEffect, ColorRainbowEffect):\n\n NAME = \"DranoBeat\"\n CONFIG_SCHEMA = vol.Schema({\n vol.Optional('frequency_range', description='Frequency range for the beat detection', default = 'bass'): vol.In(list(FREQUENCY_RANGES.keys())),\n })\n\n def config_updated(self, config):\n self._frequency_range = np.linspace(\n FREQUENCY_RANGES[self.config['frequency_range']].min,\n FREQUENCY_RANGES[self.config['frequency_range']].max,\n 20)\n\n def updateThread(self):\n self.getBeat()\n if not hasattr(self, 'beatThreadStart'):\n print(\"afa\")\n self.beatThreadStart = True\n threading.Timer(self.next_beat - time.time(), self.beatThread).start()\n self.i = 0\n threading.Timer(2, self.updateThread).start()\n\n def beatThread(self):\n self.i += 1\n print(\"BEAT {}!\".format(self.i))\n self.pixels = self.apply_rainbow(True)\n now = time.time()\n if self.next_beat - 60/self.bpm < now:\n self.next_beat += 60/self.bpm\n print(\"next in {}\".format(self.next_beat - now))\n threading.Timer(self.next_beat - now, self.beatThread).start()\n self.faderThreadStart = True\n threading.Timer(.1, self.fader).start()\n \n def fader(self):\n# print(\"fading\")\n self.pixels = np.zeros(shape=(self.pixel_count, 3))\n\n def getBeat(self):\n r = requests.get(\"http://127.0.0.1:5000/\")\n data = r.text.split(':')\n self.next_beat = float(data[0])\n self.bpm = float(data[1])\n# self.next_beat = time.time() + 1\n# self.bpm = 60\n\n def audio_data_updated(self, data):\n if not hasattr(self, 'colormap'):\n self.colormap = np.zeros(shape=(self.pixel_count, 3)) \n self.updateThread()\n"
] | [
[
"numpy.linspace",
"numpy.zeros"
]
] |
vanessagraber/bilby | [
"80ee2d123a913d881f2a790b04e2939c46584d27"
] | [
"bilby/core/prior.py"
] | [
"from __future__ import division\n\nimport os\nfrom collections import OrderedDict\nfrom future.utils import iteritems\n\nimport numpy as np\nimport scipy.stats\nfrom scipy.integrate import cumtrapz\nfrom scipy.interpolate import interp1d\nfrom scipy.special import erf, erfinv\n\n# Keep import bilby statement, it is necessary for some eval() statements\nimport bilby # noqa\nfrom .utils import logger, infer_args_from_method, check_directory_exists_and_if_not_mkdir\n\n\nclass PriorDict(OrderedDict):\n def __init__(self, dictionary=None, filename=None):\n \"\"\" A set of priors\n\n Parameters\n ----------\n dictionary: dict, None\n If given, a dictionary to generate the prior set.\n filename: str, None\n If given, a file containing the prior to generate the prior set.\n \"\"\"\n OrderedDict.__init__(self)\n if isinstance(dictionary, dict):\n self.from_dictionary(dictionary)\n elif type(dictionary) is str:\n logger.debug('Argument \"dictionary\" is a string.' +\n ' Assuming it is intended as a file name.')\n self.from_file(dictionary)\n elif type(filename) is str:\n self.from_file(filename)\n elif dictionary is not None:\n raise ValueError(\"PriorDict input dictionary not understood\")\n\n self.convert_floats_to_delta_functions()\n\n def to_file(self, outdir, label):\n \"\"\" Write the prior distribution to file.\n\n Parameters\n ----------\n outdir: str\n output directory name\n label: str\n Output file naming scheme\n \"\"\"\n\n check_directory_exists_and_if_not_mkdir(outdir)\n prior_file = os.path.join(outdir, \"{}.prior\".format(label))\n logger.debug(\"Writing priors to {}\".format(prior_file))\n with open(prior_file, \"w\") as outfile:\n for key in self.keys():\n outfile.write(\n \"{} = {}\\n\".format(key, self[key]))\n\n def from_file(self, filename):\n \"\"\" Reads in a prior from a file specification\n\n Parameters\n ----------\n filename: str\n Name of the file to be read in\n \"\"\"\n\n prior = {}\n with open(filename, 'r') as f:\n for line in f:\n if line[0] == '#':\n continue\n elements = line.split('=')\n key = elements[0].replace(' ', '')\n val = '='.join(elements[1:])\n prior[key] = eval(val)\n self.update(prior)\n\n def from_dictionary(self, dictionary):\n for key, val in iteritems(dictionary):\n if isinstance(val, str):\n try:\n prior = eval(val)\n if isinstance(prior, (Prior, float, int, str)):\n val = prior\n except (NameError, SyntaxError, TypeError):\n logger.debug(\n \"Failed to load dictionary value {} correctlty\"\n .format(key))\n pass\n self[key] = val\n\n def convert_floats_to_delta_functions(self):\n \"\"\" Convert all float parameters to delta functions \"\"\"\n for key in self:\n if isinstance(self[key], Prior):\n continue\n elif isinstance(self[key], float) or isinstance(self[key], int):\n self[key] = DeltaFunction(self[key])\n logger.debug(\n \"{} converted to delta function prior.\".format(key))\n else:\n logger.debug(\n \"{} cannot be converted to delta function prior.\"\n .format(key))\n\n def fill_priors(self, likelihood, default_priors_file=None):\n \"\"\"\n Fill dictionary of priors based on required parameters of likelihood\n\n Any floats in prior will be converted to delta function prior. 
Any\n required, non-specified parameters will use the default.\n\n Note: if `likelihood` has `non_standard_sampling_parameter_keys`, then\n this will set-up default priors for those as well.\n\n Parameters\n ----------\n likelihood: bilby.likelihood.GravitationalWaveTransient instance\n Used to infer the set of parameters to fill the prior with\n default_priors_file: str, optional\n If given, a file containing the default priors.\n\n\n Returns\n -------\n prior: dict\n The filled prior dictionary\n\n \"\"\"\n\n self.convert_floats_to_delta_functions()\n\n missing_keys = set(likelihood.parameters) - set(self.keys())\n\n for missing_key in missing_keys:\n if not self.test_redundancy(missing_key):\n default_prior = create_default_prior(missing_key, default_priors_file)\n if default_prior is None:\n set_val = likelihood.parameters[missing_key]\n logger.warning(\n \"Parameter {} has no default prior and is set to {}, this\"\n \" will not be sampled and may cause an error.\"\n .format(missing_key, set_val))\n else:\n self[missing_key] = default_prior\n\n for key in self:\n self.test_redundancy(key)\n\n def sample(self, size=None):\n \"\"\"Draw samples from the prior set\n\n Parameters\n ----------\n size: int or tuple of ints, optional\n See numpy.random.uniform docs\n\n Returns\n -------\n dict: Dictionary of the samples\n \"\"\"\n return self.sample_subset(keys=self.keys(), size=size)\n\n def sample_subset(self, keys=iter([]), size=None):\n \"\"\"Draw samples from the prior set for parameters which are not a DeltaFunction\n\n Parameters\n ----------\n keys: list\n List of prior keys to draw samples from\n size: int or tuple of ints, optional\n See numpy.random.uniform docs\n\n Returns\n -------\n dict: Dictionary of the drawn samples\n \"\"\"\n self.convert_floats_to_delta_functions()\n samples = dict()\n for key in keys:\n if isinstance(self[key], Prior):\n samples[key] = self[key].sample(size=size)\n else:\n logger.debug('{} not a known prior.'.format(key))\n return samples\n\n def prob(self, sample, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n sample: dict\n Dictionary of the samples of which we want to have the probability of\n kwargs:\n The keyword arguments are passed directly to `np.product`\n\n Returns\n -------\n float: Joint probability of all individual sample probabilities\n\n \"\"\"\n return np.product([self[key].prob(sample[key]) for key in sample], **kwargs)\n\n def ln_prob(self, sample):\n \"\"\"\n\n Parameters\n ----------\n sample: dict\n Dictionary of the samples of which we want to have the log probability of\n\n Returns\n -------\n float: Joint log probability of all the individual sample probabilities\n\n \"\"\"\n return np.sum([self[key].ln_prob(sample[key]) for key in sample])\n\n def rescale(self, keys, theta):\n \"\"\"Rescale samples from unit cube to prior\n\n Parameters\n ----------\n keys: list\n List of prior keys to be rescaled\n theta: list\n List of randomly drawn values on a unit cube associated with the prior keys\n\n Returns\n -------\n list: List of floats containing the rescaled sample\n \"\"\"\n return [self[key].rescale(sample) for key, sample in zip(keys, theta)]\n\n def test_redundancy(self, key):\n \"\"\"Empty redundancy test, should be overwritten in subclasses\"\"\"\n return False\n\n\nclass PriorSet(PriorDict):\n\n def __init__(self, dictionary=None, filename=None):\n \"\"\" DEPRECATED: USE PriorDict INSTEAD\"\"\"\n logger.warning(\"The name 'PriorSet' is deprecated use 'PriorDict' instead\")\n super(PriorSet, self).__init__(dictionary, 
filename)\n\n\ndef create_default_prior(name, default_priors_file=None):\n \"\"\"Make a default prior for a parameter with a known name.\n\n Parameters\n ----------\n name: str\n Parameter name\n default_priors_file: str, optional\n If given, a file containing the default priors.\n\n Return\n ------\n prior: Prior\n Default prior distribution for that parameter, if unknown None is\n returned.\n \"\"\"\n\n if default_priors_file is None:\n logger.debug(\n \"No prior file given.\")\n prior = None\n else:\n default_priors = PriorDict(filename=default_priors_file)\n if name in default_priors.keys():\n prior = default_priors[name]\n else:\n logger.debug(\n \"No default prior found for variable {}.\".format(name))\n prior = None\n return prior\n\n\nclass Prior(object):\n _default_latex_labels = dict()\n\n def __init__(self, name=None, latex_label=None, unit=None, minimum=-np.inf,\n maximum=np.inf):\n \"\"\" Implements a Prior object\n\n Parameters\n ----------\n name: str, optional\n Name associated with prior.\n latex_label: str, optional\n Latex label associated with prior, used for plotting.\n unit: str, optional\n If given, a Latex string describing the units of the parameter.\n minimum: float, optional\n Minimum of the domain, default=-np.inf\n maximum: float, optional\n Maximum of the domain, default=np.inf\n\n \"\"\"\n self.name = name\n self.latex_label = latex_label\n self.unit = unit\n self.minimum = minimum\n self.maximum = maximum\n\n def __call__(self):\n \"\"\"Overrides the __call__ special method. Calls the sample method.\n\n Returns\n -------\n float: The return value of the sample method.\n \"\"\"\n return self.sample()\n\n def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if sorted(self.__dict__.keys()) != sorted(other.__dict__.keys()):\n return False\n for key in self.__dict__:\n if type(self.__dict__[key]) is np.ndarray:\n if not np.array_equal(self.__dict__[key], other.__dict__[key]):\n return False\n else:\n if not self.__dict__[key] == other.__dict__[key]:\n return False\n return True\n\n def sample(self, size=None):\n \"\"\"Draw a sample from the prior\n\n Parameters\n ----------\n size: int or tuple of ints, optional\n See numpy.random.uniform docs\n\n Returns\n -------\n float: A random number between 0 and 1, rescaled to match the distribution of this Prior\n\n \"\"\"\n return self.rescale(np.random.uniform(0, 1, size))\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the prior.\n\n This should be overwritten by each subclass.\n\n Parameters\n ----------\n val: float\n A random number between 0 and 1\n\n Returns\n -------\n None\n\n \"\"\"\n return None\n\n def prob(self, val):\n \"\"\"Return the prior probability of val, this should be overwritten\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n np.nan\n\n \"\"\"\n return np.nan\n\n def ln_prob(self, val):\n \"\"\"Return the prior ln probability of val, this should be overwritten\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n np.nan\n\n \"\"\"\n return np.log(self.prob(val))\n\n def is_in_prior_range(self, val):\n \"\"\"Returns True if val is in the prior boundaries, zero otherwise\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n np.nan\n\n \"\"\"\n return (val >= self.minimum) & (val <= self.maximum)\n\n @staticmethod\n def test_valid_for_rescaling(val):\n \"\"\"Test if 0 < val < 1\n\n Parameters\n ----------\n val: float\n\n Raises\n -------\n ValueError: If val is not between 0 and 1\n \"\"\"\n 
val = np.atleast_1d(val)\n tests = (val < 0) + (val > 1)\n if np.any(tests):\n raise ValueError(\"Number to be rescaled should be in [0, 1]\")\n\n def __repr__(self):\n \"\"\"Overrides the special method __repr__.\n\n Returns a representation of this instance that resembles how it is instantiated.\n Works correctly for all child classes\n\n Returns\n -------\n str: A string representation of this instance\n\n \"\"\"\n subclass_args = infer_args_from_method(self.__init__)\n prior_name = self.__class__.__name__\n\n property_names = [p for p in dir(self.__class__) if isinstance(getattr(self.__class__, p), property)]\n dict_with_properties = self.__dict__.copy()\n for key in property_names:\n dict_with_properties[key] = getattr(self, key)\n args = ', '.join(['{}={}'.format(key, repr(dict_with_properties[key])) for key in subclass_args])\n return \"{}({})\".format(prior_name, args)\n\n @property\n def is_fixed(self):\n \"\"\"\n Returns True if the prior is fixed and should not be used in the sampler. Does this by checking if this instance\n is an instance of DeltaFunction.\n\n\n Returns\n -------\n bool: Whether it's fixed or not!\n\n \"\"\"\n return isinstance(self, DeltaFunction)\n\n @property\n def latex_label(self):\n \"\"\"Latex label that can be used for plots.\n\n Draws from a set of default labels if no label is given\n\n Returns\n -------\n str: A latex representation for this prior\n\n \"\"\"\n return self.__latex_label\n\n @latex_label.setter\n def latex_label(self, latex_label=None):\n if latex_label is None:\n self.__latex_label = self.__default_latex_label\n else:\n self.__latex_label = latex_label\n\n @property\n def unit(self):\n return self.__unit\n\n @unit.setter\n def unit(self, unit):\n self.__unit = unit\n\n @property\n def latex_label_with_unit(self):\n \"\"\" If a unit is specifed, returns a string of the latex label and unit \"\"\"\n if self.unit is not None:\n return \"{} [{}]\".format(self.latex_label, self.unit)\n else:\n return self.latex_label\n\n @property\n def minimum(self):\n return self.__minimum\n\n @minimum.setter\n def minimum(self, minimum):\n self.__minimum = minimum\n\n @property\n def maximum(self):\n return self.__maximum\n\n @maximum.setter\n def maximum(self, maximum):\n self.__maximum = maximum\n\n @property\n def __default_latex_label(self):\n if self.name in self._default_latex_labels.keys():\n label = self._default_latex_labels[self.name]\n else:\n label = self.name\n return label\n\n\nclass DeltaFunction(Prior):\n\n def __init__(self, peak, name=None, latex_label=None, unit=None):\n \"\"\"Dirac delta function prior, this always returns peak.\n\n Parameters\n ----------\n peak: float\n Peak value of the delta function\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=peak, maximum=peak)\n self.peak = peak\n\n def rescale(self, val):\n \"\"\"Rescale everything to the peak with the correct shape.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Rescaled probability, equivalent to peak\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return self.peak * val ** 0\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: np.inf if val = peak, 0 otherwise\n\n \"\"\"\n at_peak = (val == self.peak)\n return np.nan_to_num(np.multiply(at_peak, np.inf))\n\n\nclass PowerLaw(Prior):\n\n def __init__(self, alpha, minimum, 
maximum, name=None, latex_label=None,\n unit=None):\n \"\"\"Power law with bounds and alpha, spectral index\n\n Parameters\n ----------\n alpha: float\n Power law exponent parameter\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label,\n minimum=minimum, maximum=maximum, unit=unit)\n self.alpha = alpha\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the power-law prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n\n Parameters\n ----------\n val: float\n Uniform probability\n\n Returns\n -------\n float: Rescaled probability\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n if self.alpha == -1:\n return self.minimum * np.exp(val * np.log(self.maximum / self.minimum))\n else:\n return (self.minimum ** (1 + self.alpha) + val *\n (self.maximum ** (1 + self.alpha) - self.minimum ** (1 + self.alpha))) ** (1. / (1 + self.alpha))\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n if self.alpha == -1:\n return np.nan_to_num(1 / val / np.log(self.maximum / self.minimum)) * self.is_in_prior_range(val)\n else:\n return np.nan_to_num(val ** self.alpha * (1 + self.alpha) /\n (self.maximum ** (1 + self.alpha) -\n self.minimum ** (1 + self.alpha))) * self.is_in_prior_range(val)\n\n def ln_prob(self, val):\n \"\"\"Return the logarithmic prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float:\n\n \"\"\"\n if self.alpha == -1:\n normalising = 1. / np.log(self.maximum / self.minimum)\n else:\n normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha) -\n self.minimum ** (1 + self.alpha))\n\n return (self.alpha * np.log(val) + np.log(normalising)) + np.log(1. 
* self.is_in_prior_range(val))\n\n\nclass Uniform(Prior):\n\n def __init__(self, minimum, maximum, name=None, latex_label=None,\n unit=None):\n \"\"\"Uniform prior with bounds\n\n Parameters\n ----------\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label,\n minimum=minimum, maximum=maximum, unit=unit)\n\n def rescale(self, val):\n Prior.test_valid_for_rescaling(val)\n return self.minimum + val * (self.maximum - self.minimum)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return scipy.stats.uniform.pdf(val, loc=self.minimum,\n scale=self.maximum - self.minimum)\n\n def ln_prob(self, val):\n \"\"\"Return the log prior probability of val\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: log probability of val\n \"\"\"\n return scipy.stats.uniform.logpdf(val, loc=self.minimum,\n scale=self.maximum - self.minimum)\n\n\nclass LogUniform(PowerLaw):\n\n def __init__(self, minimum, maximum, name=None, latex_label=None,\n unit=None):\n \"\"\"Log-Uniform prior with bounds\n\n Parameters\n ----------\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n PowerLaw.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum, alpha=-1)\n if self.minimum <= 0:\n logger.warning('You specified a uniform-in-log prior with minimum={}'.format(self.minimum))\n\n\nclass Cosine(Prior):\n\n def __init__(self, name=None, latex_label=None, unit=None,\n minimum=-np.pi / 2, maximum=np.pi / 2):\n \"\"\"Cosine prior with bounds\n\n Parameters\n ----------\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to a uniform in cosine prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return np.arcsin(-1 + val * 2)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val. Defined over [-pi/2, pi/2].\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return np.cos(val) / 2 * self.is_in_prior_range(val)\n\n\nclass Sine(Prior):\n\n def __init__(self, name=None, latex_label=None, unit=None, minimum=0,\n maximum=np.pi):\n \"\"\"Sine prior with bounds\n\n Parameters\n ----------\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to a uniform in sine prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return np.arccos(1 - val * 2)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val. 
Defined over [0, pi].\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return np.sin(val) / 2 * self.is_in_prior_range(val)\n\n\nclass Gaussian(Prior):\n\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):\n \"\"\"Gaussian prior with mean mu and width sigma\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)\n self.mu = mu\n self.sigma = sigma\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Gaussian prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return self.mu + erfinv(2 * val - 1) * 2 ** 0.5 * self.sigma\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (2 * np.pi) ** 0.5 / self.sigma\n\n def ln_prob(self, val):\n return -0.5 * ((self.mu - val) ** 2 / self.sigma ** 2 + np.log(2 * np.pi * self.sigma ** 2))\n\n\nclass Normal(Gaussian):\n\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):\n \"\"\"A synonym for the Gaussian distribution.\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma: float\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Gaussian.__init__(self, mu=mu, sigma=sigma, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass TruncatedGaussian(Prior):\n\n def __init__(self, mu, sigma, minimum, maximum, name=None,\n latex_label=None, unit=None):\n \"\"\"Truncated Gaussian prior with mean mu and width sigma\n\n https://en.wikipedia.org/wiki/Truncated_normal_distribution\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum)\n self.mu = mu\n self.sigma = sigma\n\n @property\n def normalisation(self):\n \"\"\" Calculates the proper normalisation of the truncated Gaussian\n\n Returns\n -------\n float: Proper normalisation of the truncated Gaussian\n \"\"\"\n return (erf((self.maximum - self.mu) / 2 ** 0.5 / self.sigma) - erf(\n (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) / 2\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate truncated Gaussian prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return erfinv(2 * val * self.normalisation + erf(\n (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) * 2 ** 0.5 * self.sigma + self.mu\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (\n 2 * np.pi) ** 0.5 / self.sigma / self.normalisation * self.is_in_prior_range(val)\n\n\nclass TruncatedNormal(TruncatedGaussian):\n\n def __init__(self, mu, sigma, minimum, maximum, name=None,\n latex_label=None, unit=None):\n \"\"\"A synonym for the TruncatedGaussian distribution.\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n TruncatedGaussian.__init__(self, mu=mu, sigma=sigma, minimum=minimum,\n maximum=maximum, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass HalfGaussian(TruncatedGaussian):\n def __init__(self, sigma, name=None, latex_label=None, unit=None):\n \"\"\"A Gaussian with its mode at zero, and truncated to only be positive.\n\n Parameters\n ----------\n sigma: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n TruncatedGaussian.__init__(self, 0., sigma, minimum=0., maximum=np.inf,\n name=name, latex_label=latex_label,\n unit=unit)\n\n\nclass HalfNormal(HalfGaussian):\n def __init__(self, sigma, name=None, latex_label=None, unit=None):\n \"\"\"A synonym for the HalfGaussian distribution.\n\n Parameters\n ----------\n sigma: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n HalfGaussian.__init__(self, sigma=sigma, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass LogNormal(Prior):\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):\n \"\"\"Log-normal prior with mean mu and width sigma\n\n https://en.wikipedia.org/wiki/Log-normal_distribution\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,\n unit=unit)\n\n if sigma <= 0.:\n raise ValueError(\"For the LogGaussian prior the standard deviation must be positive\")\n\n self.mu = mu\n self.sigma = sigma\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate LogNormal prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return scipy.stats.lognorm.ppf(val, self.sigma, scale=np.exp(self.mu))\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n\n return scipy.stats.lognorm.pdf(val, self.sigma, scale=np.exp(self.mu))\n\n def ln_prob(self, val):\n return scipy.stats.lognorm.logpdf(val, self.sigma, scale=np.exp(self.mu))\n\n\nclass LogGaussian(LogNormal):\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):\n \"\"\"Synonym of LogNormal prior\n\n https://en.wikipedia.org/wiki/Log-normal_distribution\n\n Parameters\n ----------\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n LogNormal.__init__(self, mu=mu, sigma=sigma, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass Exponential(Prior):\n def __init__(self, mu, name=None, latex_label=None, unit=None):\n \"\"\"Exponential prior with mean mu\n\n Parameters\n ----------\n mu: float\n Mean of the Exponential prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,\n unit=unit)\n self.mu = mu\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Exponential prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n return scipy.stats.expon.ppf(val, scale=self.mu)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n\n return scipy.stats.expon.pdf(val, scale=self.mu)\n\n def ln_prob(self, val):\n return scipy.stats.expon.logpdf(val, scale=self.mu)\n\n\nclass StudentT(Prior):\n def __init__(self, df, mu=0., scale=1., name=None, latex_label=None,\n unit=None):\n \"\"\"Student's t-distribution prior with number of degrees of freedom df,\n mean mu and scale\n\n https://en.wikipedia.org/wiki/Student%27s_t-distribution#Generalized_Student's_t-distribution\n\n Parameters\n ----------\n df: float\n Number of degrees of freedom for distribution\n mu: float\n Mean of the Student's t-prior\n scale:\n Width of the Student's t-prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)\n\n if df <= 0. or scale <= 0.:\n raise ValueError(\"For the StudentT prior the number of degrees of freedom and scale must be positive\")\n\n self.df = df\n self.mu = mu\n self.scale = scale\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Student's t-prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.t.ppf(val, self.df, loc=self.mu, scale=self.scale)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return scipy.stats.t.pdf(val, self.df, loc=self.mu, scale=self.scale)\n\n def ln_prob(self, val):\n return scipy.stats.t.logpdf(val, self.df, loc=self.mu, scale=self.scale)\n\n\nclass Beta(Prior):\n def __init__(self, alpha, beta, minimum=0, maximum=1, name=None,\n latex_label=None, unit=None):\n \"\"\"Beta distribution\n\n https://en.wikipedia.org/wiki/Beta_distribution\n\n This wraps around\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html\n\n Parameters\n ----------\n alpha: float\n first shape parameter\n beta: float\n second shape parameter\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n Prior.__init__(self, minimum=minimum, maximum=maximum, name=name,\n latex_label=latex_label, unit=unit)\n\n if alpha <= 0. or beta <= 0.:\n raise ValueError(\"alpha and beta must both be positive values\")\n\n self.alpha = alpha\n self.beta = beta\n self._loc = minimum\n self._scale = maximum - minimum\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Beta prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.beta.ppf(\n val, self.alpha, self.beta, loc=self._loc, scale=self._scale)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n\n spdf = scipy.stats.beta.pdf(\n val, self.alpha, self.beta, loc=self._loc, scale=self._scale)\n if np.all(np.isfinite(spdf)):\n return spdf\n\n # deal with the fact that if alpha or beta are < 1 you get infinities at 0 and 1\n if isinstance(val, np.ndarray):\n pdf = np.zeros(len(val))\n pdf[np.isfinite(spdf)] = spdf[np.isfinite]\n return spdf\n else:\n return 0.\n\n def ln_prob(self, val):\n spdf = scipy.stats.beta.logpdf(\n val, self.alpha, self.beta, loc=self._loc, scale=self._scale)\n if np.all(np.isfinite(spdf)):\n return spdf\n\n if isinstance(val, np.ndarray):\n pdf = -np.inf * np.ones(len(val))\n pdf[np.isfinite(spdf)] = spdf[np.isfinite]\n return spdf\n else:\n return -np.inf\n\n\nclass Logistic(Prior):\n def __init__(self, mu, scale, name=None, latex_label=None, unit=None):\n \"\"\"Logistic distribution\n\n https://en.wikipedia.org/wiki/Logistic_distribution\n\n Parameters\n ----------\n mu: float\n Mean of the distribution\n scale: float\n Width of the distribution\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)\n\n if scale <= 0.:\n raise ValueError(\"For the Logistic prior the scale must be positive\")\n\n self.mu = mu\n self.scale = scale\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Logistic prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.logistic.ppf(val, loc=self.mu, scale=self.scale)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return scipy.stats.logistic.pdf(val, loc=self.mu, scale=self.scale)\n\n def ln_prob(self, val):\n return scipy.stats.logistic.logpdf(val, loc=self.mu, scale=self.scale)\n\n\nclass Cauchy(Prior):\n def __init__(self, alpha, beta, name=None, latex_label=None, unit=None):\n \"\"\"Cauchy distribution\n\n https://en.wikipedia.org/wiki/Cauchy_distribution\n\n Parameters\n ----------\n alpha: float\n Location parameter\n beta: float\n Scale parameter\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)\n\n if beta <= 0.:\n raise ValueError(\"For the Cauchy prior the scale must be positive\")\n\n self.alpha = alpha\n self.beta = beta\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Cauchy prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.cauchy.ppf(val, loc=self.alpha, scale=self.beta)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return scipy.stats.cauchy.pdf(val, loc=self.alpha, scale=self.beta)\n\n def ln_prob(self, val):\n return scipy.stats.cauchy.logpdf(val, loc=self.alpha, scale=self.beta)\n\n\nclass Lorentzian(Cauchy):\n def __init__(self, alpha, beta, name=None, latex_label=None, unit=None):\n \"\"\"Synonym for the Cauchy distribution\n\n https://en.wikipedia.org/wiki/Cauchy_distribution\n\n Parameters\n ----------\n alpha: float\n Location parameter\n beta: float\n Scale parameter\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Cauchy.__init__(self, alpha=alpha, beta=beta, name=name,\n latex_label=latex_label, unit=unit)\n\n\nclass Gamma(Prior):\n def __init__(self, k, theta=1., name=None, latex_label=None, unit=None):\n \"\"\"Gamma distribution\n\n https://en.wikipedia.org/wiki/Gamma_distribution\n\n Parameters\n ----------\n k: float\n The shape parameter\n theta: float\n The scale parameter\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,\n unit=unit)\n\n if k <= 0 or theta <= 0:\n raise ValueError(\"For the Gamma prior the shape and scale must be positive\")\n\n self.k = k\n self.theta = theta\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Gamma prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n\n # use scipy distribution percentage point function (ppf)\n return scipy.stats.gamma.ppf(val, self.k, loc=0., scale=self.theta)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n\n return scipy.stats.gamma.pdf(val, self.k, loc=0., scale=self.theta)\n\n def ln_prob(self, val):\n return scipy.stats.gamma.logpdf(val, self.k, loc=0., scale=self.theta)\n\n\nclass ChiSquared(Gamma):\n def __init__(self, nu, name=None, latex_label=None, unit=None):\n \"\"\"Chi-squared distribution\n\n https://en.wikipedia.org/wiki/Chi-squared_distribution\n\n Parameters\n ----------\n nu: int\n Number of degrees of freedom\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n\n if nu <= 0 or not isinstance(nu, int):\n raise ValueError(\"For the ChiSquared prior the number of degrees of freedom must be a positive integer\")\n\n Gamma.__init__(self, name=name, k=nu / 2., theta=2.,\n latex_label=latex_label, unit=unit)\n\n @property\n def nu(self):\n return int(self.k * 2)\n\n @nu.setter\n def nu(self, nu):\n self.k = nu / 2.\n\n\nclass Interped(Prior):\n\n def __init__(self, xx, yy, minimum=np.nan, maximum=np.nan, name=None,\n latex_label=None, unit=None):\n \"\"\"Creates an interpolated prior function from arrays of xx and yy=p(xx)\n\n Parameters\n ----------\n xx: array_like\n x values for the to be interpolated prior function\n yy: array_like\n p(xx) values for the to be interpolated prior function\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n Attributes\n -------\n probability_density: scipy.interpolate.interp1d\n Interpolated prior probability distribution\n cumulative_distribution: scipy.interpolate.interp1d\n Interpolated cumulative prior probability distribution\n inverse_cumulative_distribution: scipy.interpolate.interp1d\n Inverted cumulative prior probability distribution\n YY: array_like\n Cumulative prior probability distribution\n\n \"\"\"\n self.xx = xx\n self.yy = yy\n self.__all_interpolated = interp1d(x=xx, y=yy, bounds_error=False, fill_value=0)\n Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,\n minimum=np.nanmax(np.array((min(xx), minimum))),\n maximum=np.nanmin(np.array((max(xx), maximum))))\n self.__initialize_attributes()\n\n def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if np.array_equal(self.xx, other.xx) and np.array_equal(self.yy, other.yy):\n return True\n return False\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ----------\n val: float\n\n Returns\n -------\n float: Prior probability of val\n \"\"\"\n return self.probability_density(val)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the prior.\n\n This maps to the inverse CDF. 
This is done using interpolation.\n \"\"\"\n Prior.test_valid_for_rescaling(val)\n rescaled = self.inverse_cumulative_distribution(val)\n if rescaled.shape == ():\n rescaled = float(rescaled)\n return rescaled\n\n @property\n def minimum(self):\n \"\"\"Return minimum of the prior distribution.\n\n Updates the prior distribution if minimum is set to a different value.\n\n Returns\n -------\n float: Minimum of the prior distribution\n\n \"\"\"\n return self.__minimum\n\n @minimum.setter\n def minimum(self, minimum):\n self.__minimum = minimum\n if '_Interped__maximum' in self.__dict__ and self.__maximum < np.inf:\n self.__update_instance()\n\n @property\n def maximum(self):\n \"\"\"Return maximum of the prior distribution.\n\n Updates the prior distribution if maximum is set to a different value.\n\n Returns\n -------\n float: Maximum of the prior distribution\n\n \"\"\"\n return self.__maximum\n\n @maximum.setter\n def maximum(self, maximum):\n self.__maximum = maximum\n if '_Interped__minimum' in self.__dict__ and self.__minimum < np.inf:\n self.__update_instance()\n\n def __update_instance(self):\n self.xx = np.linspace(self.minimum, self.maximum, len(self.xx))\n self.yy = self.__all_interpolated(self.xx)\n self.__initialize_attributes()\n\n def __initialize_attributes(self):\n if np.trapz(self.yy, self.xx) != 1:\n logger.debug('Supplied PDF for {} is not normalised, normalising.'.format(self.name))\n self.yy /= np.trapz(self.yy, self.xx)\n self.YY = cumtrapz(self.yy, self.xx, initial=0)\n # Need last element of cumulative distribution to be exactly one.\n self.YY[-1] = 1\n self.probability_density = interp1d(x=self.xx, y=self.yy, bounds_error=False, fill_value=0)\n self.cumulative_distribution = interp1d(x=self.xx, y=self.YY, bounds_error=False, fill_value=0)\n self.inverse_cumulative_distribution = interp1d(x=self.YY, y=self.xx, bounds_error=True)\n\n\nclass FromFile(Interped):\n\n def __init__(self, file_name, minimum=None, maximum=None, name=None,\n latex_label=None, unit=None):\n \"\"\"Creates an interpolated prior function from arrays of xx and yy=p(xx) extracted from a file\n\n Parameters\n ----------\n file_name: str\n Name of the file containing the xx and yy arrays\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n Attributes\n -------\n all_interpolated: scipy.interpolate.interp1d\n Interpolated prior function\n\n \"\"\"\n try:\n self.id = file_name\n xx, yy = np.genfromtxt(self.id).T\n Interped.__init__(self, xx=xx, yy=yy, minimum=minimum,\n maximum=maximum, name=name,\n latex_label=latex_label, unit=unit)\n except IOError:\n logger.warning(\"Can't load {}.\".format(self.id))\n logger.warning(\"Format should be:\")\n logger.warning(r\"x\\tp(x)\")\n"
] | [
[
"numpy.multiply",
"scipy.interpolate.interp1d",
"numpy.any",
"numpy.log",
"numpy.trapz",
"numpy.nan_to_num",
"numpy.isfinite",
"numpy.arccos",
"numpy.cos",
"scipy.special.erfinv",
"numpy.random.uniform",
"numpy.arcsin",
"scipy.integrate.cumtrapz",
"numpy.exp",
"numpy.atleast_1d",
"numpy.array_equal",
"numpy.sin",
"numpy.genfromtxt",
"scipy.special.erf"
]
] |
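The Interped prior in the row above rescales unit-interval samples through an interpolated inverse CDF built from cumtrapz and interp1d. Below is a minimal standalone sketch of that inverse-CDF sampling idea; the example PDF, array names, and sample count are illustrative and not taken from the dataset row.

    import numpy as np
    from scipy.integrate import cumtrapz
    from scipy.interpolate import interp1d

    xx = np.linspace(0.0, 10.0, 1000)      # support of the prior
    yy = xx * np.exp(-xx)                  # un-normalised p(x); Gamma-like shape for illustration
    yy /= np.trapz(yy, xx)                 # normalise so the PDF integrates to 1

    YY = cumtrapz(yy, xx, initial=0.0)     # cumulative distribution F(x)
    YY[-1] = 1.0                           # force the last value to be exactly 1
    inv_cdf = interp1d(YY, xx, bounds_error=True)

    unit_samples = np.random.uniform(0.0, 1.0, size=5)
    samples = inv_cdf(unit_samples)        # map unit-interval draws onto the prior
    print(samples)

The same pattern generalises to any tabulated, non-negative yy over xx, which is why the row's class only stores the interpolants rather than a closed-form distribution.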
van-truong/comptox_ai | [
"393b05c617822e30f54c967ef07ec53ba4b09688"
] | [
"comptox_ai/ml/nn.py"
] | [
"\"\"\"\nBase class and utilities for defining neural networks to be used on ComptoxAI\ndata.\n\nWe stick to PyTorch for implementing all neural networks, due to its speed,\nexpressiveness, and readability. For more documentation on PyTorch, check out \n`PyTorch Documentation<https://pytorch.org/docs/stable/index.html>`_. Several\nof the models we have reimplemented for ComptoxAI were previously only\nimplemented in Tensorflow or another deep learning library. Users are strongly\nencouraged to submit pull requests or create a new issue on GitHub if they\ndiscover any errors made in the translation process!\n\"\"\"\n\nfrom _typeshed import NoneType\nfrom comptox_ai.db.graph_db import Graph, GraphDB\nimport shutil\nimport os\n\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.metrics import roc_auc_score\n\nfrom torch_geometric.utils import negative_sampling\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import GCNConv\n# from torch_geometric.utils import train_test_split_edges\nfrom comptox_ai.ml.train_test_split_edges import train_test_split_edges\n\n\nclass NeuralNetwork(object):\n def __init__(self, **kwargs):\n arg_opts = {\n 'name',\n 'lr',\n 'num_epochs',\n 'logging',\n 'verbose'\n }\n for kwarg in kwargs.keys():\n assert kwarg in arg_opts, 'Invalid argument: {}'.format(kwarg)\n\n self.verbose = kwargs.get('verbose', False)\n self.data = None\n self.model = None\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n ## link prediction model - code doesn't work from here on down, will prob need to delete!\n ## - erica\n class Net(torch.nn.Module):\n def __init__(self, name, in_channels, out_channels):\n super(Net, self).__init__()\n if name == 'link-prediction':\n self.conv1 = GCNConv(in_channels, 128)\n self.conv2 = GCNConv(128, out_channels)\n\n def encode(self, x, edge_index):\n x = self.conv1(x, edge_index)\n x = x.relu()\n return self.conv2(x, edge_index)\n\n def decode(self, z, pos_ege_index, neg_edge_index):\n edge_index = torch.cat([pos_ege_index, neg_edge_index], dim=-1)\n return (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)\n\n def decode_all(self, z):\n prob_adj = z @ z.t()\n return (prob_adj > 0).nonzero(as_tuple=False).t()\n\n def load_data(self, graph_name, node_types):\n db = GraphDB()\n\n db.drop_all_existing_graphs() # alt: drop graph if exists rather than dropping all graphs \n db.build_graph_native_projection(\n graph_name=graph_name,\n node_proj=node_types,\n relationship_proj=\"'*'\"\n )\n\n dir_abspath = os.path.join(os.getcwd(), 'comptox_ai/db/exports', f\"{graph_name}\")\n try:\n shutil.rmtree(dir_abspath)\n except OSError as e:\n print(\"Error: %s : %s\" % (dir_abspath, e.strerror))\n\n db.export_graph(graph_name)\n data = db.to_pytorch(graph_name, node_types)\n\n ## debugging\n print(f\"data: {data}\")\n print(f\"data.x:\\n\\t{data.x}\")\n print(f\"data.edge_index:\\n\\t{data.edge_index}\")\n\n ## train test split data\n data = train_test_split_edges(data)\n self.data = data.to(self.device)\n\n self.model = self.Net(self.data.num_features, 64).to(self.device)\n\n\n def get_link_labels(pos_edge_index, neg_edge_index):\n num_links = pos_edge_index.size(1) + neg_edge_index.size(1)\n link_labels = torch.zeros(num_links, dtype=torch.float, device=device)\n link_labels[:pos_edge_index.size(1)] = 1.\n return link_labels\n\n\n def train(data):\n model.train()\n\n neg_edge_index = negative_sampling(\n edge_index=data.train_pos_edge_index, num_nodes=data.num_nodes,\n 
num_neg_samples=data.train_pos_edge_index.size(1))\n\n optimizer.zero_grad()\n z = model.encode(data.x, data.train_pos_edge_index)\n link_logits = model.decode(z, data.train_pos_edge_index, neg_edge_index)\n link_labels = get_link_labels(data.train_pos_edge_index, neg_edge_index)\n loss = F.binary_cross_entropy_with_logits(link_logits, link_labels)\n loss.backward()\n optimizer.step()\n\n return loss\n\n\n @torch.no_grad()\n def test(data):\n model.eval()\n\n z = model.encode(data.x, data.train_pos_edge_index)\n\n results = []\n for prefix in ['val', 'test']:\n pos_edge_index = data[f'{prefix}_pos_edge_index']\n neg_edge_index = data[f'{prefix}_neg_edge_index']\n link_logits = model.decode(z, pos_edge_index, neg_edge_index)\n link_probs = link_logits.sigmoid()\n link_labels = get_link_labels(pos_edge_index, neg_edge_index)\n results.append(roc_auc_score(link_labels.cpu(), link_probs.cpu()))\n return results\n\n\n best_val_auc = test_auc = 0\n for epoch in range(1, 101):\n loss = train(data)\n val_auc, tmp_test_auc = test(data)\n if val_auc > best_val_auc:\n best_val = val_auc\n test_auc = tmp_test_auc\n print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Val: {val_auc:.4f}, '\n f'Test: {test_auc:.4f}')\n\n z = model.encode(data.x, data.train_pos_edge_index)\n final_edge_index = model.decode_all(z)\n\n\n\n\n "
] | [
[
"torch.no_grad",
"torch.cuda.is_available",
"torch.zeros",
"torch.cat",
"torch.nn.functional.binary_cross_entropy_with_logits"
]
] |
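The link-prediction model in this row scores candidate edges with a dot-product decoder and trains on positive plus negatively sampled edges using binary cross-entropy (the in-file comment notes the class itself is unfinished). Below is a minimal plain-PyTorch sketch of just that decode/label/loss pattern, with made-up embeddings and edge indices so it runs without torch_geometric or a Neo4j export.

    import torch
    import torch.nn.functional as F

    num_nodes, dim = 10, 16
    z = torch.randn(num_nodes, dim, requires_grad=True)   # node embeddings (stand-in for GCN output)

    pos_edge_index = torch.tensor([[0, 1, 2], [3, 4, 5]])  # known edges
    neg_edge_index = torch.tensor([[0, 2, 7], [9, 8, 6]])  # sampled non-edges

    def decode(z, pos_edge_index, neg_edge_index):
        # Score every candidate edge by the dot product of its endpoint embeddings.
        edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
        return (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)

    def get_link_labels(pos_edge_index, neg_edge_index):
        # 1.0 for positive edges, 0.0 for negative edges, in the same order as decode().
        labels = torch.zeros(pos_edge_index.size(1) + neg_edge_index.size(1))
        labels[:pos_edge_index.size(1)] = 1.0
        return labels

    logits = decode(z, pos_edge_index, neg_edge_index)
    labels = get_link_labels(pos_edge_index, neg_edge_index)
    loss = F.binary_cross_entropy_with_logits(logits, labels)
    loss.backward()
    print(float(loss))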
imsandydk/Convoying_project_s22 | [
"0b60a23fe148839f243a8a96acae9ee0fd8b9a81"
] | [
"convoy.py"
] | [
"import carla\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport sys\nfrom srunner.tools.route_manipulation import interpolate_trajectory\n\n#Returns only the waypoints in one lane\ndef single_lane(waypoint_list, lane):\n waypoints = []\n for i in range(len(waypoint_list) - 1):\n if waypoint_list[i].lane_id == lane:\n waypoints.append(waypoint_list[i])\n return waypoints\n \n#Returns only the waypoints that are not along the straights\ndef get_curvy_waypoints(waypoints):\n curvy_waypoints = []\n for i in range(len(waypoints) - 1):\n x1 = waypoints[i].transform.location.x\n y1 = waypoints[i].transform.location.y\n x2 = waypoints[i+1].transform.location.x\n y2 = waypoints[i+1].transform.location.y\n if (abs(x1 - x2) > 1) and (abs(y1 - y2) > 1):\n print(\"x1: \" + str(x1) + \" x2: \" + str(x2))\n print(abs(x1 - x2))\n print(\"y1: \" + str(y1) + \" y2: \" + str(y2))\n print(abs(y1 - y2))\n curvy_waypoints.append(waypoints[i])\n \n #To make the path reconnect to the starting location\n curvy_waypoints.append(curvy_waypoints[0])\n\n return curvy_waypoints\n\ndef control_pure_pursuit(vehicle_tr, waypoint_tr, max_steer, wheelbase):\n # TODO: convert vehicle transform to rear axle transform\n wp_loc_rel = relative_location(vehicle_tr, waypoint_tr.location) + carla.Vector3D(wheelbase, 0, 0)\n wp_ar = [wp_loc_rel.x, wp_loc_rel.y]\n d2 = wp_ar[0]**2 + wp_ar[1]**2\n steer_rad = math.atan(2 * wheelbase * wp_loc_rel.y / d2)\n steer_deg = math.degrees(steer_rad)\n steer_deg = np.clip(steer_deg, -max_steer, max_steer)\n return steer_deg / max_steer\n\ndef relative_location(frame, location):\n origin = frame.location\n forward = frame.get_forward_vector()\n right = frame.get_right_vector()\n up = frame.get_up_vector()\n disp = location - origin\n x = np.dot([disp.x, disp.y, disp.z], [forward.x, forward.y, forward.z])\n y = np.dot([disp.x, disp.y, disp.z], [right.x, right.y, right.z])\n z = np.dot([disp.x, disp.y, disp.z], [up.x, up.y, up.z])\n return carla.Vector3D(x, y, z)\n\ndef get_next_waypoint(world, vehicle, waypoints):\n vehicle_location = vehicle.get_location()\n min_distance = 1000\n next_waypoint = None\n\n for waypoint in waypoints:\n waypoint_location = waypoint.transform.location\n\n #Only check waypoints that are in the front of the vehicle (if x is negative, then the waypoint is to the rear)\n #TODO: Check if this applies for all maps\n if (waypoint_location - vehicle_location).x > 0:\n\n #Find the waypoint closest to the vehicle, but once vehicle is close to upcoming waypoint, search for next one\n if vehicle_location.distance(waypoint_location) < min_distance and vehicle_location.distance(waypoint_location) > 5:\n min_distance = vehicle_location.distance(waypoint_location)\n next_waypoint = waypoint\n\n return next_waypoint\n\ndef main():\n\n ##Modifiable Variables\n targetLane = -3\n\n client = carla.Client('127.0.0.1', 2000)\n client.set_timeout(10.0)\n\n # Read the opendrive file to a string\n xodr_path = \"speedway.xodr\"\n #xodr_path = \"Crossing8Course.xodr\"\n od_file = open(xodr_path)\n data = od_file.read()\n\n # Load the opendrive map\n vertex_distance = 2.0 # in meters\n max_road_length = 50.0 # in meters\n wall_height = 1.0 # in meters\n extra_width = 0.6 # in meters\n world = client.generate_opendrive_world(\n data, carla.OpendriveGenerationParameters(\n vertex_distance=vertex_distance,\n max_road_length=max_road_length,\n wall_height=wall_height,\n additional_width=extra_width,\n smooth_junctions=True,\n enable_mesh_visibility=True))\n\n spectator = 
world.get_spectator()\n\n map = world.get_map()\n waypoint_list = map.generate_waypoints(40)\n\n print(\"Length: \" + str(len(waypoint_list)))\n \n #Take only the waypoints from the targetLane\n waypoints = single_lane(waypoint_list, targetLane)\n\n #Remove all unneccesary waypoints along the straights\n curvy_waypoints = get_curvy_waypoints(waypoints)\n\n #Save graph of plotted points as bezier.png\n x = [p.transform.location.x for p in curvy_waypoints]\n y = [p.transform.location.y for p in curvy_waypoints]\n plt.plot(x, y, marker = 'o')\n plt.savefig(\"bezier.png\")\n\n #Set spawning location as initial waypoint\n waypoint = curvy_waypoints[0]\n blueprint = world.get_blueprint_library().filter('vehicle.*model3*')[0]\n location = waypoint.transform.location + carla.Vector3D(0, 0, 1.5)\n rotation = waypoint.transform.rotation\n vehicle = world.spawn_actor(blueprint, carla.Transform(location, rotation))\n print(\"SPAWNED!\")\n \n #Vehicle properties setup\n physics_control = vehicle.get_physics_control()\n max_steer = physics_control.wheels[0].max_steer_angle\n rear_axle_center = (physics_control.wheels[2].position + physics_control.wheels[3].position)/200\n offset = rear_axle_center - vehicle.get_location()\n wheelbase = np.linalg.norm([offset.x, offset.y, offset.z])\n vehicle.set_simulate_physics(True)\n\n #Add spectator camera to get the view to move with the car \n camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')\n camera_transform = carla.Transform(carla.Location(x=-10,z=10), carla.Rotation(-45,0,0))\n camera = world.spawn_actor(camera_bp, camera_transform, attach_to=vehicle)\n\n ##INSERT MODIFYING WAYPOINTS HERE\n\n while True:\n\n #Update the camera view\n spectator.set_transform(camera.get_transform())\n\n #Get next waypoint\n waypoint = get_next_waypoint(world, vehicle, curvy_waypoints)\n world.debug.draw_point(waypoint.transform.location, life_time=5)\n\n #Control vehicle's throttle and steering\n throttle = 0.85\n vehicle_transform = vehicle.get_transform()\n vehicle_location = vehicle_transform.location\n steer = control_pure_pursuit(vehicle_transform, waypoint.transform, max_steer, wheelbase)\n control = carla.VehicleContr\n vehicle.apply_control(control)\n\nif __name__ == \"__main__\":\n sys.exit(main())"
] | [
[
"matplotlib.pyplot.savefig",
"numpy.clip",
"matplotlib.pyplot.plot",
"numpy.dot",
"numpy.linalg.norm"
]
] |
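control_pure_pursuit in the row above steers toward a waypoint expressed in the vehicle frame using the pure-pursuit rule steer = atan(2 * L * y / d^2), clipped to the wheel's maximum angle and normalised to [-1, 1]. The sketch below isolates that computation without the CARLA dependency; the waypoint coordinates, wheelbase, and steering limit are illustrative values.

    import math
    import numpy as np

    def pure_pursuit_steer(wp_x, wp_y, wheelbase, max_steer_deg):
        # Squared distance to the lookahead waypoint in the vehicle frame (x forward, y lateral).
        d2 = wp_x ** 2 + wp_y ** 2
        # Pure-pursuit steering angle: atan(2 * L * y / d^2).
        steer_rad = math.atan(2.0 * wheelbase * wp_y / d2)
        # Clip to the physical steering limit and normalise to [-1, 1] for the controller.
        steer_deg = np.clip(math.degrees(steer_rad), -max_steer_deg, max_steer_deg)
        return steer_deg / max_steer_deg

    print(pure_pursuit_steer(wp_x=8.0, wp_y=1.5, wheelbase=2.9, max_steer_deg=70.0))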
santhtadi/rest_api_in_django | [
"eae65bce23494e1950b9bd8dfdf6dbab71f1f922"
] | [
"restApi/views.py"
] | [
"from django.http import HttpResponse\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nimport numpy as np\nfrom PIL import Image\nimport io\n\n\n# Create your views here.\ndef index(request):\n return HttpResponse(request, \"hi there\")\n\n\nclass SendImage(APIView):\n @staticmethod\n def check_validity(req):\n ret = True\n message = \"\"\n keys = [w for w in req.keys()]\n if \"image\" not in keys:\n ret = False\n message += \"image is not appended, \" \\\n \"try appending the image in header files with key 'image', please refer to \" \\\n \"https://github.com/santhtadi/rest_api_in_django \" \\\n \"for more details ; \"\n return ret, message\n\n # post is responsible for receiving files\n # develop det put and delete according to your need\n def post(self, request):\n # print the data in request to dashboard\n print(request.data)\n # convert the request data to a dictionary object in python\n req = dict(request.data)\n # check if all the required files are appended or not\n valid, error_message = self.check_validity(req)\n if not valid:\n return Response({\"message\": error_message}, status=status.HTTP_400_BAD_REQUEST)\n # read the image as bytes\n by = req['image'][0].read()\n # convert bytes as image using pillow library\n img = Image.open(io.BytesIO(by)).convert('RGB')\n # create an array using numpy\n image_in_rgb_format = np.array(img)\n # change RGB to BGR format for using with opencv library\n image_in_opencv_format = image_in_rgb_format[:, :, ::-1].copy()\n # returning size of image as output\n return Response({\"image_size\": image_in_opencv_format.shape}, status=status.HTTP_200_OK)\n"
] | [
[
"numpy.array"
]
] |
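The view in this row reads the uploaded bytes, decodes them with Pillow, converts to a NumPy RGB array, and reverses the channel axis to get OpenCV's BGR order. A small sketch of the same conversion follows, fed with an in-memory PNG instead of an HTTP request; the image size and colour are made up.

    import io
    import numpy as np
    from PIL import Image

    # Build some fake "uploaded" bytes: a 4x2 red PNG encoded in memory.
    buf = io.BytesIO()
    Image.new('RGB', (4, 2), color=(255, 0, 0)).save(buf, format='PNG')
    uploaded_bytes = buf.getvalue()

    img = Image.open(io.BytesIO(uploaded_bytes)).convert('RGB')
    rgb = np.array(img)            # shape (height, width, 3), RGB channel order
    bgr = rgb[:, :, ::-1].copy()   # reverse the channel axis to get BGR (OpenCV order)

    print(bgr.shape, bgr[0, 0])    # (2, 4, 3) and [  0   0 255]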
UtkarshK10/NLP-Spam-Filter | [
"795ea74897bd32cd3ce345e78f8c5d772a6da350"
] | [
"spam_filter.py"
] | [
"import sys\nimport nltk\nimport sklearn\n\n\nimport pandas as pd\nimport numpy as np\n\ndf= pd.read_table('SMSSpamCollection',header= None, encoding='utf-8')\n\n\nclasses = df[0]\nprint(classes.value_counts())\n\n#Preprocess the data\n\n\"\"\"\n0= ham\n1=spam\nfor this we use label encoder\n\"\"\"\nfrom sklearn.preprocessing import LabelEncoder\n\nencoder=LabelEncoder()\nY=encoder.fit_transform(classes)\n\n\n#store the sms data\ntext_messages = df[1]\n\n\n\n\n#replace email addresses with emailaddr\nprocessed= text_messages.str.replace(r'^.+@[^\\.].*\\.[a-z]{2,}$','emailaddr')\n\n#replace urls with webaddress\nprocessed= processed.str.replace(r'^http\\://[a-zA-Z0-9\\-\\.]+\\.[a-zA-Z]{2,3}(/\\S*)?$','webaddress')\n\n#replace money symbols with 'moneysymb'\nprocessed=processed.str.replace(r'£|\\$','moneysymb')\n\n#replace 10 digit number with 'phonenumber'\nprocessed= processed.str.replace(r'^\\(?[\\d]{3}\\)?[\\s-]?[\\d]{3}[\\s-]?[\\d]{4}$','phonenumber')\n\n#replace normal numbers with 'numbr' \nprocessed=processed.str.replace(r'\\d+(\\.\\d+)?','numbr')\n\n\n\n#remove punctuation\n\nprocessed=processed.str.replace(r'[^\\w\\d\\s]','')\nprocessed=processed.str.replace(r'\\s+',' ')\nprocessed=processed.str.lower()\n\n\n# remove stop words\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nstop_words=set(stopwords.words('english'))\nprocessed=processed.apply(lambda x : ' '.join(term for term in x.split() if term not in stop_words ))\n\n# Stemming - like,likes,liked ~like\nps=nltk.PorterStemmer()\nprocessed=processed.apply(lambda x : ' '.join(ps.stem(term) for term in x.split()))\n\n\n\n#Tokenizing\nnltk.download('punkt')\nfrom nltk.tokenize import word_tokenize\n\nall_words=[]\n\nfor message in processed:\n words=word_tokenize(message)\n for w in words:\n all_words.append(w)\n \nall_words= nltk.FreqDist(all_words)\n\n\n#print the total number of words and 15 most common words\n'''\nprint('Number of words:{}'.format(len(all_words)))\nprint('Most Common Words:{}'.format(all_words.most_common(15)))\n'''\n\n#using the 1500 most common word as features\nword_features=list(all_words.keys())[:1500]\n\n\n#defining find a feature function\ndef find_features(message):\n words=word_tokenize(message)\n features={}\n for word in word_features:\n features[word]=(word in words)\n return features\n\n#example\nfeatures = find_features(processed[0])\nfor key,value in features.items():\n if value == True:\n print(key)\n \n# zipper method for appending i/p - o/p\ndef zipper(x, y):\n\tsize = len(x) if len(x) < len(y) else len(y)\n\tretList = []\n\tfor i in range(size):\n\t\tretList.append((x[i], y[i]))\n\treturn retList \n\n \n#find features for all this messages\nmessages = zipper(processed,Y)\n\n#define a seed for reproductibility\nseed=1\nnp.random.seed=seed\nnp.random.shuffle(messages)\nfeaturesets=[(find_features(text),label) for (text,label) in messages]\n\n#split training and testing data using sklearn\nfrom sklearn import model_selection\ntraining,testing = model_selection.train_test_split(featuresets,test_size=0.25,random_state=seed)\n'''\nprint('Training: {}'.format(len(training)))\nprint('Testing: {}'.format(len(testing)))\n'''\n\n#Scikitlearn classifiers with nltk\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import SVC\nfrom 
sklearn.metrics import classification_report,accuracy_score,confusion_matrix\n\n\n#Define models to train and comparing best model on its accuracy\nnames=['K Nearest Neighbors','Decision Tree','Random Forest','Logistic Regression','SGD Classifier','Naive Bayes','SVM Linear']\nclassifiers=[\n KNeighborsClassifier(),\n DecisionTreeClassifier(),\n RandomForestClassifier(),\n LogisticRegression(),\n SGDClassifier(max_iter=100),\n MultinomialNB(),\n SVC(kernel='linear')\n \n ]\n\nmodels = zipper(names,classifiers)\n\n#Wrap models in nltk and find their accuracy then select best method\nfrom nltk.classify.scikitlearn import SklearnClassifier\n\nfor name,model in models:\n nltk_model=SklearnClassifier(model)\n nltk_model.train(training)\n accuracy=nltk.classify.accuracy(nltk_model,testing)*100\n print('{}: Accuracy: {}'.format(name,accuracy))\n \n#ensemble method -- Voting Classifier for better accuracy\n \nfrom sklearn.ensemble import VotingClassifier\n\nnames=['K Nearest Neighbors','Decision Tree','Random Forest','Logistic Regression','SGD Classifier','Naive Bayes','SVM Linear']\nclassifiers=[\n KNeighborsClassifier(),\n DecisionTreeClassifier(),\n RandomForestClassifier(),\n LogisticRegression(),\n SGDClassifier(max_iter=100),\n MultinomialNB(),\n SVC(kernel='linear')\n \n ]\n \nmodels = zipper(names,classifiers)\n# n_jobs=-1 means all algo can run in parallel\nnltk_ensemble= SklearnClassifier(VotingClassifier(estimators=models,voting='hard',n_jobs= -1))\nnltk_ensemble.train(training)\naccuracy=nltk.classify.accuracy(nltk_ensemble,testing)*100\nprint('Ensemble Method Accuracy: {}'.format(accuracy))\n\n#make class label predictions\ntxt_features,labels=zip(*testing)\nprediction = nltk_ensemble.classify_many(txt_features)\n\n#print a confusion matrix and a classification report\nprint(classification_report(labels,prediction))\npd.DataFrame(\n confusion_matrix(labels,prediction),\n index=[['actual','actual'],['ham','spam']],\n columns=[['predicted','predicted'],['ham','spam']]\n )\n"
] | [
[
"pandas.read_table",
"numpy.random.shuffle",
"sklearn.svm.SVC",
"sklearn.metrics.classification_report",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.linear_model.SGDClassifier",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.metrics.confusion_matrix",
"sklearn.ensemble.VotingClassifier",
"sklearn.preprocessing.LabelEncoder",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.model_selection.train_test_split"
]
] |
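find_features in the row above builds a binary bag-of-words dictionary over the most common tokens in the corpus. The sketch below reproduces that featurisation on a few made-up messages; str.split() stands in for nltk.word_tokenize so nothing needs to be downloaded, and the vocabulary size of 5 is arbitrary.

    import nltk

    messages = [
        "win a free prize now",
        "are we still meeting for lunch",
        "free entry in a weekly prize draw",
    ]

    # Frequency distribution over all tokens, then keep the top-N as the feature vocabulary.
    all_words = nltk.FreqDist(w for m in messages for w in m.split())
    word_features = [w for w, _ in all_words.most_common(5)]

    def find_features(message):
        # One boolean per vocabulary word: does it appear in this message?
        words = set(message.split())
        return {word: (word in words) for word in word_features}

    print(find_features("claim your free prize"))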
jld23/saspy | [
"47adeb5b9e298e6b9ec017f850245e318f2faa57"
] | [
"saspy/sasiocom.py"
] | [
"#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport datetime\nimport csv\nimport io\nimport numbers\nimport os\nimport shlex\nimport sys\nimport warnings\n\nimport logging\nlogger = logging.getLogger('saspy')\n\ntry:\n from win32com.client import dynamic\nexcept ImportError:\n pass\n\ntry:\n import pandas as pd\nexcept ImportError:\n pass\n\n\nclass SASConfigCOM(object):\n \"\"\"\n This object is not intended to be used directly. Instantiate a SASSession\n object instead.\n \"\"\"\n NO_OVERRIDE = ['kernel', 'sb']\n\n def __init__(self, **kwargs):\n self._kernel = kwargs.get('kernel')\n\n session = kwargs['sb']\n sascfg = session.sascfg.SAScfg\n name = session.sascfg.name\n cfg = getattr(sascfg, name)\n opts = getattr(sascfg, 'SAS_config_options', {})\n outs = getattr(sascfg, 'SAS_output_options', {})\n\n self.host = cfg.get('iomhost')\n self.port = cfg.get('iomport')\n self.user = cfg.get('omruser')\n self.pw = cfg.get('omrpw')\n self.authkey = cfg.get('authkey')\n self.class_id = cfg.get('class_id', '440196d4-90f0-11d0-9f41-00a024bb830c')\n self.provider = cfg.get('provider')\n self.encoding = cfg.get('encoding', '')\n\n self.output = outs.get('output', 'html5')\n\n self.verbose = opts.get('verbose', True)\n self.verbose = kwargs.get('verbose', self.verbose)\n\n self._lock = opts.get('lock_down', True)\n self._prompt = session.sascfg._prompt\n\n if self.authkey is not None:\n self._set_authinfo()\n\n for key, value in filter(lambda x: x[0] not in self.NO_OVERRIDE, kwargs.items()):\n self._try_override(key, value)\n\n def _set_authinfo(self):\n \"\"\"\n Attempt to set the session user's credentials based on provided\n key to read from ~/.authinfo file. See .authinfo documentation\n here: https://documentation.sas.com/api/docsets/authinfo/9.4/content/authinfo.pdf.\n\n This method supports a subset of the .authinfo spec, in accordance with\n other IO access methods. This method will only parse `user` and `password`\n arguments, but does support spaces in values if the value is quoted. 
Use\n python's `shlex` library to parse these values.\n \"\"\"\n if os.name == 'nt':\n authfile = os.path.expanduser(os.path.join('~', '_authinfo'))\n else:\n authfile = os.path.expanduser(os.path.join('~', '.authinfo'))\n\n try:\n with open(authfile, 'r') as f:\n # Take first matching line found\n parsed = (shlex.split(x, posix=False) for x in f.readlines())\n authline = next(filter(lambda x: x[0] == self.authkey, parsed), None)\n\n except OSError:\n logger.error('Error trying to read {}'.format(authfile))\n authline = None\n\n if authline is None:\n logger.error('Key {} not found in authinfo file: {}'.format(self.authkey, authfile))\n elif len(authline) < 5:\n logger.error('Incomplete authinfo credentials in {}; key: {}'.format(authfile, self.authkey))\n else:\n # Override user/pw if previously set\n # `authline` is in the following format:\n # AUTHKEY username USERNAME password PASSWORD\n self.user = authline[2]\n self.pw = authline[4]\n\n def _try_override(self, attr, value):\n \"\"\"\n Attempt to override a configuration file option if `self._lock` is\n False. Otherwise, warn the user.\n :param attr: Configuration attribute.\n :param value: Configuration value.\n \"\"\"\n if self._lock is False:\n setattr(self, attr, value)\n else:\n err = \"Param '{}' was ignored due to configuration restriction\".format(attr)\n logger.warning(err, file=sys.stderr)\n\n\nclass SASSessionCOM(object):\n \"\"\"\n Initiate a connection to a SAS server and provide access for Windows\n clients without the Java dependency. Utilizes available COM objects for\n client communication with the IOM interface.\n It may be possible to communicate with local SAS instances as well,\n although this is functionality is untested. A slight change may be\n required to the `_startsas` method to support local instances.\n \"\"\"\n SAS_APP = 'SASApp'\n HTML_RESULT_FILE = 'saspy_results.html'\n\n # SASObjectManager.Protocols Enum values\n PROTOCOL_COM = 0\n PROTOCOL_IOM = 2\n\n # SAS Date/Time/Datetime formats\n FMT_DEFAULT_DATE_NAME = 'E8601DA'\n FMT_DEFAULT_DATE_LENGTH = 10\n FMT_DEFAULT_DATE_PRECISION = 0\n FMT_DEFAULT_TIME_NAME = 'E8601TM'\n FMT_DEFAULT_TIME_LENGTH = 15\n FMT_DEFAULT_TIME_PRECISION = 6\n FMT_DEFAULT_DATETIME_NAME = 'E8601DT'\n FMT_DEFAULT_DATETIME_LENGTH = 26\n FMT_DEFAULT_DATETIME_PRECISION = 6\n\n # Pandas data types\n PD_NUM_TYPE = ('i', 'u', 'f', 'c')\n PD_STR_TYPE = ('S', 'U', 'V')\n PD_DT_TYPE = ('M')\n PD_BOOL_TYPE = ('b')\n\n # ADODB RecordSet CursorTypeEnum values\n CURSOR_UNSPECIFIED = -1\n CURSOR_FORWARD = 0\n CURSOR_KEYSET = 1\n CURSOR_DYNAMIC = 2\n CURSOR_STATIC = 3\n\n # ADODB RecordSet LockTypeEnum values\n LOCK_UNSPECIFIED = -1\n LOCK_READONLY = 1\n LOCK_PESSIMISTIC = 2\n LOCK_OPTIMISTIC = 3\n LOCK_BATCH_OPTIMISTIC = 4\n\n # ADODB RecordSet CommandTypeEnum values\n CMD_UNSPECIFIED = -1\n CMD_TEXT = 1\n CMD_TABLE = 2\n CMD_STORED_PROC = 4\n CMD_UNKNOWN = 8\n CMD_FILE = 256\n CMD_TABLE_DIRECT = 512\n\n # ADODB Connection SchemaEnum values\n SCHEMA_COLUMNS = 4\n SCHEMA_TABLES = 20\n\n # ADODB ObjectStateEnum values\n STATE_CLOSED = 0\n STATE_OPEN = 1\n\n # FileService StreamOpenMode values\n STREAM_READ = 1\n STREAM_WRITE = 2\n\n def __init__(self, **kwargs):\n self._log = ''\n self.sascfg = SASConfigCOM(**kwargs)\n self._sb = kwargs.get('sb')\n\n self.pid = self._startsas()\n\n def __del__(self):\n if self.adodb.State == self.STATE_OPEN:\n self._endsas()\n\n def _startsas(self) -> str:\n \"\"\"\n Create a workspace and open a connection with SAS.\n :return [str]:\n \"\"\"\n if getattr(self, 
'workspace', None) is not None:\n # Do not create a new connection\n return self.workspace.UniqueIdentifier\n\n factory = dynamic.Dispatch('SASObjectManager.ObjectFactoryMulti2')\n server = dynamic.Dispatch('SASObjectManager.ServerDef')\n\n self.keeper = dynamic.Dispatch('SASObjectManager.ObjectKeeper')\n self.adodb = dynamic.Dispatch('ADODB.Connection')\n\n if self.sascfg.host is None:\n # Create a local connection.\n server.MachineDNSName = '127.0.0.1'\n server.Port = 0\n server.Protocol = self.PROTOCOL_COM\n\n user = None\n password = None\n else:\n # Create a remote connection. The following are required:\n # 1. host\n # 2. port\n # 3. class_id\n server.MachineDNSName = self.sascfg.host\n server.Port = self.sascfg.port\n server.Protocol = self.PROTOCOL_IOM\n server.ClassIdentifier = self.sascfg.class_id\n\n if self.sascfg.user is not None:\n user = self.sascfg.user\n else:\n user = self.sascfg._prompt('Username: ')\n\n if self.sascfg.pw is not None:\n password = self.sascfg.pw\n else:\n password = self.sascfg._prompt('Password: ', pw=True)\n\n self.workspace = factory.CreateObjectByServer(self.SAS_APP, True,\n server, user, password)\n\n self.keeper.AddObject(1, 'WorkspaceObject', self.workspace)\n self.adodb.Open('Provider={}; Data Source=iom-id://{}'.format(\n self.sascfg.provider, self.workspace.UniqueIdentifier))\n\n ll = self.submit(\"options svgtitle='svgtitle'; options validvarname=any validmemname=extend pagesize=max nosyntaxcheck; ods graphics on;\", \"text\")\n if self.sascfg.verbose:\n logger.info(\"SAS Connection established. Workspace UniqueIdentifier is \"+str(self.workspace.UniqueIdentifier)+\"\\n\")\n\n return self.workspace.UniqueIdentifier\n\n def _endsas(self):\n \"\"\"\n Close a connection with SAS.\n \"\"\"\n self.adodb.Close()\n self.keeper.RemoveObject(self.workspace)\n self.workspace.Close()\n if self.sascfg.verbose:\n logger.info(\"SAS Connection terminated. Workspace UniqueIdentifierid was \"+str(self.pid))\n\n def _getlst(self, buf: int=2048) -> str:\n \"\"\"\n Flush listing.\n :option buf [int]: Download buffer. Default 2048.\n :return [str]:\n \"\"\"\n flushed = self.workspace.LanguageService.FlushList(buf)\n result = flushed\n while flushed:\n flushed = self.workspace.LanguageService.FlushList(buf)\n result += flushed\n\n return result\n\n def _getlog(self, buf: int=2048) -> str:\n \"\"\"\n Flush log.\n :option buf [int]: Download buffer. Default 2048.\n :return [str]:\n \"\"\"\n flushed = self.workspace.LanguageService.FlushLog(buf)\n result = flushed\n while flushed:\n flushed = self.workspace.LanguageService.FlushLog(buf)\n result += flushed\n\n # Store flush result in running log\n self._log += result\n\n if result.count('ERROR:') > 0:\n warnings.warn(\"Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem\")\n self._sb.check_error_log = True\n\n return result\n\n def _getfile(self, fname: str, buf: int=2048, decode: bool=False) -> str:\n \"\"\"\n Use object file service to download a file from the provider.\n :param fname [str]: Filename.\n :option buf [int]: Download buffer. Default 2048.\n :option decode [bool]: Decode the byte stream.\n :return [str]:\n \"\"\"\n fobj = self.workspace.FileService.AssignFileref('outfile', 'DISK', fname, '', '')\n\n # Use binary stream to support text and image transfers. 
The binary\n # stream interface does not require a max line length, which allows\n # support of arbitrarily wide tables.\n stream = fobj[0].OpenBinaryStream(self.STREAM_READ)\n flushed = stream.Read(buf)\n result = bytes(flushed)\n while flushed:\n flushed = stream.Read(buf)\n result += bytes(flushed)\n\n stream.Close()\n self.workspace.FileService.DeassignFileref(fobj[0].FilerefName)\n\n if decode is True:\n result = result.decode(self.sascfg.encoding, errors='replace')\n\n return result\n\n def _gethtmlfn(self) -> str:\n \"\"\"\n Return the path of the output HTML file. This is the combination of\n the `workpath` attribute and `HTML_RESULT_FILE` constant.\n :return [str]:\n \"\"\"\n return self._sb.workpath + self.HTML_RESULT_FILE\n\n def _reset(self):\n \"\"\"\n Reset the LanguageService interface to its initial state with respect\n to token scanning. Use it to release the LanguageService from an error\n state associated with the execution of invalid syntax or incomplete\n program source. This primarily occurs when a statement is submitted\n without a trailing semicolon.\n \"\"\"\n self.workspace.LanguageService.Reset()\n\n def _tablepath(self, table: str, libref: str=None) -> str:\n \"\"\"\n Define a sas dataset path based on a table name and optional libref\n name. Will return a two-level or one-level path string based on the\n provided arguments. One-level names are of this form: `table`, while\n two-level names are of this form: `libref.table`. If libref is not\n defined, SAS will implicitly define the library to WORK or USER. The\n USER library needs to have been defined previously in SAS, otherwise\n WORK is the default option. If the `libref` parameter is any value\n that evaluates to `False`, the one-level path is returned.\n :param table [str]: SAS data set name.\n :option libref [str]: Optional library name.\n :return [str]:\n \"\"\"\n if not libref:\n path = \"'{}'n\".format(table.strip())\n else:\n path = \"{}.'{}'n\".format(libref, table.strip())\n\n return path\n\n def _schema(self, table: str, libref: str=None) -> dict:\n \"\"\"\n Request a table schema for a given `libref.table`.\n :param table [str]: Table name\n :option libref [str]: Library name.\n :return [dict]:\n \"\"\"\n #tablepath = self._tablepath(table, libref=libref)\n if not libref:\n tablepath = table\n else:\n tablepath = \"{}.{}\".format(libref, table)\n\n criteria = [None, None, tablepath]\n\n schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria)\n schema.MoveFirst()\n\n metadata = {}\n while not schema.EOF:\n col_info = {x.Name: x.Value for x in schema.Fields}\n if col_info['FORMAT_NAME'] in self._sb.sas_date_fmts:\n col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(days=x) if x else x\n elif col_info['FORMAT_NAME'] in self._sb.sas_datetime_fmts:\n col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(seconds=x) if x else x\n # elif FIXME TIME FORMATS\n else:\n col_info['CONVERT'] = lambda x: x\n\n metadata[col_info['COLUMN_NAME']] = col_info\n schema.MoveNext()\n\n schema.Close()\n\n return metadata\n\n def _prompt(self, key: str, hide: bool=False) -> tuple:\n \"\"\"\n Ask the user for input about a given key.\n :param key [str]: Key name.\n :option hide [bool]: Hide user keyboard input.\n :return [tuple]:\n \"\"\"\n input_ok = False\n while input_ok is False:\n val = self.sascfg._prompt('Enter value for macro variable {} '.format(key), pw=hide)\n\n if val is None:\n raise RuntimeError(\"No value for prompted macro variable provided.\")\n\n if val:\n 
input_ok = True\n else:\n print('Input not valid.')\n\n return (key, val)\n\n def _asubmit(self, code: str, results: str='html'):\n \"\"\"\n Submit any SAS code. Does not return a result.\n :param code [str]: SAS statements to execute.\n \"\"\"\n # Support html ods\n if results.lower() == 'html':\n ods_open = \"\"\"\n ods listing close;\n ods {} (id=saspy_internal) options(bitmap_mode='inline')\n file=\"{}\"\n device=svg\n style={};\n ods graphics on / outputfmt=png;\n \"\"\".format(self.sascfg.output, self._gethtmlfn(), self._sb.HTML_Style)\n\n ods_close = \"\"\"\n ods {} (id=saspy_internal) close;\n ods listing;\n \"\"\".format(self.sascfg.output)\n else:\n ods_open = ''\n ods_close = ''\n\n # Submit program\n full_code = ods_open + code + ods_close\n self.workspace.LanguageService.Submit(full_code)\n\n def submit(self, code: str, results: str='html', prompt: dict=None, **kwargs) -> dict:\n \"\"\"\n Submit any SAS code. Returns log and listing as dictionary with keys\n LOG and LST.\n :param code [str]: SAS statements to execute.\n :option results [str]: Result format. Options: HTML, TEXT. Default HTML.\n :option prompt [dict]: Create macro variables from prompted keys.\n \"\"\"\n RESET = \"\"\";*';*\";*/;quit;run;\"\"\"\n prompt = prompt if prompt is not None else {}\n printto = kwargs.pop('undo', False)\n\n macro_declare = ''\n for key, value in prompt.items():\n macro_declare += '%let {} = {};\\n'.format(*self._prompt(key, value))\n\n # Submit program\n self._asubmit(RESET + macro_declare + code + RESET, results)\n\n # Retrieve listing and log\n log = self._getlog()\n if results.lower() == 'html':\n # Make the following replacements in HTML listing:\n # 1. Swap \\x0c for \\n\n # 2. Change body class selector\n # 3. Increase font size\n listing = self._getfile(self._gethtmlfn(), decode=True) \\\n .replace(chr(12), chr(10)) \\\n .replace('<body class=\"c body\">', '<body class=\"l body\">') \\\n .replace('font-size: x-small;', 'font-size: normal;')\n else:\n listing = self._getlst()\n\n # Invalid syntax will put the interface in to an error state. Reset\n # the LanguageService to prevent further errors.\n # FIXME: In the future, may only want to reset on ERROR. 
However, this\n # operation seems pretty lightweight, so calling `_reset()` on all\n # submits is not a burden.\n self._reset()\n\n if printto:\n self._asubmit(\"\\nproc printto;run;\\n\", 'text')\n log += self._getlog()\n\n self._sb._lastlog = log\n return {'LOG': log, 'LST': listing}\n\n def saslog(self) -> str:\n \"\"\"\n Return the full SAS log.\n :return [str]:\n \"\"\"\n return self._log\n\n def exist(self, table: str, libref: str=None) -> bool:\n \"\"\"\n Determine if a `libref.table` exists.\n :param table [str]: Table name\n :option libref [str]: Library name.\n :return [bool]:\n \"\"\"\n #tablepath = self._tablepath(table, libref=libref)\n #criteria = [None, None, tablepath]\n\n #schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria)\n #exists = not schema.BOF\n\n #schema.Close()\n\n #return exists\n\n code = 'data _null_; e = exist(\"'\n if len(libref):\n code += libref+\".\"\n code += \"'\"+table.strip()+\"'n\"+'\"'+\");\\n\"\n code += 'v = exist(\"'\n if len(libref):\n code += libref+\".\"\n code += \"'\"+table.strip()+\"'n\"+'\"'+\", 'VIEW');\\n if e or v then e = 1;\\n\"\n code += \"te='TABLE_EXISTS='; put te e;run;\\n\"\n\n ll = self.submit(code, \"text\")\n\n l2 = ll['LOG'].rpartition(\"TABLE_EXISTS= \")\n l2 = l2[2].partition(\"\\n\")\n exists = int(l2[0])\n\n return bool(exists)\n\n\n def read_sasdata(self, table: str, libref: str=None, dsopts: dict=None) -> tuple:\n \"\"\"\n Read any SAS dataset and return as a tuple of header, rows\n :param table [str]: Table name\n :option libref [str]: Library name.\n :option dsopts [dict]: Dataset options.\n :return [tuple]:\n \"\"\"\n TARGET = '_saspy_sd2df'\n EXPORT = \"\"\"\n data {tgt};\n set {tbl} {dopt};\n run;\n \"\"\"\n\n dsopts = self._sb._dsopts(dsopts) if dsopts is not None else ''\n tablepath = self._tablepath(table, libref=libref)\n recordset = dynamic.Dispatch('ADODB.RecordSet')\n\n # Create an intermediate dataset with `dsopts` applied\n export = EXPORT.format(tgt=TARGET, tbl=tablepath, dopt=dsopts)\n self.workspace.LanguageService.Submit(export)\n meta = self._schema(TARGET)\n\n # Connect RecordSet object to ADODB connection with params:\n # Cursor: Forward Only\n # Lock: Read Only\n # Command: Table Direct\n recordset.Open(TARGET, self.adodb, self.CURSOR_FORWARD,\n self.LOCK_READONLY, self.CMD_TABLE_DIRECT)\n recordset.MoveFirst()\n\n header = [x.Name for x in recordset.Fields]\n rows = []\n while not recordset.EOF:\n rows.append([meta[x.Name]['CONVERT'](x.Value) for x in recordset.Fields])\n recordset.MoveNext()\n\n recordset.Close()\n\n return (header, rows, meta)\n\n def read_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=False, opts: dict=None):\n \"\"\"\n Submit an import job to the SAS workspace.\n :param filepath [str]: File URI.\n :param table [str]: Table name.\n :option libref [str]: Library name.\n :option nosob [bool]: Return the SAS code instead of executing it.\n :option opts [dict]: SAS PROC IMPORT options.\n \"\"\"\n opts = opts if opts is not None else {}\n filepath = 'url ' + filepath if filepath.lower().startswith('http') else filepath\n tablepath = self._tablepath(table, libref=libref)\n\n proc_code = \"\"\"\n filename csv_file \"{}\";\n proc import datafile=csv_file out={} dbms=csv replace;\n {}\n run;\n \"\"\".format(filepath.replace('\"', '\"\"'), tablepath, self._sb._impopts(opts))\n\n if nosub is True:\n return proc_code\n else:\n return self.submit(proc_code, 'text')\n\n def write_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=True, dsopts: 
dict=None, opts: dict=None):\n \"\"\"\n Submit an export job to the SAS workspace.\n :param filepath [str]: File URI.\n :param table [str]: Table name.\n :option libref [str]: Library name.\n :option nosob [bool]: Return the SAS code instead of executing it.\n :option opts [dict]: SAS PROC IMPORT options.\n :option dsopts [dict]: SAS dataset options.\n \"\"\"\n opts = opts if opts is not None else {}\n dsopts = dsopts if dsopts is not None else {}\n tablepath = self._tablepath(table, libref=libref)\n\n proc_code = \"\"\"\n filename csv_file \"{}\";\n proc export data={} {} outfile=csv_file dbms=csv replace;\n {}\n run;\n \"\"\".format(filepath.replace('\"', '\"\"'), tablepath, self._sb._dsopts(dsopts), self._sb._expopts(opts))\n\n if nosub is True:\n return proc_code\n else:\n return self.submit(proc_code, 'text')['LOG']\n\n def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a',\n libref: str =\"\", keep_outer_quotes: bool=False,\n embedded_newlines: bool=True,\n LF: str = '\\x01', CR: str = '\\x02',\n colsep: str = '\\x03', colrep: str = ' ',\n datetimes: dict={}, outfmts: dict={}, labels: dict={},\n outdsopts: dict={}, encode_errors = None, char_lengths = None,\n **kwargs):\n \"\"\"\n Create a SAS dataset from a pandas data frame.\n :param df [pd.DataFrame]: Pandas data frame containing data to write.\n :param table [str]: Table name.\n :option libref [str]: Library name. Default work.\n\n None of these options are used by this access method; they are needed for other access methods\n keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.\n embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them iported into the SAS data set\n LF - if embedded_newlines=True, the chacter to use for LF when transferring the data; defaults to '\\x01'\n CR - if embedded_newlines=True, the chacter to use for CR when transferring the data; defaults to '\\x02'\n colsep - the column seperator character used for streaming the delimmited data to SAS defaults to '\\x03'\n colrep - the char to convert to for any embedded colsep, LF, CR chars in the data; defaults to ' '\n datetimes - not implemented yet in this access method\n outfmts - not implemented yet in this access method\n labels - not implemented yet in this access method\n outdsopts - not implemented yet in this access method\n encode_errors - not implemented yet in this access method\n char_lengths - not implemented yet in this access method\n \"\"\"\n DATETIME_NAME = 'DATETIME26.6'\n DATETIME_FMT = '%Y-%m-%dT%H:%M:%S.%f'\n\n if self.sascfg.verbose:\n if keep_outer_quotes != False:\n logger.warning(\"'keep_outer_quotes=' is not used with this access method. option ignored.\")\n if embedded_newlines != True:\n logger.warning(\"'embedded_newlines=' is not used with this access method. option ignored.\")\n if LF != '\\x01' or CR != '\\x02' or colsep != '\\x03':\n logger.warning(\"'LF=, CR= and colsep=' are not used with this access method. option(s) ignored.\")\n if datetimes != {}:\n logger.warning(\"'datetimes=' is not used with this access method. option ignored.\")\n if outfmts != {}:\n logger.warning(\"'outfmts=' is not used with this access method. option ignored.\")\n if labels != {}:\n logger.warning(\"'labels=' is not used with this access method. option ignored.\")\n if outdsopts != {}:\n logger.warning(\"'outdsopts=' is not used with this access method. 
option ignored.\")\n if encode_errors:\n logger.warning(\"'encode_errors=' is not used with this access method. option ignored.\")\n if char_lengths:\n logger.warning(\"'char_lengths=' is not used with this access method. option ignored.\")\n\n tablepath = self._tablepath(table, libref=libref)\n\n if type(df.index) != pd.RangeIndex:\n warnings.warn(\"Note that Indexes are not transferred over as columns. Only actual coulmns are transferred\")\n\n columns = []\n formats = {}\n for i, name in enumerate(df.columns):\n if df[name].dtypes.kind in self.PD_NUM_TYPE:\n # Numeric type\n definition = \"'{}'n num\".format(name)\n formats[name] = lambda x: str(x) if pd.isnull(x) is False else 'NULL'\n elif df[name].dtypes.kind in self.PD_STR_TYPE:\n # Character type\n # NOTE: If a character string contains a single `'`, replace\n # it with `''`. This is the SAS equivalent to `\\'`.\n length = df[name].map(len).max()\n definition = \"'{}'n char({})\".format(name, length)\n formats[name] = lambda x: \"'{}'\".format(x.replace(\"'\", \"''\")) if pd.isnull(x) is False else 'NULL'\n elif df[name].dtypes.kind in self.PD_DT_TYPE:\n # Datetime type\n definition = \"'{}'n num informat={} format={}\".format(name, DATETIME_NAME, DATETIME_NAME)\n formats[name] = lambda x: \"'{:{}}'DT\".format(x, DATETIME_FMT) if pd.isnull(x) is False else 'NULL'\n else:\n # Default to character type\n # NOTE: If a character string contains a single `'`, replace\n # it with `''`. This is the SAS equivalent to `\\'`.\n length = df[name].map(str).map(len).max()\n definition = \"'{}'n char({})\".format(name, length)\n formats[name] = lambda x: \"'{}'\".format(x.replace(\"'\", \"''\")) if pd.isnull(x) is False else 'NULL'\n\n columns.append(definition)\n\n sql_values = []\n for index, row in df.iterrows():\n vals = []\n for i, col in enumerate(row):\n func = formats[df.columns[i]]\n vals.append(func(col))\n\n sql_values.append('values({})'.format(', '.join(vals)))\n\n sql_create = 'create table {} ({});'.format(tablepath, ', '.join(columns))\n sql_insert = 'insert into {} {};'.format(tablepath, '\\n'.join(sql_values))\n\n self.adodb.Execute(sql_create)\n self.adodb.Execute(sql_insert)\n return None\n\n def sasdata2dataframe(self, table: str, libref: str=None, dsopts: dict=None, method: str='', **kwargs) -> 'pd.DataFrame':\n \"\"\"\n Create a pandas data frame from a SAS dataset.\n :param table [str]: Table name.\n :option libref [str]: Library name.\n :option dsopts [dict]: Dataset options.\n :option method [str]: Download method.\n :option tempkeep [bool]: Download the csv file if using the csv method.\n :option tempfile [str]: File path for the saved output file.\n :return [pd.DataFrame]:\n \"\"\"\n # strip off unused by this access method options from kwargs\n # so they can't be passes to panda later\n rowsep = kwargs.pop('rowsep', ' ')\n colsep = kwargs.pop('colsep', ' ')\n rowrep = kwargs.pop('rowrep', ' ')\n colrep = kwargs.pop('colrep', ' ')\n\n if method.upper() == 'DISK':\n logger.error(\"This access method doesn't support the DISK method. Try CSV or MEMORY\")\n return None\n\n if method.upper() == 'CSV':\n df = self.sasdata2dataframeCSV(table, libref, dsopts=dsopts, **kwargs)\n else:\n my_fmts = kwargs.pop('my_fmts', False)\n k_dts = kwargs.pop('dtype', None)\n if self.sascfg.verbose:\n if my_fmts != False:\n logger.warning(\"'my_fmts=' is not supported in this access method. option ignored.\")\n if k_dts is not None:\n logger.warning(\"'dtype=' is only used with the CSV version of this method. 
option ignored.\")\n\n header, rows, meta = self.read_sasdata(table, libref, dsopts=dsopts)\n df = pd.DataFrame.from_records(rows, columns=header, **kwargs)\n\n for col in meta.keys():\n if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts:\n df[col] = pd.to_datetime(df[col], errors='coerce')\n elif meta[col]['DATA_TYPE'] == 5:\n df[col] = pd.to_numeric(df[col], errors='coerce')\n\n return df\n\n def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict = None,\n tempfile: str=None, tempkeep: bool=False, **kwargs) -> 'pd.DataFrame':\n \"\"\"\n Create a pandas data frame from a SAS dataset.\n :param table [str]: Table name.\n :option libref [str]: Library name.\n :option dsopts [dict]: Dataset options.\n :option opts [dict]: dictionary containing any of the following Proc Export options(delimiter, putnames)\n :option tempkeep [bool]: Download the csv file if using the csv method.\n :option tempfile [str]: File path for the saved output file.\n :return [pd.DataFrame]:\n \"\"\"\n FORMAT_STRING = '{column} {format}{length}.{precision}'\n EXPORT = \"\"\"\n data _saspy_sd2df;\n format {fmt};\n set {tbl};\n run;\n\n proc export data=_saspy_sd2df {dopt}\n outfile=\"{out}\"\n dbms=csv replace;\n {exopts}\n run;\n \"\"\"\n k_dts = kwargs.get('dtype', None)\n my_fmts = kwargs.pop('my_fmts', False)\n if self.sascfg.verbose:\n if my_fmts != False:\n logger.warning(\"'my_fmts=' is not supported in this access method. option ignored.\")\n\n sas_csv = '{}saspy_sd2df.csv'.format(self._sb.workpath)\n dopts = self._sb._dsopts(dsopts) if dsopts is not None else ''\n tablepath = self._tablepath(table, libref=libref)\n\n expopts = self._sb._expopts(kwargs.pop('opts', {}))\n\n # Convert any date format to one pandas can understand (ISO-8601).\n # Save a reference of the column name in a list so pandas can parse\n # the column during construction.\n datecols = []\n fmtlist = []\n meta = self._schema(table, libref)\n for name, col in meta.items():\n if col['FORMAT_NAME'] in self._sb.sas_date_fmts:\n datecols.append(name)\n col_format = self.FMT_DEFAULT_DATE_NAME\n col_length = self.FMT_DEFAULT_DATE_LENGTH\n col_precis = self.FMT_DEFAULT_DATE_PRECISION\n elif col['FORMAT_NAME'] in self._sb.sas_datetime_fmts:\n datecols.append(name)\n col_format = self.FMT_DEFAULT_DATETIME_NAME\n col_length = self.FMT_DEFAULT_DATETIME_LENGTH\n col_precis = self.FMT_DEFAULT_DATETIME_PRECISION\n # elif FIXME TIME FORMATS\n else:\n col_format = col['FORMAT_NAME']\n col_length = col['FORMAT_LENGTH']\n col_precis = col['FORMAT_DECIMAL']\n\n if col['FORMAT_NAME']:\n full_format = FORMAT_STRING.format(\n column=col['COLUMN_NAME'],\n format=col_format,\n length=col_length,\n precision=col_precis)\n\n fmtlist.append(full_format)\n\n export = EXPORT.format(fmt=' '.join(fmtlist),\n tbl=tablepath,\n dopt=dopts,\n exopts=expopts,\n out=sas_csv)\n\n # Use `LanguageService.Submit` instead of `submit` for a slight\n # performance bump. 
We don't need the log or listing here so skip\n # the wrapper function.\n self.workspace.LanguageService.Submit(export)\n\n outstring = self._getfile(sas_csv, decode=True)\n\n # Write temp file if requested by user\n if kwargs.get('tempkeep') is True and kwargs.get('tempfile') is not None:\n with open(kwargs['tempfile'], 'w') as f:\n f.write(outstring)\n\n df = pd.read_csv(io.StringIO(outstring), parse_dates=datecols, **kwargs)\n\n if k_dts is None: # don't override these if user provided their own dtypes\n for col in meta.keys():\n if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts:\n df[col] = pd.to_datetime(df[col], errors='coerce')\n\n return df\n\n def upload(self, local: str, remote: str, overwrite: bool=True, permission: str='', **kwargs):\n \"\"\"\n Upload a file to the SAS server.\n :param local [str]: Local filename.\n :param remote [str]: Local filename.\n :option overwrite [bool]: Overwrite the file if it exists.\n :option permission [str]: See SAS filename statement documentation.\n \"\"\"\n perms = \"PERMISSION='{}'\".format(permission) if permission else ''\n valid = self._sb.file_info(remote, quiet=True)\n\n if valid == {}:\n # Parameter `remote` references a directory. Default to using the\n # filename in `local` path.\n remote_file = remote + self._sb.hostsep + os.path.basename(local)\n elif valid is not None and overwrite is False:\n # Parameter `remote` references a file that exists but we cannot\n # overwrite it.\n # TODO: Raise exception here instead of returning dict\n return {'Success': False,\n 'LOG': 'File {} exists and overwrite was set to False. Upload was stopped.'.format(remote)}\n else:\n remote_file = remote\n\n with open(local, 'rb') as f:\n fobj = self.workspace.FileService.AssignFileref('infile', 'DISK', remote_file, perms, '')\n stream = fobj[0].OpenBinaryStream(self.STREAM_WRITE)\n\n stream.Write(f.read())\n stream.Close()\n self.workspace.FileService.DeassignFileref(fobj[0].FilerefName)\n\n return {'Success': True,\n 'LOG': 'File successfully written using FileService.'}\n\n def download(self, local: str, remote: str, overwrite: bool=True, **kwargs):\n \"\"\"\n Download a file from the SAS server.\n :param local [str]: Local filename.\n :param remote [str]: Local filename.\n :option overwrite [bool]: Overwrite the file if it exists.\n \"\"\"\n valid = self._sb.file_info(remote, quiet=True)\n\n if valid is None:\n # Parameter `remote` references an invalid file path.\n # TODO: Raise exception here instead of returning dict\n return {'Success': False,\n 'LOG': 'File {} does not exist.'.format(remote)}\n elif valid == {}:\n # Parameter `remote` references a directory.\n # TODO: Raise exception here instead of returning dict\n return {'Success': False,\n 'LOG': 'File {} is a directory.'.format(remote)}\n\n if os.path.isdir(local) is True:\n # Parameter `local` references a directory. Default to using the\n # filename in `remote` path.\n local_file = os.path.join(local, remote.rpartition(self._sb.hostsep)[2])\n else:\n local_file = local\n\n with open(local_file, 'wb') as f:\n f.write(self._getfile(remote))\n\n return {'Success': True,\n 'LOG': 'File successfully read using FileService.'}\n"
] | [
[
"pandas.to_datetime",
"pandas.isnull",
"pandas.DataFrame.from_records",
"pandas.to_numeric"
]
] |
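_set_authinfo in the row above pulls credentials from an authinfo file by shlex-splitting each line (posix=False keeps quoted values intact) and taking the first line whose key matches. Below is a standalone sketch of that parsing step using made-up file content instead of reading ~/.authinfo from disk; the key names and passwords are invented.

    import shlex

    authinfo_text = '''myserver user alice password "s3cret with spaces"
    otherhost user bob password hunter2
    '''
    authkey = 'myserver'

    # Split each line while preserving quoted values, then take the first matching key.
    parsed = (shlex.split(line, posix=False) for line in authinfo_text.splitlines())
    authline = next((fields for fields in parsed if fields and fields[0] == authkey), None)

    if authline is not None and len(authline) >= 5:
        # Expected layout: AUTHKEY user USERNAME password PASSWORD
        user, pw = authline[2], authline[4]
        print(user, pw)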
verult-prowtest/Cirq | [
"653bf210f04635f6d8fde80d37cb25edbab6eb31"
] | [
"cirq/ops/eigen_gate.py"
] | [
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport fractions\nfrom typing import Tuple, Union, List, Optional, cast, TypeVar, NamedTuple, \\\n Iterable\n\nimport abc\n\nimport numpy as np\nimport sympy\n\nfrom cirq import value, protocols\nfrom cirq._compat import gcd\nfrom cirq.ops import raw_types\nfrom cirq.type_workarounds import NotImplementedType\n\n\nTSelf = TypeVar('TSelf', bound='EigenGate')\n\n\nEigenComponent = NamedTuple(\n 'EigenComponent',\n [\n # The θ in λ = exp(i π θ) where λ is a unique eigenvalue. The exponent\n # factor is used, instead of just a raw unit complex number, because it\n # disambiguates several cases. For example, when λ=-1 you can set θ to\n # -1 instead of +1 resulting in square root operations returning -i\n # instead of +i.\n ('eigenvalue_exponent_factor', float),\n\n # The projection matrix onto the eigenspace of the eigenvalue. Must\n # equal Σ_k |λ_k⟩⟨λ_k| where the |λ_k⟩ vectors form an orthonormal\n # basis for the eigenspace.\n ('eigenspace_projector', np.ndarray),\n ]\n)\n\n\[email protected]_equality(distinct_child_types=True, approximate=True)\nclass EigenGate(raw_types.Gate):\n \"\"\"A gate with a known eigendecomposition.\n\n EigenGate is particularly useful when one wishes for different parts of\n the same eigenspace to be extrapolated differently. For example, if a gate\n has a 2-dimensional eigenspace with eigenvalue -1, but one wishes for the\n square root of the gate to split this eigenspace into a part with\n eigenvalue i and a part with eigenvalue -i, then EigenGate allows this\n functionality to be unambiguously specified via the _eigen_components\n method.\n \"\"\"\n\n def __init__(self, *, # Forces keyword args.\n exponent: Union[sympy.Basic, float] = 1.0,\n global_shift: float = 0.0) -> None:\n \"\"\"Initializes the parameters used to compute the gate's matrix.\n\n The eigenvalue of each eigenspace of a gate is computed by\n\n 1. Starting with an angle in half turns as returned by the gate's\n ``_eigen_components`` method:\n\n θ\n\n 2. Shifting the angle by `global_shift`:\n\n θ + s\n\n 3. Scaling the angle by `exponent`:\n\n (θ + s) * e\n\n 4. Converting from half turns to a complex number on the unit circle:\n\n exp(i * pi * (θ + s) * e)\n\n Args:\n exponent: The t in gate**t. Determines how much the eigenvalues of\n the gate are scaled by. For example, eigenvectors phased by -1\n when `gate**1` is applied will gain a relative phase of\n e^{i pi exponent} when `gate**exponent` is applied (relative to\n eigenvectors unaffected by `gate**1`).\n global_shift: Offsets the eigenvalues of the gate at exponent=1.\n In effect, this controls a global phase factor on the gate's\n unitary matrix. 
The factor is:\n\n exp(i * pi * global_shift * exponent)\n\n For example, `cirq.X**t` uses a `global_shift` of 0 but\n `cirq.Rx(t)` uses a `global_shift` of -0.5, which is why\n `cirq.unitary(cirq.Rx(pi))` equals -iX instead of X.\n \"\"\"\n self._exponent = exponent\n self._global_shift = global_shift\n self._canonical_exponent_cached = None\n\n @property\n def exponent(self) -> Union[sympy.Basic, float]:\n return self._exponent\n\n # virtual method\n def _with_exponent(self: TSelf,\n exponent: Union[sympy.Basic, float]) -> TSelf:\n \"\"\"Return the same kind of gate, but with a different exponent.\n\n Child classes should override this method if they have an __init__\n method with a differing signature.\n \"\"\"\n # pylint: disable=unexpected-keyword-arg\n if self._global_shift == 0:\n return type(self)(exponent=exponent)\n return type(self)(\n exponent=exponent,\n global_shift=self._global_shift)\n # pylint: enable=unexpected-keyword-arg\n\n def _diagram_exponent(self,\n args: protocols.CircuitDiagramInfoArgs,\n *,\n ignore_global_phase: bool = True):\n \"\"\"The exponent to use in circuit diagrams.\n\n Basically, this just canonicalizes the exponent in a way that is\n insensitive to global phase. Only relative phases affect the \"true\"\n exponent period, and since we omit global phase detail in diagrams this\n is the appropriate canonicalization to use. To use the absolute period\n instead of the relative period (e.g. for when printing Rx(rads) style\n symbols, where rads=pi and rads=-pi are equivalent but should produce\n different text) set 'ignore_global_phase' to False.\n\n Note that the exponent is canonicalized into the range\n (-period/2, period/2]\n and that this canonicalization happens after rounding, so that e.g.\n X^-0.999999 shows as X instead of X^-1 when using a digit precision of\n 3.\n\n Args:\n args: The diagram args being used to produce the diagram.\n ignore_global_phase: Determines whether the global phase of the\n operation is considered when computing the period of the\n exponent.\n\n Returns:\n A rounded canonicalized exponent.\n \"\"\"\n if not isinstance(self._exponent, (int, float)):\n return self._exponent\n result = float(self._exponent)\n\n if ignore_global_phase:\n # Compute global-phase-independent period of the gate.\n shifts = list(self._eigen_shifts())\n relative_shifts = {e - shifts[0] for e in shifts[1:]}\n relative_periods = [abs(2/e) for e in relative_shifts if e != 0]\n diagram_period = _approximate_common_period(relative_periods)\n else:\n # Use normal period of the gate.\n diagram_period = self._period()\n if diagram_period is None:\n return result\n\n # Canonicalize the rounded exponent into (-period/2, period/2].\n if args.precision is not None:\n result = np.around(result, args.precision)\n h = diagram_period / 2\n if not (-h < result <= h):\n result = h - result\n result %= diagram_period\n result = h - result\n\n return result\n\n # virtual method\n def _eigen_shifts(self) -> List[float]:\n \"\"\"Describes the eigenvalues of the gate's matrix.\n\n By default, this just extracts the shifts by calling\n self._eigen_components(). However, because that method generates\n matrices it may be extremely expensive.\n\n Returns:\n A list of floats. Each float in the list corresponds to one of the\n eigenvalues of the gate's matrix, before accounting for any global\n shift. 
Each float is the θ in λ = exp(i π θ) (where λ is the\n eigenvalue).\n \"\"\"\n return [e[0] for e in self._eigen_components()]\n\n @abc.abstractmethod\n def _eigen_components(self) -> List[Union[EigenComponent,\n Tuple[float, np.ndarray]]]:\n \"\"\"Describes the eigendecomposition of the gate's matrix.\n\n Returns:\n A list of EigenComponent tuples. Each tuple in the list\n corresponds to one of the eigenspaces of the gate's matrix. Each\n tuple has two elements. The first element of a tuple is the θ in\n λ = exp(i π θ) (where λ is the eigenvalue of the eigenspace). The\n second element is a projection matrix onto the eigenspace.\n\n Examples:\n The Pauli Z gate's eigencomponents are:\n\n [\n (0, np.array([[1, 0],\n [0, 0]])),\n (1, np.array([[0, 0],\n [0, 1]])),\n ]\n\n Valid eigencomponents for Rz(π) = -iZ are:\n\n [\n (-0.5, np.array([[1, 0],\n [0, 0]])),\n (+0.5, np.array([[0, 0],\n [0, 1]])),\n ]\n\n But in principle you could also use this:\n\n [\n (+1.5, np.array([[1, 0],\n [0, 0]])),\n (-0.5, np.array([[0, 0],\n [0, 1]])),\n ]\n\n The choice between -0.5 and +1.5 does not affect the gate's\n matrix, but it does affect the matrix of powers of the gates\n (because (x+2)*s != x*s (mod 2) when s is a real number).\n\n The Pauli X gate's eigencomponents are:\n\n [\n (0, np.array([[0.5, 0.5],\n [0.5, 0.5]])),\n (1, np.array([[+0.5, -0.5],\n [-0.5, +0.5]])),\n ]\n \"\"\"\n\n def _period(self) -> Optional[float]:\n \"\"\"Determines how the exponent parameter is canonicalized when equating.\n\n Returns:\n None if the exponent should not be canonicalized. Otherwise a float\n indicating the period of the exponent. If the period is p, then a\n given exponent will be shifted by p until it is in the range\n (-p/2, p/2] during initialization.\n \"\"\"\n exponents = {e + self._global_shift for e in self._eigen_shifts()}\n real_periods = [abs(2/e) for e in exponents if e != 0]\n return _approximate_common_period(real_periods)\n\n def __pow__(self: TSelf, exponent: Union[float, sympy.Symbol]) -> TSelf:\n new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n return NotImplemented\n return self._with_exponent(exponent=new_exponent)\n\n @property\n def _canonical_exponent(self):\n if self._canonical_exponent_cached is None:\n period = self._period()\n if not period or protocols.is_parameterized(self._exponent):\n self._canonical_exponent_cached = self._exponent\n else:\n self._canonical_exponent_cached = self._exponent % period\n return self._canonical_exponent_cached\n\n def _value_equality_values_(self):\n return self._canonical_exponent, self._global_shift\n\n def _value_equality_approximate_values_(self):\n period = self._period()\n if not period or protocols.is_parameterized(self._exponent):\n exponent = self._exponent\n else:\n exponent = value.PeriodicValue(self._exponent, period)\n return exponent, self._global_shift\n\n def _trace_distance_bound_(self):\n if protocols.is_parameterized(self._exponent):\n return 1\n\n angles = [half_turns for half_turns, _ in self._eigen_components()]\n min_angle = min(angles)\n max_angle = max(angles)\n return abs((max_angle - min_angle) * self._exponent * 3.5)\n\n def _has_unitary_(self) -> bool:\n return not self._is_parameterized_()\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n if self._is_parameterized_():\n return NotImplemented\n e = cast(float, self._exponent)\n return np.sum([\n component * 1j**(\n 2 * e * (half_turns + self._global_shift))\n for half_turns, component in 
self._eigen_components()\n ], axis=0)\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self._exponent)\n\n def _resolve_parameters_(self: TSelf, param_resolver) -> TSelf:\n return self._with_exponent(\n exponent=param_resolver.value_of(self._exponent))\n\n\ndef _lcm(vals: Iterable[int]) -> int:\n t = 1\n for r in vals:\n t = t * r // gcd(t, r)\n return t\n\n\ndef _approximate_common_period(periods: List[float],\n approx_denom: int = 60,\n reject_atol: float = 1e-8) -> Optional[float]:\n \"\"\"Finds a value that is nearly an integer multiple of multiple periods.\n\n The returned value should be the smallest non-negative number with this\n property. If `approx_denom` is too small the computation can fail to satisfy\n the `reject_atol` criteria and return `None`. This is actually desirable\n behavior, since otherwise the code would e.g. return a nonsense value when\n asked to compute the common period of `np.e` and `np.pi`.\n\n Args:\n periods: The result must be an approximate integer multiple of each of\n these.\n approx_denom: Determines how the floating point values are rounded into\n rational values (so that integer methods such as lcm can be used).\n Each floating point value f_k will be rounded to a rational number\n of the form n_k / approx_denom. If you want to recognize rational\n periods of the form i/d then d should divide `approx_denom`.\n reject_atol: If the computed approximate common period is at least this\n far from an integer multiple of any of the given periods, then it\n is discarded and `None` is returned instead.\n\n Returns:\n The approximate common period, or else `None` if the given\n `approx_denom` wasn't sufficient to approximate the common period to\n within the given `reject_atol`.\n \"\"\"\n if not periods:\n return None\n if any(e == 0 for e in periods):\n return None\n if len(periods) == 1:\n return abs(periods[0])\n approx_rational_periods = [\n fractions.Fraction(int(np.round(abs(p) * approx_denom)), approx_denom)\n for p in periods\n ]\n common = float(_common_rational_period(approx_rational_periods))\n\n for p in periods:\n if p != 0 and abs(p * np.round(common / p) - common) > reject_atol:\n return None\n\n return common\n\n\ndef _common_rational_period(rational_periods: List[fractions.Fraction]\n ) -> fractions.Fraction:\n \"\"\"Finds the least common integer multiple of some fractions.\n\n The solution is the smallest positive integer c such that there\n exists integers n_k satisfying p_k * n_k = c for all k.\n \"\"\"\n assert rational_periods, \"no well-defined solution for an empty list\"\n common_denom = _lcm(p.denominator for p in rational_periods)\n int_periods = [p.numerator * common_denom // p.denominator\n for p in rational_periods]\n int_common_period = _lcm(int_periods)\n return fractions.Fraction(int_common_period, common_denom)\n"
] | [
[
"numpy.round",
"numpy.around"
]
] |
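Note on the EigenGate entry above: its `_unitary_` method computes the gate's matrix at a given exponent as the eigencomponent sum U**e = Σ_k exp(i·π·e·(θ_k + global_shift))·P_k. Below is a minimal, standalone sketch of that arithmetic in plain NumPy, with no cirq dependency; the helper name `eigen_unitary` and the assert-based checks are illustrative only, and the Pauli X eigencomponents are copied from the `_eigen_components` docstring in the code above.

```python
import numpy as np

def eigen_unitary(eigen_components, exponent=1.0, global_shift=0.0):
    # Mirrors EigenGate._unitary_: sum projector * exp(i*pi*exponent*(theta + shift))
    # over all (theta, projector) eigencomponents; note 1j**(2*x) == exp(i*pi*x).
    return np.sum([
        projector * 1j**(2 * exponent * (half_turns + global_shift))
        for half_turns, projector in eigen_components
    ], axis=0)

# Pauli X eigencomponents, taken from the _eigen_components docstring above.
x_components = [
    (0, np.array([[0.5, 0.5],
                  [0.5, 0.5]])),
    (1, np.array([[0.5, -0.5],
                  [-0.5, 0.5]])),
]

X = np.array([[0, 1], [1, 0]])
assert np.allclose(eigen_unitary(x_components, exponent=1.0), X)          # X**1
assert np.allclose(eigen_unitary(x_components, exponent=2.0), np.eye(2))  # X**2 == I
# global_shift=-0.5 at exponent=1 yields -iX, the Rx(pi) case from the __init__ docstring.
assert np.allclose(eigen_unitary(x_components, 1.0, global_shift=-0.5), -1j * X)
```

This sketch only covers the eigencomponent arithmetic; the exponent-canonicalization machinery (`_period`, `_approximate_common_period`) and parameterized (sympy) exponents from the entry above are not reproduced here.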
ocjosen/tensorflow | [
"ada0605591911094c142d39cbd87294ed2716e8b"
] | [
"tensorflow/python/feature_column/feature_column.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"This API defines FeatureColumn abstraction.\n\nFeatureColumns provide a high level abstraction for ingesting and representing\nfeatures. FeatureColumns are also the primary way of encoding features for\ncanned `tf.estimator.Estimator`s.\n\nWhen using FeatureColumns with `Estimators`, the type of feature column you\nshould choose depends on (1) the feature type and (2) the model type.\n\n1. Feature type:\n\n * Continuous features can be represented by `numeric_column`.\n * Categorical features can be represented by any `categorical_column_with_*`\n column:\n - `categorical_column_with_vocabulary_list`\n - `categorical_column_with_vocabulary_file`\n - `categorical_column_with_hash_bucket`\n - `categorical_column_with_identity`\n - `weighted_categorical_column`\n\n2. Model type:\n\n * Deep neural network models (`DNNClassifier`, `DNNRegressor`).\n\n Continuous features can be directly fed into deep neural network models.\n\n age_column = numeric_column(\"age\")\n\n To feed sparse features into DNN models, wrap the column with\n `embedding_column` or `indicator_column`. `indicator_column` is recommended\n for features with only a few possible values. For features with many\n possible values, to reduce the size of your model, `embedding_column` is\n recommended.\n\n embedded_dept_column = embedding_column(\n categorical_column_with_vocabulary_list(\n \"department\", [\"math\", \"philosophy\", ...]), dimension=10)\n\n * Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).\n\n Sparse features can be fed directly into linear models. 
They behave like an\n indicator column but with an efficient implementation.\n\n dept_column = categorical_column_with_vocabulary_list(\"department\",\n [\"math\", \"philosophy\", \"english\"])\n\n It is recommended that continuous features be bucketized before being\n fed into linear models.\n\n bucketized_age_column = bucketized_column(\n source_column=age_column,\n boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])\n\n Sparse features can be crossed (also known as conjuncted or combined) in\n order to form non-linearities, and then fed into linear models.\n\n cross_dept_age_column = crossed_column(\n columns=[\"department\", bucketized_age_column],\n hash_bucket_size=1000)\n\nExample of building canned `Estimator`s using FeatureColumns:\n\n ```python\n # Define features and transformations\n deep_feature_columns = [age_column, embedded_dept_column]\n wide_feature_columns = [dept_column, bucketized_age_column,\n cross_dept_age_column]\n\n # Build deep model\n estimator = DNNClassifier(\n feature_columns=deep_feature_columns,\n hidden_units=[500, 250, 50])\n estimator.train(...)\n\n # Or build a wide model\n estimator = LinearClassifier(\n feature_columns=wide_feature_columns)\n estimator.train(...)\n\n # Or build a wide and deep model!\n estimator = DNNLinearCombinedClassifier(\n linear_feature_columns=wide_feature_columns,\n dnn_feature_columns=deep_feature_columns,\n dnn_hidden_units=[500, 250, 50])\n estimator.train(...)\n ```\n\n\nFeatureColumns can also be transformed into a generic input layer for\ncustom models using `input_layer`.\n\nExample of building model using FeatureColumns, this can be used in a\n`model_fn` which is given to the {tf.estimator.Estimator}:\n\n ```python\n # Building model via layers\n\n deep_feature_columns = [age_column, embedded_dept_column]\n columns_to_tensor = parse_feature_columns_from_examples(\n serialized=my_data,\n feature_columns=deep_feature_columns)\n first_layer = input_layer(\n features=columns_to_tensor,\n feature_columns=deep_feature_columns)\n second_layer = fully_connected(first_layer, ...)\n ```\n\nNOTE: Functions prefixed with \"_\" indicate experimental or private parts of\nthe API subject to change, and should not be relied upon!\n\nNOTE: The new feature columns are being developed in feature_column_v2.py and\nare a somewhat duplicate of the code here. 
Please make sure to update logic\nin both places.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport math\n\nimport numpy as np\nimport six\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.feature_column import utils as fc_utils\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras.engine import training\nfrom tensorflow.python.layers import base\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import template\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import checkpoint_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _internal_input_layer(features,\n feature_columns,\n weight_collections=None,\n trainable=True,\n cols_to_vars=None,\n scope=None,\n cols_to_output_tensors=None,\n from_template=False):\n \"\"\"See input_layer. `scope` is a name or variable scope to use.\"\"\"\n\n feature_columns = _normalize_feature_columns(feature_columns)\n for column in feature_columns:\n if not isinstance(column, _DenseColumn):\n raise ValueError(\n 'Items of feature_columns must be a _DenseColumn. '\n 'You can wrap a categorical column with an '\n 'embedding_column or indicator_column. 
Given: {}'.format(column))\n weight_collections = list(weight_collections or [])\n if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:\n weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)\n if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:\n weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)\n\n def _get_logits(): # pylint: disable=missing-docstring\n builder = _LazyBuilder(features)\n output_tensors = []\n ordered_columns = []\n for column in sorted(feature_columns, key=lambda x: x.name):\n ordered_columns.append(column)\n with variable_scope.variable_scope(\n None, default_name=column._var_scope_name): # pylint: disable=protected-access\n tensor = column._get_dense_tensor( # pylint: disable=protected-access\n builder,\n weight_collections=weight_collections,\n trainable=trainable)\n num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access\n batch_size = array_ops.shape(tensor)[0]\n output_tensor = array_ops.reshape(\n tensor, shape=(batch_size, num_elements))\n output_tensors.append(output_tensor)\n if cols_to_vars is not None:\n # Retrieve any variables created (some _DenseColumn's don't create\n # variables, in which case an empty list is returned).\n cols_to_vars[column] = ops.get_collection(\n ops.GraphKeys.GLOBAL_VARIABLES,\n scope=variable_scope.get_variable_scope().name)\n if cols_to_output_tensors is not None:\n cols_to_output_tensors[column] = output_tensor\n _verify_static_batch_size_equality(output_tensors, ordered_columns)\n return array_ops.concat(output_tensors, 1)\n\n # If we're constructing from the `make_template`, that by default adds a\n # variable scope with the name of the layer. In that case, we dont want to\n # add another `variable_scope` as that would break checkpoints.\n if from_template:\n return _get_logits()\n else:\n with variable_scope.variable_scope(\n scope, default_name='input_layer', values=features.values()):\n return _get_logits()\n\n\n@tf_export(v1=['feature_column.input_layer'])\ndef input_layer(features,\n feature_columns,\n weight_collections=None,\n trainable=True,\n cols_to_vars=None,\n cols_to_output_tensors=None):\n \"\"\"Returns a dense `Tensor` as input layer based on given `feature_columns`.\n\n Generally a single example in training data is described with FeatureColumns.\n At the first layer of the model, this column oriented data should be converted\n to a single `Tensor`.\n\n Example:\n\n ```python\n price = numeric_column('price')\n keywords_embedded = embedding_column(\n categorical_column_with_hash_bucket(\"keywords\", 10K), dimensions=16)\n columns = [price, keywords_embedded, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n for units in [128, 64, 32]:\n dense_tensor = tf.compat.v1.layers.dense(dense_tensor, units, tf.nn.relu)\n prediction = tf.compat.v1.layers.dense(dense_tensor, 1)\n ```\n\n Args:\n features: A mapping from key to tensors. `_FeatureColumn`s look up via these\n keys. For example `numeric_column('price')` will look at 'price' key in\n this dict. Values can be a `SparseTensor` or a `Tensor` depends on\n corresponding `_FeatureColumn`.\n feature_columns: An iterable containing the FeatureColumns to use as inputs\n to your model. All items should be instances of classes derived from\n `_DenseColumn` such as `numeric_column`, `embedding_column`,\n `bucketized_column`, `indicator_column`. 
If you have categorical features,\n you can wrap them with an `embedding_column` or `indicator_column`.\n weight_collections: A list of collection names to which the Variable will be\n added. Note that variables will also be added to collections\n `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n cols_to_vars: If not `None`, must be a dictionary that will be filled with a\n mapping from `_FeatureColumn` to list of `Variable`s. For example, after\n the call, we might have cols_to_vars =\n {_EmbeddingColumn(\n categorical_column=_HashedCategoricalColumn(\n key='sparse_feature', hash_bucket_size=5, dtype=tf.string),\n dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10),\n <tf.Variable 'some_variable:1' shape=(5, 10)]}\n If a column creates no variables, its value will be an empty list.\n cols_to_output_tensors: If not `None`, must be a dictionary that will be\n filled with a mapping from '_FeatureColumn' to the associated\n output `Tensor`s.\n\n Returns:\n A `Tensor` which represents input layer of a model. Its shape\n is (batch_size, first_layer_dimension) and its dtype is `float32`.\n first_layer_dimension is determined based on given `feature_columns`.\n\n Raises:\n ValueError: if an item in `feature_columns` is not a `_DenseColumn`.\n \"\"\"\n return _internal_input_layer(\n features,\n feature_columns,\n weight_collections=weight_collections,\n trainable=trainable,\n cols_to_vars=cols_to_vars,\n cols_to_output_tensors=cols_to_output_tensors)\n\n\n# TODO(akshayka): InputLayer should be a subclass of Layer, and it\n# should implement the logic in input_layer using Layer's build-and-call\n# paradigm; input_layer should create an instance of InputLayer and\n# return the result of invoking its apply method, just as functional layers do.\nclass InputLayer(object):\n \"\"\"An object-oriented version of `input_layer` that reuses variables.\"\"\"\n\n def __init__(self,\n feature_columns,\n weight_collections=None,\n trainable=True,\n cols_to_vars=None,\n name='feature_column_input_layer',\n create_scope_now=True):\n \"\"\"See `input_layer`.\"\"\"\n\n self._feature_columns = feature_columns\n self._weight_collections = weight_collections\n self._trainable = trainable\n self._cols_to_vars = cols_to_vars\n self._name = name\n self._input_layer_template = template.make_template(\n self._name, _internal_input_layer, create_scope_now_=create_scope_now)\n self._scope = self._input_layer_template.variable_scope\n\n def __call__(self, features):\n return self._input_layer_template(\n features=features,\n feature_columns=self._feature_columns,\n weight_collections=self._weight_collections,\n trainable=self._trainable,\n cols_to_vars=None,\n from_template=True)\n\n @property\n def name(self):\n return self._name\n\n @property\n def non_trainable_variables(self):\n return self._input_layer_template.non_trainable_variables\n\n @property\n def non_trainable_weights(self):\n return self._input_layer_template.non_trainable_weights\n\n @property\n def trainable_variables(self):\n return self._input_layer_template.trainable_variables\n\n @property\n def trainable_weights(self):\n return self._input_layer_template.trainable_weights\n\n @property\n def variables(self):\n return self._input_layer_template.variables\n\n @property\n def weights(self):\n return self._input_layer_template.weights\n\n\n@tf_export(v1=['feature_column.linear_model'])\ndef 
linear_model(features,\n feature_columns,\n units=1,\n sparse_combiner='sum',\n weight_collections=None,\n trainable=True,\n cols_to_vars=None):\n \"\"\"Returns a linear prediction `Tensor` based on given `feature_columns`.\n\n This function generates a weighted sum based on output dimension `units`.\n Weighted sum refers to logits in classification problems. It refers to the\n prediction itself for linear regression problems.\n\n Note on supported columns: `linear_model` treats categorical columns as\n `indicator_column`s. To be specific, assume the input as `SparseTensor` looks\n like:\n\n ```python\n shape = [2, 2]\n {\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n }\n ```\n `linear_model` assigns weights for the presence of \"a\", \"b\", \"c' implicitly,\n just like `indicator_column`, while `input_layer` explicitly requires wrapping\n each of categorical columns with an `embedding_column` or an\n `indicator_column`.\n\n Example of usage:\n\n ```python\n price = numeric_column('price')\n price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])\n keywords = categorical_column_with_hash_bucket(\"keywords\", 10K)\n keywords_price = crossed_column('keywords', price_buckets, ...)\n columns = [price_buckets, keywords, keywords_price ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n prediction = linear_model(features, columns)\n ```\n\n The `sparse_combiner` argument works as follows\n For example, for two features represented as the categorical columns:\n\n ```python\n # Feature 1\n\n shape = [2, 2]\n {\n [0, 0]: \"a\"\n [0, 1]: \"b\"\n [1, 0]: \"c\"\n }\n\n # Feature 2\n\n shape = [2, 3]\n {\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n [1, 1]: \"f\"\n [1, 2]: \"f\"\n }\n ```\n\n with `sparse_combiner` as \"mean\", the linear model outputs consequently\n are:\n\n ```\n y_0 = 1.0 / 2.0 * ( w_a + w_b ) + w_d + b\n y_1 = w_c + 1.0 / 3.0 * ( w_e + 2.0 * w_f ) + b\n ```\n\n where `y_i` is the output, `b` is the bias, and `w_x` is the weight\n assigned to the presence of `x` in the input features.\n\n Args:\n features: A mapping from key to tensors. `_FeatureColumn`s look up via these\n keys. For example `numeric_column('price')` will look at 'price' key in\n this dict. Values are `Tensor` or `SparseTensor` depending on\n corresponding `_FeatureColumn`.\n feature_columns: An iterable containing the FeatureColumns to use as inputs\n to your model. All items should be instances of classes derived from\n `_FeatureColumn`s.\n units: An integer, dimensionality of the output space. Default value is 1.\n sparse_combiner: A string specifying how to reduce if a categorical column\n is multivalent. Except `numeric_column`, almost all columns passed to\n `linear_model` are considered as categorical columns. It combines each\n categorical column independently. Currently \"mean\", \"sqrtn\" and \"sum\" are\n supported, with \"sum\" the default for linear model. \"sqrtn\" often achieves\n good accuracy, in particular with bag-of-words columns.\n * \"sum\": do not normalize features in the column\n * \"mean\": do l1 normalization on features in the column\n * \"sqrtn\": do l2 normalization on features in the column\n weight_collections: A list of collection names to which the Variable will be\n added. 
Note that, variables will also be added to collections\n `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n cols_to_vars: If not `None`, must be a dictionary that will be filled with a\n mapping from `_FeatureColumn` to associated list of `Variable`s. For\n example, after the call, we might have cols_to_vars = {\n _NumericColumn(\n key='numeric_feature1', shape=(1,):\n [<tf.Variable 'linear_model/price2/weights:0' shape=(1, 1)>],\n 'bias': [<tf.Variable 'linear_model/bias_weights:0' shape=(1,)>],\n _NumericColumn(\n key='numeric_feature2', shape=(2,)):\n [<tf.Variable 'linear_model/price1/weights:0' shape=(2, 1)>]}\n If a column creates no variables, its value will be an empty list. Note\n that cols_to_vars will also contain a string key 'bias' that maps to a\n list of Variables.\n\n Returns:\n A `Tensor` which represents predictions/logits of a linear model. Its shape\n is (batch_size, units) and its dtype is `float32`.\n\n Raises:\n ValueError: if an item in `feature_columns` is neither a `_DenseColumn`\n nor `_CategoricalColumn`.\n \"\"\"\n with variable_scope.variable_scope(None, 'linear_model') as vs:\n model_name = _strip_leading_slashes(vs.name)\n linear_model_layer = _LinearModel(\n feature_columns=feature_columns,\n units=units,\n sparse_combiner=sparse_combiner,\n weight_collections=weight_collections,\n trainable=trainable,\n name=model_name)\n retval = linear_model_layer(features) # pylint: disable=not-callable\n if cols_to_vars is not None:\n cols_to_vars.update(linear_model_layer.cols_to_vars())\n return retval\n\n\ndef _add_to_collections(var, weight_collections):\n \"\"\"Adds a var to the list of weight_collections provided.\n\n Handles the case for partitioned and non-partitioned variables.\n\n Args:\n var: A variable or Partitioned Variable.\n weight_collections: List of collections to add variable to.\n \"\"\"\n for weight_collection in weight_collections:\n # The layer self.add_variable call already adds it to GLOBAL_VARIABLES.\n if weight_collection == ops.GraphKeys.GLOBAL_VARIABLES:\n continue\n # TODO(rohanj): Explore adding a _get_variable_list method on `Variable`\n # so that we don't have to do this check.\n if isinstance(var, variables.PartitionedVariable):\n for constituent_var in list(var):\n ops.add_to_collection(weight_collection, constituent_var)\n else:\n ops.add_to_collection(weight_collection, var)\n\n\nclass _FCLinearWrapper(base.Layer):\n \"\"\"Wraps a _FeatureColumn in a layer for use in a linear model.\n\n See `linear_model` above.\n \"\"\"\n\n def __init__(self,\n feature_column,\n units=1,\n sparse_combiner='sum',\n weight_collections=None,\n trainable=True,\n name=None,\n **kwargs):\n super(_FCLinearWrapper, self).__init__(\n trainable=trainable, name=name, **kwargs)\n self._feature_column = feature_column\n self._units = units\n self._sparse_combiner = sparse_combiner\n self._weight_collections = weight_collections\n\n def build(self, _):\n if isinstance(self._feature_column, _CategoricalColumn):\n weight = self.add_variable(\n name='weights',\n shape=(self._feature_column._num_buckets, self._units), # pylint: disable=protected-access\n initializer=init_ops.zeros_initializer(),\n trainable=self.trainable)\n else:\n num_elements = self._feature_column._variable_shape.num_elements() # pylint: disable=protected-access\n weight = self.add_variable(\n name='weights',\n shape=[num_elements, self._units],\n 
initializer=init_ops.zeros_initializer(),\n trainable=self.trainable)\n _add_to_collections(weight, self._weight_collections)\n self._weight_var = weight\n self.built = True\n\n def call(self, builder):\n weighted_sum = _create_weighted_sum(\n column=self._feature_column,\n builder=builder,\n units=self._units,\n sparse_combiner=self._sparse_combiner,\n weight_collections=self._weight_collections,\n trainable=self.trainable,\n weight_var=self._weight_var)\n return weighted_sum\n\n\nclass _BiasLayer(base.Layer):\n \"\"\"A layer for the bias term.\n \"\"\"\n\n def __init__(self,\n units=1,\n trainable=True,\n weight_collections=None,\n name=None,\n **kwargs):\n super(_BiasLayer, self).__init__(trainable=trainable, name=name, **kwargs)\n self._units = units\n self._weight_collections = weight_collections\n\n def build(self, _):\n self._bias_variable = self.add_variable(\n 'bias_weights',\n shape=[self._units],\n initializer=init_ops.zeros_initializer(),\n trainable=self.trainable)\n _add_to_collections(self._bias_variable, self._weight_collections)\n self.built = True\n\n def call(self, _):\n return self._bias_variable\n\n\ndef _get_expanded_variable_list(variable):\n if (isinstance(variable, variables.Variable) or\n resource_variable_ops.is_resource_variable(variable)):\n return [variable] # Single variable case.\n else: # Must be a PartitionedVariable, so convert into a list.\n return list(variable)\n\n\ndef _strip_leading_slashes(name):\n return name.rsplit('/', 1)[-1]\n\n\nclass _LinearModel(training.Model):\n \"\"\"Creates a linear model using feature columns.\n\n See `linear_model` for details.\n \"\"\"\n\n def __init__(self,\n feature_columns,\n units=1,\n sparse_combiner='sum',\n weight_collections=None,\n trainable=True,\n name=None,\n **kwargs):\n super(_LinearModel, self).__init__(name=name, **kwargs)\n self._feature_columns = _normalize_feature_columns(\n feature_columns)\n self._weight_collections = list(weight_collections or [])\n if ops.GraphKeys.GLOBAL_VARIABLES not in self._weight_collections:\n self._weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)\n if ops.GraphKeys.MODEL_VARIABLES not in self._weight_collections:\n self._weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)\n\n column_layers = {}\n for column in sorted(self._feature_columns, key=lambda x: x.name):\n with variable_scope.variable_scope(\n None, default_name=column._var_scope_name) as vs: # pylint: disable=protected-access\n # Having the fully expressed variable scope name ends up doubly\n # expressing the outer scope (scope with which this method was called)\n # in the name of the variable that would get created.\n column_name = _strip_leading_slashes(vs.name)\n column_layer = _FCLinearWrapper(column, units, sparse_combiner,\n self._weight_collections, trainable,\n column_name, **kwargs)\n column_layers[column_name] = column_layer\n self._column_layers = self._add_layers(column_layers)\n self._bias_layer = _BiasLayer(\n units=units,\n trainable=trainable,\n weight_collections=self._weight_collections,\n name='bias_layer',\n **kwargs)\n self._cols_to_vars = {}\n\n def cols_to_vars(self):\n \"\"\"Returns a dict mapping _FeatureColumns to variables.\n\n See `linear_model` for more information.\n This is not populated till `call` is called i.e. 
layer is built.\n \"\"\"\n return self._cols_to_vars\n\n def call(self, features):\n with variable_scope.variable_scope(self.name):\n for column in self._feature_columns:\n if not isinstance(column, (_DenseColumn, _CategoricalColumn)):\n raise ValueError(\n 'Items of feature_columns must be either a '\n '_DenseColumn or _CategoricalColumn. Given: {}'.format(column))\n weighted_sums = []\n ordered_columns = []\n builder = _LazyBuilder(features)\n for layer in sorted(self._column_layers.values(), key=lambda x: x.name):\n column = layer._feature_column # pylint: disable=protected-access\n ordered_columns.append(column)\n weighted_sum = layer(builder)\n weighted_sums.append(weighted_sum)\n self._cols_to_vars[column] = ops.get_collection(\n ops.GraphKeys.GLOBAL_VARIABLES, scope=layer.scope_name)\n\n _verify_static_batch_size_equality(weighted_sums, ordered_columns)\n predictions_no_bias = math_ops.add_n(\n weighted_sums, name='weighted_sum_no_bias')\n predictions = nn_ops.bias_add(\n predictions_no_bias,\n self._bias_layer( # pylint: disable=not-callable\n builder,\n scope=variable_scope.get_variable_scope()), # pylint: disable=not-callable\n name='weighted_sum')\n bias = self._bias_layer.variables[0]\n self._cols_to_vars['bias'] = _get_expanded_variable_list(bias)\n return predictions\n\n def _add_layers(self, layers):\n # \"Magic\" required for keras.Model classes to track all the variables in\n # a list of layers.Layer objects.\n # TODO(ashankar): Figure out API so user code doesn't have to do this.\n for name, layer in layers.items():\n setattr(self, 'layer-%s' % name, layer)\n return layers\n\n\ndef _transform_features(features, feature_columns):\n \"\"\"Returns transformed features based on features columns passed in.\n\n Please note that most probably you would not need to use this function. Please\n check `input_layer` and `linear_model` to see whether they will\n satisfy your use case or not.\n\n Example:\n\n ```python\n # Define features and transformations\n crosses_a_x_b = crossed_column(\n columns=[\"sparse_feature_a\", \"sparse_feature_b\"], hash_bucket_size=10000)\n price_buckets = bucketized_column(\n source_column=numeric_column(\"price\"), boundaries=[...])\n\n columns = [crosses_a_x_b, price_buckets]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n transformed = transform_features(features=features, feature_columns=columns)\n\n assertCountEqual(columns, transformed.keys())\n ```\n\n Args:\n features: A mapping from key to tensors. `_FeatureColumn`s look up via these\n keys. For example `numeric_column('price')` will look at 'price' key in\n this dict. 
Values can be a `SparseTensor` or a `Tensor` depends on\n corresponding `_FeatureColumn`.\n feature_columns: An iterable containing all the `_FeatureColumn`s.\n\n Returns:\n A `dict` mapping `_FeatureColumn` to `Tensor` and `SparseTensor` values.\n \"\"\"\n feature_columns = _normalize_feature_columns(feature_columns)\n outputs = {}\n with ops.name_scope(\n None, default_name='transform_features', values=features.values()):\n builder = _LazyBuilder(features)\n for column in sorted(feature_columns, key=lambda x: x.name):\n with ops.name_scope(None, default_name=column.name):\n outputs[column] = builder.get(column)\n return outputs\n\n\n@tf_export(v1=['feature_column.make_parse_example_spec'])\ndef make_parse_example_spec(feature_columns):\n \"\"\"Creates parsing spec dictionary from input feature_columns.\n\n The returned dictionary can be used as arg 'features' in\n `tf.io.parse_example`.\n\n Typical usage example:\n\n ```python\n # Define features and transformations\n feature_a = categorical_column_with_vocabulary_file(...)\n feature_b = numeric_column(...)\n feature_c_bucketized = bucketized_column(numeric_column(\"feature_c\"), ...)\n feature_a_x_feature_c = crossed_column(\n columns=[\"feature_a\", feature_c_bucketized], ...)\n\n feature_columns = set(\n [feature_b, feature_c_bucketized, feature_a_x_feature_c])\n features = tf.io.parse_example(\n serialized=serialized_examples,\n features=make_parse_example_spec(feature_columns))\n ```\n\n For the above example, make_parse_example_spec would return the dict:\n\n ```python\n {\n \"feature_a\": parsing_ops.VarLenFeature(tf.string),\n \"feature_b\": parsing_ops.FixedLenFeature([1], dtype=tf.float32),\n \"feature_c\": parsing_ops.FixedLenFeature([1], dtype=tf.float32)\n }\n ```\n\n Args:\n feature_columns: An iterable containing all feature columns. All items\n should be instances of classes derived from `_FeatureColumn`.\n\n Returns:\n A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`\n value.\n\n Raises:\n ValueError: If any of the given `feature_columns` is not a `_FeatureColumn`\n instance.\n \"\"\"\n result = {}\n for column in feature_columns:\n if not isinstance(column, _FeatureColumn):\n raise ValueError(\n 'All feature_columns must be _FeatureColumn instances. '\n 'Given: {}'.format(column))\n config = column._parse_example_spec # pylint: disable=protected-access\n for key, value in six.iteritems(config):\n if key in result and value != result[key]:\n raise ValueError(\n 'feature_columns contain different parse_spec for key '\n '{}. Given {} and {}'.format(key, value, result[key]))\n result.update(config)\n return result\n\n\ndef _embedding_column(categorical_column,\n dimension,\n combiner='mean',\n initializer=None,\n ckpt_to_load_from=None,\n tensor_name_in_ckpt=None,\n max_norm=None,\n trainable=True):\n \"\"\"`_DenseColumn` that converts from sparse, categorical input.\n\n Use this when your inputs are sparse, but you want to convert them to a dense\n representation (e.g., to feed to a DNN).\n\n Inputs must be a `_CategoricalColumn` created by any of the\n `categorical_column_*` function. 
Here is an example of using\n `embedding_column` with `DNNClassifier`:\n\n ```python\n video_id = categorical_column_with_identity(\n key='video_id', num_buckets=1000000, default_value=0)\n columns = [embedding_column(video_id, 9),...]\n\n estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)\n\n label_column = ...\n def input_fn():\n features = tf.io.parse_example(\n ..., features=make_parse_example_spec(columns + [label_column]))\n labels = features.pop(label_column.name)\n return features, labels\n\n estimator.train(input_fn=input_fn, steps=100)\n ```\n\n Here is an example using `embedding_column` with model_fn:\n\n ```python\n def model_fn(features, ...):\n video_id = categorical_column_with_identity(\n key='video_id', num_buckets=1000000, default_value=0)\n columns = [embedding_column(video_id, 9),...]\n dense_tensor = input_layer(features, columns)\n # Form DNN layers, calculate loss, and return EstimatorSpec.\n ...\n ```\n\n Args:\n categorical_column: A `_CategoricalColumn` created by a\n `categorical_column_with_*` function. This column produces the sparse IDs\n that are inputs to the embedding lookup.\n dimension: An integer specifying dimension of the embedding, must be > 0.\n combiner: A string specifying how to reduce if there are multiple entries\n in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with\n 'mean' the default. 'sqrtn' often achieves good accuracy, in particular\n with bag-of-words columns. Each of this can be thought as example level\n normalizations on the column. For more information, see\n `tf.embedding_lookup_sparse`.\n initializer: A variable initializer function to be used in embedding\n variable initialization. If not specified, defaults to\n `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and\n standard deviation `1/sqrt(dimension)`.\n ckpt_to_load_from: String representing checkpoint name/pattern from which to\n restore column weights. Required if `tensor_name_in_ckpt` is not `None`.\n tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from\n which to restore the column weights. Required if `ckpt_to_load_from` is\n not `None`.\n max_norm: If not `None`, embedding values are l2-normalized to this value.\n trainable: Whether or not the embedding is trainable. Default is True.\n\n Returns:\n `_DenseColumn` that converts from sparse input.\n\n Raises:\n ValueError: if `dimension` not > 0.\n ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`\n is specified.\n ValueError: if `initializer` is specified and is not callable.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if (dimension is None) or (dimension < 1):\n raise ValueError('Invalid dimension {}.'.format(dimension))\n if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):\n raise ValueError('Must specify both `ckpt_to_load_from` and '\n '`tensor_name_in_ckpt` or none of them.')\n\n if (initializer is not None) and (not callable(initializer)):\n raise ValueError('initializer must be callable if specified. 
'\n 'Embedding of column_name: {}'.format(\n categorical_column.name))\n if initializer is None:\n initializer = init_ops.truncated_normal_initializer(\n mean=0.0, stddev=1 / math.sqrt(dimension))\n\n embedding_shape = categorical_column._num_buckets, dimension # pylint: disable=protected-access\n\n def _creator(weight_collections, scope):\n embedding_column_layer = _EmbeddingColumnLayer(\n embedding_shape=embedding_shape,\n initializer=initializer,\n weight_collections=weight_collections,\n trainable=trainable,\n name='embedding_column_layer')\n return embedding_column_layer(None, scope=scope) # pylint: disable=not-callable\n\n return _EmbeddingColumn(\n categorical_column=categorical_column,\n dimension=dimension,\n combiner=combiner,\n layer_creator=_creator,\n ckpt_to_load_from=ckpt_to_load_from,\n tensor_name_in_ckpt=tensor_name_in_ckpt,\n max_norm=max_norm,\n trainable=trainable)\n\n\ndef _numeric_column(key,\n shape=(1,),\n default_value=None,\n dtype=dtypes.float32,\n normalizer_fn=None):\n \"\"\"Represents real valued or numerical features.\n\n Example:\n\n ```python\n price = numeric_column('price')\n columns = [price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n\n # or\n bucketized_price = bucketized_column(price, boundaries=[...])\n columns = [bucketized_price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n Args:\n key: A unique string identifying the input feature. It is used as the\n column name and the dictionary key for feature parsing configs, feature\n `Tensor` objects, and feature columns.\n shape: An iterable of integers specifies the shape of the `Tensor`. An\n integer can be given which means a single dimension `Tensor` with given\n width. The `Tensor` representing the column will have the shape of\n [batch_size] + `shape`.\n default_value: A single value compatible with `dtype` or an iterable of\n values compatible with `dtype` which the column takes on during\n `tf.Example` parsing if data is missing. A default value of `None` will\n cause `tf.io.parse_example` to fail if an example does not contain this\n column. If a single value is provided, the same value will be applied as\n the default value for every item. If an iterable of values is provided,\n the shape of the `default_value` should be equal to the given `shape`.\n dtype: defines the type of values. Default value is `tf.float32`. Must be a\n non-quantized, real integer or floating point type.\n normalizer_fn: If not `None`, a function that can be used to normalize the\n value of the tensor after `default_value` is applied for parsing.\n Normalizer function takes the input `Tensor` as its argument, and returns\n the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). 
Please note that\n even though the most common use case of this function is normalization, it\n can be used for any kind of Tensorflow transformations.\n\n Returns:\n A `_NumericColumn`.\n\n Raises:\n TypeError: if any dimension in shape is not an int\n ValueError: if any dimension in shape is not a positive integer\n TypeError: if `default_value` is an iterable but not compatible with `shape`\n TypeError: if `default_value` is not compatible with `dtype`.\n ValueError: if `dtype` is not convertible to `tf.float32`.\n \"\"\"\n shape = _check_shape(shape, key)\n if not (dtype.is_integer or dtype.is_floating):\n raise ValueError('dtype must be convertible to float. '\n 'dtype: {}, key: {}'.format(dtype, key))\n default_value = fc_utils.check_default_value(\n shape, default_value, dtype, key)\n\n if normalizer_fn is not None and not callable(normalizer_fn):\n raise TypeError(\n 'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))\n\n fc_utils.assert_key_is_string(key)\n return _NumericColumn(\n key,\n shape=shape,\n default_value=default_value,\n dtype=dtype,\n normalizer_fn=normalizer_fn)\n\n\ndef _bucketized_column(source_column, boundaries):\n \"\"\"Represents discretized dense input.\n\n Buckets include the left boundary, and exclude the right boundary. Namely,\n `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,\n `[1., 2.)`, and `[2., +inf)`.\n\n For example, if the inputs are\n\n ```python\n boundaries = [0, 10, 100]\n input tensor = [[-5, 10000]\n [150, 10]\n [5, 100]]\n ```\n\n then the output will be\n\n ```python\n output = [[0, 3]\n [3, 2]\n [1, 3]]\n ```\n\n Example:\n\n ```python\n price = numeric_column('price')\n bucketized_price = bucketized_column(price, boundaries=[...])\n columns = [bucketized_price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n\n # or\n columns = [bucketized_price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n ```\n\n `bucketized_column` can also be crossed with another categorical column using\n `crossed_column`:\n\n ```python\n price = numeric_column('price')\n # bucketized_column converts numerical feature to a categorical one.\n bucketized_price = bucketized_column(price, boundaries=[...])\n # 'keywords' is a string feature.\n price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)\n columns = [price_x_keywords, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n Args:\n source_column: A one-dimensional dense column which is generated with\n `numeric_column`.\n boundaries: A sorted list or tuple of floats specifying the boundaries.\n\n Returns:\n A `_BucketizedColumn`.\n\n Raises:\n ValueError: If `source_column` is not a numeric column, or if it is not\n one-dimensional.\n ValueError: If `boundaries` is not a sorted list or tuple.\n \"\"\"\n if not isinstance(source_column, _NumericColumn):\n raise ValueError(\n 'source_column must be a column generated with numeric_column(). '\n 'Given: {}'.format(source_column))\n if len(source_column.shape) > 1:\n raise ValueError(\n 'source_column must be one-dimensional column. 
'\n 'Given: {}'.format(source_column))\n if (not boundaries or\n not (isinstance(boundaries, list) or isinstance(boundaries, tuple))):\n raise ValueError('boundaries must be a sorted list.')\n for i in range(len(boundaries) - 1):\n if boundaries[i] >= boundaries[i + 1]:\n raise ValueError('boundaries must be a sorted list.')\n return _BucketizedColumn(source_column, tuple(boundaries))\n\n\ndef _categorical_column_with_hash_bucket(key,\n hash_bucket_size,\n dtype=dtypes.string):\n \"\"\"Represents sparse feature where ids are set by hashing.\n\n Use this when your sparse features are in string or integer format, and you\n want to distribute your inputs into a finite number of buckets by hashing.\n output_id = Hash(input_feature_string) % bucket_size for string type input.\n For int type input, the value is converted to its string representation first\n and then hashed by the same formula.\n\n For input dictionary `features`, `features[key]` is either `Tensor` or\n `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int\n and `''` for string, which will be dropped by this feature column.\n\n Example:\n\n ```python\n keywords = categorical_column_with_hash_bucket(\"keywords\", 10K)\n columns = [keywords, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n\n # or\n keywords_embedded = embedding_column(keywords, 16)\n columns = [keywords_embedded, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n ```\n\n Args:\n key: A unique string identifying the input feature. It is used as the\n column name and the dictionary key for feature parsing configs, feature\n `Tensor` objects, and feature columns.\n hash_bucket_size: An int > 1. The number of buckets.\n dtype: The type of features. Only string and integer types are supported.\n\n Returns:\n A `_HashedCategoricalColumn`.\n\n Raises:\n ValueError: `hash_bucket_size` is not greater than 1.\n ValueError: `dtype` is neither string nor integer.\n \"\"\"\n if hash_bucket_size is None:\n raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))\n\n if hash_bucket_size < 1:\n raise ValueError('hash_bucket_size must be at least 1. '\n 'hash_bucket_size: {}, key: {}'.format(\n hash_bucket_size, key))\n\n fc_utils.assert_key_is_string(key)\n fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))\n\n return _HashedCategoricalColumn(key, hash_bucket_size, dtype)\n\n\ndef _categorical_column_with_vocabulary_file(key,\n vocabulary_file,\n vocabulary_size=None,\n num_oov_buckets=0,\n default_value=None,\n dtype=dtypes.string):\n \"\"\"A `_CategoricalColumn` with a vocabulary file.\n\n Use this when your inputs are in string or integer format, and you have a\n vocabulary file that maps each value to an integer ID. By default,\n out-of-vocabulary values are ignored. Use either (but not both) of\n `num_oov_buckets` and `default_value` to specify how to include\n out-of-vocabulary values.\n\n For input dictionary `features`, `features[key]` is either `Tensor` or\n `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int\n and `''` for string, which will be dropped by this feature column.\n\n Example with `num_oov_buckets`:\n File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state\n abbreviation. All inputs with values in that file are assigned an ID 0-49,\n corresponding to its line number. 
All other values are hashed and assigned an\n ID 50-54.\n\n ```python\n states = categorical_column_with_vocabulary_file(\n key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,\n num_oov_buckets=5)\n columns = [states, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n Example with `default_value`:\n File '/us/states.txt' contains 51 lines - the first line is 'XX', and the\n other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'\n in input, and other values missing from the file, will be assigned ID 0. All\n others are assigned the corresponding line number 1-50.\n\n ```python\n states = categorical_column_with_vocabulary_file(\n key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,\n default_value=0)\n columns = [states, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction, _, _ = linear_model(features, columns)\n ```\n\n And to make an embedding with either:\n\n ```python\n columns = [embedding_column(states, 3),...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n ```\n\n Args:\n key: A unique string identifying the input feature. It is used as the\n column name and the dictionary key for feature parsing configs, feature\n `Tensor` objects, and feature columns.\n vocabulary_file: The vocabulary file name.\n vocabulary_size: Number of the elements in the vocabulary. This must be no\n greater than length of `vocabulary_file`, if less than length, later\n values are ignored. If None, it is set to the length of `vocabulary_file`.\n num_oov_buckets: Non-negative integer, the number of out-of-vocabulary\n buckets. All out-of-vocabulary inputs will be assigned IDs in the range\n `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of\n the input value. A positive `num_oov_buckets` can not be specified with\n `default_value`.\n default_value: The integer ID value to return for out-of-vocabulary feature\n values, defaults to `-1`. This can not be specified with a positive\n `num_oov_buckets`.\n dtype: The type of features. 
Only string and integer types are supported.\n\n Returns:\n A `_CategoricalColumn` with a vocabulary file.\n\n Raises:\n ValueError: `vocabulary_file` is missing or cannot be opened.\n ValueError: `vocabulary_size` is missing or < 1.\n ValueError: `num_oov_buckets` is a negative integer.\n ValueError: `num_oov_buckets` and `default_value` are both specified.\n ValueError: `dtype` is neither string nor integer.\n \"\"\"\n if not vocabulary_file:\n raise ValueError('Missing vocabulary_file in {}.'.format(key))\n\n if vocabulary_size is None:\n if not gfile.Exists(vocabulary_file):\n raise ValueError('vocabulary_file in {} does not exist.'.format(key))\n\n with gfile.GFile(vocabulary_file) as f:\n vocabulary_size = sum(1 for _ in f)\n logging.info(\n 'vocabulary_size = %d in %s is inferred from the number of elements '\n 'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)\n\n # `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.\n if vocabulary_size < 1:\n raise ValueError('Invalid vocabulary_size in {}.'.format(key))\n if num_oov_buckets:\n if default_value is not None:\n raise ValueError(\n 'Can\\'t specify both num_oov_buckets and default_value in {}.'.format(\n key))\n if num_oov_buckets < 0:\n raise ValueError('Invalid num_oov_buckets {} in {}.'.format(\n num_oov_buckets, key))\n fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))\n fc_utils.assert_key_is_string(key)\n return _VocabularyFileCategoricalColumn(\n key=key,\n vocabulary_file=vocabulary_file,\n vocabulary_size=vocabulary_size,\n num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,\n default_value=-1 if default_value is None else default_value,\n dtype=dtype)\n\n\ndef _categorical_column_with_vocabulary_list(key,\n vocabulary_list,\n dtype=None,\n default_value=-1,\n num_oov_buckets=0):\n \"\"\"A `_CategoricalColumn` with in-memory vocabulary.\n\n Use this when your inputs are in string or integer format, and you have an\n in-memory vocabulary mapping each value to an integer ID. By default,\n out-of-vocabulary values are ignored. Use either (but not both) of\n `num_oov_buckets` and `default_value` to specify how to include\n out-of-vocabulary values.\n\n For input dictionary `features`, `features[key]` is either `Tensor` or\n `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int\n and `''` for string, which will be dropped by this feature column.\n\n Example with `num_oov_buckets`:\n In the following example, each input in `vocabulary_list` is assigned an ID\n 0-3 corresponding to its index (e.g., input 'B' produces output 2). All other\n inputs are hashed and assigned an ID 4-5.\n\n ```python\n colors = categorical_column_with_vocabulary_list(\n key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),\n num_oov_buckets=2)\n columns = [colors, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction, _, _ = linear_model(features, columns)\n ```\n\n Example with `default_value`:\n In the following example, each input in `vocabulary_list` is assigned an ID\n 0-4 corresponding to its index (e.g., input 'B' produces output 3). 
All other\n inputs are assigned `default_value` 0.\n\n\n ```python\n colors = categorical_column_with_vocabulary_list(\n key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)\n columns = [colors, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction, _, _ = linear_model(features, columns)\n ```\n\n And to make an embedding with either:\n\n ```python\n columns = [embedding_column(colors, 3),...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n ```\n\n Args:\n key: A unique string identifying the input feature. It is used as the\n column name and the dictionary key for feature parsing configs, feature\n `Tensor` objects, and feature columns.\n vocabulary_list: An ordered iterable defining the vocabulary. Each feature\n is mapped to the index of its value (if present) in `vocabulary_list`.\n Must be castable to `dtype`.\n dtype: The type of features. Only string and integer types are supported.\n If `None`, it will be inferred from `vocabulary_list`.\n default_value: The integer ID value to return for out-of-vocabulary feature\n values, defaults to `-1`. This can not be specified with a positive\n `num_oov_buckets`.\n num_oov_buckets: Non-negative integer, the number of out-of-vocabulary\n buckets. All out-of-vocabulary inputs will be assigned IDs in the range\n `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a\n hash of the input value. A positive `num_oov_buckets` can not be specified\n with `default_value`.\n\n Returns:\n A `_CategoricalColumn` with in-memory vocabulary.\n\n Raises:\n ValueError: if `vocabulary_list` is empty, or contains duplicate keys.\n ValueError: `num_oov_buckets` is a negative integer.\n ValueError: `num_oov_buckets` and `default_value` are both specified.\n ValueError: if `dtype` is not integer or string.\n \"\"\"\n if (vocabulary_list is None) or (len(vocabulary_list) < 1):\n raise ValueError(\n 'vocabulary_list {} must be non-empty, column_name: {}'.format(\n vocabulary_list, key))\n if len(set(vocabulary_list)) != len(vocabulary_list):\n raise ValueError(\n 'Duplicate keys in vocabulary_list {}, column_name: {}'.format(\n vocabulary_list, key))\n vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)\n if num_oov_buckets:\n if default_value != -1:\n raise ValueError(\n 'Can\\'t specify both num_oov_buckets and default_value in {}.'.format(\n key))\n if num_oov_buckets < 0:\n raise ValueError('Invalid num_oov_buckets {} in {}.'.format(\n num_oov_buckets, key))\n fc_utils.assert_string_or_int(\n vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))\n if dtype is None:\n dtype = vocabulary_dtype\n elif dtype.is_integer != vocabulary_dtype.is_integer:\n raise ValueError(\n 'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(\n dtype, vocabulary_dtype, key))\n fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))\n fc_utils.assert_key_is_string(key)\n\n return _VocabularyListCategoricalColumn(\n key=key, vocabulary_list=tuple(vocabulary_list), dtype=dtype,\n default_value=default_value, num_oov_buckets=num_oov_buckets)\n\n\ndef _categorical_column_with_identity(key, num_buckets, default_value=None):\n \"\"\"A `_CategoricalColumn` that returns identity values.\n\n Use this when your inputs are integers in the range `[0, num_buckets)`, and\n you want to use the input value itself as the categorical ID. 
Values outside\n  this range will result in `default_value` if specified, otherwise it will\n  fail.\n\n  Typically, this is used for contiguous ranges of integer indexes, but\n  it doesn't have to be. This might be inefficient, however, if many of the IDs\n  are unused. Consider `categorical_column_with_hash_bucket` in that case.\n\n  For input dictionary `features`, `features[key]` is either `Tensor` or\n  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int\n  and `''` for string, which will be dropped by this feature column.\n\n  In the following examples, each input in the range `[0, 1000000)` is assigned\n  its own value as the ID. All other inputs are assigned `default_value` 0. Note\n  that a literal 0 in inputs will result in the same default ID.\n\n  Linear model:\n\n  ```python\n  video_id = categorical_column_with_identity(\n      key='video_id', num_buckets=1000000, default_value=0)\n  columns = [video_id, ...]\n  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n  linear_prediction, _, _ = linear_model(features, columns)\n  ```\n\n  Embedding for a DNN model:\n\n  ```python\n  columns = [embedding_column(video_id, 9),...]\n  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n  dense_tensor = input_layer(features, columns)\n  ```\n\n  Args:\n    key: A unique string identifying the input feature. It is used as the\n      column name and the dictionary key for feature parsing configs, feature\n      `Tensor` objects, and feature columns.\n    num_buckets: Range of inputs and outputs is `[0, num_buckets)`.\n    default_value: If `None`, this column's graph operations will fail for\n      out-of-range inputs. Otherwise, this value must be in the range\n      `[0, num_buckets)`, and will replace inputs in that range.\n\n  Returns:\n    A `_CategoricalColumn` that returns identity values.\n\n  Raises:\n    ValueError: if `num_buckets` is less than one.\n    ValueError: if `default_value` is not in range `[0, num_buckets)`.\n  \"\"\"\n  if num_buckets < 1:\n    raise ValueError(\n        'num_buckets {} < 1, column_name {}'.format(num_buckets, key))\n  if (default_value is not None) and (\n      (default_value < 0) or (default_value >= num_buckets)):\n    raise ValueError(\n        'default_value {} not in range [0, {}), column_name {}'.format(\n            default_value, num_buckets, key))\n  fc_utils.assert_key_is_string(key)\n  return _IdentityCategoricalColumn(\n      key=key, num_buckets=num_buckets, default_value=default_value)\n\n\ndef _indicator_column(categorical_column):\n  \"\"\"Represents multi-hot representation of given categorical column.\n\n  - For DNN model, `indicator_column` can be used to wrap any\n    `categorical_column_*` (e.g., to feed to DNN). Consider using\n    `embedding_column` if the number of buckets/unique values is large.\n\n  - For Wide (aka linear) model, `indicator_column` is the internal\n    representation for categorical column when passing categorical column\n    directly (as any element in feature_columns) to `linear_model`. 
See\n `linear_model` for details.\n\n ```python\n name = indicator_column(categorical_column_with_vocabulary_list(\n 'name', ['bob', 'george', 'wanda'])\n columns = [name, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n\n dense_tensor == [[1, 0, 0]] # If \"name\" bytes_list is [\"bob\"]\n dense_tensor == [[1, 0, 1]] # If \"name\" bytes_list is [\"bob\", \"wanda\"]\n dense_tensor == [[2, 0, 0]] # If \"name\" bytes_list is [\"bob\", \"bob\"]\n ```\n\n Args:\n categorical_column: A `_CategoricalColumn` which is created by\n `categorical_column_with_*` or `crossed_column` functions.\n\n Returns:\n An `_IndicatorColumn`.\n \"\"\"\n return _IndicatorColumn(categorical_column)\n\n\ndef _weighted_categorical_column(categorical_column,\n weight_feature_key,\n dtype=dtypes.float32):\n \"\"\"Applies weight values to a `_CategoricalColumn`.\n\n Use this when each of your sparse inputs has both an ID and a value. For\n example, if you're representing text documents as a collection of word\n frequencies, you can provide 2 parallel sparse input features ('terms' and\n 'frequencies' below).\n\n Example:\n\n Input `tf.Example` objects:\n\n ```proto\n [\n features {\n feature {\n key: \"terms\"\n value {bytes_list {value: \"very\" value: \"model\"}}\n }\n feature {\n key: \"frequencies\"\n value {float_list {value: 0.3 value: 0.1}}\n }\n },\n features {\n feature {\n key: \"terms\"\n value {bytes_list {value: \"when\" value: \"course\" value: \"human\"}}\n }\n feature {\n key: \"frequencies\"\n value {float_list {value: 0.4 value: 0.1 value: 0.2}}\n }\n }\n ]\n ```\n\n ```python\n categorical_column = categorical_column_with_hash_bucket(\n column_name='terms', hash_bucket_size=1000)\n weighted_column = weighted_categorical_column(\n categorical_column=categorical_column, weight_feature_key='frequencies')\n columns = [weighted_column, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction, _, _ = linear_model(features, columns)\n ```\n\n This assumes the input dictionary contains a `SparseTensor` for key\n 'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have\n the same indices and dense shape.\n\n Args:\n categorical_column: A `_CategoricalColumn` created by\n `categorical_column_with_*` functions.\n weight_feature_key: String key for weight values.\n dtype: Type of weights, such as `tf.float32`. Only float and integer weights\n are supported.\n\n Returns:\n A `_CategoricalColumn` composed of two sparse features: one represents id,\n the other represents weight (value) of the id feature in that example.\n\n Raises:\n ValueError: if `dtype` is not convertible to float.\n \"\"\"\n if (dtype is None) or not (dtype.is_integer or dtype.is_floating):\n raise ValueError('dtype {} is not convertible to float.'.format(dtype))\n return _WeightedCategoricalColumn(\n categorical_column=categorical_column,\n weight_feature_key=weight_feature_key,\n dtype=dtype)\n\n\ndef _crossed_column(keys, hash_bucket_size, hash_key=None):\n \"\"\"Returns a column for performing crosses of categorical features.\n\n Crossed features will be hashed according to `hash_bucket_size`. 
Conceptually,\n the transformation can be thought of as:\n Hash(cartesian product of features) % `hash_bucket_size`\n\n For example, if the input features are:\n\n * SparseTensor referred by first key:\n\n ```python\n shape = [2, 2]\n {\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n }\n ```\n\n * SparseTensor referred by second key:\n\n ```python\n shape = [2, 1]\n {\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n }\n ```\n\n then crossed feature will look like:\n\n ```python\n shape = [2, 2]\n {\n [0, 0]: Hash64(\"d\", Hash64(\"a\")) % hash_bucket_size\n [1, 0]: Hash64(\"e\", Hash64(\"b\")) % hash_bucket_size\n [1, 1]: Hash64(\"e\", Hash64(\"c\")) % hash_bucket_size\n }\n ```\n\n Here is an example to create a linear model with crosses of string features:\n\n ```python\n keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)\n columns = [keywords_x_doc_terms, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n You could also use vocabulary lookup before crossing:\n\n ```python\n keywords = categorical_column_with_vocabulary_file(\n 'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)\n keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)\n columns = [keywords_x_doc_terms, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n If an input feature is of numeric type, you can use\n `categorical_column_with_identity`, or `bucketized_column`, as in the example:\n\n ```python\n # vertical_id is an integer categorical feature.\n vertical_id = categorical_column_with_identity('vertical_id', 10K)\n price = numeric_column('price')\n # bucketized_column converts numerical feature to a categorical one.\n bucketized_price = bucketized_column(price, boundaries=[...])\n vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)\n columns = [vertical_id_x_price, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n linear_prediction = linear_model(features, columns)\n ```\n\n To use crossed column in DNN model, you need to add it in an embedding column\n as in this example:\n\n ```python\n vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)\n vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)\n dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])\n ```\n\n Args:\n keys: An iterable identifying the features to be crossed. Each element can\n be either:\n * string: Will use the corresponding feature which must be of string type.\n * `_CategoricalColumn`: Will use the transformed tensor produced by this\n column. Does not support hashed categorical column.\n hash_bucket_size: An int > 1. The number of buckets.\n hash_key: Specify the hash_key that will be used by the `FingerprintCat64`\n function to combine the crosses fingerprints on SparseCrossOp (optional).\n\n Returns:\n A `_CrossedColumn`.\n\n Raises:\n ValueError: If `len(keys) < 2`.\n ValueError: If any of the keys is neither a string nor `_CategoricalColumn`.\n ValueError: If any of the keys is `_HashedCategoricalColumn`.\n ValueError: If `hash_bucket_size < 1`.\n \"\"\"\n if not hash_bucket_size or hash_bucket_size < 1:\n raise ValueError('hash_bucket_size must be > 1. '\n 'hash_bucket_size: {}'.format(hash_bucket_size))\n if not keys or len(keys) < 2:\n raise ValueError(\n 'keys must be a list with length > 1. 
Given: {}'.format(keys))\n for key in keys:\n if (not isinstance(key, six.string_types) and\n not isinstance(key, _CategoricalColumn)):\n raise ValueError(\n 'Unsupported key type. All keys must be either string, or '\n 'categorical column except _HashedCategoricalColumn. '\n 'Given: {}'.format(key))\n if isinstance(key, _HashedCategoricalColumn):\n raise ValueError(\n 'categorical_column_with_hash_bucket is not supported for crossing. '\n 'Hashing before crossing will increase probability of collision. '\n 'Instead, use the feature name as a string. Given: {}'.format(key))\n return _CrossedColumn(\n keys=tuple(keys), hash_bucket_size=hash_bucket_size,\n hash_key=hash_key)\n\n\n# TODO(rohanj): Clearly define semantics of this layer.\nclass _EmbeddingColumnLayer(base.Layer):\n \"\"\"A layer that stores all the state required for a embedding column.\"\"\"\n\n def __init__(self,\n embedding_shape,\n initializer,\n weight_collections=None,\n trainable=True,\n name=None,\n **kwargs):\n \"\"\"Constructor.\n\n Args:\n embedding_shape: Shape of the embedding variable used for lookup.\n initializer: A variable initializer function to be used in embedding\n variable initialization.\n weight_collections: A list of collection names to which the Variable will\n be added. Note that, variables will also be added to collections\n `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: Name of the layer\n **kwargs: keyword named properties.\n \"\"\"\n super(_EmbeddingColumnLayer, self).__init__(\n trainable=trainable, name=name, **kwargs)\n self._embedding_shape = embedding_shape\n self._initializer = initializer\n self._weight_collections = weight_collections\n\n def set_weight_collections(self, weight_collections):\n \"\"\"Sets the weight collections for the layer.\n\n Args:\n weight_collections: A list of collection names to which the Variable will\n be added.\n \"\"\"\n self._weight_collections = weight_collections\n\n def build(self, _):\n self._embedding_weight_var = self.add_variable(\n name='embedding_weights',\n shape=self._embedding_shape,\n dtype=dtypes.float32,\n initializer=self._initializer,\n trainable=self.trainable)\n if self._weight_collections and not context.executing_eagerly():\n _add_to_collections(self._embedding_weight_var, self._weight_collections)\n self.built = True\n\n def call(self, _):\n return self._embedding_weight_var\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass _FeatureColumn(object):\n \"\"\"Represents a feature column abstraction.\n\n WARNING: Do not subclass this layer unless you know what you are doing:\n the API is subject to future changes.\n\n To distinguish the concept of a feature family and a specific binary feature\n within a family, we refer to a feature family like \"country\" as a feature\n column. Following is an example feature in a `tf.Example` format:\n {key: \"country\", value: [ \"US\" ]}\n In this example the value of feature is \"US\" and \"country\" refers to the\n column of the feature.\n\n This class is an abstract class. User should not create instances of this.\n \"\"\"\n\n @abc.abstractproperty\n def name(self):\n \"\"\"Returns string. 
Used for naming and for name_scope.\"\"\"\n pass\n\n def __lt__(self, other):\n \"\"\"Allows feature columns to be sorted in Python 3 as they are in Python 2.\n\n Feature columns need to occasionally be sortable, for example when used as\n keys in a features dictionary passed to a layer.\n\n In CPython, `__lt__` must be defined for all objects in the\n sequence being sorted. If any objects do not have an `__lt__` compatible\n with feature column objects (such as strings), then CPython will fall back\n to using the `__gt__` method below.\n https://docs.python.org/3/library/stdtypes.html#list.sort\n\n Args:\n other: The other object to compare to.\n\n Returns:\n True if the string representation of this object is lexicographically less\n than the string representation of `other`. For FeatureColumn objects,\n this looks like \"<__main__.FeatureColumn object at 0xa>\".\n \"\"\"\n return str(self) < str(other)\n\n def __gt__(self, other):\n \"\"\"Allows feature columns to be sorted in Python 3 as they are in Python 2.\n\n Feature columns need to occasionally be sortable, for example when used as\n keys in a features dictionary passed to a layer.\n\n `__gt__` is called when the \"other\" object being compared during the sort\n does not have `__lt__` defined.\n Example: http://gpaste/4803354716798976\n\n Args:\n other: The other object to compare to.\n\n Returns:\n True if the string representation of this object is lexicographically\n greater than the string representation of `other`. For FeatureColumn\n objects, this looks like \"<__main__.FeatureColumn object at 0xa>\".\n \"\"\"\n return str(self) > str(other)\n\n @property\n def _var_scope_name(self):\n \"\"\"Returns string. Used for variable_scope. Defaults to self.name.\"\"\"\n return self.name\n\n @abc.abstractmethod\n def _transform_feature(self, inputs):\n \"\"\"Returns intermediate representation (usually a `Tensor`).\n\n Uses `inputs` to create an intermediate representation (usually a `Tensor`)\n that other feature columns can use.\n\n Example usage of `inputs`:\n Let's say a Feature column depends on raw feature ('raw') and another\n `_FeatureColumn` (input_fc). To access corresponding `Tensor`s, inputs will\n be used as follows:\n\n ```python\n raw_tensor = inputs.get('raw')\n fc_tensor = inputs.get(input_fc)\n ```\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n\n Returns:\n Transformed feature `Tensor`.\n \"\"\"\n pass\n\n @abc.abstractproperty\n def _parse_example_spec(self):\n \"\"\"Returns a `tf.Example` parsing spec as dict.\n\n It is used for get_parsing_spec for `tf.io.parse_example`. Returned spec is\n a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other\n supported objects. Please check documentation of `tf.io.parse_example` for\n all supported spec objects.\n\n Let's say a Feature column depends on raw feature ('raw') and another\n `_FeatureColumn` (input_fc). One possible implementation of\n _parse_example_spec is as follows:\n\n ```python\n spec = {'raw': tf.io.FixedLenFeature(...)}\n spec.update(input_fc._parse_example_spec)\n return spec\n ```\n \"\"\"\n pass\n\n def _reset_config(self):\n \"\"\"Resets the configuration in the column.\n\n Some feature columns e.g. embedding or shared embedding columns might\n have some state that is needed to be reset sometimes. 
Use this method\n in that scenario.\n \"\"\"\n\n\nclass _DenseColumn(_FeatureColumn):\n \"\"\"Represents a column which can be represented as `Tensor`.\n\n WARNING: Do not subclass this layer unless you know what you are doing:\n the API is subject to future changes.\n\n Some examples of this type are: numeric_column, embedding_column,\n indicator_column.\n \"\"\"\n\n @abc.abstractproperty\n def _variable_shape(self):\n \"\"\"`TensorShape` of `_get_dense_tensor`, without batch dimension.\"\"\"\n pass\n\n @abc.abstractmethod\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n \"\"\"Returns a `Tensor`.\n\n The output of this function will be used by model-builder-functions. For\n example the pseudo code of `input_layer` will be like:\n\n ```python\n def input_layer(features, feature_columns, ...):\n outputs = [fc._get_dense_tensor(...) for fc in feature_columns]\n return tf.concat(outputs)\n ```\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n weight_collections: List of graph collections to which Variables (if any\n will be created) are added.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n\n Returns:\n `Tensor` of shape [batch_size] + `_variable_shape`.\n \"\"\"\n pass\n\n\ndef _create_weighted_sum(column,\n builder,\n units,\n sparse_combiner,\n weight_collections,\n trainable,\n weight_var=None):\n \"\"\"Creates a weighted sum for a dense/categorical column for linear_model.\"\"\"\n if isinstance(column, _CategoricalColumn):\n return _create_categorical_column_weighted_sum(\n column=column,\n builder=builder,\n units=units,\n sparse_combiner=sparse_combiner,\n weight_collections=weight_collections,\n trainable=trainable,\n weight_var=weight_var)\n else:\n return _create_dense_column_weighted_sum(\n column=column,\n builder=builder,\n units=units,\n weight_collections=weight_collections,\n trainable=trainable,\n weight_var=weight_var)\n\n\ndef _create_dense_column_weighted_sum(column,\n builder,\n units,\n weight_collections,\n trainable,\n weight_var=None):\n \"\"\"Create a weighted sum of a dense column for linear_model.\"\"\"\n tensor = column._get_dense_tensor( # pylint: disable=protected-access\n builder,\n weight_collections=weight_collections,\n trainable=trainable)\n num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access\n batch_size = array_ops.shape(tensor)[0]\n tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))\n if weight_var is not None:\n weight = weight_var\n else:\n weight = variable_scope.get_variable(\n name='weights',\n shape=[num_elements, units],\n initializer=init_ops.zeros_initializer(),\n trainable=trainable,\n collections=weight_collections)\n return math_ops.matmul(tensor, weight, name='weighted_sum')\n\n\nclass _CategoricalColumn(_FeatureColumn):\n \"\"\"Represents a categorical feature.\n\n WARNING: Do not subclass this layer unless you know what you are doing:\n the API is subject to future changes.\n\n A categorical feature typically handled with a `tf.SparseTensor` of IDs.\n \"\"\"\n\n IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name\n 'IdWeightPair', ['id_tensor', 'weight_tensor'])\n\n @abc.abstractproperty\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n pass\n\n @abc.abstractmethod\n def _get_sparse_tensors(self,\n inputs,\n weight_collections=None,\n trainable=None):\n \"\"\"Returns an IdWeightPair.\n\n `IdWeightPair` is a 
pair of `SparseTensor`s which represents ids and\n weights.\n\n `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`\n `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a\n `SparseTensor` of `float` or `None` to indicate all weights should be\n taken to be 1. If specified, `weight_tensor` must have exactly the same\n shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing\n output of a `VarLenFeature` which is a ragged matrix.\n\n Args:\n inputs: A `LazyBuilder` as a cache to get input tensors required to\n create `IdWeightPair`.\n weight_collections: List of graph collections to which variables (if any\n will be created) are added.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.compat.v1.get_variable`).\n \"\"\"\n pass\n\n\ndef _create_categorical_column_weighted_sum(column,\n builder,\n units,\n sparse_combiner,\n weight_collections,\n trainable,\n weight_var=None):\n # pylint: disable=g-doc-return-or-yield,g-doc-args\n \"\"\"Create a weighted sum of a categorical column for linear_model.\n\n Note to maintainer: As implementation details, the weighted sum is\n implemented via embedding_lookup_sparse toward efficiency. Mathematically,\n they are the same.\n\n To be specific, conceptually, categorical column can be treated as multi-hot\n vector. Say:\n\n ```python\n x = [0 0 1] # categorical column input\n w = [a b c] # weights\n ```\n The weighted sum is `c` in this case, which is same as `w[2]`.\n\n Another example is\n\n ```python\n x = [0 1 1] # categorical column input\n w = [a b c] # weights\n ```\n The weighted sum is `b + c` in this case, which is same as `w[2] + w[3]`.\n\n For both cases, we can implement weighted sum via embedding_lookup with\n sparse_combiner = \"sum\".\n \"\"\"\n\n sparse_tensors = column._get_sparse_tensors( # pylint: disable=protected-access\n builder,\n weight_collections=weight_collections,\n trainable=trainable)\n id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [\n array_ops.shape(sparse_tensors.id_tensor)[0], -1\n ])\n weight_tensor = sparse_tensors.weight_tensor\n if weight_tensor is not None:\n weight_tensor = sparse_ops.sparse_reshape(\n weight_tensor, [array_ops.shape(weight_tensor)[0], -1])\n\n if weight_var is not None:\n weight = weight_var\n else:\n weight = variable_scope.get_variable(\n name='weights',\n shape=(column._num_buckets, units), # pylint: disable=protected-access\n initializer=init_ops.zeros_initializer(),\n trainable=trainable,\n collections=weight_collections)\n return embedding_ops.safe_embedding_lookup_sparse(\n weight,\n id_tensor,\n sparse_weights=weight_tensor,\n combiner=sparse_combiner,\n name='weighted_sum')\n\n\nclass _SequenceDenseColumn(_FeatureColumn):\n \"\"\"Represents dense sequence data.\"\"\"\n\n TensorSequenceLengthPair = collections.namedtuple( # pylint: disable=invalid-name\n 'TensorSequenceLengthPair', ['dense_tensor', 'sequence_length'])\n\n @abc.abstractmethod\n def _get_sequence_dense_tensor(\n self, inputs, weight_collections=None, trainable=None):\n \"\"\"Returns a `TensorSequenceLengthPair`.\"\"\"\n pass\n\n\nclass _LazyBuilder(object):\n \"\"\"Handles caching of transformations while building the model.\n\n `_FeatureColumn` specifies how to digest an input column to the network. Some\n feature columns require data transformations. This class caches those\n transformations.\n\n Some features may be used in more than one place. 
For example, one can use a\n  bucketized feature by itself and a cross with it. In that case we\n  should create only one bucketization op instead of creating ops for each\n  feature column separately. To handle re-use of transformed columns,\n  `_LazyBuilder` caches all previously transformed columns.\n\n  Example:\n  We're trying to use the following `_FeatureColumn`s:\n\n  ```python\n  bucketized_age = fc.bucketized_column(fc.numeric_column(\"age\"), ...)\n  keywords = fc.categorical_column_with_hash_bucket(\"keywords\", ...)\n  age_X_keywords = fc.crossed_column([bucketized_age, \"keywords\"])\n  ... = linear_model(features,\n                     [bucketized_age, keywords, age_X_keywords])\n  ```\n\n  If we transform each column independently, then we'll get duplication of\n  bucketization (one for cross, one for bucketization itself).\n  The `_LazyBuilder` eliminates this duplication.\n  \"\"\"\n\n  def __init__(self, features):\n    \"\"\"Creates a `_LazyBuilder`.\n\n    Args:\n      features: A mapping from feature column to objects that are `Tensor` or\n        `SparseTensor`, or can be converted to same via\n        `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key\n        signifies a base feature (not-transformed). A `_FeatureColumn` key\n        means that this `Tensor` is the output of an existing `_FeatureColumn`\n        which can be reused.\n    \"\"\"\n    self._features = features.copy()\n    self._feature_tensors = {}\n\n  def get(self, key):\n    \"\"\"Returns a `Tensor` for the given key.\n\n    A `str` key is used to access a base feature (not-transformed). When a\n    `_FeatureColumn` is passed, the transformed feature is returned if it\n    already exists, otherwise the given `_FeatureColumn` is asked to provide its\n    transformed output, which is then cached.\n\n    Args:\n      key: a `str` or a `_FeatureColumn`.\n\n    Returns:\n      The transformed `Tensor` corresponding to the `key`.\n\n    Raises:\n      ValueError: if key is not found or a transformed `Tensor` cannot be\n        computed.\n    \"\"\"\n    if key in self._feature_tensors:\n      # FeatureColumn is already transformed or converted.\n      return self._feature_tensors[key]\n\n    if key in self._features:\n      feature_tensor = self._get_raw_feature_as_tensor(key)\n      self._feature_tensors[key] = feature_tensor\n      return feature_tensor\n\n    if isinstance(key, six.string_types):\n      raise ValueError('Feature {} is not in features dictionary.'.format(key))\n\n    if not isinstance(key, _FeatureColumn):\n      raise TypeError('\"key\" must be either a \"str\" or \"_FeatureColumn\". '\n                      'Provided: {}'.format(key))\n\n    column = key\n    logging.debug('Transforming feature_column %s.', column)\n    transformed = column._transform_feature(self)  # pylint: disable=protected-access\n    if transformed is None:\n      raise ValueError('Column {} is not supported.'.format(column.name))\n    self._feature_tensors[column] = transformed\n    return transformed\n\n  def _get_raw_feature_as_tensor(self, key):\n    \"\"\"Gets the raw_feature (keyed by `key`) as `tensor`.\n\n    The raw feature is converted to (sparse) tensor and maybe expand dim.\n\n    For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if\n    the rank is 1. This supports dynamic rank also. 
For rank 0 raw feature, will\n error out as it is not supported.\n\n Args:\n key: A `str` key to access the raw feature.\n\n Returns:\n A `Tensor` or `SparseTensor`.\n\n Raises:\n ValueError: if the raw feature has rank 0.\n \"\"\"\n raw_feature = self._features[key]\n feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(\n raw_feature)\n\n def expand_dims(input_tensor):\n # Input_tensor must have rank 1.\n if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n return sparse_ops.sparse_reshape(\n input_tensor, [array_ops.shape(input_tensor)[0], 1])\n else:\n return array_ops.expand_dims(input_tensor, -1)\n\n rank = feature_tensor.get_shape().ndims\n if rank is not None:\n if rank == 0:\n raise ValueError(\n 'Feature (key: {}) cannot have rank 0. Give: {}'.format(\n key, feature_tensor))\n return feature_tensor if rank != 1 else expand_dims(feature_tensor)\n\n # Handle dynamic rank.\n with ops.control_dependencies([\n check_ops.assert_positive(\n array_ops.rank(feature_tensor),\n message='Feature (key: {}) cannot have rank 0. Given: {}'.format(\n key, feature_tensor))]):\n return control_flow_ops.cond(\n math_ops.equal(1, array_ops.rank(feature_tensor)),\n lambda: expand_dims(feature_tensor),\n lambda: feature_tensor)\n\n\n# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py\ndef _shape_offsets(shape):\n \"\"\"Returns moving offset for each dimension given shape.\"\"\"\n offsets = []\n for dim in reversed(shape):\n if offsets:\n offsets.append(dim * offsets[-1])\n else:\n offsets.append(dim)\n offsets.reverse()\n return offsets\n\n\n# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py\ndef _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):\n \"\"\"Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.\n\n If `input_tensor` is already a `SparseTensor`, just return it.\n\n Args:\n input_tensor: A string or integer `Tensor`.\n ignore_value: Entries in `dense_tensor` equal to this value will be\n absent from the resulting `SparseTensor`. 
If `None`, default value of\n `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).\n\n Returns:\n A `SparseTensor` with the same shape as `input_tensor`.\n\n Raises:\n ValueError: when `input_tensor`'s rank is `None`.\n \"\"\"\n input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(\n input_tensor)\n if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n return input_tensor\n with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):\n if ignore_value is None:\n if input_tensor.dtype == dtypes.string:\n # Exception due to TF strings are converted to numpy objects by default.\n ignore_value = ''\n elif input_tensor.dtype.is_integer:\n ignore_value = -1 # -1 has a special meaning of missing feature\n else:\n # NOTE: `as_numpy_dtype` is a property, so with the parentheses this is\n # constructing a new numpy object of the given type, which yields the\n # default value for that type.\n ignore_value = input_tensor.dtype.as_numpy_dtype()\n ignore_value = math_ops.cast(\n ignore_value, input_tensor.dtype, name='ignore_value')\n indices = array_ops.where(\n math_ops.not_equal(input_tensor, ignore_value), name='indices')\n return sparse_tensor_lib.SparseTensor(\n indices=indices,\n values=array_ops.gather_nd(input_tensor, indices, name='values'),\n dense_shape=array_ops.shape(\n input_tensor, out_type=dtypes.int64, name='dense_shape'))\n\n\ndef _normalize_feature_columns(feature_columns):\n \"\"\"Normalizes the `feature_columns` input.\n\n This method converts the `feature_columns` to list type as best as it can. In\n addition, verifies the type and other parts of feature_columns, required by\n downstream library.\n\n Args:\n feature_columns: The raw feature columns, usually passed by users.\n\n Returns:\n The normalized feature column list.\n\n Raises:\n ValueError: for any invalid inputs, such as empty, duplicated names, etc.\n \"\"\"\n if isinstance(feature_columns, _FeatureColumn):\n feature_columns = [feature_columns]\n\n if isinstance(feature_columns, collections.Iterator):\n feature_columns = list(feature_columns)\n\n if isinstance(feature_columns, dict):\n raise ValueError('Expected feature_columns to be iterable, found dict.')\n\n for column in feature_columns:\n if not isinstance(column, _FeatureColumn):\n raise ValueError('Items of feature_columns must be a _FeatureColumn. '\n 'Given (type {}): {}.'.format(type(column), column))\n if not feature_columns:\n raise ValueError('feature_columns must not be empty.')\n name_to_column = {}\n for column in feature_columns:\n if column.name in name_to_column:\n raise ValueError('Duplicate feature column name found for columns: {} '\n 'and {}. This usually means that these columns refer to '\n 'same base feature. 
Either one must be discarded or a '\n 'duplicated but renamed item must be inserted in '\n 'features dict.'.format(column,\n name_to_column[column.name]))\n name_to_column[column.name] = column\n\n return feature_columns\n\n\nclass _NumericColumn(_DenseColumn,\n collections.namedtuple('_NumericColumn', [\n 'key', 'shape', 'default_value', 'dtype',\n 'normalizer_fn'\n ])):\n \"\"\"see `numeric_column`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {\n self.key:\n parsing_ops.FixedLenFeature(self.shape, self.dtype,\n self.default_value)\n }\n\n def _transform_feature(self, inputs):\n input_tensor = inputs.get(self.key)\n if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n raise ValueError(\n 'The corresponding Tensor of numerical column must be a Tensor. '\n 'SparseTensor is not supported. key: {}'.format(self.key))\n if self.normalizer_fn is not None:\n input_tensor = self.normalizer_fn(input_tensor)\n return math_ops.cast(input_tensor, dtypes.float32)\n\n @property\n def _variable_shape(self):\n return tensor_shape.TensorShape(self.shape)\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n \"\"\"Returns dense `Tensor` representing numeric feature.\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n weight_collections: Unused `weight_collections` since no variables are\n created in this function.\n trainable: Unused `trainable` bool since no variables are created in\n this function.\n\n Returns:\n Dense `Tensor` created within `_transform_feature`.\n \"\"\"\n # Do nothing with weight_collections and trainable since no variables are\n # created in this function.\n del weight_collections\n del trainable\n # Feature has been already transformed. 
Return the intermediate\n # representation created by _transform_feature.\n return inputs.get(self)\n\n\nclass _BucketizedColumn(_DenseColumn, _CategoricalColumn,\n collections.namedtuple('_BucketizedColumn', [\n 'source_column', 'boundaries'])):\n \"\"\"See `bucketized_column`.\"\"\"\n\n @property\n def name(self):\n return '{}_bucketized'.format(self.source_column.name)\n\n @property\n def _parse_example_spec(self):\n return self.source_column._parse_example_spec # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n source_tensor = inputs.get(self.source_column)\n return math_ops._bucketize( # pylint: disable=protected-access\n source_tensor,\n boundaries=self.boundaries)\n\n @property\n def _variable_shape(self):\n return tensor_shape.TensorShape(\n tuple(self.source_column.shape) + (len(self.boundaries) + 1,))\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n del weight_collections\n del trainable\n input_tensor = inputs.get(self)\n return array_ops.one_hot(\n indices=math_ops.cast(input_tensor, dtypes.int64),\n depth=len(self.boundaries) + 1,\n on_value=1.,\n off_value=0.)\n\n @property\n def _num_buckets(self):\n # By construction, source_column is always one-dimensional.\n return (len(self.boundaries) + 1) * self.source_column.shape[0]\n\n def _get_sparse_tensors(self, inputs, weight_collections=None,\n trainable=None):\n \"\"\"Converts dense inputs to SparseTensor so downstream code can use it.\"\"\"\n input_tensor = inputs.get(self)\n batch_size = array_ops.shape(input_tensor)[0]\n # By construction, source_column is always one-dimensional.\n source_dimension = self.source_column.shape[0]\n\n i1 = array_ops.reshape(\n array_ops.tile(\n array_ops.expand_dims(math_ops.range(0, batch_size), 1),\n [1, source_dimension]),\n (-1,))\n i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])\n # Flatten the bucket indices and unique them across dimensions\n # E.g. 
2nd dimension indices will range from k to 2*k-1 with k buckets\n bucket_indices = (\n array_ops.reshape(input_tensor, (-1,)) +\n (len(self.boundaries) + 1) * i2)\n\n indices = math_ops.cast(\n array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)\n dense_shape = math_ops.cast(\n array_ops.stack([batch_size, source_dimension]), dtypes.int64)\n sparse_tensor = sparse_tensor_lib.SparseTensor(\n indices=indices,\n values=bucket_indices,\n dense_shape=dense_shape)\n return _CategoricalColumn.IdWeightPair(sparse_tensor, None)\n\n\nclass _EmbeddingColumn(\n _DenseColumn, _SequenceDenseColumn,\n collections.namedtuple(\n '_EmbeddingColumn',\n ('categorical_column', 'dimension', 'combiner', 'layer_creator',\n 'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):\n \"\"\"See `embedding_column`.\"\"\"\n\n @property\n def name(self):\n if not hasattr(self, '_name'):\n self._name = '{}_embedding'.format(self.categorical_column.name)\n return self._name\n\n @property\n def _parse_example_spec(self):\n return self.categorical_column._parse_example_spec # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n return inputs.get(self.categorical_column)\n\n @property\n def _variable_shape(self):\n if not hasattr(self, '_shape'):\n self._shape = tensor_shape.TensorShape([self.dimension])\n return self._shape\n\n def _get_dense_tensor_internal(self,\n inputs,\n weight_collections=None,\n trainable=None):\n \"\"\"Private method that follows the signature of _get_dense_tensor.\"\"\"\n # Get sparse IDs and weights.\n sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access\n inputs, weight_collections=weight_collections, trainable=trainable)\n sparse_ids = sparse_tensors.id_tensor\n sparse_weights = sparse_tensors.weight_tensor\n\n embedding_weights = self.layer_creator(\n weight_collections=weight_collections,\n scope=variable_scope.get_variable_scope())\n\n if self.ckpt_to_load_from is not None:\n to_restore = embedding_weights\n if isinstance(to_restore, variables.PartitionedVariable):\n to_restore = to_restore._get_variable_list() # pylint: disable=protected-access\n checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {\n self.tensor_name_in_ckpt: to_restore\n })\n\n # Return embedding lookup result.\n return embedding_ops.safe_embedding_lookup_sparse(\n embedding_weights=embedding_weights,\n sparse_ids=sparse_ids,\n sparse_weights=sparse_weights,\n combiner=self.combiner,\n name='%s_weights' % self.name,\n max_norm=self.max_norm)\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In embedding_column: {}. '\n 'categorical_column must not be of type _SequenceCategoricalColumn. '\n 'Suggested fix A: If you wish to use input_layer, use a '\n 'non-sequence categorical_column_with_*. '\n 'Suggested fix B: If you wish to create sequence input, use '\n 'sequence_input_layer instead of input_layer. '\n 'Given (type {}): {}'.format(\n self.name, type(self.categorical_column),\n self.categorical_column))\n return self._get_dense_tensor_internal(\n inputs=inputs,\n weight_collections=weight_collections,\n trainable=trainable)\n\n def _get_sequence_dense_tensor(\n self, inputs, weight_collections=None, trainable=None):\n if not isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In embedding_column: {}. 
'\n 'categorical_column must be of type _SequenceCategoricalColumn '\n 'to use sequence_input_layer. '\n 'Suggested fix: Use one of sequence_categorical_column_with_*. '\n 'Given (type {}): {}'.format(\n self.name, type(self.categorical_column),\n self.categorical_column))\n dense_tensor = self._get_dense_tensor_internal( # pylint: disable=protected-access\n inputs=inputs,\n weight_collections=weight_collections,\n trainable=trainable)\n\n sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n sequence_length = fc_utils.sequence_length_from_sparse_tensor(\n sparse_tensors.id_tensor)\n return _SequenceDenseColumn.TensorSequenceLengthPair(\n dense_tensor=dense_tensor, sequence_length=sequence_length)\n\n\ndef _get_graph_for_variable(var):\n if isinstance(var, variables.PartitionedVariable):\n return list(var)[0].graph\n else:\n return var.graph\n\n\nclass _SharedEmbeddingColumn(\n _DenseColumn, _SequenceDenseColumn,\n collections.namedtuple(\n '_SharedEmbeddingColumn',\n ('categorical_column', 'dimension', 'combiner', 'initializer',\n 'shared_embedding_collection_name', 'ckpt_to_load_from',\n 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):\n \"\"\"See `embedding_column`.\"\"\"\n\n @property\n def name(self):\n if not hasattr(self, '_name'):\n self._name = '{}_shared_embedding'.format(self.categorical_column.name)\n return self._name\n\n @property\n def _var_scope_name(self):\n return self.shared_embedding_collection_name\n\n @property\n def _parse_example_spec(self):\n return self.categorical_column._parse_example_spec # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n return inputs.get(self.categorical_column)\n\n @property\n def _variable_shape(self):\n if not hasattr(self, '_shape'):\n self._shape = tensor_shape.TensorShape([self.dimension])\n return self._shape\n\n def _get_dense_tensor_internal(self,\n inputs,\n weight_collections=None,\n trainable=None):\n \"\"\"Private method that follows the signature of _get_dense_tensor.\"\"\"\n # This method is called from a variable_scope with name _var_scope_name,\n # which is shared among all shared embeddings. Open a name_scope here, so\n # that the ops for different columns have distinct names.\n with ops.name_scope(None, default_name=self.name):\n # Get sparse IDs and weights.\n sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access\n inputs, weight_collections=weight_collections, trainable=trainable)\n sparse_ids = sparse_tensors.id_tensor\n sparse_weights = sparse_tensors.weight_tensor\n\n embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access\n shared_embedding_collection = ops.get_collection(\n self.shared_embedding_collection_name)\n if shared_embedding_collection:\n if len(shared_embedding_collection) > 1:\n raise ValueError(\n 'Collection {} can only contain one variable. '\n 'Suggested fix A: Choose a unique name for this collection. '\n 'Suggested fix B: Do not add any variables to this collection. '\n 'The feature_column library already adds a variable under the '\n 'hood.'.format(shared_embedding_collection))\n embedding_weights = shared_embedding_collection[0]\n if embedding_weights.get_shape() != embedding_shape:\n raise ValueError(\n 'Shared embedding collection {} contains variable {} of '\n 'unexpected shape {}. Expected shape is {}. '\n 'Suggested fix A: Choose a unique name for this collection. 
'\n 'Suggested fix B: Do not add any variables to this collection. '\n 'The feature_column library already adds a variable under the '\n 'hood.'.format(self.shared_embedding_collection_name,\n embedding_weights.name,\n embedding_weights.get_shape(), embedding_shape))\n else:\n embedding_weights = variable_scope.get_variable(\n name='embedding_weights',\n shape=embedding_shape,\n dtype=dtypes.float32,\n initializer=self.initializer,\n trainable=self.trainable and trainable,\n collections=weight_collections)\n ops.add_to_collection(self.shared_embedding_collection_name,\n embedding_weights)\n if self.ckpt_to_load_from is not None:\n to_restore = embedding_weights\n if isinstance(to_restore, variables.PartitionedVariable):\n to_restore = to_restore._get_variable_list() # pylint: disable=protected-access\n checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {\n self.tensor_name_in_ckpt: to_restore\n })\n\n # Return embedding lookup result.\n return embedding_ops.safe_embedding_lookup_sparse(\n embedding_weights=embedding_weights,\n sparse_ids=sparse_ids,\n sparse_weights=sparse_weights,\n combiner=self.combiner,\n name='%s_weights' % self.name,\n max_norm=self.max_norm)\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In embedding_column: {}. '\n 'categorical_column must not be of type _SequenceCategoricalColumn. '\n 'Suggested fix A: If you wish to use input_layer, use a '\n 'non-sequence categorical_column_with_*. '\n 'Suggested fix B: If you wish to create sequence input, use '\n 'sequence_input_layer instead of input_layer. '\n 'Given (type {}): {}'.format(self.name, type(self.categorical_column),\n self.categorical_column))\n return self._get_dense_tensor_internal(\n inputs=inputs,\n weight_collections=weight_collections,\n trainable=trainable)\n\n def _get_sequence_dense_tensor(self,\n inputs,\n weight_collections=None,\n trainable=None):\n if not isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In embedding_column: {}. '\n 'categorical_column must be of type _SequenceCategoricalColumn '\n 'to use sequence_input_layer. '\n 'Suggested fix: Use one of sequence_categorical_column_with_*. '\n 'Given (type {}): {}'.format(self.name, type(self.categorical_column),\n self.categorical_column))\n dense_tensor = self._get_dense_tensor_internal( # pylint: disable=protected-access\n inputs=inputs,\n weight_collections=weight_collections,\n trainable=trainable)\n sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n sequence_length = fc_utils.sequence_length_from_sparse_tensor(\n sparse_tensors.id_tensor)\n return _SequenceDenseColumn.TensorSequenceLengthPair(\n dense_tensor=dense_tensor, sequence_length=sequence_length)\n\n\ndef _check_shape(shape, key):\n \"\"\"Returns shape if it's valid, raises error otherwise.\"\"\"\n assert shape is not None\n if not nest.is_sequence(shape):\n shape = [shape]\n shape = tuple(shape)\n for dimension in shape:\n if not isinstance(dimension, six.integer_types):\n raise TypeError('shape dimensions must be integer. '\n 'shape: {}, key: {}'.format(shape, key))\n if dimension < 1:\n raise ValueError('shape dimensions must be greater than 0. 
'\n 'shape: {}, key: {}'.format(shape, key))\n return shape\n\n\nclass _HashedCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_HashedCategoricalColumn',\n ['key', 'hash_bucket_size', 'dtype'])):\n \"\"\"see `categorical_column_with_hash_bucket`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(self.dtype)}\n\n def _transform_feature(self, inputs):\n input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))\n if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n raise ValueError('SparseColumn input must be a SparseTensor.')\n\n fc_utils.assert_string_or_int(\n input_tensor.dtype,\n prefix='column_name: {} input_tensor'.format(self.key))\n\n if self.dtype.is_integer != input_tensor.dtype.is_integer:\n raise ValueError(\n 'Column dtype and SparseTensors dtype must be compatible. '\n 'key: {}, column dtype: {}, tensor dtype: {}'.format(\n self.key, self.dtype, input_tensor.dtype))\n\n if self.dtype == dtypes.string:\n sparse_values = input_tensor.values\n else:\n sparse_values = string_ops.as_string(input_tensor.values)\n\n sparse_id_values = string_ops.string_to_hash_bucket_fast(\n sparse_values, self.hash_bucket_size, name='lookup')\n return sparse_tensor_lib.SparseTensor(\n input_tensor.indices, sparse_id_values, input_tensor.dense_shape)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return self.hash_bucket_size\n\n def _get_sparse_tensors(self, inputs, weight_collections=None,\n trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\nclass _VocabularyFileCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_VocabularyFileCategoricalColumn', (\n 'key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'dtype',\n 'default_value'\n ))):\n \"\"\"See `categorical_column_with_vocabulary_file`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(self.dtype)}\n\n def _transform_feature(self, inputs):\n input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))\n\n if self.dtype.is_integer != input_tensor.dtype.is_integer:\n raise ValueError(\n 'Column dtype and SparseTensors dtype must be compatible. 
'\n 'key: {}, column dtype: {}, tensor dtype: {}'.format(\n self.key, self.dtype, input_tensor.dtype))\n\n fc_utils.assert_string_or_int(\n input_tensor.dtype,\n prefix='column_name: {} input_tensor'.format(self.key))\n\n key_dtype = self.dtype\n if input_tensor.dtype.is_integer:\n # `index_table_from_file` requires 64-bit integer keys.\n key_dtype = dtypes.int64\n input_tensor = math_ops.cast(input_tensor, dtypes.int64)\n\n return lookup_ops.index_table_from_file(\n vocabulary_file=self.vocabulary_file,\n num_oov_buckets=self.num_oov_buckets,\n vocab_size=self.vocabulary_size,\n default_value=self.default_value,\n key_dtype=key_dtype,\n name='{}_lookup'.format(self.key)).lookup(input_tensor)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return self.vocabulary_size + self.num_oov_buckets\n\n def _get_sparse_tensors(\n self, inputs, weight_collections=None, trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\nclass _VocabularyListCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_VocabularyListCategoricalColumn', (\n 'key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'\n ))):\n \"\"\"See `categorical_column_with_vocabulary_list`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(self.dtype)}\n\n def _transform_feature(self, inputs):\n input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))\n\n if self.dtype.is_integer != input_tensor.dtype.is_integer:\n raise ValueError(\n 'Column dtype and SparseTensors dtype must be compatible. '\n 'key: {}, column dtype: {}, tensor dtype: {}'.format(\n self.key, self.dtype, input_tensor.dtype))\n\n fc_utils.assert_string_or_int(\n input_tensor.dtype,\n prefix='column_name: {} input_tensor'.format(self.key))\n\n key_dtype = self.dtype\n if input_tensor.dtype.is_integer:\n # `index_table_from_tensor` requires 64-bit integer keys.\n key_dtype = dtypes.int64\n input_tensor = math_ops.cast(input_tensor, dtypes.int64)\n\n return lookup_ops.index_table_from_tensor(\n vocabulary_list=tuple(self.vocabulary_list),\n default_value=self.default_value,\n num_oov_buckets=self.num_oov_buckets,\n dtype=key_dtype,\n name='{}_lookup'.format(self.key)).lookup(input_tensor)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return len(self.vocabulary_list) + self.num_oov_buckets\n\n def _get_sparse_tensors(\n self, inputs, weight_collections=None, trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\nclass _IdentityCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_IdentityCategoricalColumn', (\n 'key', 'num_buckets', 'default_value'\n ))):\n\n \"\"\"See `categorical_column_with_identity`.\"\"\"\n\n @property\n def name(self):\n return self.key\n\n @property\n def _parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}\n\n def _transform_feature(self, inputs):\n input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))\n\n if not input_tensor.dtype.is_integer:\n raise ValueError(\n 'Invalid input, not integer. 
key: {} dtype: {}'.format(\n self.key, input_tensor.dtype))\n\n values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')\n num_buckets = math_ops.cast(\n self.num_buckets, dtypes.int64, name='num_buckets')\n zero = math_ops.cast(0, dtypes.int64, name='zero')\n if self.default_value is None:\n # Fail if values are out-of-range.\n assert_less = check_ops.assert_less(\n values, num_buckets, data=(values, num_buckets),\n name='assert_less_than_num_buckets')\n assert_greater = check_ops.assert_greater_equal(\n values, zero, data=(values,),\n name='assert_greater_or_equal_0')\n with ops.control_dependencies((assert_less, assert_greater)):\n values = array_ops.identity(values)\n else:\n # Assign default for out-of-range values.\n values = array_ops.where(\n math_ops.logical_or(\n values < zero, values >= num_buckets, name='out_of_range'),\n array_ops.fill(\n dims=array_ops.shape(values),\n value=math_ops.cast(self.default_value, dtypes.int64),\n name='default_values'), values)\n\n return sparse_tensor_lib.SparseTensor(\n indices=input_tensor.indices,\n values=values,\n dense_shape=input_tensor.dense_shape)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return self.num_buckets\n\n def _get_sparse_tensors(\n self, inputs, weight_collections=None, trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\nclass _WeightedCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple('_WeightedCategoricalColumn', (\n 'categorical_column', 'weight_feature_key', 'dtype'\n ))):\n \"\"\"See `weighted_categorical_column`.\"\"\"\n\n @property\n def name(self):\n return '{}_weighted_by_{}'.format(\n self.categorical_column.name, self.weight_feature_key)\n\n @property\n def _parse_example_spec(self):\n config = self.categorical_column._parse_example_spec # pylint: disable=protected-access\n if self.weight_feature_key in config:\n raise ValueError('Parse config {} already exists for {}.'.format(\n config[self.weight_feature_key], self.weight_feature_key))\n config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)\n return config\n\n @property\n def _num_buckets(self):\n return self.categorical_column._num_buckets # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n weight_tensor = inputs.get(self.weight_feature_key)\n if weight_tensor is None:\n raise ValueError('Missing weights {}.'.format(self.weight_feature_key))\n weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(\n weight_tensor)\n if self.dtype != weight_tensor.dtype.base_dtype:\n raise ValueError('Bad dtype, expected {}, but got {}.'.format(\n self.dtype, weight_tensor.dtype))\n if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):\n # The weight tensor can be a regular Tensor. 
In this case, sparsify it.\n weight_tensor = _to_sparse_input_and_drop_ignore_values(\n weight_tensor, ignore_value=0.0)\n if not weight_tensor.dtype.is_floating:\n weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)\n return (inputs.get(self.categorical_column), weight_tensor)\n\n def _get_sparse_tensors(\n self, inputs, weight_collections=None, trainable=None):\n del weight_collections\n del trainable\n tensors = inputs.get(self)\n return _CategoricalColumn.IdWeightPair(tensors[0], tensors[1])\n\n\nclass _CrossedColumn(\n _CategoricalColumn,\n collections.namedtuple('_CrossedColumn',\n ['keys', 'hash_bucket_size', 'hash_key'])):\n \"\"\"See `crossed_column`.\"\"\"\n\n @property\n def name(self):\n feature_names = []\n for key in _collect_leaf_level_keys(self):\n if isinstance(key, _FeatureColumn):\n feature_names.append(key.name)\n else: # key must be a string\n feature_names.append(key)\n return '_X_'.join(sorted(feature_names))\n\n @property\n def _parse_example_spec(self):\n config = {}\n for key in self.keys:\n if isinstance(key, _FeatureColumn):\n config.update(key._parse_example_spec) # pylint: disable=protected-access\n else: # key must be a string\n config.update({key: parsing_ops.VarLenFeature(dtypes.string)})\n return config\n\n def _transform_feature(self, inputs):\n feature_tensors = []\n for key in _collect_leaf_level_keys(self):\n if isinstance(key, six.string_types):\n feature_tensors.append(inputs.get(key))\n elif isinstance(key, _CategoricalColumn):\n ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access\n if ids_and_weights.weight_tensor is not None:\n raise ValueError(\n 'crossed_column does not support weight_tensor, but the given '\n 'column populates weight_tensor. '\n 'Given column: {}'.format(key.name))\n feature_tensors.append(ids_and_weights.id_tensor)\n else:\n raise ValueError('Unsupported column type. 
Given: {}'.format(key))\n return sparse_ops.sparse_cross_hashed(\n inputs=feature_tensors,\n num_buckets=self.hash_bucket_size,\n hash_key=self.hash_key)\n\n @property\n def _num_buckets(self):\n \"\"\"Returns number of buckets in this sparse feature.\"\"\"\n return self.hash_bucket_size\n\n def _get_sparse_tensors(self, inputs, weight_collections=None,\n trainable=None):\n return _CategoricalColumn.IdWeightPair(inputs.get(self), None)\n\n\ndef _collect_leaf_level_keys(cross):\n \"\"\"Collects base keys by expanding all nested crosses.\n\n Args:\n cross: A `_CrossedColumn`.\n\n Returns:\n A list of strings or `_CategoricalColumn` instances.\n \"\"\"\n leaf_level_keys = []\n for k in cross.keys:\n if isinstance(k, _CrossedColumn):\n leaf_level_keys.extend(_collect_leaf_level_keys(k))\n else:\n leaf_level_keys.append(k)\n return leaf_level_keys\n\n\nclass _IndicatorColumn(_DenseColumn, _SequenceDenseColumn,\n collections.namedtuple('_IndicatorColumn',\n ['categorical_column'])):\n \"\"\"Represents a one-hot column for use in deep networks.\n\n Args:\n categorical_column: A `_CategoricalColumn` which is created by\n `categorical_column_with_*` function.\n \"\"\"\n\n @property\n def name(self):\n return '{}_indicator'.format(self.categorical_column.name)\n\n def _transform_feature(self, inputs):\n \"\"\"Returns dense `Tensor` representing feature.\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n\n Returns:\n Transformed feature `Tensor`.\n\n Raises:\n ValueError: if input rank is not known at graph building time.\n \"\"\"\n id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n id_tensor = id_weight_pair.id_tensor\n weight_tensor = id_weight_pair.weight_tensor\n\n # If the underlying column is weighted, return the input as a dense tensor.\n if weight_tensor is not None:\n weighted_column = sparse_ops.sparse_merge(\n sp_ids=id_tensor,\n sp_values=weight_tensor,\n vocab_size=int(self._variable_shape[-1]))\n # Remove (?, -1) index.\n weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],\n weighted_column.dense_shape)\n # Use scatter_nd to merge duplicated indices if existed,\n # instead of sparse_tensor_to_dense.\n return array_ops.scatter_nd(weighted_column.indices,\n weighted_column.values,\n weighted_column.dense_shape)\n\n dense_id_tensor = sparse_ops.sparse_tensor_to_dense(\n id_tensor, default_value=-1)\n\n # One hot must be float for tf.concat reasons since all other inputs to\n # input_layer are float32.\n one_hot_id_tensor = array_ops.one_hot(\n dense_id_tensor,\n depth=self._variable_shape[-1],\n on_value=1.0,\n off_value=0.0)\n\n # Reduce to get a multi-hot per example.\n return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])\n\n @property\n def _parse_example_spec(self):\n return self.categorical_column._parse_example_spec # pylint: disable=protected-access\n\n @property\n def _variable_shape(self):\n \"\"\"Returns a `TensorShape` representing the shape of the dense `Tensor`.\"\"\"\n return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access\n\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n \"\"\"Returns dense `Tensor` representing feature.\n\n Args:\n inputs: A `_LazyBuilder` object to access inputs.\n weight_collections: Unused `weight_collections` since no variables are\n created in this function.\n trainable: Unused `trainable` bool since no variables are created in\n this function.\n\n Returns:\n Dense `Tensor` 
created within `_transform_feature`.\n\n Raises:\n ValueError: If `categorical_column` is a `_SequenceCategoricalColumn`.\n \"\"\"\n # Do nothing with weight_collections and trainable since no variables are\n # created in this function.\n del weight_collections\n del trainable\n if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In indicator_column: {}. '\n 'categorical_column must not be of type _SequenceCategoricalColumn. '\n 'Suggested fix A: If you wish to use input_layer, use a '\n 'non-sequence categorical_column_with_*. '\n 'Suggested fix B: If you wish to create sequence input, use '\n 'sequence_input_layer instead of input_layer. '\n 'Given (type {}): {}'.format(\n self.name, type(self.categorical_column),\n self.categorical_column))\n # Feature has been already transformed. Return the intermediate\n # representation created by _transform_feature.\n return inputs.get(self)\n\n def _get_sequence_dense_tensor(\n self, inputs, weight_collections=None, trainable=None):\n # Do nothing with weight_collections and trainable since no variables are\n # created in this function.\n del weight_collections\n del trainable\n if not isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError(\n 'In indicator_column: {}. '\n 'categorical_column must be of type _SequenceCategoricalColumn '\n 'to use sequence_input_layer. '\n 'Suggested fix: Use one of sequence_categorical_column_with_*. '\n 'Given (type {}): {}'.format(\n self.name, type(self.categorical_column),\n self.categorical_column))\n # Feature has been already transformed. Return the intermediate\n # representation created by _transform_feature.\n dense_tensor = inputs.get(self)\n sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n sequence_length = fc_utils.sequence_length_from_sparse_tensor(\n sparse_tensors.id_tensor)\n return _SequenceDenseColumn.TensorSequenceLengthPair(\n dense_tensor=dense_tensor, sequence_length=sequence_length)\n\n\ndef _verify_static_batch_size_equality(tensors, columns):\n \"\"\"Validates that the first dim (batch size) of all tensors are equal or None.\n\n Args:\n tensors: list of tensors to check.\n columns: list of feature columns matching tensors. Will be used for error\n messaging.\n\n Raises:\n ValueError: if one of the tensors has a variant batch size\n \"\"\"\n # bath_size is a tf.compat.v1.Dimension object.\n expected_batch_size = None\n for i in range(0, len(tensors)):\n if tensors[i].shape.dims[0].value is not None:\n if expected_batch_size is None:\n bath_size_column_index = i\n expected_batch_size = tensors[i].shape.dims[0]\n elif not expected_batch_size.is_compatible_with(tensors[i].shape.dims[0]):\n raise ValueError(\n 'Batch size (first dimension) of each feature must be same. 
'\n 'Batch size of columns ({}, {}): ({}, {})'.format(\n columns[bath_size_column_index].name, columns[i].name,\n expected_batch_size, tensors[i].shape.dims[0]))\n\n\nclass _SequenceCategoricalColumn(\n _CategoricalColumn,\n collections.namedtuple(\n '_SequenceCategoricalColumn', ['categorical_column'])):\n \"\"\"Represents sequences of categorical data.\"\"\"\n\n @property\n def name(self):\n return self.categorical_column.name\n\n @property\n def _parse_example_spec(self):\n return self.categorical_column._parse_example_spec # pylint: disable=protected-access\n\n def _transform_feature(self, inputs):\n return self.categorical_column._transform_feature(inputs) # pylint: disable=protected-access\n\n @property\n def _num_buckets(self):\n return self.categorical_column._num_buckets # pylint: disable=protected-access\n\n def _get_sparse_tensors(self, inputs, weight_collections=None,\n trainable=None):\n sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access\n id_tensor = sparse_tensors.id_tensor\n weight_tensor = sparse_tensors.weight_tensor\n\n # Expands third dimension, if necessary so that embeddings are not\n # combined during embedding lookup. If the tensor is already 3D, leave\n # as-is.\n shape = array_ops.shape(id_tensor)\n # Compute the third dimension explicitly instead of setting it to -1, as\n # that doesn't work for dynamically shaped tensors with 0-length at runtime.\n # This happens for empty sequences.\n target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]\n id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)\n if weight_tensor is not None:\n weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)\n\n return _CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)\n"
] | [
[
"tensorflow.python.ops.check_ops.assert_greater_equal",
"tensorflow.python.ops.array_ops.gather_nd",
"tensorflow.python.ops.sparse_ops.sparse_slice",
"tensorflow.python.platform.gfile.GFile",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.platform.tf_logging.debug",
"tensorflow.python.ops.template.make_template",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.sparse_ops.sparse_reshape",
"tensorflow.python.ops.string_ops.string_to_hash_bucket_fast",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.parsing_ops.VarLenFeature",
"tensorflow.python.ops.sparse_ops.sparse_cross_hashed",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.array_ops.one_hot",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.check_ops.assert_less",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.ops.math_ops._bucketize",
"tensorflow.python.framework.sparse_tensor.convert_to_tensor_or_sparse_tensor",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.training.checkpoint_utils.init_from_checkpoint",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.embedding_ops.safe_embedding_lookup_sparse",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.logical_or",
"tensorflow.python.ops.parsing_ops.FixedLenFeature",
"tensorflow.python.feature_column.utils.check_default_value",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.feature_column.utils.assert_key_is_string",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.ops.string_ops.as_string",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.feature_column.utils.sequence_length_from_sparse_tensor",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.reshape",
"numpy.array",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense",
"tensorflow.python.ops.array_ops.scatter_nd"
]
] |
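The private classes in the file above back TensorFlow's public feature-column API; a minimal usage sketch, assuming the tf.feature_column module path (it differs between TF 1.x and 2.x releases):

import tensorflow as tf

# Assumed public entry points (tf.feature_column.*); they construct the
# _CrossedColumn and _IndicatorColumn classes shown in the record above.
crossed = tf.feature_column.crossed_column(["colour", "shape"], hash_bucket_size=100)
dense = tf.feature_column.indicator_column(crossed)

# The name follows the `name` properties above: sorted keys joined by '_X_', plus '_indicator'.
print(dense.name)  # colour_X_shape_indicator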
JosephMeghanath/apptuit-py | [
"ae0d038931efca94435e3a5efe5e4a4ed6f1956e"
] | [
"tests/test_query.py"
] | [
"\"\"\"\nTests for the query API\n\"\"\"\n\nimport sys\n\ntry:\n from unittest.mock import Mock, patch\nexcept ImportError:\n from mock import Mock, patch\n\nfrom nose.tools import assert_is_not_none, assert_is_none, assert_equals, assert_true, assert_raises\nimport pandas as pd\nimport requests\nfrom apptuit import Apptuit, ApptuitException, apptuit_client\n\n\ndef get_mock_response():\n \"\"\"\n Returns a mock response for the get request\n \"\"\"\n with open('tests/response.json') as f:\n return f.readlines()[0]\n\n\ndef test_api_endpoint_param():\n \"\"\"\n Test the api_endpoint param of apptuit client\n \"\"\"\n _ = Apptuit(sanitize_mode=None, token=\"test_token\", api_endpoint=\"https://api.apptuit.ai/\")\n with assert_raises(ValueError):\n _ = Apptuit(sanitize_mode=None, token=\"test_token\", api_endpoint=None)\n with assert_raises(ValueError):\n _ = Apptuit(sanitize_mode=None, token=\"test_token\", api_endpoint=\"\")\n\n\ndef do_query(mock_get):\n \"\"\"\n Execute the query API and return the mock response\n \"\"\"\n mock_get.return_value.content = get_mock_response()\n mock_get.return_value.status_code = 200\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n return client.query(query, start, end)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_query(mock_get):\n \"\"\"\n Test a valid query and make sure results are returned\n \"\"\"\n resp = do_query(mock_get)\n assert_is_not_none(resp[0])\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_query_result_number_index(mock_get):\n \"\"\"\n Test that we can access the output by number based indexing from\n the query result\n \"\"\"\n resp = do_query(mock_get)\n df = resp[0].to_df()\n assert_is_not_none(df)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_query_result_string_index(mock_get):\n \"\"\"\n Test that we can access the output by the name of the metric from the\n query result\n \"\"\"\n resp = do_query(mock_get)\n df = resp[\"nyc.taxi.rides\"].to_df()\n assert_is_not_none(df)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_df_shape(mock_get):\n \"\"\"\n Verify the dataframe shape\n \"\"\"\n resp = do_query(mock_get)\n df = resp[0].to_df()\n assert_equals(df.shape, (432, 1))\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_number_of_series(mock_get):\n \"\"\"\n Verify the number of time series in the query result\n \"\"\"\n resp = do_query(mock_get)\n assert_equals(len(resp[0].series), 1)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_data(mock_get):\n \"\"\"\n Verify the data returned from the query\n \"\"\"\n expected_df = pd.read_csv('tests/nyc.taxi.rides.csv', index_col=0, header=0, parse_dates=True)\n resp = do_query(mock_get)\n df = resp[0].to_df()\n assert_true(df.equals(expected_df))\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_metadata(mock_get):\n \"\"\"\n Test that the metadata of the query results are as expected\n \"\"\"\n expected_series_name = \"nyc.taxi.rides\"\n expected_tags = {\"host\": \"localhost\"}\n resp = do_query(mock_get)\n series = resp[0].series[0]\n assert_equals(series.name.metric, expected_series_name)\n assert_equals(series.name.tags, expected_tags)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_multiple_retries(mock_get):\n \"\"\"\n Test that the query API attempts retries when an error is returned from\n the backend API. 
Since we patch the status code as 504 and create an HTTPError\n as a side effect of the get call, we cannot verify that the retries succeed.\n \"\"\"\n mock_get.return_value.content = get_mock_response()\n mock_get.return_value.status_code = 504\n mock_get.side_effect = requests.exceptions.HTTPError\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n with assert_raises(ApptuitException):\n client.query(query, start, end, retry_count=3)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_get_error(mock_get):\n \"\"\"\n Test that when the retry_count is 0 for the query API we get an exception\n \"\"\"\n mock_get.return_value.content = get_mock_response()\n mock_get.return_value.status_code = 504\n mock_get.side_effect = requests.exceptions.HTTPError()\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n with assert_raises(ApptuitException):\n client.query(query, start, end, retry_count=0)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_empty_dps(mock_get):\n \"\"\"\n Test that we get an exception if the dps array is empty in the JSON response\n \"\"\"\n mock_get.return_value.content = '{\"outputs\":[{\"id\":\"nyc:taxi:rides\",\"result\":[{ \\\n \"metric\":\"nyc.taxi.rides\",\"tags\":{\"host\":\"localhost\"}, \\\n \"aggregatedTags\":[],\"dps\":[]}]}], \\\n \"hints\":[],\"query\": {\"querytext\":\"fetch(\\'nyc.taxi.rides\\')\", \\\n \"startTime\":1406831400, \\\n \"startTimeHumanReadableSYS\":\"July 31, 2014 6:30:00 PM UTC\", \\\n \"startTimeHumanReadableIST\":\"August 1, 2014 12:00:00 AM IST\", \\\n \"endTime\":1407609000, \"endTimeHumanReadableSYS\":\"August 9,2014 6:30:00 PM UTC\", \\\n \"endTimeHumanReadableIST\":\"August 10, 2014 12:00:00 AM IST\", \\\n \"digest\":\"Mdt8e+HDjnGByMMJdEnTnNdUxKo=:60845\", \"optionsdigest\":\"\", \\\n \"options\":\"{}\"},\"query_stats\":{\"compactedRows\":217, \"processedRows\":217, \\\n \"dataPointsProcessed\":219, \"numSeries\":1, \"queryTimeMillis\":152, \\\n \"hbaseTimeMillis\":21},\"timing_diagnostics\": \\\n [{\"tag\":\"QUERY_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":152},{\"tag\":\"AST_BUILD_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":29}, \\\n {\"tag\":\"AST_JYTHON_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":29},{\"tag\":\"STATEMENT_VALIDATION_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_BUILDING_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0},{\"tag\":\"QUERY_OPTIMIZATION_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":106},{\"tag\":\"SCHEMA_SERVICE_FETCH_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":93}, \\\n {\"tag\":\"DATASOURCE_FETCH_RUN_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":32},{\"tag\":\"TSD_HBASE_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":21}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":52},{\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_TAGS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":51}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_DPS_TIME\", \"instanceCount\":2, \\\n 
\"totalElapsedTimeMillis\":0},{\"tag\":\"DATASOURCE_FETCH_DP_DECODE_CORE_PROCESSING_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_DS_WAIT_TIME\", \"instanceCount\":4, \\\n \"totalElapsedTimeMillis\":0},{\"tag\":\"DATASOURCE_FETCH_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":12}, \\\n {\"tag\":\"PLAN_EXECUTION_JPY_REMOVE_DF_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":17},{\"tag\":\"RESULT_DATA_MARSHALLING_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}]}'\n mock_get.return_value.status_code = 200\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n client.query(query, start, end)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_empty_output(mock_get):\n \"\"\"\n Test the case when the outputs array is empty in the response\n \"\"\"\n mock_get.return_value.content = '{\"outputs\":[],\"hints\":[],\"query\": \\\n {\"querytext\":\"fetch(\\'nyc.taxi.rides\\')\", \\\n \"startTime\":1406831400, \"startTimeHumanReadableSYS\":\"July 31, 2014 6:30:00 PM UTC\", \\\n \"startTimeHumanReadableIST\":\"August 1, 2014 12:00:00 AM IST\", \"endTime\":1407609000, \\\n \"endTimeHumanReadableSYS\":\"August 9, 2014 6:30:00 PM UTC\", \\\n \"endTimeHumanReadableIST\":\"August 10, 2014 12:00:00 AM IST\", \\\n \"digest\":\"Mdt8e+HDjnGByMMJdEnTnNdUxKo=:60845\", \"optionsdigest\":\"\", \\\n \"options\":\"{}\"},\"query_stats\":{\"compactedRows\":217, \"processedRows\":217, \\\n \"dataPointsProcessed\":219, \"numSeries\":1, \"queryTimeMillis\":152, \\\n \"hbaseTimeMillis\":21},\"timing_diagnostics\": \\\n [{\"tag\":\"QUERY_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":152},{\"tag\":\"AST_BUILD_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":29}, \\\n {\"tag\":\"AST_JYTHON_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":29},{\"tag\":\"STATEMENT_VALIDATION_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_BUILDING_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0},{\"tag\":\"QUERY_OPTIMIZATION_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":106},{\"tag\":\"SCHEMA_SERVICE_FETCH_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":93}, \\\n {\"tag\":\"DATASOURCE_FETCH_RUN_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":32},{\"tag\":\"TSD_HBASE_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":21}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":52},{\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_TAGS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":51}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_DPS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_CORE_PROCESSING_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_DS_WAIT_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":12},{\"tag\":\"PLAN_EXECUTION_JPY_REMOVE_DF_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":17}, \\\n 
{\"tag\":\"RESULT_DATA_MARSHALLING_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0}]}'\n mock_get.return_value.status_code = 200\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n resp = client.query(query, start, end)\n assert_is_none(resp)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_empty_results(mock_get):\n \"\"\"\n Test that when results array is empty in the response and we try to access the\n outputs in the results object we get a KeyError\n \"\"\"\n mock_get.return_value.content = '{\"outputs\":[{\"id\":\"nyc:taxi:rides\", \\\n \"result\":[]}],\"hints\":[],\"query\": \\\n {\"querytext\":\"fetch(\\'nyc.taxi.rides\\')\", \\\n \"startTime\":1406831400, \\\n \"startTimeHumanReadableSYS\":\"July 31, 2014 6:30:00 PM UTC\", \\\n \"startTimeHumanReadableIST\":\"August 1, 2014 12:00:00 AM IST\", \\\n \"endTime\":1407609000, \\\n \"endTimeHumanReadableSYS\":\"August 9, 2014 6:30:00 PM UTC\", \\\n \"endTimeHumanReadableIST\":\"August 10, 2014 12:00:00 AM IST\", \\\n \"digest\":\"Mdt8e+HDjnGByMMJdEnTnNdUxKo=:60845\", \\\n \"optionsdigest\":\"\", \"options\":\"{}\"}, \\\n \"query_stats\":{\"compactedRows\":217, \"processedRows\":217, \\\n \"dataPointsProcessed\":219, \"numSeries\":1, \"queryTimeMillis\":152, \\\n \"hbaseTimeMillis\":21}, \\\n \"timing_diagnostics\":[{\"tag\":\"QUERY_EXECUTION_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":152}, \\\n {\"tag\":\"AST_BUILD_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":29},{\"tag\":\"AST_JYTHON_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":29}, \\\n {\"tag\":\"STATEMENT_VALIDATION_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_BUILDING_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0},{\"tag\":\"QUERY_OPTIMIZATION_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"PLAN_EXECUTION_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":106},{\"tag\":\"SCHEMA_SERVICE_FETCH_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":93}, \\\n {\"tag\":\"DATASOURCE_FETCH_RUN_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":32},{\"tag\":\"TSD_HBASE_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":21}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_TIME\", \"instanceCount\":2, \\\n \"totalElapsedTimeMillis\":52}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_TAGS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":51}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_GET_DPS_TIME\", \\\n \"instanceCount\":2, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_CORE_PROCESSING_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_DP_DECODE_DS_WAIT_TIME\", \\\n \"instanceCount\":4, \"totalElapsedTimeMillis\":0}, \\\n {\"tag\":\"DATASOURCE_FETCH_TOTAL_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":12}, \\\n {\"tag\":\"PLAN_EXECUTION_JPY_REMOVE_DF_TOTAL_TIME\", \\\n \"instanceCount\":1, \"totalElapsedTimeMillis\":17}, \\\n {\"tag\":\"RESULT_DATA_MARSHALLING_TIME\", \"instanceCount\":1, \\\n \"totalElapsedTimeMillis\":0}]}'\n mock_get.return_value.status_code = 200\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n resp 
= client.query(query, start, end)\n with assert_raises(KeyError):\n _ = resp[0]\n\ndef test_timeseries_obj_creation():\n \"\"\"\n Negative test cases for TimeSeries object when either index or\n values is missing (not both at the same time)\n \"\"\"\n with assert_raises(ValueError):\n apptuit_client.TimeSeries('metric', {}, values=[3.14])\n\n with assert_raises(ValueError):\n apptuit_client.TimeSeries('metric', {}, index=[123456])\n\n with assert_raises(ValueError):\n apptuit_client.TimeSeries('metric', {}, index=[123455, 123456], values=[3.14])\n\n with assert_raises(ValueError):\n apptuit_client.TimeSeries(metric=None, tags=None)\n\n\n@patch('apptuit.apptuit_client.requests.get')\ndef test_missing_pandas(mock_get):\n orig_modules = sys.modules.copy()\n orig_pandas = orig_modules['pandas']\n orig_modules['pandas'] = None\n with patch.dict(sys.modules, orig_modules):\n if orig_pandas:\n sys.modules['pandas'] = None\n resp = do_query(mock_get)\n with assert_raises(ImportError):\n resp[0].to_df()\n"
] | [
[
"pandas.read_csv"
]
] |
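The tests above drive the Apptuit query client through mocked requests; a sketch of the same calls made directly, with the token as a placeholder and the query text and time range taken from the test fixtures:

from apptuit import Apptuit

client = Apptuit(sanitize_mode=None, token="<api-token>")  # placeholder token
resp = client.query("fetch('nyc.taxi.rides')", 1406831400, 1407609000, retry_count=3)
df = resp[0].to_df()                         # first output as a pandas DataFrame
df_by_name = resp["nyc.taxi.rides"].to_df()  # or index the result by metric name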
ktaebum/tf2-gnn | [
"3c763274f6586fb8d4cf3d74e3e0fd03b970b43f"
] | [
"tf2_gnn/test/layers/test_RGCN.py"
] | [
    "\"\"\"Tests for the RGCN message passing layer.\"\"\"\nimport tensorflow as tf\nimport pytest\n\nfrom tf2_gnn.layers.message_passing import MessagePassingInput, RGCN\n\n\nshape_test_data = [\n    (tf.TensorShape(dims=(None, 3)), tuple(tf.TensorShape(dims=(None, 2)) for _ in range(3)), 5),\n    (tf.TensorShape(dims=(None, 1)), tuple(tf.TensorShape(dims=(None, 2)) for _ in range(1)), 1),\n    (tf.TensorShape(dims=(None, 7)), tuple(tf.TensorShape(dims=(None, 2)) for _ in range(14)), 7),\n]\n\n\n@pytest.mark.parametrize(\"node_embedding_shape,adjacency_list_shapes,hidden_dim\", shape_test_data)\ndef test_rgcn_layer_has_expected_number_of_trainable_variables_when_not_using_source_and_target(\n    node_embedding_shape, adjacency_list_shapes, hidden_dim\n):\n    # Given:\n    rgcn_params = RGCN.get_default_hyperparameters()\n    rgcn_params[\"hidden_dim\"] = hidden_dim\n    rgcn_params[\"use_target_state_as_input\"] = False\n    rgcn_layer = RGCN(rgcn_params)\n\n    # When:\n    rgcn_layer.build(\n        MessagePassingInput(\n            node_embeddings=node_embedding_shape, adjacency_lists=adjacency_list_shapes\n        )\n    )\n    trainable_vars = rgcn_layer.trainable_variables\n    all_vars = rgcn_layer.variables\n\n    # Then:\n    assert len(trainable_vars) == len(adjacency_list_shapes)  # One dense layer per layer type.\n    assert len(all_vars) == len(trainable_vars)  # There should be no un-trainable variables.\n\n    for trainable_var in trainable_vars:\n        assert tuple(trainable_var.shape.as_list()) == (node_embedding_shape[-1], hidden_dim)\n\n\n@pytest.mark.parametrize(\"node_embedding_shape,adjacency_list_shapes,hidden_dim\", shape_test_data)\ndef test_rgcn_layer_has_expected_number_of_trainable_variables_when_using_source_and_target(\n    node_embedding_shape, adjacency_list_shapes, hidden_dim\n):\n    # Given:\n    rgcn_params = RGCN.get_default_hyperparameters()\n    rgcn_params[\"hidden_dim\"] = hidden_dim\n    rgcn_params[\"use_target_state_as_input\"] = True\n    rgcn_layer = RGCN(rgcn_params)\n\n    # When:\n    rgcn_layer.build(\n        MessagePassingInput(\n            node_embeddings=node_embedding_shape, adjacency_lists=adjacency_list_shapes\n        )\n    )\n    trainable_vars = rgcn_layer.trainable_variables\n    all_vars = rgcn_layer.variables\n\n    # Then:\n    assert len(trainable_vars) == len(adjacency_list_shapes)  # One dense layer per layer type.\n    assert len(all_vars) == len(trainable_vars)  # There should be no un-trainable variables.\n    for trainable_var in trainable_vars:\n        assert tuple(trainable_var.shape.as_list()) == (2 * node_embedding_shape[-1], hidden_dim)\n"
] | [
[
"tensorflow.TensorShape"
]
] |
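A condensed construction sketch of the pattern these parametrized tests check, using one concrete shape tuple from shape_test_data; the printed count follows the tests' own assertion:

import tensorflow as tf
from tf2_gnn.layers.message_passing import MessagePassingInput, RGCN

params = RGCN.get_default_hyperparameters()
params["hidden_dim"] = 5
params["use_target_state_as_input"] = False
layer = RGCN(params)

# Build with 3-dimensional node embeddings and 3 edge types (one adjacency list each).
layer.build(MessagePassingInput(
    node_embeddings=tf.TensorShape((None, 3)),
    adjacency_lists=tuple(tf.TensorShape((None, 2)) for _ in range(3)),
))
print(len(layer.trainable_variables))  # 3, as asserted in the tests above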
WeipengMO/flair | [
"e6c9990bcfdd1d2e585bab1f45b7f8dc68b21fbc"
] | [
"script/get_full_length_transcripts.py"
] | [
    "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n'''\r\nAuthor : windz\r\nDate : 2020-04-15 15:26:26\r\nLastEditTime : 2021-10-09 16:43:49\r\nDescription : get full length transcripts from bam\r\n'''\r\n\r\n\r\nimport pysam\r\nimport pandas as pd\r\nimport click\r\n\r\n\r\n@click.command()\r\n@click.option('-i', '--infile', required=True)\r\n@click.option('--full_len', required=True)\r\n@click.option('--first_exon_path', required=True)\r\ndef main(infile, full_len, first_exon_path):\r\n    #first_exon_path = '/public/home/mowp/test/nanopore_cdna/supplementary_data/representative_gene_model/representative_gene_first_exon.tsv'\r\n    first_exon_df = pd.read_csv(first_exon_path, sep='\\t')\r\n    first_exon_df.set_index(['gene_id'], inplace=True)\r\n    first_exon_dict = first_exon_df.to_dict(orient='index')\r\n\r\n    #infile = '/public/home/mowp/test/nanopore_cdna/aligned_data/fhh.tagged.mm2.sorted.bam'\r\n    with pysam.AlignmentFile(infile, 'rb') as inbam:\r\n        full_len_bam = pysam.AlignmentFile(full_len, 'wb', template=inbam)\r\n        for read in inbam:\r\n            read_gene_id = read.get_tag('gi')\r\n\r\n            if read_gene_id in first_exon_dict:\r\n                # Filter out reads whose orientation is inconsistent with the gene strand\r\n                if first_exon_dict[read_gene_id]['strand'] == '+' and read.is_reverse:\r\n                    continue\r\n                if first_exon_dict[read_gene_id]['strand'] == '-' and not read.is_reverse:\r\n                    continue\r\n\r\n                if (first_exon_dict[read_gene_id]['strand'] == '+' and \r\n                    read.reference_start <= first_exon_dict[read_gene_id]['exon_end']):\r\n                    full_len_bam.write(read)\r\n                elif (first_exon_dict[read_gene_id]['strand'] == '-' and\r\n                    read.reference_end >= first_exon_dict[read_gene_id]['exon_start']):\r\n                    full_len_bam.write(read)\r\n\r\n        \r\n        full_len_bam.close()\r\n        pysam.index(full_len)\r\n\r\n    \r\nif __name__ == \"__main__\":\r\n    main()"
] | [
[
"pandas.read_csv"
]
] |
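The script above is a click CLI; a sketch of invoking it through click's test runner, assuming the script directory is importable and with placeholder file names:

from click.testing import CliRunner
from get_full_length_transcripts import main  # assumes script/ is on sys.path

runner = CliRunner()
result = runner.invoke(main, [
    "-i", "aligned.tagged.sorted.bam",      # placeholder input BAM carrying 'gi' tags
    "--full_len", "full_length.bam",        # placeholder output BAM
    "--first_exon_path", "first_exon.tsv",  # placeholder TSV with gene_id/strand/exon_start/exon_end columns
])
print(result.exit_code)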
Moyzes-Campos/pykernels | [
"c8afdc79a15197ad3be2a0db0118f5e948577f49"
] | [
"tests/datasets.py"
] | [
"\"\"\"\nAccess layer for datasets used in tests\n\"\"\"\n\n__author__ = 'lejlot'\n\nimport numpy as np\n\ndef baseline_logic(operator):\n \"\"\" Creates 4-point dataset with given logical operator \"\"\"\n\n data = np.array([[1, 1], [0, 0], [1, 0], [0, 1]])\n labels = np.array([max(0, min(1, operator(*point))) for point in data])\n return data, labels\n"
] | [
[
"numpy.array"
]
] |
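A short example of how baseline_logic is typically driven; the chosen operators and the tests.datasets import path are illustrative assumptions:

import operator
from tests.datasets import baseline_logic  # assumes the repository root is on sys.path

X, y_and = baseline_logic(operator.and_)  # y_and -> [1 0 0 0]
_, y_xor = baseline_logic(operator.xor)   # y_xor -> [0 0 1 1]
print(X.shape, y_and, y_xor)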
jeromekelleher/msprime_sim | [
"8ec1945290fcfd2889dbb2a677e21012162fbc89"
] | [
"src/write.py"
] | [
"from __future__ import division\nimport msprime\nimport pandas as pd\nimport numpy as np\nimport os\n\ndef trees(out, tree_sequence, chr, m, n_pops, N, sim, vcf, sample_index):\n\t# DEV: Throw a warning if you try to do this and n_sims is high.\n\tvcf_name = out + \".vcf\"\n\twith open(vcf_name, \"w\") as vcf_file:\n\t\ttree_sequence.write_vcf(vcf_file, ploidy=2)\n\n\tprint(N)\n\t# DEV: update the function - no longer require N (double check this).\n\tN = int(tree_sequence.get_sample_size() / 2)\n\tprint(N)\n\t\n\t# Create altered IDs an Family IDs to replace the .fam file that we will create.\n\tfam_id = np.tile('msp', N)\n\tindex_old = [i for i in xrange(N)]\n\t# Have to change the '0' ID to something else, as plink doesn't like IIDs to be '0'.\n\tindex_old[0] = 'A'\n\t# Similarly for the new list of indices, plink doesn't like IIDs to be '0'.\n\tindex_new = [i for i in sample_index]\n\tmatches = [x for x in index_new if x == 0]\n\tif len(matches) == 1:\n\t\tindex_new[index_new.index(0)] = 'A'\n\n\t# Create a new table to define the re-indexing of the tree. \n\t# Writing to .vcf does not save the sample numbers, so we need to keep track of these and \n\t# replace them in the .fam file.\n\n\td={'old_fam':fam_id, 'old_within_fam':index_new, 'new_fam':fam_id, 'new_within_fam':index_old}\n\tdf=pd.DataFrame(d)\n\ttmp_index_tsv = out + '.index.tmp.tsv'\n\tdf.to_csv(tmp_index_tsv, sep='\\t', header=False, index=False)\n\n\tif vcf is False:\n\t\t# Note that the following line is OS dependent. OSX requires a gap after '-i'.\n\t\tos.system(\"sed -i.bak '1,/msp_0/ s/msp_0/msp_A/' \" + vcf_name)\n\t\t# Convert to Plink bed format - need to ensure that plink is in your path.\n\t\tbfile_out = out + \".chr\" + str(chr+1) + \".sim\" + str(sim+1)\n\t\tos.system(\"../plink/plink --vcf \" + vcf_name + \" --out \" + bfile_out + \" --make-bed\")\n\t\t# Now, fix the chromosome number and the names of the mutations.\n\t\tmut_names=np.core.defchararray.add('rs.' + str(chr+1) + \".\", np.arange(1,m+1).astype('str'))\n\t\tchr_vec=np.tile(chr+1, m)\n\t\td={'chr':chr_vec, 'rs': mut_names}\n\t\tdf=pd.DataFrame(d)\n\t\ttmp_tsv = out + '.tmp.tsv'\n\t\ttmp_bim = out + '.bim_tmp.tsv'\n\t\tdf.to_csv(tmp_tsv, sep='\\t', header=False, index=False)\n\t\tos.system('cut -f 3,4,5,6 ' + bfile_out + '.bim > ' + tmp_bim)\n\t\tos.system('paste ' + tmp_tsv + ' ' + tmp_bim + ' > ' + bfile_out + '.bim')\n\t\tos.system('rm ' + tmp_tsv + '; rm ' + tmp_bim)\n\n\t\t# Now remove the .vcf files.\n\t\tos.system('rm ' + vcf_name + '; rm ' + vcf_name + '.bak; rm ' + bfile_out + '.fam.bak')\n\n\t\tos.system('../plink/plink --update-ids ' + tmp_index_tsv + ' --bfile ' + bfile_out + ' --make-bed --out ' + bfile_out)\n\t\t# Rename 'A' to '0'.\n\t\tos.system(\"sed -i.bak 's/msp A/msp 0/' \" + bfile_out + '.fam')\n\t\t# os.system('rm ' + tmp_index_tsv)\n\t\t# Remove the .bak and temporary files\n\t\tos.system('rm ' + bfile_out + '*.bak')\n\t\tos.system('rm ' + bfile_out + '*~')\n\n\tpop_ann = np.empty(N)\n\n\tfor pops in xrange(n_pops):\n\t\tpop_leaves = tree_sequence.get_samples(population_id=pops)\n\t\tpop_ann[map(int, [x/2 for x in pop_leaves[0::2]])] = pops\n\n\tif chr==0:\n\t\tdf_pop=pd.DataFrame({'sample':sample_index, 'population':pop_ann.astype(int)})\n\t\tdf_pop.to_csv(out + \".sim\" + str(sim+1) + '.pop.tsv', sep='\\t', header=True, index=False)"
] | [
[
"numpy.arange",
"numpy.tile",
"pandas.DataFrame",
"numpy.empty"
]
] |
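A call sketch for write.trees under heavy assumptions: the module targets Python 2 (xrange, list-returning map) and the legacy msprime simulate() API, and it shells out to a plink binary at ../plink/plink; all arguments below are placeholders:

import msprime  # legacy 0.x simulate() API assumed
import write    # src/write.py

ts = msprime.simulate(sample_size=20, Ne=10000, length=1e5,
                      recombination_rate=1e-8, mutation_rate=1e-8)
# Placeholder arguments; N diploid samples correspond to 2*N haploid leaves.
write.trees(out="sim_out", tree_sequence=ts, chr=0, m=ts.get_num_mutations(),
            n_pops=1, N=10, sim=0, vcf=False, sample_index=range(10))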
pierfra-ro/allesfitter | [
"a6a885aaeb3253fec0d924ef3b45e8b7c473b181"
] | [
"allesfitter/detection/transit_search.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 17:55:39 2020\n\n@author:\nDr. Maximilian N. Günther\nEuropean Space Agency (ESA)\nEuropean Space Research and Technology Centre (ESTEC)\nKeplerlaan 1, 2201 AZ Noordwijk, The Netherlands\nEmail: [email protected]\nGitHub: mnguenther\nTwitter: m_n_guenther\nWeb: www.mnguenther.com\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\n#::: modules\nimport os, sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport warnings\nfrom pprint import pprint\nfrom datetime import datetime\nfrom astropy import units as u\nfrom astropy import constants as c\nfrom astropy.stats import sigma_clip\nfrom astropy.timeseries import BoxLeastSquares as bls\nfrom ..exoworlds_rdx.lightcurves.index_transits import index_transits\n# import time as timer\nimport contextlib\n\n#::: specific modules\ntry:\n from wotan import flatten\nexcept ImportError:\n pass\n\ntry:\n from transitleastsquares import transitleastsquares as tls\n from transitleastsquares import transit_mask, catalog_info\nexcept ImportError:\n pass\n\n#::: my modules\ntry:\n from exoworlds.tess import tessio\nexcept:\n pass\nfrom ..exoworlds_rdx.lightcurves.lightcurve_tools import plot_phase_folded_lightcurve, rebin_err \nfrom ..time_series import clean, slide_clip\nfrom ..lightcurves import tessclean\nfrom ..inout import write_json, write_csv\nfrom ..plotting import fullplot, brokenplot, tessplot, monthplot\n\n\n#::: plotting settings\nimport seaborn as sns\nsns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)\nsns.set_style({\"xtick.direction\": \"in\",\"ytick.direction\": \"in\"})\nsns.set_context(rc={'lines.markeredgewidth': 1})\n\n \n\n###############################################################################\n#::: print to logfile\n###############################################################################\ndef logprint(*text, options=None):\n original = sys.stdout\n try:\n with open(os.path.join(options['outdir'],'logfile.log'), 'a' ) as f:\n sys.stdout = f\n print(*text)\n except OSError:\n pass #For unknown reasons, the combination of open() and os.path.join() does not work on some Windows versions\n sys.stdout = original\n \n \n \n###############################################################################\n#::: pretty-print to logfile\n###############################################################################\ndef logpprint(*text, options=None):\n original = sys.stdout\n try:\n with open(os.path.join(options['outdir'],'logfile.log'), 'a' ) as f:\n sys.stdout = f\n pprint(*text)\n except OSError:\n pass #For unknown reasons, the combination of open() and os.path.join() does not work on some Windows versions\n sys.stdout = original\n\n\n \n###############################################################################\n#::: apply a mask (if wished so)\n###############################################################################\ndef mask(time, flux, flux_err, period, duration, T0):\n intransit = transit_mask(time, period, duration, T0)\n time = time[~intransit]\n flux = flux[~intransit]\n if flux_err is not None:\n flux_err = flux_err[~intransit]\n time, flux, flux_err = clean(time, flux, flux_err)\n else:\n time, flux = clean(time, flux)\n return time, flux, flux_err\n \n\n\n###############################################################################\n#::: check for multiples of a value (e.g., of a 
period)\n###############################################################################\ndef is_multiple_of(a, b, tolerance=0.05):\n a = np.float(a)\n b = np.float(b) \n result = a % b\n return (abs(result/b) <= tolerance) or (abs((b-result)/b) <= tolerance)\n\n\n\n###############################################################################\n#::: BLS search on an input lightcurve\n###############################################################################\ndef bls_search(time, flux, flux_err=None):\n if flux_err is None: \n ind = np.where(~np.isnan(time*flux))\n time = np.array(time)[ind]\n flux = np.array(flux)[ind]\n else:\n ind = np.where(~np.isnan(time*flux*flux_err))\n time = np.array(time)[ind]\n flux = np.array(flux)[ind]\n flux_err = np.array(flux_err)[ind]\n print(time, flux)\n plt.figure()\n plt.plot(time, flux, 'b.')\n model = bls(time * u.day, flux, dy=flux_err)\n print(model)\n periodogram = model.autopower(0.05)\n plt.plot(periodogram.period, periodogram.power) \n # max_power = np.argmax(periodogram.power)\n # stats = model.compute_stats(periodogram.period[max_power],\n # periodogram.duration[max_power],\n # periodogram.transit_time[max_power])\n # print(stats)\n \n \n \n###############################################################################\n#::: get TLS kwargs from TICv8\n###############################################################################\ndef get_tls_kwargs_by_tic(tic_id, sigma=3, tls_kwargs=None, quiet=True):\n #mass comes first, radius comes second in the TLS source code for catalog_info()\n u, M_star, M_star_lerr, M_star_uerr, R_star, R_star_lerr, R_star_uerr = catalog_info(TIC_ID=int(tic_id))\n if not quiet:\n print('TICv8 info:')\n print('Quadratic limb darkening u_0, u_1', u[0], u[1])\n print('Stellar radius', R_star, '+', R_star_lerr, '-', R_star_uerr)\n print('Stellar mass', M_star, '+', M_star_lerr, '-', M_star_uerr)\n \n if tls_kwargs is None: tls_kwargs = {}\n tls_kwargs['R_star']=float(R_star)\n tls_kwargs['R_star_min']=R_star-sigma*R_star_lerr\n tls_kwargs['R_star_max']=R_star+sigma*R_star_uerr\n tls_kwargs['M_star']=float(M_star)\n tls_kwargs['M_star_min']=M_star-sigma*M_star_lerr\n tls_kwargs['M_star_max']=M_star+sigma*M_star_uerr\n tls_kwargs['u']=u\n \n if np.isnan(tls_kwargs['R_star']): \n tls_kwargs['R_star'] = 1.\n warnings.warn(\"tls_kwargs: R_star was undefined in TICv8. Filling it with R_star=1.\")\n if np.isnan(tls_kwargs['R_star_min']): \n tls_kwargs['R_star_min'] = 0.13\n warnings.warn(\"tls_kwargs: R_star_min was undefined in TICv8. Filling it with R_star_min=0.13\")\n if np.isnan(tls_kwargs['R_star_max']): \n tls_kwargs['R_star_max'] = 3.5\n warnings.warn(\"tls_kwargs: R_star_max was undefined in TICv8. Filling it with R_star_max=3.5\")\n if np.isnan(tls_kwargs['M_star']): \n tls_kwargs['M_star'] = 1.\n warnings.warn(\"tls_kwargs: M_star was undefined in TICv8. Filling it with M_star=1.\")\n if np.isnan(tls_kwargs['M_star_min']): \n tls_kwargs['M_star_min'] = 0.1\n warnings.warn(\"tls_kwargs: M_star_min was undefined in TICv8. Filling it with M_star_min=0.1\")\n if np.isnan(tls_kwargs['M_star_max']): \n tls_kwargs['M_star_max'] = 1.\n warnings.warn(\"tls_kwargs: M_star_max was undefined in TICv8. Filling it with M_star_max=0.1\")\n if np.isnan(tls_kwargs['u']).any(): \n tls_kwargs['u'] = [0.4804, 0.1867]\n warnings.warn(\"tls_kwargs: u was undefined in TICv8. 
Filling it with u=[0.4804, 0.1867]\")\n\n return tls_kwargs\n\n\n\n###############################################################################\n#::: write TLS reuslts as a dictionary to a json file\n###############################################################################\ndef write_tls_results(fname, results):\n '''\n Parameters\n ----------\n fname : str\n Name of the output json file.\n results : transitleastsuqares.results class\n The results returned form a TLS run.\n\n Returns\n -------\n None.\n\n Outputs\n -------\n A json file that contains a dictionary of the most important tls results.\n The json file can be read into Python again via allesfitter's read_dic.\n \n Explanation\n -----------\n The TLS results object contains the following keys, where \n 'short' indicates it's a float or short list (e.g., the found period or depth per transit) and \n 'long' indicates it's a humongous array (e.g., the whole light curve).\n We only want to save the 'short' parts to save space:\n SDE short\n SDE_raw short\n chi2_min short\n chi2red_min short\n period short\n period_uncertainty short\n T0 short\n duration short\n depth short\n depth_mean short\n depth_mean_even short\n depth_mean_odd short\n transit_depths short\n transit_depths_uncertainties short\n rp_rs short\n snr short\n snr_per_transit short\n snr_pink_per_transit short\n odd_even_mismatch short\n transit_times short\n per_transit_count short\n transit_count short\n distinct_transit_count short\n empty_transit_count short\n FAP short\n in_transit_count short\n after_transit_count short\n before_transit_count short\n periods long\n power long\n power_raw long\n SR long\n chi2 long\n chi2red long\n model_lightcurve_time long\n model_lightcurve_model long\n model_folded_phase long\n folded_y long\n folded_dy long\n folded_phase long\n model_folded_model long\n Also:\n correct_duration short\n model long (our self-made model(time) curve)\n '''\n dic = {}\n for key in ['SDE', 'SDE_raw', 'chi2_min', 'chi2red_min', 'period', 'period_uncertainty',\\\n 'T0', 'duration', 'depth', 'depth_mean', 'depth_mean_even', 'depth_mean_odd',\\\n 'transit_depths', 'transit_depths_uncertainties', 'rp_rs',\\\n 'snr', 'snr_per_transit', 'snr_pink_per_transit', 'odd_even_mismatch',\\\n 'transit_times', 'per_transit_count', 'transit_count', 'distinct_transit_count',\\\n 'empty_transit_count', 'FAP', 'in_transit_count', 'after_transit_count',\\\n 'before_transit_count'] + ['correct_duration']: \n if (type(results[key])!=np.ndarray): #if it's not an array, save it as is\n dic[key] = results[key]\n else: #if it's a short array, save as list (for json)\n dic[key] = results[key].tolist()\n write_json(fname, dic)\n \n \n \n###############################################################################\n#::: function to convert the results into a dictionary\n###############################################################################\ndef _to_dic(results):\n dic = {}\n for key in results: \n dic[key] = results[key]\n return dic\n\n\n\n###############################################################################\n#::: TLS search on an input lightcurve\n###############################################################################\ndef tls_search(time, flux, flux_err, plot=True, plot_type='brokenplot', **kwargs):\n '''\n Summary:\n -------\n This runs TLS on these data with the given infos\n \n Inputs:\n -------\n time : array of flaot\n time stamps of observations\n flux : array of flaot\n normalized flux\n flux_err : array of flaot\n error of 
normalized flux\n **kwargs : collection of keyword arguments\n All keyword arguments will be passed to TLS.\n Missing keywords will be replaced with default values:\n R_star : float\n radius of the star (e.g. median)\n default 1 R_sun (from TLS)\n R_star_min : float\n minimum radius of the star (e.g. 1st percentile)\n default 0.13 R_sun (from TLS)\n R_star_max : float\n maximum radius of the star (e.g. 99th percentile)\n default 3.5 R_sun (from TLS)\n M_star : float\n mass of the star (e.g. median)\n default 1. M_sun (from TLS)\n M_star_min : float\n minimum mass of the star (e.g. 1st percentile)\n default 0.1 M_sun (from TLS)\n M_star_max : float\n maximum mass of the star (e.g. 99th percentile)\n default 1. M_sun (from TLS) \n u : list\n quadratic limb darkening parameters\n default [0.4804, 0.1867]\n period_min : float\n the minimum period to be searched (in days)\n period_max : float\n the maximum period to be searched (in days)\n show_progress_bar : bool\n Show a progress bar for TLS\n default True\n SNR_threshold : float\n the SNR threshold at which to stop the TLS search\n default 5\n SDE_threshold : float\n the SDE threshold at which to stop the TLS search\n default -inf\n FAP_threshold : float\n the False Alarm Probability threshold at which to stop the TLS search\n default inf\n quiet : bool\n silence all TLS outprint\n default True\n \n Returns:\n -------\n results_all : list of dictionaries\n List of all dictionaries containing the TLS results \n (with dictionaries made from the transitleastsqaures.results class).\n fig_all : list of matplotlib.figure object, optional\n List of all summary figures. Only returned if plot is True.\n '''\n \n #::: seeed\n np.random.seed(42)\n \n \n #::: handle inputs\n time, flux, flux_err = clean(time, flux, flux_err)\n plot_bool = plot\n \n if 'show_progress_bar' not in kwargs: kwargs['show_progress_bar'] = True\n if 'SNR_threshold' not in kwargs: kwargs['SNR_threshold'] = 5.\n if 'SDE_threshold' not in kwargs: kwargs['SDE_threshold'] = -np.inf #don't trust SDE\n if 'FAP_threshold' not in kwargs: kwargs['FAP_threshold'] = np.inf #don't trust FAP \n if 'quiet' not in kwargs: kwargs['quiet'] = True\n if 'inj_period' not in kwargs: kwargs['inj_period'] = np.nan\n \n non_tls_keys = ['SNR_threshold','SDE_threshold','FAP_threshold','quiet','inj_period']\n tls_kwargs_original = {key: kwargs[key] for key in kwargs.keys() if key not in non_tls_keys} #for the original tls\n #the rest is filled automatically by TLS if it was not given\n print('tls_kwargs_original', tls_kwargs_original)\n \n #::: init\n SNR = 1e12\n SDE = 1e12\n FAP = 0\n FOUND_SIGNAL = False\n results_all = [] \n fig_lightcurve_all = [] \n fig_folded_all = [] \n \n \n #::: function to run it once\n def _run1(time, flux, flux_err):\n if kwargs['quiet']:\n with open(os.devnull, 'w') as devnull:\n with contextlib.redirect_stdout(devnull):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model = tls(time, flux, flux_err)\n results = model.power(**tls_kwargs_original)\n else:\n model = tls(time, flux, flux_err)\n results = model.power(**tls_kwargs_original)\n \n results = _to_dic(results)\n results['detection'] = (results['snr'] >= kwargs['SNR_threshold']) and (results['SDE'] >= kwargs['SDE_threshold']) and (results['FAP'] <= kwargs['FAP_threshold'])\n results['correct_duration'] = np.nan \n results['R_planet_'] = np.nan\n\n \n if results['detection']:\n #::: calculcate the correct_duration, as TLS sometimes returns unreasonable durations\n ind_tr_phase = np.where( 
results['model_folded_model'] < 1. )[0]\n results['correct_duration'] = results['period'] * (results['model_folded_phase'][ind_tr_phase[-1]] - results['model_folded_phase'][ind_tr_phase[0]])\n \n if 'R_star' in kwargs:\n results['R_planet'] = results['rp_rs'] * kwargs['R_star'] * 109.07637070600963 #from Rsun to Rearth\n \n return results\n \n \n #::: function to plot it once\n # def _plot1(time, flux, flux_err, results):\n # fig, axes = plt.subplots(1, 3, figsize=(20,5), tight_layout=True)\n \n # ax = axes[0]\n # ax.plot(results['folded_phase'], results['folded_y'], 'k.', color='silver', rasterized=True)\n # bintime, binflux, binflux_err, _ = rebin_err(results['folded_phase'], results['folded_y'], dt = 0.001*results['period'], ferr_type='medsig', ferr_style='sem')\n # ax.plot(bintime, binflux, 'b.', rasterized=True)\n # ax.plot(results['model_folded_phase'], results['model_folded_model'], 'r-', lw=3)\n \n # ax = axes[1]\n # ax.plot((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], 'k.', color='silver', rasterized=True)\n # bintime, binflux, binflux_err, _ = rebin_err((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], dt = 0.001*results['period']*24, ferr_type='medsig', ferr_style='sem')\n # ax.plot(bintime, binflux, 'bo', rasterized=True)\n # ax.plot((results['model_folded_phase']-0.5)*results['period']*24, results['model_folded_model'], 'r-', lw=3)\n # ax.set(xlim=[ -1.5*results['correct_duration']*24, +1.5*results['correct_duration']*24 ], xlabel='Time (h)', yticks=[])\n \n # ax = axes[2]\n # ax.text( .02, 0.95, 'P = ' + np.format_float_positional(results['period'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.85, 'Depth = ' + np.format_float_positional(1e3*(1.-results['depth']),4) + ' ppt', ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.75, 'Duration = ' + np.format_float_positional(24*results['correct_duration'],4) + ' h', ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.65, 'T_0 = ' + np.format_float_positional(results['T0'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.55, 'SNR = ' + np.format_float_positional(results['snr'],4), ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.45, 'SDE = ' + np.format_float_positional(results['SDE'],4), ha='left', va='center', transform=ax.transAxes )\n # ax.text( .02, 0.35, 'FAP = ' + np.format_float_scientific(results['FAP'],4), ha='left', va='center', transform=ax.transAxes )\n # ax.set_axis_off()\n \n # return fig\n \n \n #::: search for transits in a loop\n while (SNR >= kwargs['SNR_threshold']) and (SDE >= kwargs['SDE_threshold']) and (FAP <= kwargs['FAP_threshold']) and (FOUND_SIGNAL==False):\n \n #::: run once \n results = _run1(time, flux, flux_err)\n \n #::: if a transit was detected, store the results, plot, and apply a mask for the next run\n if results['detection']:\n results_all.append(results)\n \n results['model'] = np.interp(time, results['model_lightcurve_time'], results['model_lightcurve_model'])\n \n if plot_bool:\n # fig = _plot1(time, flux, flux_err, results)\n fig_lightcurve = _tls_search_plot_lightcurve(time, flux, results, typ=plot_type)\n fig_folded = _tls_search_plot_folded(time, flux, results)\n fig_lightcurve_all.append(fig_lightcurve)\n fig_folded_all.append(fig_folded)\n \n time, flux, flux_err = mask(time, flux, flux_err, \n results['period'], \n np.max((1.5*results['correct_duration'])), \n results['T0'])\n\n #::: update values\n SNR = 
results['snr']\n SDE = results['SDE']\n FAP = results['FAP']\n if is_multiple_of(results['period'],kwargs['inj_period']): SNR = -np.inf #if run as part of an injection-recovery test, then abort if it matches the injected period\n \n \n #::: return\n if plot_bool:\n return results_all, fig_lightcurve_all, fig_folded_all\n else:\n return results_all\n\n\n\ndef _cut(time, model_lightcurve_time, model_lightcurve_flux):\n return np.interp(time, model_lightcurve_time, model_lightcurve_flux) \n\n\n\ndef _tls_search_plot_lightcurve(time, flux, results, typ='fullplot'):\n \"\"\"\n ...\n\n Parameters\n ----------\n time : TYPE\n DESCRIPTION.\n flux : TYPE\n DESCRIPTION.\n flux_err : TYPE\n DESCRIPTION.\n results : TYPE\n DESCRIPTION.\n typ : TYPE, optional\n 'fullplot', 'brokenplot', 'tessplot', 'monthplot'. The default is 'fullplot'.\n\n Returns\n -------\n None.\n \"\"\"\n \n if typ=='fullplot':\n axes = fullplot(time, flux)\n axes = fullplot(results['model_lightcurve_time'], results['model_lightcurve_model'], color='r', ls='-', marker='', lw=3, zorder=100, axes=axes)\n elif typ=='brokenplot':\n axes = brokenplot(time, flux)\n axes = brokenplot(results['model_lightcurve_time'], results['model_lightcurve_model'], color='r', ls='-', marker='', lw=3, zorder=100, axes=axes)\n elif typ=='tessplot':\n trend = _cut(time, results['model_lightcurve_time'], results['model_lightcurve_model'])\n axes = tessplot(time, flux, trend=trend)\n # axes = tessplot(results['model_lightcurve_time'], results['model_lightcurve_model'], color='r', ls='-', marker='', lw=3, zorder=100, axes=axes, shade=False)\n elif typ=='monthplot':\n axes = monthplot(time, flux)\n axes = monthplot(results['model_lightcurve_time'], results['model_lightcurve_model'], color='r', ls='-', marker='', lw=3, zorder=100, axes=axes)\n \n return plt.gcf()\n \n\n \ndef _tls_search_plot_folded(time, flux, results):\n \"\"\"\n ...\n\n Parameters\n ----------\n time : TYPE\n DESCRIPTION.\n flux : TYPE\n DESCRIPTION.\n results : TYPE\n DESCRIPTION.\n axes : TYPE, optional\n DESCRIPTION. 
The default is None.\n\n Returns\n -------\n axes : TYPE\n DESCRIPTION.\n \"\"\"\n \n fig, axes = plt.subplots(1, 3, figsize=(12,3), tight_layout=True)\n \n ax = axes[0]\n bintime, binflux, binflux_err, _ = rebin_err(results['folded_phase'], results['folded_y'], dt = 0.001*results['period'], ferr_type='medsig', ferr_style='sem')\n ax.plot(bintime, binflux, 'b.', rasterized=True)\n ax.plot(results['model_folded_phase'], results['model_folded_model'], 'r-', lw=3)\n ylim = ax.get_ylim()\n ax.plot(results['folded_phase'], results['folded_y'], 'k.', color='silver', rasterized=True, zorder=-1)\n ax.set_ylim(ylim)\n \n ax = axes[1]\n bintime, binflux, binflux_err, _ = rebin_err((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], dt = 0.001*results['period']*24, ferr_type='medsig', ferr_style='sem')\n ax.plot(bintime, binflux, 'bo', rasterized=True)\n ax.plot((results['model_folded_phase']-0.5)*results['period']*24, results['model_folded_model'], 'r-', lw=3)\n ax.set(xlim=[ -1.5*results['correct_duration']*24, +1.5*results['correct_duration']*24 ], xlabel='Time (h)', yticks=[])\n ylim = ax.get_ylim()\n ax.plot((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], 'k.', color='silver', rasterized=True, zorder=-1)\n ax.set_ylim(ylim)\n \n ax = axes[2]\n ax.text( .02, 0.95, 'P = ' + np.format_float_positional(results['period'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.85, 'Depth = ' + np.format_float_positional(1e3*(1.-results['depth']),4) + ' ppt', ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.75, 'Duration = ' + np.format_float_positional(24*results['correct_duration'],4) + ' h', ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.65, 'T_0 = ' + np.format_float_positional(results['T0'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.55, 'SNR = ' + np.format_float_positional(results['snr'],4), ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.45, 'SDE = ' + np.format_float_positional(results['SDE'],4), ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.35, 'FAP = ' + np.format_float_scientific(results['FAP'],4), ha='left', va='center', transform=ax.transAxes )\n ax.text( .02, 0.25, 'R_planet/R_star = ' + np.format_float_positional(results['rp_rs'],4), ha='left', va='center', transform=ax.transAxes )\n if ~np.isnan(results['R_planet']): \n ax.text( .02, 0.15, 'R_planet = ' + np.format_float_positional(results['R_planet'],4), ha='left', va='center', transform=ax.transAxes )\n ax.set_axis_off()\n \n return fig\n \n\n\ndef _tls_search_plot_individual(time, flux, flux_err, results):\n pass #TODO\n\n \n\n###############################################################################\n#::: Convenient wrapper for TESS tasks\n###############################################################################\n#TODO: work in progress\ndef tls_search_tess(time, flux, flux_err, \n wotan_kwargs=None,\n tls_kwargs=None,\n bad_regions=None,\n options=None):\n\n if options is None: options = {}\n if 'outdir' not in options: options['outdir'] = ''\n if wotan_kwargs is None: wotan_kwargs = {'flatten': {'method':'biweight', 'window_length':1}}\n \n #::: logprint\n with open( os.path.join(options['outdir'], 'logfile.log'), 'w' ) as f:\n f.write('TLS search, UTC ' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + '\\n')\n logprint('\\nWotan kwargs:', options=options)\n logpprint(wotan_kwargs, options=options)\n logprint('\\nTLS kwargs:', 
options=options)\n logpprint(tls_kwargs, options=options)\n logprint('\\nOptions:', options=options)\n logpprint(options, options=options)\n \n \n #::: cleaning\n flux_clean, fig1, fig2, fig3 = \\\n tessclean(time, flux, plot=True,\n method=wotan_kwargs['flatten']['method'],\n window_length=wotan_kwargs['flatten']['window_length'],\n bad_regions=bad_regions)\n \n write_csv(os.path.join(options['outdir'],'flux_clean.csv'), (time, flux_clean, flux_err), header='time,flux_clean,flux_err')\n \n with PdfPages( os.path.join(options['outdir'],'flux_clean.pdf') ) as pdf:\n pdf.savefig( fig1 )\n pdf.savefig( fig2 )\n pdf.savefig( fig3 )\n plt.close('all')\n \n \n #::: transit search\n results_all, fig_lightcurve_all, fig_folded_all = \\\n tls_search(time, flux_clean, flux_err, \n plot=True, plot_type='tessplot',\n **tls_kwargs)\n \n if len(results_all)>0:\n with open( os.path.join(options['outdir'],'tls_summary.txt'), 'w' ) as f:\n f.write('TLS found '+str(len(results_all))+' potential signal(s).')\n \n for i, results in enumerate(results_all):\n write_tls_results( os.path.join(options['outdir'],'tls_signal_'+str(i)+'.txt'), results )\n \n for i, (fig1, fig2) in enumerate(zip(fig_lightcurve_all, fig_folded_all)):\n with PdfPages( os.path.join(options['outdir'],'tls_signal_'+str(i)+'.pdf') ) as pdf:\n pdf.savefig( fig1 )\n pdf.savefig( fig2 )\n plt.close('all')\n \n else:\n with open( os.path.join(options['outdir'],'tls_summary.txt'), 'w' ) as f:\n f.write('TLS found no potential signal(s).')\n \n \n\n###############################################################################\n#::: TLS search on an input lightcurve\n###############################################################################\n# def tls_search_old(time, flux, flux_err,\n# known_transits=None,\n# tls_kwargs=None,\n# wotan_kwargs=None,\n# options=None):\n# '''\n# Summary:\n# -------\n# This runs TLS on these data with the given infos\n \n# Inputs:\n# -------\n# time : array of flaot\n# time stamps of observations\n# flux : array of flaot\n# normalized flux\n# flux_err : array of flaot\n# error of normalized flux\n \n# Optional Inputs:\n# ----------------\n# known_transits : None or dict\n# >> can be used to mask known transits before running TLS\n# if None\n# nothing happens\n# if dict \n# if one transit is already known, give for example: \n# known_transits = {'period':[1.3], 'duration':[2.1], 'epoch':[245800.0]}\n# if multiple transits are already known, give for example: \n# known_transits = {'name':['b','c'], 'period':[1.3, 21.0], 'duration':[2.1, 4.1], 'epoch':[245800.0, 245801.0]}\n# 'period' is the period of the known transit(s)\n# 'duration' is the total duration of the known transit(s), i.e. from first ingress point to last egrees point, in days\n# 'epoch' is the epoch of the known transit(s)\n \n# tls_kwargs : None, str or dict:\n# >> can be used to fine-tune the TLS algorithm\n# if None\n# the default parameters will be chosen (see below)\n# if 'default'\n# the default parameters will be chosen (see below)\n# if dict\n# a dictionary with the following keywords is expected; \n# missing keywords will be replaced with default values\n# R_star : float\n# radius of the star (e.g. median)\n# default 1 R_sun (from TLS)\n# R_star_min : float\n# minimum radius of the star (e.g. 1st percentile)\n# default 0.13 R_sun (from TLS)\n# R_star_max : float\n# maximum radius of the star (e.g. 99th percentile)\n# default 3.5 R_sun (from TLS)\n# M_star : float\n# mass of the star (e.g. median)\n# default 1. 
M_sun (from TLS)\n# M_star_min : float\n# minimum mass of the star (e.g. 1st percentile)\n# default 0.1 M_sun (from TLS)\n# M_star_max : float\n# maximum mass of the star (e.g. 99th percentile)\n# default 1. M_sun (from TLS) \n# u : list\n# quadratic limb darkening parameters\n# default [0.4804, 0.1867]\n# SNR_threshold : float\n# the SNR threshold at which to stop the TLS search\n# default 5\n# SDE_threshold : float\n# the SDE threshold at which to stop the TLS search\n# default -inf\n# FAP_threshold : float\n# the False Alarm Probability threshold at which to stop the TLS search\n# default inf\n# period_min : float\n# the minimum period to be searched (in days)\n# period_max : float\n# the maximum period to be searched (in days)\n \n# wotan_kwargs : None, str, or dict:\n# >> can be used to detrend the data before the TLS search\n# if None\n# the default detrending will run (see below)\n# if str is 'default'\n# the default detrending will run (see below)\n# if str is 'off'\n# no detrending will run\n# if dict\n# a dictionary with two sub-dictionaries is expected; \n# missing keywords will be replaced with default values\n# wotan_kwargs['slide_clip'] : dict\n# this dictionary contains all slide clipping arguments\n# window_length : float\n# slide clip window length\n# default 1\n# low : float\n# slide clip lower sigma\n# default 20\n# high : float\n# slide clip upper sigma\n# default 3\n# wotan_kwargs['flatten'] : dict\n# this dictionary contains contains all detrending arguments\n# method : str\n# detrending method\n# default 'biweight'\n# window_length : float\n# detrending window length in days\n# default 1 \n \n# options : None or dict, keywords:\n# >> can be used for any general options\n# if None\n# the default options will be used (see below)\n# if dict\n# a dictionary with the following keywords is expected;\n# missing keywords will be replaced with default values\n# show_plot : bool\n# can show a plot of each phase-folded transit candidate and TLS model in the terminal \n# default is True\n# save_plot : bool or str\n# can save a plot of each phase-folded transit candidate and TLS model into outdir\n# if True, will be set to '123'\n# if str, then: '1': detrended plot, '2': TLS plot, '3': all TLS plots, and any combinations thereof\n# default is True\n# save_csv : bool\n# can save a csv of the detrended lightcurve\n# default is True\n# outdir : string\n# if None\n# a new directory called \"results\" will be created in the current folder\n# default is \"tls_results_[wotan_flatten_method]_[wotan_flatten_window_length]\"\n \n# Returns:\n# -------\n# List of all TLS results\n# '''\n \n# #::: seeed\n# np.random.seed(42)\n \n \n# #::: handle inputs\n# def clean(time,flux,flux_err):\n# if flux_err is None:\n# ind = np.where( ~np.isnan(time*flux) )[0]\n# time = time[ind]\n# flux = flux[ind]\n# else:\n# ind = np.where( ~np.isnan(time*flux*flux_err) )[0]\n# time = time[ind]\n# flux = flux[ind]\n# flux_err = flux_err[ind]\n# return time, flux, flux_err\n \n# time, flux, flux_err = clean(time,flux,flux_err)\n# time_input = 1.*time\n# flux_input = 1.*flux #for plotting\n \n \n# if type(wotan_kwargs)==str and wotan_kwargs=='off': \n# detrend = False\n# else:\n# detrend = True\n# if (wotan_kwargs is None) or (type(wotan_kwargs)==str and wotan_kwargs=='default'): wotan_kwargs={} \n# if 'slide_clip' not in wotan_kwargs: wotan_kwargs['slide_clip'] = {}\n# if wotan_kwargs['slide_clip'] is not None:\n# if 'window_length' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['window_length'] = 
1.\n# if 'low' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['low'] = 20.\n# if 'high' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['high'] = 3.\n \n# if 'flatten' not in wotan_kwargs: wotan_kwargs['flatten'] = {}\n# if wotan_kwargs['flatten'] is not None:\n# if 'method' not in wotan_kwargs['flatten']: wotan_kwargs['flatten']['method'] = 'biweight'\n# if 'window_length' not in wotan_kwargs['flatten']: wotan_kwargs['flatten']['window_length'] = 1.\n# #the rest is filled automatically by Wotan\n \n# if tls_kwargs is None: tls_kwargs = {}\n# if 'show_progress_bar' not in tls_kwargs: tls_kwargs['show_progress_bar'] = False\n# if 'SNR_threshold' not in tls_kwargs: tls_kwargs['SNR_threshold'] = 5.\n# if 'SDE_threshold' not in tls_kwargs: tls_kwargs['SDE_threshold'] = -np.inf #don't trust SDE\n# if 'FAP_threshold' not in tls_kwargs: tls_kwargs['FAP_threshold'] = np.inf #don't trust FAP \n# tls_kwargs_original = {key: tls_kwargs[key] for key in tls_kwargs.keys() if key not in ['SNR_threshold','SDE_threshold','FAP_threshold']} #for the original tls\n# #the rest is filled automatically by TLS\n \n# if options is None: options = {}\n# if 'show_plot' not in options: options['show_plot'] = True\n# if type(options['show_plot'])==bool and (options['show_plot'] is True): options['show_plot']='123' #1: detrended plot, 2: TLS plot, 3: all TLS plots\n# if type(options['show_plot'])==bool and (options['show_plot'] is False): options['show_plot']='' #1: detrended plot, 2: TLS plot, 3: all TLS plots\n# if 'save_plot' not in options: options['save_plot'] = True\n# if type(options['save_plot'])==bool and (options['save_plot'] is True): options['save_plot']='123' #1: detrended plot, 2: TLS plot, 3: all TLS plots\n# if type(options['save_plot'])==bool and (options['save_plot'] is False): options['save_plot']='' #1: detrended plot, 2: TLS plot, 3: all TLS plots\n# if 'save_csv' not in options: options['save_csv'] = True\n# if 'outdir' not in options: \n# if detrend:\n# options['outdir'] = 'tls_results_'+wotan_kwargs['flatten']['method']+'_'+str(wotan_kwargs['flatten']['window_length'])\n# else:\n# options['outdir'] = 'tls_results_undetrended'\n# if 'quiet' not in options: options['quiet'] = True\n# if 'inj_period' not in options: options['inj_period'] = np.nan\n \n \n# #::: init\n# SNR = 1e12\n# SDE = 1e12\n# FAP = 0\n# FOUND_SIGNAL = False\n# results_all = [] \n# if len(options['outdir'])>0 and not os.path.exists(options['outdir']): os.makedirs(options['outdir']) \n \n \n# #::: logprint\n# with open( os.path.join(options['outdir'], 'logfile.log'), 'w' ) as f:\n# f.write('TLS search, UTC ' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + '\\n')\n# logprint('\\nWotan kwargs:', options=options)\n# logpprint(wotan_kwargs, options=options)\n# logprint('\\nTLS kwargs:', options=options)\n# logpprint(tls_kwargs, options=options)\n# logprint('\\nOptions:', options=options)\n# logpprint(options, options=options)\n \n# # timer1 = timer.time()\n# # print('t1', timer1 - timer0)\n \n# #::: apply a mask (if wished so)\n# if known_transits is not None:\n# for period, duration, T0 in zip(known_transits['period'], known_transits['duration'], known_transits['epoch']):\n# time, flux, flux_err = mask(time, flux, flux_err, period, duration, T0)\n \n \n# #::: global sigma clipping\n# flux = sigma_clip(flux, sigma_upper=3, sigma_lower=20)\n \n# # timer2 = timer.time()\n# # print('t2', timer2 - timer0)\n \n# #::: detrend (if wished so)\n# if detrend:\n \n# #::: slide clipping (super slow)\n# # if 
wotan_kwargs['slide_clip'] is not None: flux = slide_clip(time, flux, **wotan_kwargs['slide_clip']) #slide_clip is super slow (10 seconds for a TESS 2 min lightcurve for a single Sector)\n# # timer3a = timer.time()\n# # print('t3a', timer3a - timer0) \n \n# #::: fast slide clipping (super fast)\n# if wotan_kwargs['slide_clip'] is not None: \n# flux = slide_clip(time, flux, **wotan_kwargs['slide_clip']) #slide_clip is super fast (<1 seconds for a TESS 2 min lightcurve for a single Sector)\n# flux_clip = 1*flux\n# # timer3a = timer.time()\n# # print('t3a', timer3a - timer0) \n \n# #::: detrending (super fast)\n# if wotan_kwargs['flatten'] is not None: \n# flux, trend = flatten(time, flux, return_trend=True, **wotan_kwargs['flatten']) #flatten is super fast, (<1 second for a TESS 2 min lightcurve for a single Sector)\n# # timer3b = timer.time()\n# # print('t3b', timer3b - timer0) \n \n# #::: global sigma clipping on the flattened flux (super fast)\n# flux = sigma_clip(flux, sigma_upper=3, sigma_lower=20)\n# # timer3c = timer.time()\n# # print('t3c', timer3c - timer0) \n \n# if ('1' in options['show_plot']) or ('1' in options['save_plot']):\n# gone = np.isnan(time_input*flux_input)\n# print(time_input, gone)\n# axes = tessplot(time_input[gone], flux_input[gone], color='r')\n# tessplot(time, flux_clip, trend=trend, axes=axes, shade=False)\n# for ax in axes: ax.set_ylabel('Flux\\n(original)')\n# fig1 = plt.gcf()\n \n# axes = tessplot(time, flux_clip, trend=trend)\n# for ax in axes: ax.set_ylabel('Flux\\n(clipped)')\n# fig2 = plt.gcf()\n \n# axes = tessplot(time, flux)\n# fig3 = plt.gcf()\n# for ax in axes: ax.set_ylabel('Flux\\n(clipped & detrended)')\n \n# # fig, axes = plt.subplots(2,1, figsize=(40,8))\n# # brokenplot(time_input, flux_input, trend=trend, ax=axes[0])\n# # axes[0].set(ylabel='Flux (input)', xticklabels=[])\n# # brokenplot(time, trend, fmt='r-', ax=axes[0])\n# # axes[0].plot(time_input, flux_input, 'b.', rasterized=True)\n# # axes[0].plot(time, trend, 'r-', lw=2)\n# # brokenplot(time_input, flux_input, ax=axes[1], clip=True)\n# # brokenplot(time, trend, fmt='r-', ax=axes[1], clip=True)\n# # axes[1].set(ylabel='Flux (clipped)', xticklabels=[])\n# # brokenplot(time, flux, ax=axes[1])\n# # axes[1].plot(time, flux, 'b.', rasterized=True)\n# # axes[1].set(ylabel='Flux (detrended)', xlabel='Time (BJD)')\n# # axes[2].set(ylabel='Flux (detrended)')\n# if ('1' in options['save_plot']):\n# # try: \n# f = os.path.join(options['outdir'],'flux_'+wotan_kwargs['flatten']['method']+'.pdf')\n# with PdfPages(f) as pdf:\n# pdf.savefig( fig1 )\n# pdf.savefig( fig2 )\n# pdf.savefig( fig3 )\n# # fig.savefig(os.path.join(options['outdir'],'flux_'+wotan_kwargs['flatten']['method']+'.pdf'), bbox_inches='tight') #some matplotlib versions crash when saving pdf...\n# # except: \n# # fig.savefig(os.path.join(options['outdir'],'flux_'+wotan_kwargs['flatten']['method']+'.jpg'), bbox_inches='tight') #some matplotlib versions need pillow for jpg (conda install pillow)...\n \n# if ('1' in options['show_plot']):\n# plt.show()\n# else:\n# plt.close('all')\n \n# if options['save_csv']:\n# if flux_err is None: flux_err0 = np.nan*time\n# else: flux_err0 = flux_err\n# X = np.column_stack((time, flux, flux_err0, trend))\n# np.savetxt(os.path.join(options['outdir'],'flux_'+wotan_kwargs['flatten']['method']+'.csv'), X, delimiter=',', header='time,flux_detrended,flux_err,trend')\n \n# time_detrended = 1.*time #just for plotting\n# flux_detrended = 1.*flux #just for plotting\n \n# # timer3d = timer.time()\n# # print('t3d', 
timer3d - timer0) \n \n \n# #::: search for transits\n# i = 0\n# ind_trs = []\n# while (SNR >= tls_kwargs['SNR_threshold']) and (SDE >= tls_kwargs['SDE_threshold']) and (FAP <= tls_kwargs['FAP_threshold']) and (FOUND_SIGNAL==False):\n \n# if options['quiet']:\n# with open(os.devnull, 'w') as devnull:\n# with contextlib.redirect_stdout(devnull):\n# with warnings.catch_warnings():\n# warnings.simplefilter(\"ignore\")\n# model = tls(time, flux, flux_err)\n# results = model.power(**tls_kwargs_original)\n# else:\n# model = tls(time, flux, flux_err)\n# results = model.power(**tls_kwargs_original)\n \n# # timer4 = timer.time()\n# # print('t4', timer4 - timer0) \n \n# # plt.figure()\n# # plt.plot(time, flux, 'b.')\n# # pprint(tls_kwargs_original)\n# # pprint(results)\n# # err\n \n# if (results['snr'] >= tls_kwargs['SNR_threshold']) and (results['SDE'] >= tls_kwargs['SDE_threshold']) and (results['FAP'] <= tls_kwargs['FAP_threshold']):\n \n# #::: calculcate the correct_duration, as TLS sometimes returns unreasonable durations\n# ind_tr_phase = np.where( results['model_folded_model'] < 1. )[0]\n# correct_duration = results['period'] * (results['model_folded_phase'][ind_tr_phase[-1]] - results['model_folded_phase'][ind_tr_phase[0]])\n \n# #::: mark transit\n# ind_tr, ind_out = index_transits(time_input, results['T0'], results['period'], correct_duration)\n# ind_trs.append(ind_tr)\n \n# #::: mask out detected transits and append results\n# time1, flux1 = 1*time, 1*flux #for plotting\n# time, flux, flux_err = mask(time, flux, flux_err, results['period'], np.max((1.5*correct_duration)), results['T0'])\n# results = _to_dic(results)\n# results['correct_duration'] = correct_duration\n# results_all.append(results)\n \n# #::: write TLS stats to file\n# write_tls_results(os.path.join(options['outdir'],'tls_signal_'+str(i)+'.txt'), results)\n# # with open(os.path.join(options['outdir'],'tls_signal_'+str(i)+'.txt'), 'wt') as out:\n# # pprint(results, stream=out)\n \n# # timer5 = timer.time()\n# # print('t5', timer5 - timer0) \n \n# #::: individual TLS plots\n# if ('2' in options['show_plot']) or ('2' in options['save_plot']):\n# fig = plt.figure(figsize=(20,8), tight_layout=True)\n# gs = fig.add_gridspec(2,3)\n \n# ax = fig.add_subplot(gs[0,:])\n# ax.plot(time1, flux1, 'k.', color='silver', rasterized=True)\n# bintime, binflux, binflux_err, _ = rebin_err(time1, flux1, dt = 10./60/24, ferr_type='medsig', ferr_style='sem') #in 10 min intervals\n# ax.plot(bintime, binflux, 'b.', rasterized=True)\n# ax.plot(results['model_lightcurve_time'], results['model_lightcurve_model'], 'r-', lw=3)\n# ax.set(xlabel='Time (BJD)', ylabel='Flux')\n \n# ax = fig.add_subplot(gs[1,0])\n# ax.plot(results['folded_phase'], results['folded_y'], 'k.', color='silver', rasterized=True)\n# bintime, binflux, binflux_err, _ = rebin_err(results['folded_phase'], results['folded_y'], dt = 0.001*results['period'], ferr_type='medsig', ferr_style='sem')\n# ax.plot(bintime, binflux, 'b.', rasterized=True)\n# # plot_phase_folded_lightcurve(time1, flux1, results['period'], results['T0'], dt=0.002, ax=ax)\n# ax.plot(results['model_folded_phase'], results['model_folded_model'], 'r-', lw=3)\n# # ax.set(xlabel='Phase', ylabel='Flux')\n \n# ax = fig.add_subplot(gs[1,1])\n# ax.plot((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], 'k.', color='silver', rasterized=True)\n# bintime, binflux, binflux_err, _ = rebin_err((results['folded_phase']-0.5)*results['period']*24, results['folded_y'], dt = 0.001*results['period']*24, 
ferr_type='medsig', ferr_style='sem')\n# ax.plot(bintime, binflux, 'bo', rasterized=True)\n# # plot_phase_folded_lightcurve(time1*24, flux1, results['period']*24, results['T0'], ax=ax, dt=0.002)\n# ax.plot((results['model_folded_phase']-0.5)*results['period']*24, results['model_folded_model'], 'r-', lw=3)\n# ax.set(xlim=[ -1.5*correct_duration*24, +1.5*correct_duration*24 ], xlabel='Time (h)', yticks=[])\n \n# ax = fig.add_subplot(gs[1,2])\n# ax.text( .02, 0.95, 'P = ' + np.format_float_positional(results['period'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.85, 'Depth = ' + np.format_float_positional(1e3*(1.-results['depth']),4) + ' ppt', ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.75, 'Duration = ' + np.format_float_positional(24*correct_duration,4) + ' h', ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.65, 'T_0 = ' + np.format_float_positional(results['T0'],4) + ' d', ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.55, 'SNR = ' + np.format_float_positional(results['snr'],4), ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.45, 'SDE = ' + np.format_float_positional(results['SDE'],4), ha='left', va='center', transform=ax.transAxes )\n# ax.text( .02, 0.35, 'FAP = ' + np.format_float_scientific(results['FAP'],4), ha='left', va='center', transform=ax.transAxes )\n# ax.set_axis_off()\n# if ('2' in options['save_plot']):\n# try: fig.savefig(os.path.join(options['outdir'],'tls_signal_'+str(i)+'.pdf'), bbox_inches='tight') #some matplotlib versions crash when saving pdf...\n# except: fig.savefig(os.path.join(options['outdir'],'tls_signal_'+str(i)+'.jpg'), bbox_inches='tight') #some matplotlib versions need pillow for jpg (conda install pillow)...\n# if ('2' in options['show_plot']):\n# plt.show(fig)\n# else:\n# plt.close(fig)\n \n# # timer6 = timer.time()\n# # print('t6', timer6 - timer0) \n \n# SNR = results['snr']\n# SDE = results['SDE']\n# FAP = results['FAP']\n# if is_multiple_of(results['period'],options['inj_period']): SNR = -np.inf #if run as part of an inejction-recovery test, then abort if it matches the injected period\n# i+=1\n \n \n \n# #::: full lightcurve plot\n# if ('3' in options['show_plot']) or ('3' in options['save_plot']):\n \n# if detrend:\n# fig, axes = plt.subplots(2,1, figsize=(40,8), tight_layout=True)\n# ax = axes[0]\n# ax.plot(time_input, flux_input, 'k.', color='grey', rasterized=True)\n# ax.plot(time_input, trend, 'r-', lw=2)\n# for number, ind_tr in enumerate(ind_trs):\n# ax.plot(time_input[ind_tr], flux_input[ind_tr], marker='.', linestyle='none', label='signal '+str(number))\n# ax.set(ylabel='Flux (input)', xticklabels=[])\n# ax.legend()\n\n# ax = axes[1]\n# ax.plot(time_detrended, flux_detrended, 'k.', color='grey', rasterized=True)\n# for number, ind_tr in enumerate(ind_trs):\n# ax.plot(time_detrended[ind_tr], flux_detrended[ind_tr], marker='.', linestyle='none', label='signal '+str(number))\n# ax.set(ylabel='Flux (detrended)', xlabel='Time (BJD)')\n# ax.legend()\n \n# else:\n# fig = plt.figure(figsize=(20,4), tight_layout=True)\n# fig, ax = plt.subplots(1,1, figsize=(40,4))\n# ax.plot(time_input, flux_input, 'k.', color='grey', rasterized=True)\n# ax.set(ylabel='Flux (input)', xlabel='Time (BJD)')\n# for number, ind_tr in enumerate(ind_trs):\n# ax.plot(time_input[ind_tr], flux_input[ind_tr], marker='.', linestyle='none', label='signal '+str(number))\n# ax.legend()\n \n# if ('3' in options['save_plot']):\n# try: 
fig.savefig(os.path.join(options['outdir'],'tls_signal_all.pdf'), bbox_inches='tight') #some matplotlib versions crash when saving pdf...\n# except: fig.savefig(os.path.join(options['outdir'],'tls_signal_all.jpg'), bbox_inches='tight') #some matplotlib versions need pillow for jpg (conda install pillow)...\n# if ('3' in options['show_plot']):\n# plt.show(fig)\n# else:\n# plt.close(fig) \n \n \n# return results_all\n\n\n\n###############################################################################\n#::: TLS search using tessio\n###############################################################################\ndef tls_search_by_tic(tic_id,\n tls_kwargs=None, SNR_threshold=5., known_transits=None,\n options=None):\n '''\n Summary:\n -------\n wrapper around tls_search()\n retrieves the SPOC PDC-SAP lightcurve\n retrieves all TIC catalog information from MAST\n calls tls_search()\n \n Inputs:\n -------\n tic_id : str\n TIC ID\n \n Optional Inputs:\n ----------------\n see tls_search()\n \n Returns:\n -------\n list of all TLS results\n '''\n \n #::: handle inputs\n if options is None: options = {}\n if 'show_plot' not in options: options['show_plot']=False\n if 'save_plot' not in options: options['save_plot']=False\n if 'outdir' not in options: options['outdir']=''\n \n #::: format inputs\n tic_id = str(int(tic_id))\n \n #::: load data and inject transit\n time, flux, flux_err = tessio.get(tic_id, pipeline='spoc', PDC=True, unpack=True)\n \n #::: load TIC info / tls kwargs\n tls_kwargs = get_tls_kwargs_by_tic(tic_id, tls_kwargs=tls_kwargs)\n \n return tls_search(time, flux, flux_err,\n tls_kwargs=tls_kwargs,\n SNR_threshold=SNR_threshold,\n known_transits=known_transits,\n options=options)\n\n\n\n###############################################################################\n#::: main\n###############################################################################\nif __name__ == '__main__':\n pass\n \n ###########################################################################\n #::: Example: search for a transit with TLS and tessio\n ###########################################################################\n # tic_id = '269701147'\n # SNR_threshold=5.,\n # known_transits = {'epoch':[2458715.3547, 2458726.0526, 2458743.5534],\n # 'period':[8.8806, 28.5810, 38.3497],\n # 'duration':[3.09/24., 4.45/24., 5.52/24.]\n # }\n \n # results_all = tls_search_by_tic(tic_id,\n # SNR_threshold=SNR_threshold,\n # known_transits=known_transits)\n # print(results_all)"
] | [
[
"numpy.format_float_scientific",
"numpy.interp",
"numpy.format_float_positional",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gcf",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.float",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.isnan",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.where"
]
] |
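The transit-search sample closed by the row above repeatedly phase-folds the light curve on the detected period and bins the folded flux before plotting. A minimal numpy sketch of that folding-and-binning idea follows; `phase_fold` and `bin_folded` are illustrative names (not the `rebin_err` helper the sample imports), and centring the epoch at phase 0.5 is just the convention the sample's `(folded_phase - 0.5) * period` plots suggest.

```python
import numpy as np

def phase_fold(time, flux, period, t0):
    # Fold on a trial period; the transit epoch t0 lands at phase 0.5.
    phase = ((time - t0) / period + 0.5) % 1.0
    order = np.argsort(phase)
    return phase[order], flux[order]

def bin_folded(phase, flux, bin_width=0.001):
    # Median-combine the folded flux in fixed-width phase bins.
    edges = np.arange(0.0, 1.0 + bin_width, bin_width)
    idx = np.digitize(phase, edges) - 1
    centres = 0.5 * (edges[:-1] + edges[1:])
    binned = np.array([np.median(flux[idx == i]) if np.any(idx == i) else np.nan
                       for i in range(len(centres))])
    return centres, binned
```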
IgnacioCarlucho/linear_nonlinear_control | [
"37a7d720f64c6441c7eda386fa2eb6948634e120"
] | [
"mpc/extend/main_track.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport copy\n\n# from mpc_func_with_cvxopt import MpcController as MpcController_cvxopt\nfrom extended_MPC import IterativeMpcController\nfrom animation import AnimDrawer\n# from control import matlab\nfrom coordinate_trans import coordinate_transformation_in_angle, coordinate_transformation_in_position\nfrom traj_func import make_sample_traj\nfrom func_curvature import calc_curvatures, calc_ideal_vel\n\nclass WheeledSystem():\n \"\"\"SampleSystem, this is the simulator\n Kinematic model of car\n\n Attributes\n -----------\n xs : numpy.ndarray\n system states, [x, y, phi, beta]\n history_xs : list\n time history of state\n tau : float\n time constant of tire\n FRONT_WHEEL_BASE : float\n REAR_WHEEL_BASE : float\n predict_xs : \n \"\"\"\n def __init__(self, init_states=None):\n \"\"\"\n Palameters\n -----------\n init_state : float, optional, shape(3, )\n initial state of system default is None\n \"\"\"\n self.NUM_STATE = 4\n self.xs = np.zeros(self.NUM_STATE)\n\n self.tau = 0.01\n\n self.FRONT_WHEELE_BASE = 1.0\n self.REAR_WHEELE_BASE = 1.0\n\n if init_states is not None:\n self.xs = copy.deepcopy(init_states)\n\n self.history_xs = [init_states]\n self.history_predict_xs = []\n\n def update_state(self, us, dt=0.01):\n \"\"\"\n Palameters\n ------------\n u : numpy.ndarray\n inputs of system in some cases this means the reference\n dt : float in seconds, optional\n sampling time of simulation, default is 0.01 [s]\n \"\"\"\n k0 = [0.0 for _ in range(self.NUM_STATE)]\n k1 = [0.0 for _ in range(self.NUM_STATE)]\n k2 = [0.0 for _ in range(self.NUM_STATE)]\n k3 = [0.0 for _ in range(self.NUM_STATE)]\n\n functions = [self._func_x_1, self._func_x_2, self._func_x_3, self._func_x_4]\n\n # solve Runge-Kutta\n for i, func in enumerate(functions):\n k0[i] = dt * func(self.xs[0], self.xs[1], self.xs[2], self.xs[3], us[0], us[1])\n\n for i, func in enumerate(functions):\n k1[i] = dt * func(self.xs[0] + k0[0]/2., self.xs[1] + k0[1]/2., self.xs[2] + k0[2]/2., self.xs[3] + k0[3]/2, us[0], us[1])\n \n for i, func in enumerate(functions):\n k2[i] = dt * func(self.xs[0] + k1[0]/2., self.xs[1] + k1[1]/2., self.xs[2] + k1[2]/2., self.xs[3] + k1[3]/2., us[0], us[1])\n \n for i, func in enumerate(functions):\n k3[i] = dt * func(self.xs[0] + k2[0], self.xs[1] + k2[1], self.xs[2] + k2[2], self.xs[3] + k2[3], us[0], us[1])\n \n self.xs[0] += (k0[0] + 2. * k1[0] + 2. * k2[0] + k3[0]) / 6.\n self.xs[1] += (k0[1] + 2. * k1[1] + 2. * k2[1] + k3[1]) / 6.\n self.xs[2] += (k0[2] + 2. * k1[2] + 2. * k2[2] + k3[2]) / 6.\n self.xs[3] += (k0[3] + 2. * k1[3] + 2. 
* k2[3] + k3[3]) / 6.\n \n # save\n save_states = copy.deepcopy(self.xs)\n self.history_xs.append(save_states)\n # print(self.xs)\n\n def predict_state(self, us, dt=0.01):\n \"\"\"make predict state by using optimal input made by MPC\n Paramaters\n -----------\n us : array-like, shape(2, N)\n optimal input made by MPC\n dt : float in seconds, optional\n sampling time of simulation, default is 0.01 [s]\n \"\"\"\n\n xs = copy.deepcopy(self.xs)\n predict_xs = [copy.deepcopy(xs)]\n\n for i in range(us.shape[1]):\n k0 = [0.0 for _ in range(self.NUM_STATE)]\n k1 = [0.0 for _ in range(self.NUM_STATE)]\n k2 = [0.0 for _ in range(self.NUM_STATE)]\n k3 = [0.0 for _ in range(self.NUM_STATE)]\n\n functions = [self._func_x_1, self._func_x_2, self._func_x_3, self._func_x_4]\n\n # solve Runge-Kutta\n for i, func in enumerate(functions):\n k0[i] = dt * func(xs[0], xs[1], xs[2], xs[3], us[0, i], us[1, i])\n\n for i, func in enumerate(functions):\n k1[i] = dt * func(xs[0] + k0[0]/2., xs[1] + k0[1]/2., xs[2] + k0[2]/2., xs[3] + k0[3]/2., us[0, i], us[1, i])\n \n for i, func in enumerate(functions):\n k2[i] = dt * func(xs[0] + k1[0]/2., xs[1] + k1[1]/2., xs[2] + k1[2]/2., xs[3] + k1[3]/2., us[0, i], us[1, i])\n \n for i, func in enumerate(functions):\n k3[i] = dt * func(xs[0] + k2[0], xs[1] + k2[1], xs[2] + k2[2], xs[3] + k2[3], us[0, i], us[1, i])\n \n xs[0] += (k0[0] + 2. * k1[0] + 2. * k2[0] + k3[0]) / 6.\n xs[1] += (k0[1] + 2. * k1[1] + 2. * k2[1] + k3[1]) / 6.\n xs[2] += (k0[2] + 2. * k1[2] + 2. * k2[2] + k3[2]) / 6.\n xs[3] += (k0[3] + 2. * k1[3] + 2. * k2[3] + k3[3]) / 6.\n\n predict_xs.append(copy.deepcopy(xs))\n\n self.history_predict_xs.append(np.array(predict_xs))\n\n def _func_x_1(self, y_1, y_2, y_3, y_4, u_1, u_2):\n \"\"\"\n Parameters\n ------------\n y_1 : float\n y_2 : float\n y_3 : float\n u_1 : float\n system input\n u_2 : float\n system input\n \"\"\"\n # y_dot = u_1 * math.cos(y_3 + y_4)\n y_dot = u_1 * math.cos(y_3)\n\n return y_dot\n \n def _func_x_2(self, y_1, y_2, y_3, y_4, u_1, u_2):\n \"\"\"\n Parameters\n ------------\n y_1 : float\n y_2 : float\n y_3 : float\n u_1 : float\n system input\n u_2 : float\n system input\n \"\"\"\n # y_dot = u_1 * math.sin(y_3 + y_4)\n y_dot = u_1 * math.sin(y_3)\n\n return y_dot\n \n def _func_x_3(self, y_1, y_2, y_3, y_4, u_1, u_2):\n \"\"\"\n Parameters\n ------------\n y_1 : float\n y_2 : float\n y_3 : float\n u_1 : float\n system input\n u_2 : float\n system input\n \"\"\"\n # y_dot = u_1 / self.REAR_WHEELE_BASE * math.sin(y_4)\n y_dot = u_1 * math.tan(y_4) / (self.REAR_WHEELE_BASE + self.FRONT_WHEELE_BASE)\n\n return y_dot\n\n def _func_x_4(self, y_1, y_2, y_3, y_4, u_1, u_2):\n \"\"\"Ad, Bd, W_D, Q, R\n ParAd, Bd, W_D, Q, R\n ---Ad, Bd, W_D, Q, R\n y_1 : float\n y_2 : float\n y_3 : float\n u_1 : float\n system input\n u_2 : float\n system input\n \"\"\"\n # y_dot = math.atan2(self.REAR_WHEELE_BASE * math.tan(u_2) ,self.REAR_WHEELE_BASE + self.FRONT_WHEELE_BASE)\n y_dot = - 1. 
/ self.tau * (y_4 - u_2)\n\n return y_dot\n\nclass SystemModel():\n \"\"\"\n Attributes\n -----------\n WHEEL_BASE : float\n wheel base of the car\n Ad_s : list\n list of system model matrix Ad\n Bd_s : list\n list of system model matrix Bd\n W_D_s : list\n list of system model matrix W_D_s\n Q : numpy.ndarray\n R : numpy.ndarray\n \"\"\"\n def __init__(self, tau = 0.01, dt = 0.01):\n \"\"\"\n Parameters\n -----------\n tau : time constant, optional\n dt : sampling time, optional\n \"\"\"\n self.dt = dt\n self.tau = tau\n self.WHEEL_BASE = 2.2\n\n self.Ad_s = []\n self.Bd_s = []\n self.W_D_s = []\n\n def calc_predict_sytem_model(self, V, curvatures, predict_step):\n \"\"\"\n calc next predict systemo models\n V : float\n current speed of car\n curvatures : list\n this is the next curvature's list\n predict_step : int\n predict step of MPC\n \"\"\"\n for i in range(predict_step):\n delta_r = math.atan2(self.WHEEL_BASE, 1. / curvatures[i])\n\n A12 = (V / self.WHEEL_BASE) / (math.cos(delta_r)**2)\n A22 = (1. - 1. / self.tau * self.dt)\n\n Ad = np.array([[1., V * self.dt, 0.], \n [0., 1., A12 * self.dt],\n [0., 0., A22]])\n\n Bd = np.array([[0.], [0.], [1. / self.tau]]) * self.dt\n\n # -v*curvature + v/L*(tan(delta_r)-delta_r*cos_delta_r_squared_inv);\n # W_D_0 = V / self.WHEEL_BASE * (delta_r / (math.cos(delta_r)**2)\n W_D_0 = -V * curvatures[i] + (V / self.WHEEL_BASE) * (math.tan(delta_r) - delta_r / (math.cos(delta_r)**2))\n\n W_D = np.array([[0.], [W_D_0], [0.]]) * self.dt\n\n self.Ad_s.append(Ad)\n self.Bd_s.append(Bd)\n self.W_D_s.append(W_D)\n\n # return self.Ad_s, self.Bd_s, self.W_D_s\n\ndef search_nearest_point(points, base_point):\n \"\"\"\n Parameters\n -----------\n points : numpy.ndarray, shape is (2, N)\n base_point : numpy.ndarray, shape is (2, 1)\n\n Returns\n -------\n nearest_index : \n nearest_point : \n \"\"\"\n distance_mat = np.sqrt(np.sum((points - base_point)**2, axis=0))\n\n index_min = np.argmin(distance_mat)\n\n return index_min, points[:, index_min]\n\n\ndef main():\n # parameters\n dt = 0.01\n simulation_time = 20 # in seconds\n PREDICT_STEP = 30\n iteration_num = int(simulation_time / dt)\n\n # make simulator with coninuous matrix\n init_xs_lead = np.array([0., 0., math.pi/5, 0.])\n lead_car = WheeledSystem(init_states=init_xs_lead)\n\n # make system model\n lead_car_system_model = SystemModel()\n\n # reference\n history_traj_ref = []\n history_angle_ref = []\n traj_ref_xs, traj_ref_ys = make_sample_traj(int(simulation_time/dt))\n traj_ref = np.array([traj_ref_xs, traj_ref_ys])\n \n # nearest point\n index_min, nearest_point = search_nearest_point(traj_ref, lead_car.xs[:2].reshape(2, 1))\n\n # get traj's curvature\n NUM_SKIP = 3\n MARGIN = 50\n angles, curvatures = calc_curvatures(traj_ref[:, index_min + MARGIN:index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN], PREDICT_STEP, NUM_SKIP)\n\n # save traj ref\n history_traj_ref.append(traj_ref[:, index_min + MARGIN:index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN])\n history_angle_ref.append(angles)\n\n # print(history_traj_ref)\n # input()\n\n # evaluation function weight\n Q = np.diag([1e2, 1., 1e3])\n R = np.diag([1e2])\n\n # System model update\n V = calc_ideal_vel(traj_ref, dt) # in pratical we should calc from the state\n lead_car_system_model.calc_predict_sytem_model(V, curvatures, PREDICT_STEP)\n\n # make controller with discreted matrix\n lead_controller = IterativeMpcController(lead_car_system_model, Q, R, PREDICT_STEP,\n dt_input_upper=np.array([1 * dt]), dt_input_lower=np.array([-1 * dt]),\n 
input_upper=np.array([1.]), input_lower=np.array([-1.]))\n\n\n # initialize\n lead_controller.initialize_controller()\n \n for i in range(iteration_num):\n print(\"simulation time = {0}\".format(i))\n\n ## lead\n # world traj\n lead_states = lead_car.xs\n\n # nearest point\n index_min, nearest_point = search_nearest_point(traj_ref, lead_car.xs[:2].reshape(2, 1))\n\n # end check\n if len(traj_ref_ys) <= index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN:\n print(\"break\")\n break \n\n # get traj's curvature\n angles, curvatures = calc_curvatures(traj_ref[:, index_min+MARGIN:index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN], PREDICT_STEP, NUM_SKIP)\n\n # save\n history_traj_ref.append(traj_ref[:, index_min + MARGIN:index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN])\n history_angle_ref.append(angles)\n\n # System model update\n V = calc_ideal_vel(traj_ref, dt) # in pratical we should calc from the state\n lead_car_system_model.calc_predict_sytem_model(V, curvatures, PREDICT_STEP)\n\n # transformation\n # car\n relative_car_position = coordinate_transformation_in_position(lead_states[:2].reshape(2, 1), nearest_point)\n relative_car_position = coordinate_transformation_in_angle(relative_car_position, angles[0])\n\n relative_car_angle = lead_states[2] - angles[0]\n relative_car_state = np.hstack((relative_car_position[1], relative_car_angle, lead_states[-1]))\n\n # traj_ref\n relative_traj = coordinate_transformation_in_position(traj_ref[:, index_min:index_min + PREDICT_STEP], nearest_point)\n relative_traj = coordinate_transformation_in_angle(relative_traj, angles[0])\n relative_ref_angle = np.array(angles) - angles[0]\n\n # make ref\n lead_reference = np.array([[relative_traj[1, -1], relative_ref_angle[-1], 0.] for i in range(PREDICT_STEP)]).flatten()\n\n print(\"relative car state = {}\".format(relative_car_state))\n print(\"nearest point = {}\".format(nearest_point))\n # input()\n\n # update system matrix\n lead_controller.update_system_model(lead_car_system_model)\n\n lead_opt_u, all_opt_u = lead_controller.calc_input(relative_car_state, lead_reference)\n\n lead_opt_u = np.hstack((np.array([V]), lead_opt_u))\n\n all_opt_u = np.stack((np.ones(PREDICT_STEP)*V, all_opt_u.flatten()))\n\n print(\"opt_u = {}\".format(lead_opt_u))\n print(\"all_opt_u = {}\".format(all_opt_u))\n \n # predict\n lead_car.predict_state(all_opt_u, dt=dt)\n\n # update\n lead_car.update_state(lead_opt_u, dt=dt)\n\n # print(lead_car.history_predict_xs)\n # input()\n\n # figures and animation\n lead_history_states = np.array(lead_car.history_xs)\n lead_history_predict_states = lead_car.history_predict_xs\n\n \"\"\"\n time_history_fig = plt.figure()\n x_fig = time_history_fig.add_subplot(311)\n y_fig = time_history_fig.add_subplot(312)\n theta_fig = time_history_fig.add_subplot(313)\n\n car_traj_fig = plt.figure()\n traj_fig = car_traj_fig.add_subplot(111)\n traj_fig.set_aspect('equal')\n\n x_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_states[:, 0], label=\"lead\")\n x_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_states[:, 0], label=\"follow\")\n x_fig.set_xlabel(\"time [s]\")\n x_fig.set_ylabel(\"x\")\n x_fig.legend()\n\n y_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_states[:, 1], label=\"lead\")\n y_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_states[:, 1], label=\"follow\")\n y_fig.plot(np.arange(0, simulation_time+0.01, dt), [4. 
for _ in range(iteration_num+1)], linestyle=\"dashed\")\n y_fig.set_xlabel(\"time [s]\")\n y_fig.set_ylabel(\"y\")\n y_fig.legend()\n\n theta_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_states[:, 2], label=\"lead\")\n theta_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_states[:, 2], label=\"follow\")\n theta_fig.plot(np.arange(0, simulation_time+0.01, dt), [0. for _ in range(iteration_num+1)], linestyle=\"dashed\")\n theta_fig.set_xlabel(\"time [s]\")\n theta_fig.set_ylabel(\"theta\")\n theta_fig.legend()\n\n time_history_fig.tight_layout()\n\n traj_fig.plot(lead_history_states[:, 0], lead_history_states[:, 1], label=\"lead\")\n traj_fig.plot(follow_history_states[:, 0], follow_history_states[:, 1], label=\"follow\")\n traj_fig.set_xlabel(\"x\")\n traj_fig.set_ylabel(\"y\")\n traj_fig.legend()\n plt.show()\n\n lead_history_us = np.array(lead_controller.history_us)\n follow_history_us = np.array(follow_controller.history_us)\n input_history_fig = plt.figure()\n u_1_fig = input_history_fig.add_subplot(111)\n\n u_1_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_us[:, 0], label=\"lead\")\n u_1_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_us[:, 0], label=\"follow\")\n u_1_fig.set_xlabel(\"time [s]\")\n u_1_fig.set_ylabel(\"u_omega\")\n \n input_history_fig.tight_layout()\n plt.show()\n \"\"\"\n\n animdrawer = AnimDrawer([lead_history_states, lead_history_predict_states, traj_ref, history_traj_ref, history_angle_ref])\n animdrawer.draw_anim()\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.zeros",
"numpy.diag",
"numpy.argmin",
"numpy.hstack",
"numpy.array"
]
] |
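The `update_state`/`predict_state` methods in the row above unroll a classical fourth-order Runge-Kutta step component by component for a kinematic bicycle model. A compact, vectorised restatement of the same scheme is sketched below; the wheel base of 2.0 and time constant of 0.01 mirror the constants in that sample (`FRONT_WHEELE_BASE + REAR_WHEELE_BASE` and `tau`), and the function names are illustrative only.

```python
import numpy as np

def rk4_step(f, x, u, dt):
    # One classical RK4 step for x' = f(x, u); algebraically the same update
    # as the per-component k0..k3 loop in update_state above.
    k1 = f(x, u)
    k2 = f(x + 0.5 * dt * k1, u)
    k3 = f(x + 0.5 * dt * k2, u)
    k4 = f(x + dt * k3, u)
    return x + dt * (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0

def kinematic_bicycle(x, u, wheel_base=2.0, tau=0.01):
    # State [x, y, yaw, steer]; input [speed, steer_command].
    _, _, yaw, steer = x
    v, steer_cmd = u
    return np.array([v * np.cos(yaw),
                     v * np.sin(yaw),
                     v * np.tan(steer) / wheel_base,
                     -(steer - steer_cmd) / tau])

state = np.array([0.0, 0.0, np.pi / 5, 0.0])
state = rk4_step(kinematic_bicycle, state, np.array([1.0, 0.1]), dt=0.01)
```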
ili0820/Superresolution | [
"69d3f578d00c5521928c0614894d70b63ed42963"
] | [
"utility.py"
] | [
"import math\nimport time\nimport random\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lrs\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.device_count() == 1:\n torch.cuda.manual_seed(seed)\n else:\n torch.cuda.manual_seed_all(seed)\n \n\nclass timer():\n def __init__(self):\n self.acc = 0\n self.tic()\n\n def tic(self):\n self.t0 = time.time()\n\n def toc(self):\n return time.time() - self.t0\n\n def hold(self):\n self.acc += self.toc()\n\n def release(self):\n ret = self.acc\n self.acc = 0\n\n return ret\n\n def reset(self):\n self.acc = 0\n\n\ndef quantize(img, rgb_range):\n pixel_range = 255 / rgb_range\n return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)\n\n\ndef calc_psnr(sr, hr, scale, rgb_range, benchmark=False):\n if sr.size(-2) > hr.size(-2) or sr.size(-1) > hr.size(-1):\n print(\"the dimention of sr image is not equal to hr's! \")\n sr = sr[:,:,:hr.size(-2),:hr.size(-1)]\n diff = (sr - hr).data.div(rgb_range)\n\n if benchmark:\n shave = scale\n if diff.size(1) > 1:\n convert = diff.new(1, 3, 1, 1)\n convert[0, 0, 0, 0] = 65.738\n convert[0, 1, 0, 0] = 129.057\n convert[0, 2, 0, 0] = 25.064\n diff.mul_(convert).div_(256)\n diff = diff.sum(dim=1, keepdim=True)\n else:\n shave = scale + 6\n\n valid = diff[:, :, shave:-shave, shave:-shave]\n mse = valid.pow(2).mean()\n\n return -10 * math.log10(mse)\n\n\ndef make_optimizer(opt, my_model):\n trainable = filter(lambda x: x.requires_grad, my_model.parameters())\n optimizer_function = optim.Adam\n kwargs = {\n 'betas': (opt.beta1, opt.beta2),\n 'eps': opt.epsilon\n }\n kwargs['lr'] = opt.lr\n kwargs['weight_decay'] = opt.weight_decay\n \n return optimizer_function(trainable, **kwargs)\n\n\ndef make_dual_optimizer(opt, dual_models):\n dual_optimizers = []\n for dual_model in dual_models:\n temp_dual_optim = torch.optim.Adam(\n params=dual_model.parameters(),\n lr = opt.lr, \n betas = (opt.beta1, opt.beta2),\n eps = opt.epsilon,\n weight_decay=opt.weight_decay)\n dual_optimizers.append(temp_dual_optim)\n \n return dual_optimizers\n\n\ndef make_scheduler(opt, my_optimizer):\n scheduler = lrs.CosineAnnealingLR(\n my_optimizer,\n float(opt.epochs),\n eta_min=opt.eta_min\n )\n\n return scheduler\n\n\ndef make_dual_scheduler(opt, dual_optimizers):\n dual_scheduler = []\n for i in range(len(dual_optimizers)):\n scheduler = lrs.CosineAnnealingLR(\n dual_optimizers[i],\n float(opt.epochs),\n eta_min=opt.eta_min\n )\n dual_scheduler.append(scheduler)\n\n return dual_scheduler\n\n\ndef init_model(args):\n # Set the templates here\n if args.model.find('DRN-S') >= 0:\n if args.scale == 4:\n args.n_blocks = 30\n args.n_feats = 16\n elif args.scale == 8:\n args.n_blocks = 30\n args.n_feats = 8\n else:\n print('Use defaults n_blocks and n_feats.')\n args.dual = True\n\n if args.model.find('DRN-L') >= 0:\n if args.scale == 4:\n args.n_blocks = 40\n args.n_feats = 20\n elif args.scale == 8:\n args.n_blocks = 36\n args.n_feats = 10\n else:\n print('Use defaults n_blocks and n_feats.')\n args.dual = True\n\n\n"
] | [
[
"torch.cuda.manual_seed_all",
"torch.cuda.manual_seed",
"torch.manual_seed",
"numpy.random.seed",
"torch.cuda.device_count"
]
] |
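`calc_psnr` in the row above divides the residual by `rgb_range` and then returns `-10 * log10(mse)`, i.e. PSNR with a peak value of 1. A plain-numpy restatement of that metric (ignoring the Y-channel conversion and border shaving the original also performs, and using an illustrative function name) might look like this:

```python
import math
import numpy as np

def psnr(sr, hr, max_val=1.0):
    # PSNR in dB: 10 * log10(max_val**2 / MSE). With max_val == 1 this reduces
    # to the -10 * log10(mse) expression used in calc_psnr above.
    diff = np.asarray(sr, dtype=np.float64) - np.asarray(hr, dtype=np.float64)
    mse = np.mean(diff ** 2)
    if mse == 0.0:
        return float("inf")
    return 10.0 * math.log10(max_val ** 2 / mse)
```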
seibert/metagraph-cuda | [
"ba29e59604cd34864cfed763c9ce9dc21d5b1377"
] | [
"metagraph_cuda/plugins/cugraph/types.py"
] | [
"import numpy as np\nfrom metagraph.wrappers import (\n EdgeSetWrapper,\n EdgeMapWrapper,\n CompositeGraphWrapper,\n BipartiteGraphWrapper,\n)\nfrom metagraph import dtypes\nfrom metagraph.types import (\n Graph,\n BipartiteGraph,\n EdgeSet,\n EdgeMap,\n)\nfrom .. import has_cugraph\nfrom typing import List, Set, Dict, Any\n\nif has_cugraph:\n import cugraph\n import cudf\n\n from ..cudf.types import CuDFNodeSet, CuDFNodeMap\n\n class CuGraphEdgeSet(EdgeSetWrapper, abstract=EdgeSet):\n def __init__(self, graph):\n self.value = graph\n\n class TypeMixin:\n @classmethod\n def _compute_abstract_properties(\n cls, obj, props: List[str], known_props: Dict[str, Any]\n ) -> Dict[str, Any]:\n ret = known_props.copy()\n\n # fast properties\n for prop in {\"is_directed\"} - ret.keys():\n if prop == \"is_directed\":\n ret[prop] = obj.value.is_directed()\n\n return ret\n\n @classmethod\n def assert_equal(\n cls,\n obj1,\n obj2,\n aprops1,\n aprops2,\n cprops1,\n cprops2,\n *,\n rel_tol=None,\n abs_tol=None,\n ):\n assert (\n aprops1 == aprops2\n ), f\"abstract property mismatch: {aprops1} != {aprops2}\"\n g1 = obj1.value\n g2 = obj2.value\n # Compare\n g1_type = type(g1.nodes())\n g2_type = type(g2.nodes())\n assert g1_type == g2_type, f\"node type mismatch: {g1_type} != {g2_type}\"\n nodes_equal = (g1.nodes() == g2.nodes()).all()\n if isinstance(nodes_equal, cudf.DataFrame):\n nodes_equal = nodes_equal.all()\n assert nodes_equal, f\"node mismatch: {g1.nodes()} != {g2.nodes()}\"\n assert len(g1.edges()) == len(\n g2.edges()\n ), f\"edge mismatch: {g1.edges()} != {g2.edges()}\"\n g1_edges_reindexed = g1.edges().set_index([\"src\", \"dst\"])\n g2_edges_reindexed = g2.edges().set_index([\"src\", \"dst\"])\n assert (\n g2_edges_reindexed.index.isin(g2_edges_reindexed.index).all().item()\n ), f\"edge mismatch: {g1.edges()} != {g2.edges()}\"\n\n class CuGraphEdgeMap(EdgeMapWrapper, abstract=EdgeMap):\n def __init__(self, graph):\n self.value = graph\n self._assert_instance(graph, cugraph.Graph)\n\n def _determine_dtype(self, all_values):\n all_types = {type(v) for v in all_values}\n if not all_types or (all_types - {float, int, bool}):\n return \"str\"\n for type_ in (float, int, bool):\n if type_ in all_types:\n return str(type_.__name__)\n\n class TypeMixin:\n @classmethod\n def _compute_abstract_properties(\n cls, obj, props: List[str], known_props: Dict[str, Any]\n ) -> Dict[str, Any]:\n ret = known_props.copy()\n\n # fast properties\n for prop in {\"is_directed\", \"dtype\"} - ret.keys():\n if prop == \"is_directed\":\n ret[prop] = obj.value.is_directed()\n if prop == \"dtype\":\n if obj.value.edgelist:\n obj_dtype = obj.value.view_edge_list().weights.dtype\n else:\n obj_dtype = obj.value.view_adj_list()[2].dtype\n ret[prop] = dtypes.dtypes_simplified[obj_dtype]\n\n # slow properties, only compute if asked\n slow_props = props - ret.keys()\n if \"has_negative_weights\" in slow_props:\n if obj.value.edgelist:\n weights = obj.value.view_edge_list().weights\n else:\n weights = obj.value.view_adj_list()[2]\n ret[\"has_negative_weights\"] = (weights < 0).any()\n\n return ret\n\n @classmethod\n def assert_equal(\n cls,\n obj1,\n obj2,\n aprops1,\n aprops2,\n cprops1,\n cprops2,\n *,\n rel_tol=1e-9,\n abs_tol=0.0,\n ):\n assert (\n aprops1 == aprops2\n ), f\"abstract property mismatch: {aprops1} != {aprops2}\"\n g1 = obj1.value\n g2 = obj2.value\n # Compare\n assert (\n g1.number_of_nodes() == g2.number_of_nodes()\n ), f\"{g1.number_of_nodes()} != {g2.number_of_nodes()}\"\n assert (\n g1.number_of_edges() == 
g2.number_of_edges()\n ), f\"{g1.number_of_edges()} != {g2.number_of_edges()}\"\n\n if g1.edgelist:\n g1_edge_list = g1.view_edge_list()\n g1_nodes = cudf.concat(\n [g1_edge_list[\"src\"], g1_edge_list[\"dst\"]]\n ).unique()\n g2_edge_list = g2.view_edge_list()\n g2_nodes = cudf.concat(\n [g2_edge_list[\"src\"], g2_edge_list[\"dst\"]]\n ).unique()\n assert (\n g1_nodes.isin(g2_nodes).all() and g2_nodes.isin(g1_nodes).all()\n ), \"g1 and g2 have different nodes\"\n assert len(g1_edge_list) == len(\n g2_edge_list\n ), f\"g1 and g2 have a different number of edges\"\n # TODO the below takes an additional possibly unneeded O(n) memory\n assert len(g1.edges()) == len(\n g2.edges()\n ), f\"edge mismatch: {g1.edges()} != {g2.edges()}\"\n g1_edges_reindexed = g1_edge_list.set_index(\n [\"src\", \"dst\", \"weights\"]\n )\n g2_edges_reindexed = g2_edge_list.set_index(\n [\"src\", \"dst\", \"weights\"]\n )\n assert (\n g2_edges_reindexed.index.isin(g2_edges_reindexed.index)\n .all()\n .item()\n ), f\"edge mismatch: {g1.edges()} != {g2.edges()}\"\n else:\n assert (\n g1.number_of_nodes() == g2.number_of_nodes()\n ), \"g1 and g2 have different nodes\"\n for i, g1_series in enumerate(g1.view_adj_list()):\n g2_series = g2.view_adj_list()[i]\n assert (g1_series == None) == (\n g2_series == None\n ), \"one of g1 or g2 is weighted while the other is not\"\n if g1_series != None:\n if np.issubdtype(g1_series.dtype.type, np.float):\n assert cupy.isclose(g1_series == g2_series)\n else:\n assert all(\n g1_series == g2_series\n ), \"g1 and g2 have different edges\"\n\n class CuGraph(CompositeGraphWrapper, abstract=Graph):\n def __init__(self, edges, nodes=None):\n if isinstance(edges, cugraph.Graph):\n if edges.edgelist:\n if edges.edgelist.weights:\n edges = CuGraphEdgeMap(edges)\n else:\n edges = CuGraphEdgeSet(edges)\n elif edges.adjlist:\n if edges.view_adj_list()[-1] is not None:\n edges = CuGraphEdgeMap(edges)\n else:\n edges = CuGraphEdgeSet(edges)\n self._assert_instance(edges, (CuGraphEdgeSet, CuGraphEdgeMap))\n if nodes is not None:\n self._assert_instance(nodes, (CuDFNodeSet, CuDFNodeMap))\n super().__init__(edges, nodes)\n\n class CuGraphBipartiteGraph(BipartiteGraphWrapper, abstract=BipartiteGraph):\n def __init__(self, graph):\n \"\"\"\n :param graph: cugraph.Graph instance s.t. 
cugraph.Graph.is_bipartite() returns True\n \"\"\"\n self._assert_instance(graph, cugraph.Graph)\n self._assert(graph.is_bipartite(), f\"{graph} is not bipartite\")\n nodes = graph.sets() # TODO consider storing this as an attribute\n self._assert(len(nodes) == 2, \"nodes must have length of 2\")\n self._assert_instance(nodes[0], cudf.Series)\n self._assert_instance(nodes[1], cudf.Series)\n # O(n^2), but cheaper than converting to Python sets\n common_nodes = nodes[0][nodes[0].isin(nodes[1])]\n if len(common_nodes) != 0:\n raise ValueError(\n f\"Node IDs found in both parts of the graph: {common_nodes.values.tolist()}\"\n )\n partition_nodes = cudf.concat([nodes[0], nodes[1]])\n unclaimed_nodes_mask = ~graph.nodes().isin(partition_nodes)\n if unclaimed_nodes_mask.any():\n unclaimed_nodes = graph.nodes()[unclaimed_nodes_mask].values.tolist()\n raise ValueError(\n f\"Node IDs found in graph, but not listed in either partition: {unclaimed_nodes}\"\n )\n # TODO handle node weights\n self.value = graph\n\n class TypeMixin:\n @classmethod\n def _compute_abstract_properties(\n cls, obj, props: Set[str], known_props: Dict[str, Any]\n ) -> Dict[str, Any]:\n ret = known_props.copy()\n\n if {\"edge_type\", \"edge_dtype\", \"edge_has_negative_weights\"} & (\n props - ret.keys()\n ):\n if obj.value.edgelist:\n edgelist = obj.value.view_edge_list()\n weights = (\n edgelist.weights if \"weights\" in edgelist.columns else None\n )\n else:\n weights = obj.value.view_adj_list()[2]\n\n # fast properties\n for prop in {\"is_directed\", \"edge_type\", \"edge_dtype\",} - ret.keys():\n if prop == \"is_directed\":\n ret[prop] = obj.value.is_directed()\n elif prop == \"edge_type\":\n ret[prop] = \"set\" if weights is None else \"map\"\n elif prop == \"edge_dtype\":\n ret[prop] = dtypes.dtypes_simplified[weights.dtype]\n\n # slow properties, only compute if asked\n slow_props = props - ret.keys()\n if {\"node0_dtype\", \"node1_dtype\"} & slow_props:\n nodes = obj.value.sets()\n if prop == \"node0_dtype\":\n ret[prop] = dtypes.dtypes_simplified[obj.nodes[0].dtype]\n elif prop == \"node1_dtype\":\n ret[prop] = dtypes.dtypes_simplified[obj.nodes[1].dtype]\n slow_props = slow_props - ret.keys()\n if {\n \"node0_type\",\n \"node1_type\",\n \"edge_has_negative_weights\",\n } & slow_props:\n for prop in slow_props:\n if prop == \"node0_type\":\n # TODO properly handle when node weights are supported\n ret[prop] = \"set\"\n elif prop == \"node1_type\":\n # TODO properly handle when node weights are supported\n ret[prop] = \"set\"\n elif prop == \"edge_has_negative_weights\":\n ret[prop] = weights.lt(0).any()\n\n return ret\n\n @classmethod\n def assert_equal(\n cls,\n obj1,\n obj2,\n aprops1,\n aprops2,\n cprops1,\n cprops2,\n *,\n rel_tol=1e-9,\n abs_tol=0.0,\n ):\n assert aprops1 == aprops2, f\"property mismatch: {aprops1} != {aprops2}\"\n g1 = obj1.value\n g2 = obj2.value\n canonicalize_nodes = lambda series: series.set_index(series)\n obj1_nodes = [canonicalize_nodes(nodes) for nodes in obj1.value.sets()]\n obj2_nodes = [canonicalize_nodes(nodes) for nodes in obj2.value.sets()]\n # Compare\n assert len(obj1_nodes[0]) == len(\n obj2_nodes[0]\n ), f\"{len(obj1_nodes[0])} == {len(obj2_nodes[0])}\"\n assert len(obj1_nodes[1]) == len(\n obj2_nodes[1]\n ), f\"{len(obj1_nodes[1])} == {len(obj2_nodes[1])}\"\n assert all(\n obj1_nodes[0] == obj2_nodes[0]\n ), f\"{obj1_nodes[0]} != {obj2_nodes[0]}\"\n assert all(\n obj1_nodes[1] == obj2_nodes[1]\n ), f\"{obj1_nodes[1]} != {obj2_nodes[1]}\"\n assert (\n g1.number_of_edges() == 
g2.number_of_edges()\n ), f\"{g1.number_of_edges()} != {g2.number_of_edges()}\"\n if g1.edgelist:\n g1_edge_list = g1.view_edge_list()\n g2_edge_list = g2.view_edge_list()\n assert len(g1_edge_list) == len(\n g2_edge_list\n ), f\"g1 and g2 have a different number of edges\"\n assert len(g1_edge_list.columns) == len(\n g2_edge_list.columns\n ), \"one of g1 or g2 is weighted while the other is not\"\n columns = g1_edge_list.columns\n # TODO the below takes an additional possibly unneeded O(n) memory\n assert g1_edge_list.set_index(columns) == g2_edge_list.set_index(\n columns\n ), \"g1 and g2 have different edges\"\n\n else:\n for i, g1_series in enumerate(g1.view_adj_list()):\n g2_series = g1.view_adj_list()[i]\n assert (g1_series is None) == (\n g2_series is None\n ), \"one of g1 or g2 is weighted while the other is not\"\n if g1_series is not None:\n if np.issubdtype(g1_series.dtype.type, np.float):\n assert cupy.isclose(g1_series == g2_series)\n else:\n assert all(\n g1_series == g2_series\n ), \"g1 and g2 have different edges\"\n\n if aprops1.get(\"node0_type\") == \"map\":\n pass # TODO handle this when node weights are supported\n\n if aprops1.get(\"node1_type\") == \"map\":\n pass # TODO handle this when node weights are supported\n"
] | [
[
"numpy.issubdtype"
]
] |
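The `assert_equal` methods in the row above compare edge lists by re-indexing on `(src, dst, weights)` and testing index membership (one of the `isin` checks there compares `g2_edges_reindexed` against itself, so a symmetric check appears to be the intent). A symmetric, order-insensitive comparison of two weighted edge lists can be sketched with pandas standing in for cuDF purely for illustration; the function name and column defaults are assumptions.

```python
import pandas as pd

def edge_lists_equal(df1, df2, cols=("src", "dst", "weights")):
    # Canonicalise both edge lists on the same key columns and require
    # mutual containment, so the check is symmetric in df1 and df2.
    if len(df1) != len(df2):
        return False
    idx1 = df1.set_index(list(cols)).index
    idx2 = df2.set_index(list(cols)).index
    return bool(idx1.isin(idx2).all() and idx2.isin(idx1).all())

g1 = pd.DataFrame({"src": [0, 1], "dst": [1, 2], "weights": [1.0, 2.0]})
g2 = pd.DataFrame({"src": [1, 0], "dst": [2, 1], "weights": [2.0, 1.0]})
assert edge_lists_equal(g1, g2)
```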
BillGatesNephew/Cirq | [
"fda14a5f6c65356dfabf8a5bcd599bf57e542041"
] | [
"cirq/testing/consistent_protocols_test.py"
] | [
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import AbstractSet, Sequence, Union\n\nimport pytest\n\nimport numpy as np\nimport sympy\n\nimport cirq\nfrom cirq._compat import proper_repr\nfrom cirq.type_workarounds import NotImplementedType\n\n\nclass GoodGate(cirq.SingleQubitGate):\n def __init__(\n self,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n ) -> None:\n self.phase_exponent = cirq.canonicalize_half_turns(phase_exponent)\n self.exponent = exponent\n\n def _has_unitary_(self):\n return not cirq.is_parameterized(self)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n if cirq.is_parameterized(self):\n return NotImplemented\n z = cirq.unitary(cirq.Z ** self.phase_exponent)\n x = cirq.unitary(cirq.X ** self.exponent)\n return np.dot(np.dot(z, x), np.conj(z))\n\n def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs) -> Union[np.ndarray, NotImplementedType]:\n if self.exponent != 1 or cirq.is_parameterized(self):\n return NotImplemented\n\n zero = cirq.slice_for_qubits_equal_to(args.axes, 0)\n one = cirq.slice_for_qubits_equal_to(args.axes, 1)\n c = np.exp(1j * np.pi * self.phase_exponent)\n\n args.target_tensor[one] *= c.conj()\n args.available_buffer[zero] = args.target_tensor[one]\n args.available_buffer[one] = args.target_tensor[zero]\n args.available_buffer[one] *= c\n\n return args.available_buffer\n\n def _decompose_(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q) ** self.phase_exponent\n x = cirq.X(q) ** self.exponent\n if cirq.is_parameterized(z):\n # coverage: ignore\n return NotImplemented\n return z ** -1, x, z\n\n def _pauli_expansion_(self) -> cirq.LinearDict[str]:\n if self._is_parameterized_():\n return NotImplemented\n phase_angle = np.pi * self.phase_exponent / 2\n angle = np.pi * self.exponent / 2\n global_phase = np.exp(1j * angle)\n return cirq.LinearDict(\n {\n 'I': global_phase * np.cos(angle),\n 'X': -1j * global_phase * np.sin(angle) * np.cos(2 * phase_angle),\n 'Y': -1j * global_phase * np.sin(angle) * np.sin(2 * phase_angle),\n }\n )\n\n def _phase_by_(self, phase_turns, qubit_index):\n assert qubit_index == 0\n return GoodGate(\n exponent=self.exponent, phase_exponent=self.phase_exponent + phase_turns * 2\n )\n\n def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'GoodGate':\n new_exponent = cirq.mul(self.exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n # coverage: ignore\n return NotImplemented\n return GoodGate(phase_exponent=self.phase_exponent, exponent=new_exponent)\n\n def __repr__(self):\n args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]\n if self.exponent != 1:\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n return 'GoodGate({})'.format(', '.join(args))\n\n def _is_parameterized_(self) -> bool:\n return cirq.is_parameterized(self.exponent) or cirq.is_parameterized(self.phase_exponent)\n\n def 
_parameter_names_(self) -> AbstractSet[str]:\n return cirq.parameter_names(self.exponent) | cirq.parameter_names(self.phase_exponent)\n\n def _resolve_parameters_(self, param_resolver) -> 'GoodGate':\n return GoodGate(\n phase_exponent=param_resolver.value_of(self.phase_exponent),\n exponent=param_resolver.value_of(self.exponent),\n )\n\n def _identity_tuple(self):\n return (GoodGate, self.phase_exponent, self.exponent)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n # coverage: ignore\n return NotImplemented\n return self._identity_tuple() == other._identity_tuple()\n\n\nclass BadGateIsParameterized(GoodGate):\n def _is_parameterized_(self) -> bool:\n return not super()._is_parameterized_()\n\n\nclass BadGateParameterNames(GoodGate):\n def _parameter_names_(self) -> AbstractSet[str]:\n return super()._parameter_names_() | {'not_a_param'}\n\n\nclass BadGateApplyUnitaryToTensor(GoodGate):\n def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs) -> Union[np.ndarray, NotImplementedType]:\n if self.exponent != 1 or cirq.is_parameterized(self):\n # coverage: ignore\n return NotImplemented\n\n zero = cirq.slice_for_qubits_equal_to(args.axes, 0)\n one = cirq.slice_for_qubits_equal_to(args.axes, 1)\n c = np.exp(1j * np.pi * self.phase_exponent)\n\n args.target_tensor[one] *= c\n args.available_buffer[zero] = args.target_tensor[one]\n args.available_buffer[one] = args.target_tensor[zero]\n args.available_buffer[one] *= c\n\n return args.available_buffer\n\n\nclass BadGateDecompose(GoodGate):\n def _decompose_(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q) ** self.phase_exponent\n x = cirq.X(q) ** (2 * self.exponent)\n if cirq.is_parameterized(z):\n # coverage: ignore\n return NotImplemented\n return z ** -1, x, z\n\n\nclass BadGatePauliExpansion(GoodGate):\n def _pauli_expansion_(self) -> cirq.LinearDict[str]:\n return cirq.LinearDict({'I': 10})\n\n\nclass BadGatePhaseBy(GoodGate):\n def _phase_by_(self, phase_turns, qubit_index):\n assert qubit_index == 0\n return BadGatePhaseBy(\n exponent=self.exponent, phase_exponent=self.phase_exponent + phase_turns * 4\n )\n\n\nclass BadGateRepr(GoodGate):\n def __repr__(self):\n args = ['phase_exponent={!r}'.format(2 * self.phase_exponent)]\n if self.exponent != 1:\n # coverage: ignore\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n return 'BadGateRepr({})'.format(', '.join(args))\n\n\nclass GoodEigenGate(cirq.EigenGate, cirq.SingleQubitGate):\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0])),\n (1, np.diag([0, 1])),\n ]\n\n def __repr__(self):\n return 'GoodEigenGate' '(exponent={}, global_shift={!r})'.format(\n proper_repr(self._exponent), self._global_shift\n )\n\n\nclass BadEigenGate(GoodEigenGate):\n def _eigen_shifts(self):\n return [0, 0]\n\n def __repr__(self):\n return 'BadEigenGate' '(exponent={}, global_shift={!r})'.format(\n proper_repr(self._exponent), self._global_shift\n )\n\n\ndef test_assert_implements_consistent_protocols():\n cirq.testing.assert_implements_consistent_protocols(\n GoodGate(phase_exponent=0.0), global_vals={'GoodGate': GoodGate}\n )\n\n cirq.testing.assert_implements_consistent_protocols(\n GoodGate(phase_exponent=0.25), global_vals={'GoodGate': GoodGate}\n )\n\n cirq.testing.assert_implements_consistent_protocols(\n GoodGate(phase_exponent=sympy.Symbol('t')), global_vals={'GoodGate': GoodGate}\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n 
BadGateIsParameterized(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGateParameterNames(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGateApplyUnitaryToTensor(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(BadGateDecompose(phase_exponent=0.25))\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGatePauliExpansion(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(BadGatePhaseBy(phase_exponent=0.25))\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGateRepr(phase_exponent=0.25), global_vals={'BadGateRepr': BadGateRepr}\n )\n\n\ndef test_assert_eigengate_implements_consistent_protocols():\n cirq.testing.assert_eigengate_implements_consistent_protocols(\n GoodEigenGate, global_vals={'GoodEigenGate': GoodEigenGate}\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_eigengate_implements_consistent_protocols(\n BadEigenGate, global_vals={'BadEigenGate': BadEigenGate}\n )\n\n\ndef test_assert_commutes_magic_method_consistent_with_unitaries():\n gate_op = cirq.CNOT(*cirq.LineQubit.range(2))\n with pytest.raises(TypeError):\n cirq.testing.assert_commutes_magic_method_consistent_with_unitaries(gate_op)\n\n exponents = [sympy.Symbol('s'), 0.1, 0.2]\n gates = [cirq.ZPowGate(exponent=e) for e in exponents]\n cirq.testing.assert_commutes_magic_method_consistent_with_unitaries(*gates)\n\n cirq.testing.assert_commutes_magic_method_consistent_with_unitaries(cirq.Z, cirq.CNOT)\n"
] | [
[
"numpy.diag",
"numpy.conj",
"numpy.cos",
"numpy.exp",
"numpy.sin",
"numpy.dot"
]
] |
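The cirq test file in the record above builds one GoodGate plus a family of BadGate* subclasses, each breaking exactly one protocol, and feeds them to cirq.testing.assert_implements_consistent_protocols. As a rough, self-contained sketch of how that helper is usually driven outside this test file (the gate class, its name, and its repr are hypothetical, not part of the record), a minimal custom gate needs a unitary, an eval-able repr, and equality for the repr round-trip; which sub-checks actually run can vary with the cirq version:

import numpy as np
import cirq


class MyZGate(cirq.SingleQubitGate):
    """Hypothetical gate: a plain Pauli-Z exposed only through _unitary_."""

    def _unitary_(self) -> np.ndarray:
        return np.diag([1, -1])

    def __repr__(self) -> str:
        # Must round-trip through eval(), which is why global_vals is passed below.
        return 'MyZGate()'

    def __eq__(self, other) -> bool:
        return isinstance(other, MyZGate)

    def __hash__(self) -> int:
        return hash(MyZGate)


# Cross-checks the protocols the gate implements (unitary, apply_unitary,
# decompose, repr round-trip, ...) against one another and raises
# AssertionError on any mismatch, exactly as exercised in the tests above.
cirq.testing.assert_implements_consistent_protocols(
    MyZGate(), global_vals={'MyZGate': MyZGate}
)

Each BadGate* variant in the record deliberately makes two of these protocols disagree, which is what the pytest.raises(AssertionError) blocks in test_assert_implements_consistent_protocols assert.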
fubel/vision | [
"6845355fd80a48ca7ec80c06aa2d97d50f0b077d"
] | [
"torchvision/models/detection/keypoint_rcnn.py"
] | [
"import torch\nfrom torch import nn\n\nfrom torchvision.ops import MultiScaleRoIAlign\n\nfrom ._utils import overwrite_eps\nfrom ..utils import load_state_dict_from_url\n\nfrom .faster_rcnn import FasterRCNN\nfrom .backbone_utils import resnet_fpn_backbone, _validate_trainable_layers\n\n\n__all__ = [\n \"KeypointRCNN\", \"keypointrcnn_resnet50_fpn\"\n]\n\n\nclass KeypointRCNN(FasterRCNN):\n \"\"\"\n Implements Keypoint R-CNN.\n\n The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each\n image, and should be in 0-1 range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n\n - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x\n between 0 and W and values of y between 0 and H\n - labels (Int64Tensor[N]): the class label for each ground-truth box\n - keypoints (FloatTensor[N, K, 3]): the K keypoints location for each of the N instances, in the\n format [x, y, visibility], where visibility=0 means that the keypoint is not visible.\n\n The model returns a Dict[Tensor] during training, containing the classification and regression\n losses for both the RPN and the R-CNN, and the keypoint loss.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as\n follows:\n\n - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x\n between 0 and W and values of y between 0 and H\n - labels (Int64Tensor[N]): the predicted labels for each image\n - scores (Tensor[N]): the scores or each prediction\n - keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.\n\n Args:\n backbone (nn.Module): the network used to compute the features for the model.\n It should contain a out_channels attribute, which indicates the number of output\n channels that each feature map has (and it should be the same for all feature maps).\n The backbone should return a single Tensor or and OrderedDict[Tensor].\n num_classes (int): number of output classes of the model (including the background).\n If box_predictor is specified, num_classes should be None.\n min_size (int): minimum size of the image to be rescaled before feeding it to the backbone\n max_size (int): maximum size of the image to be rescaled before feeding it to the backbone\n image_mean (Tuple[float, float, float]): mean values used for input normalization.\n They are generally the mean values of the dataset on which the backbone has been trained\n on\n image_std (Tuple[float, float, float]): std values used for input normalization.\n They are generally the std values of the dataset on which the backbone has been trained on\n rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature\n maps.\n rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN\n rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training\n rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing\n rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training\n rpn_post_nms_top_n_test (int): number of proposals to keep after applying 
NMS during testing\n rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals\n rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be\n considered as positive during training of the RPN.\n rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be\n considered as negative during training of the RPN.\n rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN\n for computing the loss\n rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training\n of the RPN\n rpn_score_thresh (float): during inference, only return proposals with a classification score\n greater than rpn_score_thresh\n box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding boxes\n box_head (nn.Module): module that takes the cropped feature maps as input\n box_predictor (nn.Module): module that takes the output of box_head and returns the\n classification logits and box regression deltas.\n box_score_thresh (float): during inference, only return proposals with a classification score\n greater than box_score_thresh\n box_nms_thresh (float): NMS threshold for the prediction head. Used during inference\n box_detections_per_img (int): maximum number of detections per image, for all classes.\n box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be\n considered as positive during training of the classification head\n box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be\n considered as negative during training of the classification head\n box_batch_size_per_image (int): number of proposals that are sampled during training of the\n classification head\n box_positive_fraction (float): proportion of positive proposals in a mini-batch during training\n of the classification head\n bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the\n bounding boxes\n keypoint_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding boxes, which will be used for the keypoint head.\n keypoint_head (nn.Module): module that takes the cropped feature maps as input\n keypoint_predictor (nn.Module): module that takes the output of the keypoint_head and returns the\n heatmap logits\n\n Example::\n\n >>> import torch\n >>> import torchvision\n >>> from torchvision.models.detection import KeypointRCNN\n >>> from torchvision.models.detection.anchor_utils import AnchorGenerator\n >>>\n >>> # load a pre-trained model for classification and return\n >>> # only the features\n >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features\n >>> # KeypointRCNN needs to know the number of\n >>> # output channels in a backbone. For mobilenet_v2, it's 1280\n >>> # so we need to add it here\n >>> backbone.out_channels = 1280\n >>>\n >>> # let's make the RPN generate 5 x 3 anchors per spatial\n >>> # location, with 5 different sizes and 3 different aspect\n >>> # ratios. 
We have a Tuple[Tuple[int]] because each feature\n >>> # map could potentially have different sizes and\n >>> # aspect ratios\n >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),\n >>> aspect_ratios=((0.5, 1.0, 2.0),))\n >>>\n >>> # let's define what are the feature maps that we will\n >>> # use to perform the region of interest cropping, as well as\n >>> # the size of the crop after rescaling.\n >>> # if your backbone returns a Tensor, featmap_names is expected to\n >>> # be ['0']. More generally, the backbone should return an\n >>> # OrderedDict[Tensor], and in featmap_names you can choose which\n >>> # feature maps to use.\n >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],\n >>> output_size=7,\n >>> sampling_ratio=2)\n >>>\n >>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],\n >>> output_size=14,\n >>> sampling_ratio=2)\n >>> # put the pieces together inside a KeypointRCNN model\n >>> model = KeypointRCNN(backbone,\n >>> num_classes=2,\n >>> rpn_anchor_generator=anchor_generator,\n >>> box_roi_pool=roi_pooler,\n >>> keypoint_roi_pool=keypoint_roi_pooler)\n >>> model.eval()\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n \"\"\"\n def __init__(self, backbone, num_classes=None,\n # transform parameters\n min_size=None, max_size=1333,\n image_mean=None, image_std=None,\n # RPN parameters\n rpn_anchor_generator=None, rpn_head=None,\n rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,\n rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,\n rpn_nms_thresh=0.7,\n rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,\n rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,\n rpn_score_thresh=0.0,\n # Box parameters\n box_roi_pool=None, box_head=None, box_predictor=None,\n box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,\n box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,\n box_batch_size_per_image=512, box_positive_fraction=0.25,\n bbox_reg_weights=None,\n # keypoint parameters\n keypoint_roi_pool=None, keypoint_head=None, keypoint_predictor=None,\n num_keypoints=17):\n\n assert isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None)))\n if min_size is None:\n min_size = (640, 672, 704, 736, 768, 800)\n\n if num_classes is not None:\n if keypoint_predictor is not None:\n raise ValueError(\"num_classes should be None when keypoint_predictor is specified\")\n\n out_channels = backbone.out_channels\n\n if keypoint_roi_pool is None:\n keypoint_roi_pool = MultiScaleRoIAlign(\n featmap_names=['0', '1', '2', '3'],\n output_size=14,\n sampling_ratio=2)\n\n if keypoint_head is None:\n keypoint_layers = tuple(512 for _ in range(8))\n keypoint_head = KeypointRCNNHeads(out_channels, keypoint_layers)\n\n if keypoint_predictor is None:\n keypoint_dim_reduced = 512 # == keypoint_layers[-1]\n keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints)\n\n super(KeypointRCNN, self).__init__(\n backbone, num_classes,\n # transform parameters\n min_size, max_size,\n image_mean, image_std,\n # RPN-specific parameters\n rpn_anchor_generator, rpn_head,\n rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test,\n rpn_post_nms_top_n_train, rpn_post_nms_top_n_test,\n rpn_nms_thresh,\n rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n rpn_batch_size_per_image, rpn_positive_fraction,\n rpn_score_thresh,\n # Box parameters\n box_roi_pool, box_head, box_predictor,\n box_score_thresh, box_nms_thresh, box_detections_per_img,\n box_fg_iou_thresh, 
box_bg_iou_thresh,\n box_batch_size_per_image, box_positive_fraction,\n bbox_reg_weights)\n\n self.roi_heads.keypoint_roi_pool = keypoint_roi_pool\n self.roi_heads.keypoint_head = keypoint_head\n self.roi_heads.keypoint_predictor = keypoint_predictor\n\n\nclass KeypointRCNNHeads(nn.Sequential):\n def __init__(self, in_channels, layers):\n d = []\n next_feature = in_channels\n for out_channels in layers:\n d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))\n d.append(nn.ReLU(inplace=True))\n next_feature = out_channels\n super(KeypointRCNNHeads, self).__init__(*d)\n for m in self.children():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n nn.init.constant_(m.bias, 0)\n\n\nclass KeypointRCNNPredictor(nn.Module):\n def __init__(self, in_channels, num_keypoints):\n super(KeypointRCNNPredictor, self).__init__()\n input_features = in_channels\n deconv_kernel = 4\n self.kps_score_lowres = nn.ConvTranspose2d(\n input_features,\n num_keypoints,\n deconv_kernel,\n stride=2,\n padding=deconv_kernel // 2 - 1,\n )\n nn.init.kaiming_normal_(\n self.kps_score_lowres.weight, mode=\"fan_out\", nonlinearity=\"relu\"\n )\n nn.init.constant_(self.kps_score_lowres.bias, 0)\n self.up_scale = 2\n self.out_channels = num_keypoints\n\n def forward(self, x):\n x = self.kps_score_lowres(x)\n return torch.nn.functional.interpolate(\n x, scale_factor=float(self.up_scale), mode=\"bilinear\", align_corners=False, recompute_scale_factor=False\n )\n\n\nmodel_urls = {\n # legacy model for BC reasons, see https://github.com/pytorch/vision/issues/1606\n 'keypointrcnn_resnet50_fpn_coco_legacy':\n 'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth',\n 'keypointrcnn_resnet50_fpn_coco':\n 'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth',\n}\n\n\ndef keypointrcnn_resnet50_fpn(pretrained=False, progress=True,\n num_classes=2, num_keypoints=17,\n pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):\n \"\"\"\n Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.\n\n The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each\n image, and should be in ``0-1`` range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``\n between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``\n - labels (``Int64Tensor[N]``): the class label for each ground-truth box\n - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoints location for each of the ``N`` instances, in the\n format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible.\n\n The model returns a ``Dict[Tensor]`` during training, containing the classification and regression\n losses for both the RPN and the R-CNN, and the keypoint loss.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a ``List[Dict[Tensor]]``, one for each input image. 
The fields of the ``Dict`` are as\n follows:\n\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``\n between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``\n - labels (``Int64Tensor[N]``): the predicted labels for each image\n - scores (``Tensor[N]``): the scores or each prediction\n - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format.\n\n Keypoint R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.\n\n Example::\n\n >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n >>>\n >>> # optionally, if you want to export the model to ONNX:\n >>> torch.onnx.export(model, x, \"keypoint_rcnn.onnx\", opset_version = 11)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n num_keypoints (int): number of keypoints, default 17\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.\n \"\"\"\n trainable_backbone_layers = _validate_trainable_layers(\n pretrained or pretrained_backbone, trainable_backbone_layers, 5, 3)\n\n if pretrained:\n # no need to download the backbone if pretrained is set\n pretrained_backbone = False\n backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers)\n model = KeypointRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)\n if pretrained:\n key = 'keypointrcnn_resnet50_fpn_coco'\n if pretrained == 'legacy':\n key += '_legacy'\n state_dict = load_state_dict_from_url(model_urls[key],\n progress=progress)\n model.load_state_dict(state_dict)\n overwrite_eps(model, 0.0)\n return model\n"
] | [
[
"torch.nn.init.kaiming_normal_",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d"
]
] |
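The keypoint_rcnn.py docstrings in the record above describe the target dictionaries expected in training mode but only show an inference call. Below is a minimal, hypothetical sketch of the training-mode contract with randomly initialized weights (pretrained=False, pretrained_backbone=False, so nothing is downloaded) and synthetic boxes and keypoints; only the input/output shapes follow the documented format, the loss values themselves are meaningless:

import torch
import torchvision

# Randomly initialized model so the example runs fully offline.
model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
    pretrained=False, pretrained_backbone=False,
    num_classes=2, num_keypoints=17)
model.train()

# Two images of different sizes, values in the 0-1 range.
images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]

# One target dict per image: boxes as [x1, y1, x2, y2], int64 labels,
# and keypoints as [x, y, visibility] with K=17 keypoints per instance.
targets = []
for _ in images:
    boxes = torch.tensor([[10.0, 20.0, 200.0, 290.0]])
    labels = torch.tensor([1], dtype=torch.int64)
    kp_x = torch.empty(1, 17, 1).uniform_(10.0, 200.0)  # x inside the box
    kp_y = torch.empty(1, 17, 1).uniform_(20.0, 290.0)  # y inside the box
    kp_v = torch.ones(1, 17, 1)                         # all keypoints visible
    targets.append({'boxes': boxes,
                    'labels': labels,
                    'keypoints': torch.cat([kp_x, kp_y, kp_v], dim=-1)})

# In training mode the model returns a dict with the RPN, box and keypoint losses.
loss_dict = model(images, targets)
print(sorted(loss_dict))

After model.eval(), the same model takes only the image list and returns one dict per image with boxes, labels, scores and keypoints, as described in the docstring of the record.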