repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
fxia22/kdnet.pytorch | [
"dacb7163d971bcfe67731502d6f733122281a600"
] | [
"datasets.py"
] | [
"from __future__ import print_function\nimport torch.utils.data as data\nfrom PIL import Image\nimport os\nimport os.path\nimport errno\nimport torch\nimport json\nimport codecs\nimport numpy as np\nimport progressbar\nimport sys\nimport torchvision.transforms as transforms\nimport argparse\nimport json\n\n\nclass PartDataset(data.Dataset):\n def __init__(self, root, npoints = 2048, classification = False, class_choice = None, train = True):\n self.npoints = npoints\n self.root = root\n self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')\n self.cat = {}\n\n self.classification = classification\n\n with open(self.catfile, 'r') as f:\n for line in f:\n ls = line.strip().split()\n self.cat[ls[0]] = ls[1]\n #print(self.cat)\n if not class_choice is None:\n self.cat = {k:v for k,v in self.cat.items() if k in class_choice}\n\n self.meta = {}\n for item in self.cat:\n #print('category', item)\n self.meta[item] = []\n dir_point = os.path.join(self.root, self.cat[item], 'points')\n dir_seg = os.path.join(self.root, self.cat[item], 'points_label')\n #print(dir_point, dir_seg)\n fns = sorted(os.listdir(dir_point))\n if train:\n fns = fns[:int(len(fns) * 0.9)]\n else:\n fns = fns[int(len(fns) * 0.9):]\n\n #print(os.path.basename(fns))\n for fn in fns:\n token = (os.path.splitext(os.path.basename(fn))[0])\n self.meta[item].append((os.path.join(dir_point, token + '.pts'), os.path.join(dir_seg, token + '.seg')))\n\n self.datapath = []\n for item in self.cat:\n for fn in self.meta[item]:\n self.datapath.append((item, fn[0], fn[1]))\n\n\n self.classes = dict(zip(self.cat, range(len(self.cat))))\n print(self.classes)\n self.num_seg_classes = 0\n if not self.classification:\n for i in range(len(self.datapath)/50):\n l = len(np.unique(np.loadtxt(self.datapath[i][-1]).astype(np.uint8)))\n if l > self.num_seg_classes:\n self.num_seg_classes = l\n #print(self.num_seg_classes)\n\n\n def __getitem__(self, index):\n fn = self.datapath[index]\n cls = self.classes[self.datapath[index][0]]\n point_set = np.loadtxt(fn[1]).astype(np.float32)\n seg = np.loadtxt(fn[2]).astype(np.int64)\n #print(point_set.shape, seg.shape)\n \n \n point_set = point_set - np.expand_dims(np.mean(point_set, axis = 0), 0)\n dist = np.max(np.sqrt(np.sum(point_set ** 2, axis = 1)),0)\n dist = np.expand_dims(np.expand_dims(dist, 0), 1)\n point_set = point_set/dist\n \n\n choice = np.random.choice(len(seg), self.npoints, replace=True)\n #resample\n point_set = point_set[choice, :] \n point_set = point_set + 1e-5 * np.random.rand(*point_set.shape)\n \n seg = seg[choice]\n point_set = torch.from_numpy(point_set.astype(np.float32))\n seg = torch.from_numpy(seg.astype(np.int64))\n cls = torch.from_numpy(np.array([cls]).astype(np.int64))\n if self.classification:\n return point_set, cls\n else:\n return point_set, seg\n\n def __len__(self):\n return len(self.datapath)\n\n\nif __name__ == '__main__':\n print('test')\n d = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Chair'])\n print(len(d))\n ps, seg = d[0]\n print(ps.size(), ps.type(), seg.size(),seg.type())\n\n d = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = True)\n print(len(d))\n ps, cls = d[0]\n print(ps.size(), ps.type(), cls.size(),cls.type())\n"
] | [
[
"numpy.expand_dims",
"numpy.mean",
"numpy.random.rand",
"numpy.array",
"numpy.sum",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
elmontana/EPA-Inspection-Prioritization | [
"65d8129e215d90a0d22e81d39f3bd90b182ae27a"
] | [
"src/train/train.py"
] | [
"import importlib\nimport itertools\nimport numpy as np\nimport os\nimport pickle\nimport tqdm\n\nfrom itertools import repeat\nfrom multiprocessing import Pool\nfrom pathlib import Path\n\nfrom ..models.wrappers import SKLearnWrapper\nfrom ..utils.data_utils import get_data\n\n\n\ndef create_model(model_class_name, model_kwargs):\n \"\"\"\n Dynamically instantiate a model given its name and arguments.\n\n Arguments:\n - model_class_name: the name of the model class (e.g. 'sklearn.tree.DecisionTreeClassifier')\n - model_kwargs: a dictionary of keyword arguments to pass to the model's constructor\n\n Returns:\n - model: an instantiated model\n \"\"\"\n module_name, class_name = model_class_name.rsplit('.', 1)\n model_class = getattr(importlib.import_module(module_name), class_name)\n model = model_class(**model_kwargs)\n\n # Wrap sklearn models\n if model.__module__.startswith('sklearn'):\n model = SKLearnWrapper(model)\n\n return model\n\n\ndef get_model_configurations(config):\n \"\"\"\n Get the set of all model configurations specified by the config.\n For each model keyword argument, the config specifies a list of potential values.\n This function enumerates all possible combinations.\n\n Arguments:\n - config: a configuration dictionary for an experiment (loaded from yaml)\n\n Returns:\n - model_configurations: a list of configurations in the form (model_name, kwargs)\n \"\"\"\n model_configurations = []\n for model_name, model_kwargs_set in config['model_config'].items():\n values_set = itertools.product(*model_kwargs_set.values())\n kwargs_set = [{k: v for k, v in zip(model_kwargs_set.keys(), values)} for values in values_set]\n\n for kwargs in kwargs_set:\n model_configurations.append((model_name, kwargs))\n\n return model_configurations\n\n\ndef train_single_model(experiment_name, model_index, model_config, save_dir, X, y):\n \"\"\"\n Train a single model with provided model specifications and data.\n\n Arguments:\n - experiment_name: name of the experiment\n - model_index: index of the model\n - model_config: configuration of the model;\n a tuple of the form (class_name, kwargs)\n - save_dir: directory to save the model\n - X: feature array\n - y: label array\n\n Returns:\n - model_config: the configuration of the model\n - model_path: the path to the saved model\n \"\"\"\n class_name, kwargs = model_config\n\n # Create & fit model\n try:\n model = create_model(class_name, kwargs)\n model.fit(X, y)\n except Exception as e:\n print(e)\n return model_config, None\n\n # Save model\n model_path = Path(save_dir) / f'{experiment_name}_{class_name}_{model_index}.pkl'\n with open(model_path, 'wb') as file:\n pickle.dump(model, file)\n\n return model_config, model_path\n\n\ndef train_single_model_unpack_args(args):\n \"\"\"\n Train a single model with provided model specifications and data,\n using a single argument to fit the imap interface.\n\n Arguments:\n - args: a tuple with the arguments to a `train_single_model` call.\n \"\"\"\n return train_single_model(*args)\n\n\ndef train_multiprocessing(config, X, y, save_dir, num_processes=4):\n \"\"\"\n Train models in parallel.\n\n Arguments:\n - config: configuration dictionary for this experiment (loaded from yaml)\n - X: feature array\n - y: label array\n - save_dir: directory for saving models\n - num_processes: number of different processes used for training\n\n Returns:\n - model_configurations: list of configurations of trained models\n - model_paths: list of paths to saved trained models\n \"\"\"\n experiment_name = 
config['experiment_name']\n model_configurations = get_model_configurations(config)\n num_models = len(model_configurations)\n\n sucessful_model_configurations = []\n sucessful_model_paths = []\n\n pool = Pool(processes=num_processes)\n args = zip(\n repeat(experiment_name, num_models),\n range(num_models),\n model_configurations,\n repeat(save_dir, num_models),\n repeat(X, num_models),\n repeat(y, num_models))\n\n training_loop = tqdm.tqdm(\n pool.imap(train_single_model_unpack_args, args),\n total=num_models, desc='Training models')\n\n for model_config, model_path in training_loop:\n training_loop.set_description(f'Training {model_config[0]}')\n if model_path is not None:\n sucessful_model_configurations.append(model_config)\n sucessful_model_paths.append(model_path)\n\n pool.close()\n return sucessful_model_configurations, sucessful_model_paths\n\n\ndef train(config, feature_table, label_table, discard_columns=[], save_dir='./saved_models/'):\n \"\"\"\n Train models as specified by a config file.\n\n Arguments:\n - config: configuration dictionary for this experiment (loaded from yaml)\n - feature_table: name of table containing test features\n - label_table: name of table containing label features\n - discard_columns: names of columns to discard before building matrices\n - save_dir: directory for saving models\n \"\"\"\n\n # Create save directory if not exists\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # Load data\n X, y = get_data(feature_table, label_table, discard_columns=discard_columns)\n X, y = X.to_numpy(copy=True), y.to_numpy(copy=True).astype(int)\n\n # Filter out rows where a label does not exist\n labeled_indices = np.logical_or(y == 0, y == 1)\n X, y = X[labeled_indices], y[labeled_indices]\n\n # Train models\n model_configurations, model_paths = train_multiprocessing(config, X, y, save_dir)\n\n # Summarize models\n model_summaries = []\n for model_config, model_path in zip(model_configurations, model_paths):\n model_class, model_kwargs = model_config\n summary_dict = {\n 'model_class': model_class,\n 'model_path': str(model_path),\n **model_kwargs,\n }\n model_summaries.append(summary_dict)\n\n # Log model summaries\n log_path = Path(save_dir) / f'{config[\"experiment_name\"]}_info.txt'\n log_text = '\\n\\n'.join([str(s) for s in model_summaries])\n with open(log_path, 'w') as log_file:\n log_file.writelines(log_text)\n\n return model_summaries\n"
] | [
[
"numpy.logical_or"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mjjimenez/kdd_air_quality_forecasting | [
"c7b678c2d30555c8ddb778381631a25e88d81fdd"
] | [
"src/features/add_datepart.py"
] | [
"# -*- coding: utf-8 -*-\nimport click\nimport logging\nfrom pathlib import Path\nfrom dotenv import find_dotenv, load_dotenv\nimport pandas as pd\nimport numpy as np\nimport re\n\n# From fast.ai 0.7\ndef add_datepart(df, fldname, drop=True, time=False):\n \"Helper function that adds columns relevant to a date.\"\n df = df.copy()\n fld = df[fldname]\n fld_dtype = fld.dtype\n if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n fld_dtype = np.datetime64\n\n if not np.issubdtype(fld_dtype, np.datetime64):\n df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)\n targ_pre = re.sub('[Dd]ate$', '', fldname)\n attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',\n 'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']\n if time: attr = attr + ['Hour', 'Minute', 'Second']\n for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())\n df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9\n if drop: df.drop(fldname, axis=1, inplace=True)\n return df\n\[email protected]()\[email protected]('input_filepath', type=click.Path(exists=True))\[email protected]('output_filepath', type=click.Path())\ndef main(input_filepath, output_filepath):\n \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n \"\"\" \n \n processed_df = pd.read_feather(f'{input_filepath}/donggaocun_processed.feather')\n processed_df = add_datepart(processed_df, fldname='utc_datetime',drop=False, time=True)\n processed_df.to_feather(f'{output_filepath}/donggaocun_processed.feather')\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n # project_dir = Path(__file__).resolve().parents[2]\n\n # find .env automagically by walking up directories until it's found, then\n # load up the .env entries as environment variables\n load_dotenv(find_dotenv())\n\n main()\n"
] | [
[
"numpy.issubdtype",
"pandas.to_datetime",
"pandas.read_feather"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
eeedayu/faster-rcnn-pytorch | [
"5fe662ef75fd2a3fdfb63bf4ecad2c2a0b20f252"
] | [
"nets/resnet50.py"
] | [
"import math\r\n\r\nimport torch.nn as nn\r\nfrom torchvision.models.utils import load_state_dict_from_url\r\n\r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 4\r\n def __init__(self, inplanes, planes, stride=1, downsample=None):\r\n super(Bottleneck, self).__init__()\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n\r\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(planes * 4)\r\n\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\nclass ResNet(nn.Module):\r\n def __init__(self, block, layers, num_classes=1000):\r\n #-----------------------------------#\r\n # 假设输入进来的图片是600,600,3\r\n #-----------------------------------#\r\n self.inplanes = 64\r\n super(ResNet, self).__init__()\r\n\r\n # 600,600,3 -> 300,300,64\r\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\r\n self.bn1 = nn.BatchNorm2d(64)\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n # 300,300,64 -> 150,150,64\r\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)\r\n\r\n # 150,150,64 -> 150,150,256\r\n self.layer1 = self._make_layer(block, 64, layers[0])\r\n # 150,150,256 -> 75,75,512\r\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\r\n # 75,75,512 -> 38,38,1024 到这里可以获得一个38,38,1024的共享特征层\r\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\r\n # self.layer4被用在classifier模型中\r\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\r\n \r\n self.avgpool = nn.AvgPool2d(7)\r\n self.fc = nn.Linear(512 * block.expansion, num_classes)\r\n\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\r\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n\r\n def _make_layer(self, block, planes, blocks, stride=1):\r\n downsample = None\r\n #-------------------------------------------------------------------#\r\n # 当模型需要进行高和宽的压缩的时候,就需要用到残差边的downsample\r\n #-------------------------------------------------------------------#\r\n if stride != 1 or self.inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(self.inplanes, planes * block.expansion,kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(planes * block.expansion),\r\n )\r\n layers = []\r\n layers.append(block(self.inplanes, planes, stride, downsample))\r\n self.inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(self.inplanes, planes))\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n x = self.relu(x)\r\n x = self.maxpool(x)\r\n\r\n x = self.layer1(x)\r\n x = self.layer2(x)\r\n x = self.layer3(x)\r\n x = self.layer4(x)\r\n\r\n x = self.avgpool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n return x\r\n\r\ndef resnet50(pretrained = False):\r\n model = ResNet(Bottleneck, [3, 4, 6, 3])\r\n if pretrained:\r\n state_dict = load_state_dict_from_url(\"https://download.pytorch.org/models/resnet50-19c8e357.pth\", model_dir=\"./model_data\")\r\n model.load_state_dict(state_dict)\r\n #----------------------------------------------------------------------------#\r\n # 获取特征提取部分,从conv1到model.layer3,最终获得一个38,38,1024的特征层\r\n #----------------------------------------------------------------------------#\r\n features = list([model.conv1, model.bn1, model.relu, model.maxpool, model.layer1, model.layer2, model.layer3])\r\n #----------------------------------------------------------------------------#\r\n # 获取分类部分,从model.layer4到model.avgpool\r\n #----------------------------------------------------------------------------#\r\n classifier = list([model.layer4, model.avgpool])\r\n \r\n features = nn.Sequential(*features)\r\n classifier = nn.Sequential(*classifier)\r\n return features, classifier\r\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LamChob/Global-Memory-Tracing | [
"a6749af3aaec9ca6d1e1b1bcd71c1db097909804"
] | [
"memtrace-pass/post-processing/histogram.py"
] | [
"import os\nimport sys\nimport struct\nimport pprint\nimport matplotlib.pyplot as plt\nimport pickle\nimport math\nimport time\nimport numpy as np\nfrom TraceInc import AutoDict\n\n\ndef timer():\n now = time.time()\n return now\n\ndef create_bins(tmap):\n histogram =[]\n for sk in tmap.values():\n for cta in sk:\n for rk in sk[cta]:\n for rcta in sk[cta][rk].values():\n recv = rcta \n histogram.append(recv[\"size\"])\n return histogram\n\ntype_enut = {\n 0 : \"Load\",\n 1 : \"Store\"\n}\npp = pprint.PrettyPrinter(indent=2)\ntmap = pickle.load( open(sys.argv[1], \"rb\"))\n#print(len(tmap))\nhist = create_bins(tmap)\nuniques = len(set(hist))\n\nif uniques > 100:\n bincnt = uniques\nelse:\n bincnt = 'auto'\nplt.style.use('ggplot')\nplt.hist(hist, bins=100, facecolor='g', alpha=0.75, rwidth=0.8)\nplt.yscale('log')\nplt.xlabel('bytes transferred')\nplt.ylabel('Occurences')\nplt.title('Transfer Size Histogram')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
interxuxing/deepnet | [
"f4e4ff207923e01552c96038a1e2c29eb5d16160"
] | [
"deepnet/examples/multimodal_dbn/collect_dbn_reps.py"
] | [
"\"\"\"Collects Multimodal-DBN representations.\nThis script combines representations created for all inputs, whether missing\ntext or not in one place to be used for classification/retrieval.\n\"\"\"\nimport numpy as np\nimport sys\nimport os\nfrom deepnet import deepnet_pb2\nfrom deepnet import util\nimport glob\nfrom deepnet import datahandler as dh\nimport pdb\nfrom google.protobuf import text_format\n\ndef main():\n model_file = sys.argv[1]\n base_output_dir = sys.argv[2]\n rep_dir = sys.argv[3]\n prefix = sys.argv[4]\n gpu_mem = sys.argv[5]\n main_mem = sys.argv[6]\n model = util.ReadModel(model_file)\n data_pb = deepnet_pb2.Dataset()\n data_pb.name = model.name\n data_pb.gpu_memory = gpu_mem\n data_pb.main_memory = main_mem\n output_dir = os.path.join(base_output_dir, 'validation')\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n output_proto_file = os.path.join(base_output_dir, 'data.pbtxt')\n\n # IMAGE PATHWAY\n img_input_pbtxt = os.path.join(prefix, 'flickr.pbtxt')\n img_hidden1_pbtxt = os.path.join(rep_dir, 'image_rbm1_LAST', 'data.pbtxt')\n img_hidden2_pbtxt = os.path.join(rep_dir, 'image_rbm2_LAST', 'data.pbtxt')\n \n # TEXT PATHWAY\n text_input_pbtxt = os.path.join(prefix, 'flickr_nnz.pbtxt')\n text_hidden1_pbtxt = os.path.join(rep_dir, 'text_rbm1_LAST', 'data.pbtxt')\n text_hidden2_pbtxt = os.path.join(rep_dir, 'text_rbm2_LAST', 'data.pbtxt')\n text_pbtxt_z = os.path.join(rep_dir, 'generated_text', 'data.pbtxt')\n \n joint_pbtxt = os.path.join(rep_dir, 'joint_rbm_LAST', 'data.pbtxt')\n\n \n img_input_pb = util.ReadData(img_input_pbtxt)\n data = next(d for d in img_input_pb.data if d.name == 'image_labelled')\n data.file_pattern = os.path.join(img_input_pb.prefix, data.file_pattern)\n data.stats_file = os.path.join(img_input_pb.prefix, data.stats_file)\n data.name = 'image_input'\n data_pb.data.extend([data])\n\n img_hidden1_pb = util.ReadData(img_hidden1_pbtxt)\n data = next(d for d in img_hidden1_pb.data if d.name == 'image_hidden1_validation')\n data.file_pattern = os.path.join(img_hidden1_pb.prefix, data.file_pattern)\n data.name = 'image_hidden1'\n data_pb.data.extend([data])\n\n img_hidden2_pb = util.ReadData(img_hidden2_pbtxt)\n data = next(d for d in img_hidden2_pb.data if d.name == 'image_hidden2_validation')\n data.file_pattern = os.path.join(img_hidden2_pb.prefix, data.file_pattern)\n data.name = 'image_hidden2'\n data_pb.data.extend([data])\n \n indices_file = os.path.join(prefix, 'text', 'indices_labelled.npz')\n indices = np.load(indices_file)\n nnz_indices = indices['nnz_indices']\n z_indices = indices['z_indices']\n\n text_pb_z = util.ReadData(text_pbtxt_z)\n text_input_pb = util.ReadData(text_input_pbtxt)\n data_nnz = next(d for d in text_input_pb.data if d.name == 'text_labelled')\n data_z = next(d for d in text_pb_z.data if d.name == 'text_input_layer_validation')\n output_file = os.path.join(output_dir, 'text_input-00001-of-00001.npy')\n data = Merge(data_nnz, data_z, nnz_indices, z_indices, text_pb_z.prefix, text_input_pb.prefix, 'text_input', output_file)\n data_pb.data.extend([data])\n\n text_hidden1_pb = util.ReadData(text_hidden1_pbtxt)\n data_nnz = next(d for d in text_hidden1_pb.data if d.name == 'text_hidden1_validation')\n data_z = next(d for d in text_pb_z.data if d.name == 'text_hidden1_validation')\n output_file = os.path.join(output_dir, 'text_hidden1-00001-of-00001.npy')\n data = Merge(data_nnz, data_z, nnz_indices, z_indices, text_pb_z.prefix, text_hidden1_pb.prefix, 'text_hidden1', output_file)\n 
data_pb.data.extend([data])\n\n text_hidden2_pb = util.ReadData(text_hidden2_pbtxt)\n data_nnz = next(d for d in text_hidden2_pb.data if d.name == 'text_hidden2_validation')\n data_z = next(d for d in text_pb_z.data if d.name == 'text_hidden2_validation')\n output_file = os.path.join(output_dir, 'text_hidden2-00001-of-00001.npy')\n data = Merge(data_nnz, data_z, nnz_indices, z_indices, text_pb_z.prefix, text_hidden2_pb.prefix, 'text_hidden2', output_file)\n data_pb.data.extend([data])\n\n joint_pb = util.ReadData(joint_pbtxt)\n data_nnz = next(d for d in joint_pb.data if d.name == 'joint_hidden_validation')\n data_z = next(d for d in text_pb_z.data if d.name == 'joint_hidden_validation')\n output_file = os.path.join(output_dir, 'joint_hidden-00001-of-00001.npy')\n data = Merge(data_nnz, data_z, nnz_indices, z_indices, text_pb_z.prefix, joint_pb.prefix, 'joint_hidden', output_file)\n data_pb.data.extend([data])\n\n with open(output_proto_file, 'w') as f:\n text_format.PrintMessage(data_pb, f)\n\ndef Load(file_pattern):\n data = None\n for f in sorted(glob.glob(file_pattern)):\n ext = os.path.splitext(f)[1]\n if ext == '.npy':\n this_data = np.load(f)\n elif ext == '.npz':\n this_data = dh.Disk.LoadSparse(f).toarray()\n else:\n raise Exception('unknown data format.')\n if data is None:\n data = this_data\n else:\n data = np.concatenate((data, this_data))\n return data\n\ndef Merge(data_nnz, data_z, indices_nnz, indices_z, prefix_z, prefix_nnz, name, output_file):\n data_nnz = Load(os.path.join(prefix_nnz, data_nnz.file_pattern))\n data_z = Load(os.path.join(prefix_z, data_z.file_pattern))\n assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'\n size = data_nnz.shape[0] + data_z.shape[0]\n numdims = data_nnz.shape[1]\n data = np.zeros((size, numdims), dtype=np.float32)\n data[indices_nnz] = data_nnz\n data[indices_z] = data_z\n np.save(output_file, data)\n\n data = deepnet_pb2.Dataset.Data()\n data.name = name\n data.size = size\n data.dimensions.extend([numdims])\n data.file_pattern = output_file\n\n return data\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"numpy.concatenate",
"numpy.load",
"numpy.zeros",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
taylorfturner/great_expectations | [
"e4964894fb97b933cac713ef1f1a78e33d362ff3"
] | [
"tests/checkpoint/test_simple_checkpoint.py"
] | [
"import os\nfrom typing import Union\n\nimport pandas as pd\nimport pytest\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations import DataContext\nfrom great_expectations.checkpoint import SimpleCheckpointConfigurator\nfrom great_expectations.checkpoint.checkpoint import (\n Checkpoint,\n CheckpointResult,\n LegacyCheckpoint,\n SimpleCheckpoint,\n)\nfrom great_expectations.core.batch import RuntimeBatchRequest\nfrom great_expectations.data_context.types.base import CheckpointConfig\nfrom great_expectations.util import deep_filter_properties_iterable\n\n\[email protected]\ndef update_data_docs_action():\n return {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\", \"site_names\": []},\n }\n\n\[email protected]\ndef store_eval_parameter_action():\n return {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n }\n\n\[email protected]\ndef store_validation_result_action():\n return {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n }\n\n\[email protected]\ndef webhook() -> str:\n return \"https://hooks.slack.com/foo/bar\"\n\n\[email protected]\ndef slack_notification_action(webhook):\n return {\n \"name\": \"send_slack_notification\",\n \"action\": {\n \"class_name\": \"SlackNotificationAction\",\n \"slack_webhook\": webhook,\n \"notify_on\": \"all\",\n \"notify_with\": None,\n \"renderer\": {\n \"module_name\": \"great_expectations.render.renderer.slack_renderer\",\n \"class_name\": \"SlackRenderer\",\n },\n },\n }\n\n\[email protected]\ndef context_with_data_source_and_empty_suite(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n datasources = context.list_datasources()\n assert datasources[0][\"class_name\"] == \"Datasource\"\n assert \"my_special_data_connector\" in datasources[0][\"data_connectors\"].keys()\n context.create_expectation_suite(\"one\", overwrite_existing=True)\n assert context.list_expectation_suite_names() == [\"one\"]\n return context\n\n\[email protected]\ndef context_with_data_source_and_empty_suite_with_templates(\n titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates\n datasources = context.list_datasources()\n assert datasources[0][\"class_name\"] == \"Datasource\"\n assert \"my_special_data_connector\" in datasources[0][\"data_connectors\"].keys()\n context.create_expectation_suite(\"one\", overwrite_existing=True)\n assert context.list_expectation_suite_names() == [\"one\"]\n return context\n\n\[email protected]\ndef simple_checkpoint_defaults(context_with_data_source_and_empty_suite):\n return SimpleCheckpoint(\n name=\"foo\", data_context=context_with_data_source_and_empty_suite\n )\n\n\[email protected]\ndef two_validations(one_validation):\n return [\n one_validation,\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n },\n \"expectation_suite_name\": \"two\",\n },\n ]\n\n\ndef test_simple_checkpoint_default_properties_with_no_optional_arguments(\n empty_data_context,\n store_validation_result_action,\n 
store_eval_parameter_action,\n update_data_docs_action,\n titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,\n):\n \"\"\"This demonstrates the simplest possible usage.\"\"\"\n checkpoint_config = SimpleCheckpointConfigurator(\n \"my_minimal_simple_checkpoint\", empty_data_context\n ).build()\n assert isinstance(checkpoint_config, CheckpointConfig)\n\n assert checkpoint_config.name == \"my_minimal_simple_checkpoint\"\n assert checkpoint_config.action_list == [\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n ]\n assert checkpoint_config.config_version == 1.0\n assert checkpoint_config.class_name == \"Checkpoint\"\n assert checkpoint_config.evaluation_parameters == {}\n assert checkpoint_config.runtime_configuration == {}\n assert checkpoint_config.validations == []\n\n checkpoint_from_store = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates.get_checkpoint(\n \"my_minimal_simple_checkpoint\"\n )\n checkpoint_config = checkpoint_from_store.get_config(mode=\"dict\")\n assert checkpoint_config[\"name\"] == \"my_minimal_simple_checkpoint\"\n assert checkpoint_config[\"action_list\"] == [\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n ]\n assert checkpoint_config[\"config_version\"] == 1.0\n assert checkpoint_config[\"evaluation_parameters\"] == {}\n assert checkpoint_config[\"runtime_configuration\"] == {}\n assert checkpoint_config[\"validations\"] == []\n\n\ndef test_simple_checkpoint_raises_error_on_invalid_slack_webhook(\n empty_data_context,\n):\n with pytest.raises(ValueError):\n SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, slack_webhook=\"bad\"\n ).build()\n\n\ndef test_simple_checkpoint_has_slack_action_with_defaults_when_slack_webhook_is_present(\n empty_data_context,\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n slack_notification_action,\n webhook,\n titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,\n):\n checkpoint_config = SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, slack_webhook=webhook\n ).build()\n expected = [\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n slack_notification_action,\n ]\n assert checkpoint_config.action_list == expected\n\n checkpoint_from_store = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates.get_checkpoint(\n \"my_simple_checkpoint_with_slack\"\n )\n checkpoint_config = checkpoint_from_store.get_config(mode=\"dict\")\n assert checkpoint_config[\"name\"] == \"my_simple_checkpoint_with_slack\"\n assert checkpoint_config[\"action_list\"] == expected\n\n\ndef test_simple_checkpoint_raises_error_on_invalid_notify_on(\n empty_data_context,\n):\n for bad in [1, \"bar\", None, []]:\n with pytest.raises(ValueError):\n SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, notify_on=bad\n ).build()\n\n\ndef test_simple_checkpoint_raises_error_on_missing_slack_webhook_when_notify_on_is_list(\n empty_data_context, slack_notification_action, webhook\n):\n with pytest.raises(ValueError):\n SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, notify_with=[\"prod\", \"dev\"]\n ).build()\n\n\ndef test_simple_checkpoint_raises_error_on_missing_slack_webhook_when_notify_on_is_not_default(\n empty_data_context, slack_notification_action, webhook\n):\n for 
condition in [\"failure\", \"success\"]:\n with pytest.raises(ValueError):\n SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, notify_on=condition\n ).build()\n\n\ndef test_simple_checkpoint_raises_error_on_invalid_notify_with(\n empty_data_context,\n):\n for bad in [1, \"bar\", [\"local_site\", 3]]:\n with pytest.raises(ValueError):\n SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, notify_with=bad\n ).build()\n\n\ndef test_simple_checkpoint_notify_with_all_has_data_docs_action_with_none_specified(\n empty_data_context,\n slack_notification_action,\n webhook,\n titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,\n):\n \"\"\"\n The underlying SlackNotificationAction and SlackRenderer default to\n including links to all sites if the key notify_with is not present. We are\n intentionally hiding this from users of SimpleCheckpoint by having a default\n of \"all\" that sets the configuration appropriately.\n \"\"\"\n checkpoint_config: Union[CheckpointConfig, dict] = SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, slack_webhook=webhook, notify_with=\"all\"\n ).build()\n\n # set the config to include all sites\n slack_notification_action[\"action\"][\"notify_with\"] = None\n assert slack_notification_action in checkpoint_config.action_list\n\n checkpoint_from_store: Checkpoint = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates.get_checkpoint(\n \"my_simple_checkpoint_with_slack_and_notify_with_all\"\n )\n checkpoint_config = checkpoint_from_store.get_config(mode=\"dict\")\n assert slack_notification_action in checkpoint_config[\"action_list\"]\n\n\ndef test_simple_checkpoint_has_slack_action_with_notify_adjustments_slack_webhook_is_present(\n empty_data_context,\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n slack_notification_action,\n webhook,\n):\n checkpoint_config = SimpleCheckpointConfigurator(\n \"foo\",\n empty_data_context,\n slack_webhook=webhook,\n notify_on=\"failure\",\n notify_with=[\"local_site\", \"s3_prod\"],\n ).build()\n\n slack_notification_action[\"action\"][\"notify_on\"] = \"failure\"\n slack_notification_action[\"action\"][\"notify_with\"] = [\"local_site\", \"s3_prod\"]\n expected = [\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n slack_notification_action,\n ]\n assert checkpoint_config.action_list == expected\n\n\ndef test_simple_checkpoint_has_no_slack_action_when_no_slack_webhook_is_present(\n empty_data_context,\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n):\n checkpoint_config = SimpleCheckpointConfigurator(\"foo\", empty_data_context).build()\n assert checkpoint_config.action_list == [\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n ]\n\n\ndef test_simple_checkpoint_has_update_data_docs_action_that_should_update_all_sites_when_site_names_is_all(\n empty_data_context,\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n):\n checkpoint_config = SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, site_names=\"all\"\n ).build()\n # This is confusing: the UpdateDataDocsAction default behavior is to update\n # all sites if site_names=None\n update_data_docs_action[\"action\"][\"site_names\"] = []\n assert checkpoint_config.action_list == [\n store_validation_result_action,\n store_eval_parameter_action,\n 
update_data_docs_action,\n ]\n\n\ndef test_simple_checkpoint_raises_errors_on_invalid_site_name_types(\n empty_data_context,\n):\n for junk_input in [[1, \"local\"], 1, [\"local\", None]]:\n with pytest.raises(TypeError):\n SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, site_names=junk_input\n ).build()\n\n\ndef test_simple_checkpoint_raises_errors_on_site_name_that_does_not_exist_on_data_context(\n empty_data_context,\n):\n # assert the fixture is adequate\n assert \"prod\" not in empty_data_context.get_site_names()\n with pytest.raises(TypeError):\n SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, site_names=[\"prod\"]\n ).build()\n\n\ndef test_simple_checkpoint_has_update_data_docs_action_that_should_update_selected_sites_when_sites_are_selected(\n empty_data_context,\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,\n):\n # assert the fixture is adequate\n assert \"local_site\" in empty_data_context.get_site_names()\n\n checkpoint_config = SimpleCheckpointConfigurator(\n \"foo\", empty_data_context, site_names=[\"local_site\"]\n ).build()\n # This is confusing: the UpdateDataDocsAction default behavior is to update\n # all sites if site_names=None\n update_data_docs_action[\"action\"][\"site_names\"] = [\"local_site\"]\n assert checkpoint_config.action_list == [\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n ]\n\n # assert the fixture is adequate\n assert (\n \"local_site\"\n in titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates.get_site_names()\n )\n\n checkpoint_from_store = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates.get_checkpoint(\n \"my_simple_checkpoint_with_site_names\"\n )\n checkpoint_config = checkpoint_from_store.get_config(mode=\"dict\")\n assert checkpoint_config[\"action_list\"] == [\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n ]\n\n\ndef test_simple_checkpoint_has_no_update_data_docs_action_when_site_names_is_none(\n empty_data_context,\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n):\n # assert the fixture is adequate\n assert \"local_site\" in empty_data_context.get_site_names()\n\n checkpoint_config = SimpleCheckpointConfigurator(\n name=\"foo\", data_context=empty_data_context, site_names=None\n ).build()\n assert checkpoint_config.action_list == [\n store_validation_result_action,\n store_eval_parameter_action,\n ]\n\n\ndef test_simple_checkpoint_persisted_to_store(\n context_with_data_source_and_empty_suite, webhook, one_validation\n):\n assert context_with_data_source_and_empty_suite.list_checkpoints() == []\n initial_checkpoint_config = SimpleCheckpointConfigurator(\n \"foo\",\n context_with_data_source_and_empty_suite,\n site_names=None,\n ).build()\n # TODO this add_checkpoint will be user facing and it could be more\n # ergonomic by accepting a Checkpoint maybe .add_checkpoint() should take a\n # Checkpoint and there should be a .create_checkpoint() that accepts all\n # the current parameters\n context_with_data_source_and_empty_suite.add_checkpoint(\n **initial_checkpoint_config.to_json_dict()\n )\n assert context_with_data_source_and_empty_suite.list_checkpoints() == [\"foo\"]\n checkpoint: SimpleCheckpoint = (\n 
context_with_data_source_and_empty_suite.get_checkpoint(name=\"foo\")\n )\n assert isinstance(checkpoint, Checkpoint)\n assert isinstance(checkpoint.get_config(mode=\"dict\"), dict)\n checkpoint_config: dict = checkpoint.get_config(mode=\"json_dict\")\n assert checkpoint_config == {\n \"action_list\": [\n {\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n \"name\": \"store_validation_result\",\n },\n {\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n \"name\": \"store_evaluation_params\",\n },\n ],\n \"batch_request\": {},\n \"class_name\": \"Checkpoint\",\n \"config_version\": 1.0,\n \"evaluation_parameters\": {},\n \"expectation_suite_ge_cloud_id\": None,\n \"expectation_suite_name\": None,\n \"ge_cloud_id\": None,\n \"module_name\": \"great_expectations.checkpoint\",\n \"name\": \"foo\",\n \"notify_on\": None,\n \"notify_with\": None,\n \"profilers\": [],\n \"run_name_template\": None,\n \"runtime_configuration\": {},\n \"site_names\": None,\n \"slack_webhook\": None,\n \"template_name\": None,\n \"validations\": [],\n }\n result = checkpoint.run(validations=[one_validation])\n assert result.success\n\n\ndef test_simple_checkpoint_defaults_run_and_no_run_params_raises_checkpoint_error(\n context_with_data_source_and_empty_suite, simple_checkpoint_defaults\n):\n with pytest.raises(ge_exceptions.CheckpointError) as cpe:\n # noinspection PyUnusedLocal\n result: CheckpointResult = simple_checkpoint_defaults.run()\n assert (\n 'Checkpoint \"foo\" must contain either a batch_request or validations.'\n in str(cpe.value)\n )\n\n\ndef test_simple_checkpoint_defaults_run_and_basic_run_params_without_persisting_checkpoint(\n context_with_data_source_and_empty_suite, simple_checkpoint_defaults, one_validation\n):\n # verify Checkpoint is not persisted in the data context\n assert context_with_data_source_and_empty_suite.list_checkpoints() == []\n result = simple_checkpoint_defaults.run(\n run_name=\"bar\",\n validations=[one_validation],\n )\n assert isinstance(result, CheckpointResult)\n assert result.run_id.run_name == \"bar\"\n assert result.list_expectation_suite_names() == [\"one\"]\n assert len(result.list_validation_results()) == 1\n assert result.success\n\n\ndef test_simple_checkpoint_runtime_kwargs_processing_site_names_only_without_persisting_checkpoint(\n context_with_data_source_and_empty_suite, simple_checkpoint_defaults, one_validation\n):\n # verify Checkpoint is not persisted in the data context\n assert context_with_data_source_and_empty_suite.list_checkpoints() == []\n\n expected_runtime_kwargs: dict = {\n \"name\": \"foo\",\n \"config_version\": 1.0,\n \"class_name\": \"Checkpoint\",\n \"module_name\": \"great_expectations.checkpoint\",\n \"template_name\": None,\n \"run_name_template\": None,\n \"expectation_suite_name\": None,\n \"batch_request\": None,\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n \"site_names\": [\"local_site\"],\n },\n },\n ],\n \"evaluation_parameters\": None,\n \"runtime_configuration\": {},\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n },\n \"expectation_suite_name\": 
\"one\",\n },\n ],\n \"profilers\": None,\n }\n\n result: CheckpointResult = simple_checkpoint_defaults.run(\n run_name=\"bar\",\n validations=[one_validation],\n site_names=[\"local_site\"],\n )\n assert isinstance(result, CheckpointResult)\n assert result.run_id.run_name == \"bar\"\n assert result.list_expectation_suite_names() == [\"one\"]\n assert len(result.list_validation_results()) == 1\n assert result.success\n\n substituted_runtime_config: dict = (\n simple_checkpoint_defaults.get_substituted_config(\n runtime_kwargs=expected_runtime_kwargs\n )\n )\n assert deep_filter_properties_iterable(\n properties=substituted_runtime_config,\n clean_falsy=True,\n ) == deep_filter_properties_iterable(\n properties=expected_runtime_kwargs,\n clean_falsy=True,\n )\n\n\ndef test_simple_checkpoint_runtime_kwargs_processing_slack_webhook_only_without_persisting_checkpoint(\n context_with_data_source_and_empty_suite, simple_checkpoint_defaults, one_validation\n):\n # verify Checkpoint is not persisted in the data context\n assert context_with_data_source_and_empty_suite.list_checkpoints() == []\n\n expected_runtime_kwargs: dict = {\n \"name\": \"foo\",\n \"config_version\": 1.0,\n \"class_name\": \"Checkpoint\",\n \"module_name\": \"great_expectations.checkpoint\",\n \"template_name\": None,\n \"run_name_template\": None,\n \"expectation_suite_name\": None,\n \"batch_request\": None,\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\", \"site_names\": []},\n },\n {\n \"name\": \"send_slack_notification\",\n \"action\": {\n \"class_name\": \"SlackNotificationAction\",\n \"slack_webhook\": \"https://hooks.slack.com/my_slack_webhook.geocities\",\n \"notify_on\": \"all\",\n \"notify_with\": None,\n \"renderer\": {\n \"module_name\": \"great_expectations.render.renderer.slack_renderer\",\n \"class_name\": \"SlackRenderer\",\n },\n },\n },\n ],\n \"evaluation_parameters\": None,\n \"runtime_configuration\": {},\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n },\n \"expectation_suite_name\": \"one\",\n }\n ],\n \"profilers\": None,\n }\n\n result: CheckpointResult = simple_checkpoint_defaults.run(\n run_name=\"bar\",\n validations=[one_validation],\n slack_webhook=\"https://hooks.slack.com/my_slack_webhook.geocities\",\n )\n assert isinstance(result, CheckpointResult)\n assert result.run_id.run_name == \"bar\"\n assert result.list_expectation_suite_names() == [\"one\"]\n assert len(result.list_validation_results()) == 1\n assert result.success\n\n substituted_runtime_config: dict = (\n simple_checkpoint_defaults.get_substituted_config(\n runtime_kwargs=expected_runtime_kwargs\n )\n )\n assert deep_filter_properties_iterable(\n properties=substituted_runtime_config,\n clean_falsy=True,\n ) == deep_filter_properties_iterable(\n properties=expected_runtime_kwargs,\n clean_falsy=True,\n )\n\n\ndef test_simple_checkpoint_runtime_kwargs_processing_all_special_kwargs_without_persisting_checkpoint(\n context_with_data_source_and_empty_suite, simple_checkpoint_defaults, one_validation\n):\n # verify Checkpoint is not persisted in the data context\n assert 
context_with_data_source_and_empty_suite.list_checkpoints() == []\n\n expected_runtime_kwargs: dict = {\n \"name\": \"foo\",\n \"config_version\": 1.0,\n \"class_name\": \"Checkpoint\",\n \"module_name\": \"great_expectations.checkpoint\",\n \"template_name\": None,\n \"run_name_template\": None,\n \"expectation_suite_name\": None,\n \"batch_request\": None,\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n \"site_names\": [\"local_site\"],\n },\n },\n {\n \"name\": \"send_slack_notification\",\n \"action\": {\n \"class_name\": \"SlackNotificationAction\",\n \"slack_webhook\": \"https://hooks.slack.com/my_slack_webhook.geocities\",\n \"notify_on\": \"failure\",\n \"notify_with\": [\"local_site\"],\n \"renderer\": {\n \"module_name\": \"great_expectations.render.renderer.slack_renderer\",\n \"class_name\": \"SlackRenderer\",\n },\n },\n },\n ],\n \"evaluation_parameters\": None,\n \"runtime_configuration\": {},\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n },\n \"expectation_suite_name\": \"one\",\n }\n ],\n \"profilers\": None,\n }\n\n result: CheckpointResult = simple_checkpoint_defaults.run(\n run_name=\"bar\",\n validations=[one_validation],\n site_names=[\"local_site\"],\n notify_with=[\"local_site\"],\n notify_on=\"failure\",\n slack_webhook=\"https://hooks.slack.com/my_slack_webhook.geocities\",\n )\n assert isinstance(result, CheckpointResult)\n assert result.run_id.run_name == \"bar\"\n assert result.list_expectation_suite_names() == [\"one\"]\n assert len(result.list_validation_results()) == 1\n assert result.success\n\n substituted_runtime_config: dict = (\n simple_checkpoint_defaults.get_substituted_config(\n runtime_kwargs=expected_runtime_kwargs\n )\n )\n assert deep_filter_properties_iterable(\n properties=substituted_runtime_config,\n clean_falsy=True,\n ) == deep_filter_properties_iterable(\n properties=expected_runtime_kwargs,\n clean_falsy=True,\n )\n\n\n# noinspection PyUnusedLocal\ndef test_simple_checkpoint_runtime_kwargs_processing_all_kwargs(\n titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,\n simple_checkpoint_defaults,\n one_validation,\n monkeypatch,\n):\n monkeypatch.setenv(\"GE_ENVIRONMENT\", \"my_ge_environment\")\n monkeypatch.setenv(\"MY_PARAM\", \"1\")\n monkeypatch.setenv(\"VAR\", \"test\")\n\n expected_runtime_kwargs: dict = {\n \"name\": \"foo\",\n \"config_version\": 1.0,\n \"class_name\": \"Checkpoint\",\n \"module_name\": \"great_expectations.checkpoint\",\n \"template_name\": \"my_simple_template_checkpoint\",\n \"run_name_template\": \"my_runtime_run_name_template\",\n \"expectation_suite_name\": \"my_runtime_suite\",\n \"batch_request\": {\n \"data_connector_query\": {\n \"index\": -1,\n },\n },\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n \"site_names\": 
[\"local_site\"],\n },\n },\n {\n \"name\": \"send_slack_notification\",\n \"action\": {\n \"class_name\": \"SlackNotificationAction\",\n \"slack_webhook\": \"https://hooks.slack.com/my_slack_webhook.geocities\",\n \"notify_on\": \"failure\",\n \"notify_with\": [\"local_site\"],\n \"renderer\": {\n \"module_name\": \"great_expectations.render.renderer.slack_renderer\",\n \"class_name\": \"SlackRenderer\",\n },\n },\n },\n ],\n \"evaluation_parameters\": {\n \"aux_param_0\": \"1\",\n \"aux_param_1\": \"1 + 1\",\n \"environment\": \"my_ge_environment\",\n \"my_runtime_key\": \"my_runtime_value\",\n \"tolerance\": 0.01,\n },\n \"runtime_configuration\": {\n \"my_runtime_key\": \"my_runtime_value\",\n \"result_format\": {\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n },\n },\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n },\n \"expectation_suite_name\": \"one\",\n }\n ],\n \"profilers\": None,\n }\n\n result: CheckpointResult = simple_checkpoint_defaults.run(\n run_name=\"bar\",\n template_name=\"my_simple_template_checkpoint\",\n run_name_template=\"my_runtime_run_name_template\",\n expectation_suite_name=\"my_runtime_suite\",\n batch_request={\n \"data_connector_query\": {\n \"index\": -1,\n },\n },\n validations=[one_validation],\n evaluation_parameters={\"my_runtime_key\": \"my_runtime_value\"},\n runtime_configuration={\"my_runtime_key\": \"my_runtime_value\"},\n site_names=[\"local_site\"],\n notify_with=[\"local_site\"],\n notify_on=\"failure\",\n slack_webhook=\"https://hooks.slack.com/my_slack_webhook.geocities\",\n )\n assert isinstance(result, CheckpointResult)\n assert result.run_id.run_name == \"bar\"\n assert result.list_expectation_suite_names() == [\"one\"]\n assert len(result.list_validation_results()) == 1\n assert result.success\n\n substituted_runtime_config: dict = (\n simple_checkpoint_defaults.get_substituted_config(\n runtime_kwargs=expected_runtime_kwargs\n )\n )\n assert deep_filter_properties_iterable(\n properties=substituted_runtime_config,\n clean_falsy=True,\n ) == deep_filter_properties_iterable(\n properties=expected_runtime_kwargs,\n clean_falsy=True,\n )\n\n\ndef test_simple_checkpoint_defaults_run_and_basic_run_params_with_persisted_checkpoint_loaded_from_store(\n context_with_data_source_and_empty_suite,\n simple_checkpoint_defaults,\n webhook,\n one_validation,\n):\n context: DataContext = context_with_data_source_and_empty_suite\n checkpoint_config = SimpleCheckpointConfigurator(\n \"foo\", context_with_data_source_and_empty_suite, slack_webhook=webhook\n ).build()\n context.add_checkpoint(**checkpoint_config.to_json_dict())\n checkpoint_name = checkpoint_config.name\n assert context.list_checkpoints() == [checkpoint_name]\n\n del checkpoint_config\n checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(\n checkpoint_name\n )\n assert isinstance(checkpoint, Checkpoint)\n\n result = checkpoint.run(run_name=\"bar\", validations=[one_validation])\n assert isinstance(result, CheckpointResult)\n assert result.run_id.run_name == \"bar\"\n assert result.list_expectation_suite_names() == [\"one\"]\n assert len(result.list_validation_results()) == 1\n assert result.success\n\n\[email protected]\ndef one_validation():\n return {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n 
},\n \"expectation_suite_name\": \"one\",\n }\n\n\ndef test_simple_checkpoint_defaults_run_with_top_level_batch_request_and_suite(\n context_with_data_source_and_empty_suite, simple_checkpoint_defaults\n):\n result = simple_checkpoint_defaults.run(\n run_name=\"bar\",\n batch_request={\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n },\n expectation_suite_name=\"one\",\n validations=[{\"expectation_suite_name\": \"one\"}],\n )\n assert isinstance(result, CheckpointResult)\n assert result.success\n assert len(result.run_results) == 1\n\n\ndef test_simple_checkpoint_error_with_invalid_top_level_batch_request(\n simple_checkpoint_defaults,\n):\n # raised by _validate_init_parameters() in BatchRequest.__init__()\n with pytest.raises(TypeError):\n # missing data_asset_name\n result = simple_checkpoint_defaults.run(\n run_name=\"bar\",\n batch_request={\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n },\n expectation_suite_name=\"one\",\n validations=[{\"expectation_suite_name\": \"one\"}],\n )\n\n\ndef test_simple_checkpoint_defaults_run_multiple_validations_without_persistence(\n context_with_data_source_and_empty_suite,\n simple_checkpoint_defaults,\n two_validations,\n):\n context_with_data_source_and_empty_suite.create_expectation_suite(\"two\")\n assert len(context_with_data_source_and_empty_suite.list_expectation_suites()) == 2\n result = simple_checkpoint_defaults.run(\n run_name=\"bar\",\n validations=two_validations,\n )\n assert isinstance(result, CheckpointResult)\n assert result.run_id.run_name == \"bar\"\n assert sorted(result.list_expectation_suite_names()) == sorted([\"one\", \"two\"])\n assert len(result.list_validation_results()) == 2\n assert result.success\n\n\ndef test_simple_checkpoint_defaults_run_multiple_validations_with_persisted_checkpoint_loaded_from_store(\n context_with_data_source_and_empty_suite,\n simple_checkpoint_defaults,\n two_validations,\n):\n context: DataContext = context_with_data_source_and_empty_suite\n context.create_expectation_suite(\"two\")\n assert len(context.list_expectation_suites()) == 2\n\n # persist to store\n checkpoint_class_args: dict = simple_checkpoint_defaults.get_config(\n mode=\"json_dict\"\n )\n context.add_checkpoint(**checkpoint_class_args)\n checkpoint_name = simple_checkpoint_defaults.name\n assert context.list_checkpoints() == [checkpoint_name]\n # reload from store\n del simple_checkpoint_defaults\n checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(\n checkpoint_name\n )\n result = checkpoint.run(run_name=\"bar\", validations=two_validations)\n assert isinstance(result, CheckpointResult)\n assert result.run_id.run_name == \"bar\"\n assert sorted(result.list_expectation_suite_names()) == sorted([\"one\", \"two\"])\n assert len(result.list_validation_results()) == 2\n assert result.success\n\n\ndef test_simple_checkpoint_with_runtime_batch_request_and_runtime_data_connector_creates_config(\n context_with_data_source_and_empty_suite,\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n):\n context: DataContext = context_with_data_source_and_empty_suite\n runtime_batch_request = RuntimeBatchRequest(\n datasource_name=\"my_datasource\",\n data_connector_name=\"my_runtime_data_connector\",\n data_asset_name=\"users\",\n batch_identifiers={\"pipeline_stage_name\": \"first\"}, # defined in fixture\n runtime_parameters={\n \"query\": 
\"SELECT * FROM taxi_data\"\n }, # not actually run, but used to test configuration\n )\n\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\", data_context=context, batch_request=runtime_batch_request\n )\n checkpoint_config: dict = checkpoint.get_config(mode=\"json_dict\")\n\n assert isinstance(checkpoint_config, dict)\n assert checkpoint_config[\"name\"] == \"my_checkpoint\"\n assert checkpoint_config[\"action_list\"] == [\n store_validation_result_action,\n store_eval_parameter_action,\n update_data_docs_action,\n ]\n assert deep_filter_properties_iterable(\n properties=checkpoint_config[\"batch_request\"],\n clean_falsy=True,\n ) == {\n \"batch_identifiers\": {\"pipeline_stage_name\": \"first\"},\n \"data_asset_name\": \"users\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"datasource_name\": \"my_datasource\",\n \"runtime_parameters\": {\"query\": \"SELECT * FROM taxi_data\"},\n }\n assert checkpoint_config[\"config_version\"] == 1.0\n assert checkpoint_config[\"evaluation_parameters\"] == {}\n assert checkpoint_config[\"runtime_configuration\"] == {}\n assert checkpoint_config[\"validations\"] == []\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_single_runtime_batch_request_batch_data_in_validations_pandas(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(validations=[{\"batch_request\": batch_request}])\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_single_runtime_batch_request_batch_data_in_validations_spark(\n data_context_with_datasource_spark_engine, spark_session\n):\n context: DataContext = data_context_with_datasource_spark_engine\n pandas_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n test_df = spark_session.createDataFrame(pandas_df)\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n 
\"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(validations=[{\"batch_request\": batch_request}])\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_single_runtime_batch_request_query_in_validations(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 10\"\n },\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[{\"batch_request\": batch_request}],\n )\n\n result = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_multiple_runtime_batch_request_query_in_validations(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query 1\n batch_request_1 = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 10\"\n },\n }\n )\n\n # RuntimeBatchRequest with a query 2\n batch_request_2 = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": 
\"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 5\"\n },\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[\n {\"batch_request\": batch_request_1},\n {\"batch_request\": batch_request_2},\n ],\n )\n\n result = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_raise_error_when_run_when_missing_batch_request_and_validations(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n with pytest.raises(\n ge_exceptions.CheckpointError,\n match='Checkpoint \"my_checkpoint\" must contain either a batch_request or validations.',\n ):\n checkpoint.run()\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_query_in_top_level_batch_request(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 10\"\n },\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n 
\"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n batch_request=batch_request,\n )\n\n result = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_batch_data_in_top_level_batch_request_pandas(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(batch_request=batch_request)\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_batch_data_in_top_level_batch_request_spark(\n data_context_with_datasource_spark_engine,\n spark_session,\n):\n context: DataContext = data_context_with_datasource_spark_engine\n pandas_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n test_df = spark_session.createDataFrame(pandas_df)\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(batch_request=batch_request)\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef 
test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_path_in_top_level_batch_request_pandas(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add simple checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n batch_request=batch_request,\n )\n\n result = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_path_in_top_level_batch_request_spark(\n titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n batch_request=batch_request,\n 
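        # The top-level batch_request above is the path-based RuntimeBatchRequest
        # (the Titanic CSV resolved from the basic data connector's base_directory),
        # so checkpoint.run() below is called without a validations argument.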
)\n\n result = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_query_in_checkpoint_run(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 10\"\n },\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(batch_request=batch_request)\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_batch_data_in_checkpoint_run_pandas(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(batch_request=batch_request)\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_batch_data_in_checkpoint_run_spark(\n data_context_with_datasource_spark_engine, spark_session\n):\n context: DataContext = 
data_context_with_datasource_spark_engine\n pandas_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n test_df = spark_session.createDataFrame(pandas_df)\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(batch_request=batch_request)\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_query_in_checkpoint_run(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 10\"\n },\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(validations=[{\"batch_request\": batch_request}])\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_batch_data_in_checkpoint_run_pandas(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n 
\"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(validations=[{\"batch_request\": batch_request}])\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_batch_data_in_checkpoint_run_spark(\n data_context_with_datasource_spark_engine, spark_session\n):\n context: DataContext = data_context_with_datasource_spark_engine\n pandas_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n test_df = spark_session.createDataFrame(pandas_df)\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(validations=[{\"batch_request\": batch_request}])\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_path_checkpoint_run_pandas(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n 
\"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add simple checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(batch_request=batch_request)\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_path_in_checkpoint_run_spark(\n titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(batch_request=batch_request)\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_path_checkpoint_run_pandas(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with 
a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add simple checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(validations=[{\"batch_request\": batch_request}])\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_path_in_checkpoint_run_spark(\n titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(validations=[{\"batch_request\": batch_request}])\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_query_in_context_run_checkpoint(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": 
\"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 10\"\n },\n }\n )\n\n # add checkpoint config\n checkpoint_config_dict: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config_dict)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", batch_request=batch_request\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_batch_data_in_context_run_checkpoint_pandas(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", batch_request=batch_request\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_batch_data_in_context_run_checkpoint_spark(\n data_context_with_datasource_spark_engine, spark_session\n):\n context: DataContext = data_context_with_datasource_spark_engine\n pandas_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n test_df = spark_session.createDataFrame(pandas_df)\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n 
\"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", batch_request=batch_request\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_query_in_context_run_checkpoint(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 10\"\n },\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", validations=[{\"batch_request\": batch_request}]\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_batch_data_in_context_run_checkpoint_pandas(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n 
\"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", validations=[{\"batch_request\": batch_request}]\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_batch_data_in_context_run_checkpoint_spark(\n data_context_with_datasource_spark_engine, spark_session\n):\n context: DataContext = data_context_with_datasource_spark_engine\n pandas_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n test_df = spark_session.createDataFrame(pandas_df)\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", validations=[{\"batch_request\": batch_request}]\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_path_context_run_checkpoint_pandas(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n 
batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", batch_request=batch_request\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_path_in_context_run_checkpoint_spark(\n titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", batch_request=batch_request\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_path_context_run_checkpoint_pandas(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", validations=[{\"batch_request\": batch_request}]\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_validations_path_in_context_run_checkpoint_spark(\n titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", validations=[{\"batch_request\": 
batch_request}]\n )\n\n assert len(context.validations_store.list_keys()) == 1\n assert result[\"success\"]\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_printable_validation_result_with_batch_data(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint: SimpleCheckpoint = SimpleCheckpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n )\n\n result = checkpoint.run(batch_request=batch_request)\n\n assert type(repr(result)) == str\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_runtime_parameters_error_contradictory_batch_request_in_checkpoint_yml_and_checkpoint_run(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n data_path: str = os.path.join(\n context.datasources[\"my_datasource\"]\n .data_connectors[\"my_basic_data_connector\"]\n .base_directory,\n \"Titanic_19120414_1313.csv\",\n )\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n runtime_batch_request: RuntimeBatchRequest\n\n # RuntimeBatchRequest with a path\n # Using typed object instead of dictionary, expected by \"add_checkpoint()\", on purpose to insure that checks work.\n runtime_batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"path\": data_path},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n \"batch_request\": runtime_batch_request,\n }\n\n 
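    # The checkpoint config persisted below already embeds the path-based
    # RuntimeBatchRequest; the run further down supplies a second RuntimeBatchRequest
    # whose runtime_parameters contain "batch_data". The two requests appear to be
    # merged, leaving runtime_parameters with more than one of
    # "batch_data"/"query"/"path", hence the InvalidBatchRequestError expected
    # at the end of this test.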
context.add_checkpoint(**checkpoint_config)\n checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(\n name=\"my_checkpoint\"\n )\n\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n runtime_batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"Titanic_19120414_1313.csv\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n with pytest.raises(\n ge_exceptions.exceptions.InvalidBatchRequestError,\n match=r\"The runtime_parameters dict must have one \\(and only one\\) of the following keys: 'batch_data', 'query', 'path'.\",\n ):\n checkpoint.run(batch_request=runtime_batch_request)\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_correct_validation_result_batch_request_in_checkpoint_yml_and_checkpoint_run(\n titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation,\n sa,\n):\n context: DataContext = titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n batch_request: dict = {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_basic_data_connector\",\n \"data_asset_name\": \"Titanic_1911\",\n }\n\n # RuntimeBatchRequest with a DataFrame\n runtime_batch_request: RuntimeBatchRequest = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"test_df\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n \"batch_request\": batch_request,\n }\n\n context.add_checkpoint(**checkpoint_config)\n checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(\n name=\"my_checkpoint\"\n )\n\n result = checkpoint.run()\n assert result[\"success\"] == False\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 0\n )\n\n result = checkpoint.run(batch_request=runtime_batch_request)\n assert result[\"success\"]\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 1\n )\n\n\ndef 
test_simple_checkpoint_instantiates_and_produces_a_correct_validation_result_validations_in_checkpoint_yml_and_checkpoint_run(\n titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation,\n sa,\n):\n context: DataContext = titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n batch_request: dict = {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_basic_data_connector\",\n \"data_asset_name\": \"Titanic_1911\",\n }\n\n # RuntimeBatchRequest with a DataFrame\n runtime_batch_request: RuntimeBatchRequest = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"test_df\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n \"validations\": [{\"batch_request\": batch_request}],\n }\n\n context.add_checkpoint(**checkpoint_config)\n checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(\n name=\"my_checkpoint\"\n )\n\n result = checkpoint.run()\n assert result[\"success\"] == False\n assert len(result.run_results.values()) == 1\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 0\n )\n\n result = checkpoint.run(validations=[{\"batch_request\": runtime_batch_request}])\n assert result[\"success\"] == False\n assert len(result.run_results.values()) == 2\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 0\n )\n assert (\n list(result.run_results.values())[1][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[1][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 1\n )\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_correct_validation_result_batch_request_in_checkpoint_yml_and_context_run_checkpoint(\n titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation,\n sa,\n):\n context: DataContext = titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n batch_request: dict = {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_basic_data_connector\",\n \"data_asset_name\": 
\"Titanic_1911\",\n }\n\n # RuntimeBatchRequest with a DataFrame\n runtime_batch_request: RuntimeBatchRequest = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"test_df\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n \"batch_request\": batch_request,\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(checkpoint_name=\"my_checkpoint\")\n assert result[\"success\"] == False\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 0\n )\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\", batch_request=runtime_batch_request\n )\n assert result[\"success\"]\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 1\n )\n\n\ndef test_simple_checkpoint_instantiates_and_produces_a_correct_validation_result_validations_in_checkpoint_yml_and_context_run_checkpoint(\n titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation,\n sa,\n):\n context: DataContext = titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n batch_request: dict = {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_basic_data_connector\",\n \"data_asset_name\": \"Titanic_1911\",\n }\n\n # RuntimeBatchRequest with a DataFrame\n runtime_batch_request: RuntimeBatchRequest = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"test_df\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config: dict = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": 
\"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n \"validations\": [{\"batch_request\": batch_request}],\n }\n\n context.add_checkpoint(**checkpoint_config)\n\n result = context.run_checkpoint(checkpoint_name=\"my_checkpoint\")\n assert result[\"success\"] == False\n assert len(result.run_results.values()) == 1\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 0\n )\n\n result = context.run_checkpoint(\n checkpoint_name=\"my_checkpoint\",\n validations=[{\"batch_request\": runtime_batch_request}],\n )\n assert result[\"success\"] == False\n assert len(result.run_results.values()) == 2\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[0][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 0\n )\n assert (\n list(result.run_results.values())[1][\"validation_result\"][\"statistics\"][\n \"evaluated_expectations\"\n ]\n == 1\n )\n assert (\n list(result.run_results.values())[1][\"validation_result\"][\"statistics\"][\n \"successful_expectations\"\n ]\n == 1\n )\n\n\ndef test_simple_checkpoint_does_not_pass_dataframes_via_batch_request_into_checkpoint_store(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n \"batch_request\": batch_request,\n }\n\n with pytest.raises(\n ge_exceptions.InvalidConfigError,\n match='batch_data found in batch_request cannot be saved to CheckpointStore \"checkpoint_store\"',\n ):\n context.add_checkpoint(**checkpoint_config)\n\n\ndef test_simple_checkpoint_does_not_pass_dataframes_via_validations_into_checkpoint_store(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": 
\"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n\n # add checkpoint config\n checkpoint_config = {\n \"class_name\": \"SimpleCheckpoint\",\n \"name\": \"my_checkpoint\",\n \"config_version\": 1,\n \"run_name_template\": \"%Y-%M-foo-bar-template\",\n \"expectation_suite_name\": \"my_expectation_suite\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n \"validations\": [{\"batch_request\": batch_request}],\n }\n\n with pytest.raises(\n ge_exceptions.InvalidConfigError,\n match='batch_data found in validations cannot be saved to CheckpointStore \"checkpoint_store\"',\n ):\n context.add_checkpoint(**checkpoint_config)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
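The row above exercises SimpleCheckpoint with an in-memory pandas DataFrame. A minimal sketch of the same RuntimeBatchRequest construction outside the test fixtures; the import path is an assumption for the Great Expectations release these tests target, and the datasource/connector names are simply reused from the row.

import pandas as pd
# Assumed import path for the Great Expectations version exercised above.
from great_expectations.core.batch import RuntimeBatchRequest

test_df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
# Mirrors the request built in the tests: batch_data carries the DataFrame itself,
# so the request is only usable at run time and is never persisted to a store.
runtime_batch_request = RuntimeBatchRequest(
    datasource_name="my_datasource",
    data_connector_name="my_runtime_data_connector",
    data_asset_name="test_df",
    batch_identifiers={
        "pipeline_stage_name": "core_processing",
        "airflow_run_id": 1234567890,
    },
    runtime_parameters={"batch_data": test_df},
)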
Ida-Ida/hecktor-2020 | [
"d03b9f546a19db5e5514da9d5a6cdfd40abba729"
] | [
"src/layers.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass BasicConv3d(nn.Module):\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv3d, self).__init__()\n self.conv = nn.Conv3d(in_channels, out_channels, bias=False, **kwargs)\n self.norm = nn.InstanceNorm3d(out_channels, affine=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.norm(x)\n x = F.relu(x, inplace=True)\n return x\n\n\nclass FastSmoothSENorm(nn.Module):\n class SEWeights(nn.Module):\n def __init__(self, in_channels, reduction=2):\n super().__init__()\n self.conv1 = nn.Conv3d(in_channels, in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True)\n self.conv2 = nn.Conv3d(in_channels // reduction, in_channels, kernel_size=1, stride=1, padding=0, bias=True)\n\n def forward(self, x):\n b, c, d, h, w = x.size()\n out = torch.mean(x.view(b, c, -1), dim=-1).view(b, c, 1, 1, 1) # output_shape: in_channels x (1, 1, 1)\n out = F.relu(self.conv1(out))\n out = self.conv2(out)\n return out\n\n def __init__(self, in_channels, reduction=2):\n super(FastSmoothSENorm, self).__init__()\n self.norm = nn.InstanceNorm3d(in_channels, affine=False)\n self.gamma = self.SEWeights(in_channels, reduction)\n self.beta = self.SEWeights(in_channels, reduction)\n\n def forward(self, x):\n gamma = torch.sigmoid(self.gamma(x))\n beta = torch.tanh(self.beta(x))\n x = self.norm(x)\n return gamma * x + beta\n\n\nclass FastSmoothSeNormConv3d(nn.Module):\n def __init__(self, in_channels, out_channels, reduction=2, **kwargs):\n super(FastSmoothSeNormConv3d, self).__init__()\n self.conv = nn.Conv3d(in_channels, out_channels, bias=True, **kwargs)\n self.norm = FastSmoothSENorm(out_channels, reduction)\n\n def forward(self, x):\n x = self.conv(x)\n x = F.relu(x, inplace=True)\n x = self.norm(x)\n return x\n\n\nclass RESseNormConv3d(nn.Module):\n def __init__(self, in_channels, out_channels, reduction=2, **kwargs):\n super().__init__()\n self.conv1 = FastSmoothSeNormConv3d(in_channels, out_channels, reduction, **kwargs)\n\n if in_channels != out_channels:\n self.res_conv = FastSmoothSeNormConv3d(in_channels, out_channels, reduction, kernel_size=1, stride=1, padding=0)\n else:\n self.res_conv = None\n\n def forward(self, x):\n residual = self.res_conv(x) if self.res_conv else x\n x = self.conv1(x)\n x += residual\n return x\n\n\nclass UpConv(nn.Module):\n def __init__(self, in_channels, out_channels, reduction=2, scale=2):\n super().__init__()\n self.scale = scale\n self.conv = FastSmoothSeNormConv3d(in_channels, out_channels, reduction, kernel_size=1, stride=1, padding=0)\n\n def forward(self, x):\n x = self.conv(x)\n x = F.interpolate(x, scale_factor=self.scale, mode='trilinear', align_corners=False)\n return x\n"
] | [
[
"torch.nn.Conv3d",
"torch.nn.functional.relu",
"torch.nn.InstanceNorm3d",
"torch.nn.functional.interpolate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
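A minimal sketch of how the 3-D squeeze-and-excitation blocks in the row above compose; the module name `layers`, the channel sizes, and the input shape are illustrative assumptions.

import torch
from layers import RESseNormConv3d, UpConv  # assumes the row's src/layers.py is importable as `layers`

x = torch.randn(1, 8, 16, 16, 16)            # (batch, channels, D, H, W)
block = RESseNormConv3d(8, 16, reduction=2, kernel_size=3, stride=1, padding=1)
up = UpConv(16, 8, reduction=2, scale=2)     # 1x1x1 SE-norm conv followed by trilinear upsampling
y = up(block(x))
print(y.shape)                               # torch.Size([1, 8, 32, 32, 32])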
helene-t/SciDataTool | [
"4374ee2c1a55421af614aac00ba5ac7cf3db1144"
] | [
"SciDataTool/Functions/symmetries.py"
] | [
"# -*- coding: utf-8 -*-\nfrom numpy import tile, concatenate, negative, ones\n\n\ndef rebuild_symmetries(values, axis_index, symmetries):\n \"\"\"Reconstructs the field of a Data object taking symmetries into account\n Parameters\n ----------\n values: ndarray\n ndarray of a field\n axis_index: int\n Index of the axis along which the symmetry is made\n symmetries: dict\n Dictionary of the symmetries along one axis\n Returns\n -------\n ndarray of the reconstructed field\n \"\"\"\n if \"period\" in symmetries.keys():\n values = mytile(values, symmetries.get(\"period\"), axis_index)\n elif \"antiperiod\" in symmetries.keys():\n values2 = concatenate((values, negative(values)), axis=axis_index)\n values3 = mytile(values2, symmetries.get(\"antiperiod\"), axis_index)\n if symmetries.get(\"antiperiod\") % 2 == 1:\n values = concatenate((values3, values), axis=axis_index)\n else:\n values = values3\n return values\n\n\ndef rebuild_symmetries_axis(values, symmetries):\n \"\"\"Reconstructs the field of a Data object taking symmetries into account\n Parameters\n ----------\n values: ndarray\n ndarray of a the axis values\n symmetries: dict\n Dictionary of the symmetries along the axis\n Returns\n -------\n ndarray of the reconstructed axis\n \"\"\"\n values_new = values\n if \"period\" in symmetries.keys():\n for i in range(symmetries.get(\"period\") - 1):\n values_new = concatenate((values_new, values + values_new[-1]))\n elif \"antiperiod\" in symmetries.keys():\n for i in range(symmetries.get(\"antiperiod\") - 1):\n values_new = concatenate((values_new, values + values_new[-1]))\n return values_new\n\n\ndef mytile(values, n, axis_index):\n values_shape = values.shape\n reps = ones(len(values_shape), dtype=int)\n reps[axis_index] = n\n return tile(values, reps)\n"
] | [
[
"numpy.concatenate",
"numpy.negative",
"numpy.tile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
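A small sketch of the periodicity helpers in the row above; the module name `symmetries` and the array values are illustrative assumptions.

import numpy as np
from symmetries import rebuild_symmetries, rebuild_symmetries_axis  # assumes the row's module is importable

field = np.arange(8, dtype=float).reshape(2, 4)            # 2 signals, 4 samples covering one period
full = rebuild_symmetries(field, axis_index=1, symmetries={"period": 3})
print(full.shape)                                          # (2, 12): the period is tiled 3 times along axis 1

axis = np.linspace(0.0, 1.0, 4, endpoint=False)            # axis values of the single period
full_axis = rebuild_symmetries_axis(axis, {"period": 3})
print(full_axis.shape)                                     # (12,): each repetition is offset by the previous endpoint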
Elnura/MultiplePedestrianDetection_CNN | [
"736a15e011699560bd07a060e1c5a08174004c63"
] | [
"detection/yolov4/detection/algorithm_yolov4_tiny.py"
] | [
"#! /usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nRun a YOLO_v3 style detection model on test images.\r\n\"\"\"\r\n\r\nimport colorsys\r\nimport os\r\nfrom os.path import join, dirname, realpath\r\nimport numpy as np\r\nfrom keras import backend as K\r\nfrom keras.models import load_model\r\nfrom keras.layers import Input\r\nfrom tracking.yolov4.detection.yolov4_tiny_model import yolo_eval, yolo_body\r\nfrom tracking.yolov4.detection.utils import letterbox_image\r\n\r\n\r\nclass detection_stage_yolov4_tiny(object):\r\n data_dir_path = join(dirname(dirname(realpath(__file__))), 'model_data_new')\r\n _defaults = {\r\n \"model_path\": os.path.join(data_dir_path, 'yolov4_tiny_voc.h5'),\r\n \"anchors_path\": os.path.join(data_dir_path, 'yolov4_anchors.txt'),\r\n \"classes_path\": os.path.join(data_dir_path, 'voc_classes.txt'),\r\n #\"score\": 0.2,\r\n \"iou\": 0.3\r\n #\"model_image_size\": (416, 416) #---\r\n }\r\n\r\n @classmethod\r\n def get_defaults(cls, n):\r\n if n in cls._defaults:\r\n return cls._defaults[n]\r\n else:\r\n return \"Unrecognized attribute name '\" + n + \"'\"\r\n\r\n\r\n def __init__(self, detection_image_size, detection_score):\r\n self.__dict__.update(self._defaults)\r\n self.class_names = self._get_class()\r\n self.anchors = self._get_anchors()\r\n self.sess = K.get_session()\r\n self.score = detection_score\r\n self.model_image_size = (detection_image_size, detection_image_size)\r\n self.boxes, self.scores, self.classes = self.generate()\r\n\r\n\r\n def _get_class(self):\r\n classes_path = os.path.expanduser(self.classes_path)\r\n with open(classes_path) as f:\r\n class_names = f.readlines()\r\n class_names = [c.strip() for c in class_names]\r\n return class_names\r\n\r\n def _get_anchors(self):\r\n anchors_path = os.path.expanduser(self.anchors_path)\r\n with open(anchors_path) as f:\r\n anchors = f.readline()\r\n anchors = [float(x) for x in anchors.split(',')]\r\n return np.array(anchors).reshape(-1, 2)\r\n\r\n def generate(self):\r\n model_path = os.path.expanduser(self.model_path)\r\n assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'\r\n\r\n # Load model, or construct model and load weights.\r\n num_anchors = len(self.anchors)\r\n num_classes = len(self.class_names)\r\n\r\n try:\r\n self.yolo_model = load_model(model_path, compile=False)\r\n except:\r\n self.yolo_model = yolo_body(Input(shape=(None, None, 3)), num_anchors // 2, num_classes)\r\n self.yolo_model.load_weights(self.model_path)\r\n else:\r\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\r\n num_anchors / len(self.yolo_model.output) * (num_classes + 5), \\\r\n 'Mismatch between model and given anchor and class sizes'\r\n\r\n print('{} model, anchors, and classes loaded.'.format(model_path))\r\n\r\n # self.yolo_model = load_model(model_path, compile=False)\r\n # print('{} model, anchors, and classes loaded.'.format(model_path))\r\n\r\n # Generate colors for drawing bounding boxes.\r\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\r\n for x in range(len(self.class_names))]\r\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\r\n self.colors = list(\r\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\r\n self.colors))\r\n\r\n np.random.seed(10101)\r\n np.random.shuffle(self.colors)\r\n np.random.seed(None)\r\n\r\n self.input_image_shape = K.placeholder(shape=(2,))\r\n\r\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\r\n num_classes, self.input_image_shape,\r\n score_threshold=self.score, 
iou_threshold=self.iou)\r\n return boxes, scores, classes\r\n\r\n def detect_image(self, image):\r\n new_image_size = self.model_image_size\r\n boxed_image = letterbox_image(image, new_image_size)\r\n image_data = np.array(boxed_image, dtype='float32')\r\n image_data /= 255.\r\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\r\n\r\n out_boxes, out_scores, out_classes = self.sess.run(\r\n [self.boxes, self.scores, self.classes],\r\n feed_dict={\r\n self.yolo_model.input: image_data,\r\n self.input_image_shape: [image.size[1], image.size[0]],\r\n K.learning_phase(): 0\r\n })\r\n\r\n return_boxs = []\r\n for i, c in reversed(list(enumerate(out_classes))):\r\n predicted_class = self.class_names[c]\r\n if predicted_class != 'person':\r\n continue\r\n box = out_boxes[i]\r\n # score = out_scores[i]\r\n x = int(box[1])\r\n y = int(box[0])\r\n w = int(box[3] - box[1])\r\n h = int(box[2] - box[0])\r\n if x < 0:\r\n w = w + x\r\n x = 0\r\n if y < 0:\r\n h = h + y\r\n y = 0\r\n return_boxs.append([x, y, w, h])\r\n\r\n return return_boxs\r\n\r\n\r\n def close_session(self):\r\n self.sess.close()"
] | [
[
"numpy.random.shuffle",
"numpy.array",
"numpy.expand_dims",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TUTElectromechanics/spline-fit-py | [
"38139f46b935a1d086ab397adbbf29decb7fb99f"
] | [
"util/plot.py"
] | [
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"Utility functions for plotting.\n\nCreated on Fri Mar 24 14:27:16 2017\n\n@author: jje\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d.axes3d\n\ndef plot_wireframe( data, legend_label=\"_nolabel_\", figno=None ):\n \"\"\"Make and label a wireframe plot.\n\nParameters:\n data : dict\n key : \"x\",\"y\",\"z\"\n value : tuple (rank-2 array in meshgrid format, axis label)\n\nReturn value:\n ax\n The Axes3D object that was used for plotting.\n\"\"\"\n # http://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html\n fig = plt.figure(figno)\n\n # Axes3D has a tendency to underestimate how much space it needs; it draws its labels\n # outside the window area in certain orientations.\n #\n # This causes the labels to be clipped, which looks bad. We prevent this by creating the axes\n # in a slightly smaller rect (leaving a margin). This way the labels will show - outside the Axes3D,\n # but still inside the figure window.\n #\n # The final touch is to set the window background to a matching white, so that the\n # background of the figure appears uniform.\n #\n fig.patch.set_color( (1,1,1) )\n fig.patch.set_alpha( 1.0 )\n x0y0wh = [ 0.02, 0.02, 0.96, 0.96 ] # left, bottom, width, height (here as fraction of subplot area)\n\n ax = mpl_toolkits.mplot3d.axes3d.Axes3D(fig, rect=x0y0wh)\n\n X,xlabel = data[\"x\"]\n Y,ylabel = data[\"y\"]\n Z,zlabel = data[\"z\"]\n ax.plot_wireframe( X, Y, Z, label=legend_label )\n\n# ax.view_init(34, 140)\n# ax.view_init(34, -40)\n ax.view_init(34, -130)\n ax.axis('tight')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n ax.set_title(zlabel)\n\n return ax\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
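A quick sketch of calling plot_wireframe from the row above on a meshgrid; it assumes the file imports as `plot`, an interactive matplotlib backend is available, and a matplotlib version contemporary with the file (the function constructs Axes3D directly).

import numpy as np
import matplotlib.pyplot as plt
from plot import plot_wireframe   # assumes the row's util/plot.py is importable as `plot`

t = np.linspace(-1.0, 1.0, 30)
X, Y = np.meshgrid(t, t)
Z = X**2 - Y**2                   # a saddle surface, purely for illustration
ax = plot_wireframe(
    {"x": (X, "x"), "y": (Y, "y"), "z": (Z, "z = x^2 - y^2")},
    legend_label="saddle",
    figno=1,
)
plt.show()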
HongleiXie/MLFromScratch | [
"0e51f4b7566fcc4387bb7d940239bdbf9739b842"
] | [
"MLP.py"
] | [
"import tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\n\n\"\"\"\nImplement the MLP from scratch, used to solve for the fashion-mnist classification problem\n\"\"\"\n\ndef load_data_fashion_mnist(batch_size):\n\n mnist_train, mnist_test = keras.datasets.fashion_mnist.load_data()\n # Divide all numbers by 255 so that all pixel values are between\n # 0 and 1, add a batch dimension at the last. And cast label to int32\n process = lambda X, y: (tf.expand_dims(X, axis=3) / 255,\n tf.cast(y, dtype='int32'))\n return tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])), \\\n tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size)\n\n\nclass Updater():\n \"\"\"\n For updating parameters using minibatch stochastic gradient descent.\n \"\"\"\n\n def __init__(self, params, lr):\n self.params = params\n self.lr = lr\n\n @staticmethod\n # Minibatch stochastic gradient descent.\n def sgd(params, grads, lr, batch_size):\n for param, grad in zip(params, grads):\n param.assign_sub(lr * grad / batch_size)\n\n def __call__(self, batch_size, grads):\n self.sgd(self.params, grads, self.lr, batch_size)\n\n\ndef relu(X):\n return tf.math.maximum(X, 0)\n\n# define a model\ndef net(X):\n X = tf.reshape(X, (-1, num_inputs)) #reshape each two-dimensional image into a flat vector of length num_inputs\n H = relu(tf.matmul(X, W1) + b1)\n return tf.matmul(H, W2) + b2\n\n# define the loss function\ndef loss(y_hat, y):\n return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True) # recommend to set it as True for numerically stability. Then we should remove the last layer's softmax funtion\n\n\ndef accuracy(y_hat, y):\n \"\"\"\n Compute the number of correct predictions.\n \"\"\"\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = tf.argmax(y_hat, axis=1)\n cmp = tf.cast(y_hat, y.dtype) == y\n return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))\n\n\ndef evaluate_accuracy(net, data_iter):\n \"\"\"\n Compute the accuracy for a model on a dataset.\n \"\"\"\n metric = [0.0]*2 # No. of correct predictions, no. of predictions\n for X, y in data_iter:\n addon = accuracy(net(X), y), tf.size(y).numpy()\n metric = [a+b for a, b in zip(metric, addon)]\n return metric[0] / metric[1]\n\ndef train_epoch(net, train_iter, loss, updater):\n\n for X, y in train_iter:\n metric = [0.0] * 3 # Sum of training loss, sum of training accuracy, no. 
of examples\n # Compute gradients and update parameters\n with tf.GradientTape() as tape:\n y_hat = net(X)\n l = loss(y_hat, y)\n updater(X.shape[0], tape.gradient(l, updater.params))\n\n l_sum = tf.reduce_sum(l)\n addon = l_sum.numpy(), accuracy(y_hat, y), tf.size(y).numpy()\n metric = [a + b for a, b in zip(metric, addon)]\n\n # Return training loss and training accuracy\n return metric[0] / metric[2], metric[1] / metric[2]\n\ndef train(net, train_iter, test_iter, loss, num_epochs, updater):\n for epoch in range(num_epochs):\n train_metrics = train_epoch(net, train_iter, loss, updater)\n test_acc = evaluate_accuracy(net, test_iter)\n print('epoch {0}, train loss {1}, train acc {2}, test acc {3}'.format(epoch, train_metrics[0],\n train_metrics[1], test_acc))\n\ndef get_fashion_mnist_labels(labels):\n \"\"\"\n Return text labels for the Fashion-MNIST dataset.\n \"\"\"\n text_labels = [\n 't-shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt',\n 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]\n\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n \"\"\"\n Plot a list of images.\n \"\"\"\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.numpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\n\n\nif __name__ == '__main__':\n\n # initializing parameters\n num_inputs, num_outputs, num_hiddens = 784, 10, 256\n\n W1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01))\n b1 = tf.Variable(tf.zeros(num_hiddens))\n W2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01))\n b2 = tf.Variable(tf.random.normal([num_outputs], stddev=.01))\n\n params = [W1, b1, W2, b2]\n\n num_epochs = 10\n lr = 0.1 # learing rate\n updater = Updater([W1, W2, b1, b2], lr)\n\n # load data\n train_iter, test_iter = load_data_fashion_mnist(batch_size=256)\n # start to train the MLP model\n train(net, train_iter, test_iter, loss, num_epochs, updater)\n\n # load testing iter\n for X, y in test_iter:\n break\n\n # show the first 8 images in the testing data\n n = 8\n trues = get_fashion_mnist_labels(y)\n preds = get_fashion_mnist_labels(tf.argmax(net(X), axis=1))\n titles = [true + '\\n' + pred for true, pred in zip(trues, preds)]\n show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n]);\n"
] | [
[
"tensorflow.keras.datasets.fashion_mnist.load_data",
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.cast",
"matplotlib.pyplot.subplots",
"tensorflow.expand_dims",
"tensorflow.GradientTape",
"tensorflow.losses.sparse_categorical_crossentropy",
"tensorflow.argmax",
"tensorflow.random.normal",
"tensorflow.size",
"tensorflow.math.maximum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
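A standalone shape check of the two-layer MLP forward pass defined in the row above (784 -> 256 -> 10); the fake uniform batch stands in for Fashion-MNIST and is purely illustrative.

import tensorflow as tf

num_inputs, num_hiddens, num_outputs = 784, 256, 10
W1 = tf.Variable(tf.random.normal((num_inputs, num_hiddens), stddev=0.01))
b1 = tf.Variable(tf.zeros(num_hiddens))
W2 = tf.Variable(tf.random.normal((num_hiddens, num_outputs), stddev=0.01))
b2 = tf.Variable(tf.zeros(num_outputs))

X = tf.random.uniform((4, 28, 28, 1))                                        # a fake mini-batch of four images
H = tf.math.maximum(tf.matmul(tf.reshape(X, (-1, num_inputs)), W1) + b1, 0)  # ReLU hidden layer
logits = tf.matmul(H, W2) + b2
print(logits.shape)                                                          # (4, 10)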
boringlee24/sevir_challenges | [
"be5e42795246f791932ada2c7a92e18df0b5d8b7"
] | [
"experiments/train.py"
] | [
"import sys\nsys.path.append('..') # Add src to path\nimport os\nos.environ[\"HDF5_USE_FILE_LOCKING\"]='FALSE'\nimport datetime\nimport h5py\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom src.display import get_cmap\nfrom src.utils import make_log_dir\n# comment these out if you don't have cartopy\nimport cartopy.feature as cfeature\nfrom src.display.cartopy import make_ccrs,make_animation\nfrom make_dataset import NowcastGenerator,get_nowcast_train_generator,get_nowcast_test_generator\nfrom unet_benchmark import create_model\nfrom unet_benchmark import nowcast_mae, nowcast_mse\nimport pdb\nfrom pathlib import Path\nimport matplotlib\nmatplotlib.use('Agg')\n\ndata_path=\"/scratch/li.baol/SEVIR\"\n# Target locations of sample training & testing data\nDEST_TRAIN_FILE= os.path.join(data_path,'data/processed/nowcast_training_000.h5')\nDEST_TRAIN_META=os.path.join(data_path, 'data/processed/nowcast_training_000_META.csv')\nDEST_TEST_FILE= os.path.join(data_path, 'data/processed/nowcast_testing_000.h5')\nDEST_TEST_META= os.path.join(data_path, 'data/processed/nowcast_testing_000_META.csv')\n\n# Control how many samples are read. Set to -1 to read all 5000 samples.\nN_TRAIN=-1\nTRAIN_VAL_FRAC=0.8\n#set_trace()\nN_TEST=-1\n\n# Loading data takes a few minutes\nwith h5py.File(DEST_TRAIN_FILE,'r') as hf:\n Nr = N_TRAIN if N_TRAIN>=0 else hf['IN_vil'].shape[0]\n X_train = hf['IN_vil'][:Nr]\n Y_train = hf['OUT_vil'][:Nr]\n training_meta = pd.read_csv(DEST_TRAIN_META).iloc[:Nr]\n X_train,X_val=np.split(X_train,[int(TRAIN_VAL_FRAC*Nr)])\n Y_train,Y_val=np.split(Y_train,[int(TRAIN_VAL_FRAC*Nr)])\n training_meta,val_meta=np.split(training_meta,[int(TRAIN_VAL_FRAC*Nr)])\n#set_trace() \nwith h5py.File(DEST_TEST_FILE,'r') as hf:\n Nr = N_TEST if N_TEST>=0 else hf['IN_vil'].shape[0]\n X_test = hf['IN_vil'][:Nr]\n Y_test = hf['OUT_vil'][:Nr]\n testing_meta=pd.read_csv(DEST_TEST_META).iloc[:Nr]\n\n# Add more as needed\nparams={\n 'start_neurons' :16, # Controls size of hidden layers in CNN, higher = more complexity \n 'activation' :'relu', # Activation used throughout the U-Net, see https://www.tensorflow.org/api_docs/python/tf/keras/activations\n 'loss' :'mae', # Either 'mae' or 'mse', or others as https://www.tensorflow.org/api_docs/python/tf/keras/losses\n 'loss_weights' :0.021, # Scale for loss. 
Recommend squaring this if using MSE\n 'opt' :tf.keras.optimizers.Adam, # optimizer, see https://www.tensorflow.org/api_docs/python/tf/keras/optimizers\n 'learning_rate' :0.001, # Learning rate for optimizer\n 'num_epochs' :10, # Number of epochs to train for\n 'batch_size' :8 # Size of batches during training\n}\n\nunet = create_model(start_neurons=params['start_neurons'],activation=params['activation']) \nunet.load_weights('logs/unet.hdf5')\n\n#unet.summary()\n#pdb.set_trace()\nexprmt_dir='logs'\nmake_log_dir = 'plots'\nPath(exprmt_dir).mkdir(parents=True,exist_ok=True)\nPath(make_log_dir).mkdir(parents=True,exist_ok=True)\n\nopt=params['opt'](learning_rate=params['learning_rate'])\nunet.compile(optimizer=opt, loss=params['loss'],loss_weights=[params['loss_weights']])\n\n# Training 10 epochs takes around 10-20 minutes on GPU\nnum_epochs=params['num_epochs']\nbatch_size=params['batch_size']\n\ncallbacks=[\n tf.keras.callbacks.ModelCheckpoint(exprmt_dir+'/unet.hdf5', \n monitor='val_loss',save_best_only=True),\n tf.keras.callbacks.TensorBoard(log_dir=exprmt_dir+'/tboardlogs')\n]\n\nhistory = unet.fit(x=X_train, y=Y_train,\n batch_size=batch_size,\n epochs=num_epochs,\n callbacks=callbacks,\n validation_data=(X_val, Y_val))\n\nplt.plot(history.history['loss'],label='Train loss')\nplt.plot(history.history['val_loss'],label='Val loss')\nplt.legend()\nplt.savefig('plots/train.png')\n"
] | [
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"tensorflow.keras.callbacks.TensorBoard"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
unanan/setk | [
"e1248c6d40806c3fff251f3971a585c6ec09d949"
] | [
"scripts/sptk/libs/ns.py"
] | [
"#!/usr/bin/env python\n\n# wujian@2020\n\nimport numpy as np\nimport scipy.signal as ss\nimport scipy.integrate as si\n\n\nclass MCRA(object):\n \"\"\"\n OM-LSA (Optimally Modified Log-Spectral Amplitude Estimator) with MCRA\n Reference:\n 1) Cohen I, Berdugo B. Speech enhancement for non-stationary noise environments[J]. \n Signal processing, 2001, 81(11): 2403-2418.\n \"\"\"\n def __init__(self,\n alpha=0.92,\n delta=5,\n beta=0.7,\n alpha_s=0.9,\n alpha_d=0.85,\n alpha_p=0.2,\n gmin_db=-10,\n xi_min_db=-18,\n w_mcra=1,\n w_local=1,\n w_global=15,\n h_mcra=\"hann\",\n h_local=\"hann\",\n h_global=\"hann\",\n q_max=0.95,\n zeta_min_db=-10,\n zeta_max_db=-5,\n zeta_p_max_db=10,\n zeta_p_min_db=0,\n L=125,\n M=128):\n self.delta = delta\n self.alpha = {\"s\": alpha_s, \"d\": alpha_d, \"p\": alpha_p, \"t\": alpha}\n self.gmin = 10**(gmin_db / 10)\n self.beta = beta\n self.w_m = ss.get_window(h_mcra, w_mcra * 2 + 1)\n self.w_g = ss.get_window(h_global, w_global * 2 + 1)\n self.w_l = ss.get_window(h_local, w_local * 2 + 1)\n self.xi_min = 10**(xi_min_db / 10)\n self.zeta_min = 10**(zeta_min_db / 10)\n self.zeta_max = 10**(zeta_max_db / 10)\n self.zeta_p_min = 10**(zeta_p_min_db / 10)\n self.zeta_p_max = 10**(zeta_p_max_db / 10)\n self.L = L\n self.M = M\n self.q_max = q_max\n\n def run(self, stft, eps=1e-7):\n \"\"\"\n Arguments:\n stft: complex STFT, T x F\n Return:\n gain: real array, T x F\n \"\"\"\n T, F = stft.shape\n\n def expint(v):\n return si.quad(lambda t: np.exp(-t) / t, v, np.inf)[0]\n\n exp_para = np.vectorize(expint)\n\n obs_power = np.abs(stft)**2\n gh1 = 1\n p_hat = np.ones(F)\n zeta = np.ones(F)\n zeta_peak = 0\n beg = 10\n lambda_d_hat = obs_power[0]\n\n G = []\n for t in range(T):\n\n # >>> eq.10\n # a posteriori SNR\n gamma = obs_power[t] / np.maximum(lambda_d_hat, eps)\n gamma = np.maximum(gamma, eps)\n # <<< eq.10\n\n # >>> eq.18: a priori SNR\n xi_hat = self.alpha[\"t\"] * gh1**2 * gamma + (\n 1 - self.alpha[\"t\"]) * np.maximum(gamma - 1, 0)\n xi_hat = np.maximum(xi_hat, self.xi_min)\n # <<< eq.18\n\n # >>> eq.15\n v = gamma * xi_hat / (1 + xi_hat)\n gh1 = xi_hat * np.exp(0.5 * exp_para(v)) / (1 + xi_hat)\n # <<< eq.15\n\n # >>> eq.32\n var_sf = np.convolve(obs_power[t], self.w_m, mode=\"same\")\n # <<< eq.32\n\n if t == 0:\n var_s = obs_power[t]\n var_s_min = var_s\n var_s_tmp = var_s\n else:\n # >>> eq.33\n var_s = self.alpha[\"s\"] * var_s + (1 -\n self.alpha[\"s\"]) * var_sf\n # <<< eq.33\n\n if (t + 1) % self.L == beg:\n # >>> eq.34 & eq.35\n var_s_min = np.minimum(var_s_tmp, var_s)\n var_s_tmp = var_s\n # <<< eq.34 & eq.35\n else:\n # >>> eq.36 & eq.37\n var_s_min = np.minimum(var_s_min, var_s)\n var_s_tmp = np.minimum(var_s_tmp, var_s)\n # <<< eq.36 & eq.37\n\n # >>> eq.39\n var_sr = var_s / np.maximum(eps, var_s_min)\n sr_ind = var_sr > self.delta\n # <<< eq.39\n\n # >>> eq.40\n p_hat = self.alpha[\"p\"] * p_hat + (1 - self.alpha[\"p\"]) * sr_ind\n # >>> eq.40\n\n # >>> eq.31\n alpha_d_hat = self.alpha[\"d\"] + (1 - self.alpha[\"d\"]) * p_hat\n # <<< eq.31\n\n # >>> eq.30\n lambda_d_hat = alpha_d_hat * lambda_d_hat + (\n 1 - alpha_d_hat) * obs_power[t]\n # <<< eq.30\n\n # >>> eq.23\n zeta = self.beta * zeta + (1 - self.beta) * xi_hat\n # <<< eq.23\n\n # >>> eq.24\n zeta_g = np.convolve(zeta, self.w_g, mode=\"same\")\n zeta_l = np.convolve(zeta, self.w_l, mode=\"same\")\n # <<< eq.24\n\n # >>> eq.25\n var_p_g = np.zeros(F)\n pg_idx = np.logical_and(zeta_g > self.zeta_min,\n zeta_g < self.zeta_max)\n var_p_g[pg_idx] = np.log10(\n zeta_g[pg_idx] / self.zeta_min) / 
np.log10(\n self.zeta_max / self.zeta_min)\n pg_idx = zeta_g >= self.zeta_max\n var_p_g[pg_idx] = 1\n # <<< eq.25\n\n # >>> eq.25\n var_p_l = np.zeros(F)\n pl_idx = np.logical_and(zeta_l > self.zeta_min,\n zeta_l < self.zeta_max)\n var_p_l[pl_idx] = np.log10(\n zeta_l[pl_idx] / self.zeta_min) / np.log10(\n self.zeta_max / self.zeta_min)\n pl_idx = zeta_l >= self.zeta_max\n var_p_l[pl_idx] = 1\n # <<< eq.25\n\n # >>> eq.26\n zeta_frame_cur = np.mean(zeta[:self.M // 2 + 1])\n # <<< eq.26\n\n # >>> eq.27\n if t == 0:\n zeta_frame_pre = zeta_frame_cur\n if zeta_frame_cur > self.zeta_min:\n if zeta_frame_cur > zeta_frame_pre:\n zeta_peak = min(max(zeta_frame_cur, self.zeta_p_min),\n self.zeta_p_max)\n p_frame = 1\n elif zeta_frame_cur <= self.zeta_min * zeta_peak:\n p_frame = 0\n elif zeta_frame_cur >= self.zeta_max * zeta_peak:\n p_frame = 1\n else:\n p_frame = np.log10(zeta_frame_cur /\n (self.zeta_min * zeta_peak))\n p_frame = p_frame / np.log10(self.zeta_max / self.zeta_min)\n else:\n p_frame = 0\n zeta_frame_pre = zeta_frame_cur\n # <<< eq.27\n\n # >>> eq.28\n q_hat = np.minimum(self.q_max, 1 - var_p_l * p_frame * var_p_g)\n # <<< eq.28\n\n # >>> eq.9\n p_inv = 1 + q_hat * (1 + xi_hat) * np.exp(-v) / (1 + q_hat)\n p = 1 / p_inv\n # <<< eq.10\n\n # >>> eq.16\n gain = gh1**p * self.gmin**(1 - p)\n G.append(gain)\n # <<< eq.16\n return np.stack(G)\n\n\nclass iMCRA(object):\n \"\"\"\n OM-LSA (Optimally Modified Log-Spectral Amplitude Estimator) with iMCRA\n Reference:\n 1) Cohen I. Noise spectrum estimation in adverse environments: Improved minima controlled \n recursive averaging[J]. IEEE Transactions on speech and audio processing, 2003, 11(5): \n 466-475.\n \"\"\"\n def __init__(self,\n alpha=0.92,\n alpha_s=0.9,\n alpha_d=0.85,\n b_min=1.66,\n gamma0=4.6,\n gamma1=3,\n zeta0=1.67,\n xi_min_db=-18,\n gmin_db=-10,\n w_mcra=1,\n h_mcra=\"hann\",\n beta=1.47,\n V=15,\n U=8):\n self.alpha = {\"s\": alpha_s, \"d\": alpha_d, \"t\": alpha}\n self.beta = beta\n self.gamma0, self.gamma1 = gamma0, gamma1\n self.zeta0 = zeta0\n self.b_min = 1 / b_min\n self.xi_min = 10**(xi_min_db / 10)\n self.gain_min = 10**(gmin_db / 10)\n self.w_m = ss.get_window(h_mcra, w_mcra * 2 + 1)\n self.V = V\n self.U = U\n\n def run(self, stft, eps=1e-7):\n \"\"\"\n Arguments:\n stft: complex STFT, T x F\n Return:\n gain: real array, T x F\n \"\"\"\n T, F = stft.shape \n obs_power = np.abs(stft)**2\n lambda_d_hat = obs_power[0]\n gh1 = 1\n\n def expint(v):\n return si.quad(lambda t: np.exp(-t) / t, v, np.inf)[0]\n\n exp_para = np.vectorize(expint)\n\n s_min_sw_hat = []\n s_min_sw = []\n G = []\n for t in range(T):\n\n lambda_d = lambda_d_hat * self.beta\n\n # >>> eq.3: posteriori SNR\n gamma = obs_power[t] / np.maximum(lambda_d, eps)\n # <<< eq.3\n\n gain = gh1**2 * gamma\n # >>> eq.32 : a priori SNR\n xi_hat = self.alpha[\"t\"] * gain + (\n 1 - self.alpha[\"t\"]) * np.maximum(gamma - 1, 0)\n xi_hat = np.maximum(xi_hat, self.xi_min)\n # <<< eq.32\n\n # >>> eq.33\n v = gamma * xi_hat / (1 + xi_hat)\n gh1 = xi_hat / (1 + xi_hat) * np.exp(0.5 * exp_para(v))\n # <<< eq.33\n\n # >>> eq.14\n var_sf = np.convolve(obs_power[t], self.w_m, mode=\"same\")\n # <<< eq.14\n\n if t == 0:\n var_s = var_sf\n var_s_hat = var_sf\n var_s_min = var_sf\n var_s_min_sw = var_sf\n else:\n # >>> eq.15\n var_s = self.alpha[\"s\"] * var_s + (1 -\n self.alpha[\"s\"]) * var_sf\n # <<< eq.15\n var_s_min = np.minimum(var_s_min, var_s)\n var_s_min_sw = np.minimum(var_s_min_sw, var_s)\n\n # >>> eq.21\n gamma_min = obs_power[t] * self.b_min / 
np.maximum(var_s_min, eps)\n zeta = var_sf * self.b_min / np.maximum(var_s_min, eps)\n indicator = np.logical_and(gamma_min < self.gamma0,\n zeta < self.zeta0)\n # <<< eq.21\n\n # >>> eq.26\n ind_conv = np.convolve(indicator, self.w_m, mode=\"same\")\n ind_nz_idx = (ind_conv > 0)\n obs_conv = np.convolve(obs_power[t] * indicator,\n self.w_m,\n mode=\"same\")\n var_sf_hat = var_s_hat.copy()\n var_sf_hat[\n ind_nz_idx] = obs_conv[ind_nz_idx] / ind_conv[ind_nz_idx]\n # <<< eq.26\n\n if t == 0:\n var_s_min_hat = var_s\n var_s_min_sw_hat = var_sf\n else:\n # <<< eq.27\n var_s_hat = self.alpha[\"s\"] * var_s_hat + (\n 1 - self.alpha[\"s\"]) * var_sf_hat\n # >>> eq.27\n var_s_min_hat = np.minimum(var_s_min_hat, var_s_hat)\n var_s_min_sw_hat = np.minimum(var_s_min_sw_hat, var_s_hat)\n\n # >>> eq.28\n gamma_min_hat = obs_power[t] * self.b_min / np.maximum(\n var_s_min_hat, eps)\n zeta_hat = var_s * self.b_min / np.maximum(var_s_min_hat, eps)\n # <<< eq.28\n\n # >>> eq.29\n qhat_idx_c1 = gamma_min_hat < self.gamma1\n qhat_idx_c2 = gamma_min_hat > 1\n # 1 < gamma_min_hat < self.gamma1\n qhat_idx_c3 = np.logical_and(qhat_idx_c2, qhat_idx_c1)\n\n q_hat = np.zeros(F)\n qhat_idx = np.logical_and(qhat_idx_c3, zeta_hat < self.zeta0)\n # (0, 1)\n q_hat[qhat_idx] = (self.gamma1 -\n gamma_min_hat[qhat_idx]) / (self.gamma1 - 1)\n # <<< eq.29\n\n # >>> eq.7\n p_hat = np.zeros(F)\n p_hat_den = 1 + q_hat[qhat_idx] * (1 + xi_hat[qhat_idx]) / (\n 1 - q_hat[qhat_idx]) * np.exp(-v[qhat_idx])\n # (0, 1)\n p_hat[qhat_idx] = 1 / p_hat_den\n phat_idx = np.logical_and(gamma_min_hat >= self.gamma1,\n zeta_hat >= self.zeta0)\n p_hat[phat_idx] = 1\n # <<< eq.7\n\n # >>> eq.11\n alpha_d_hat = self.alpha[\"d\"] + (1 - self.alpha[\"d\"]) * p_hat\n # <<< eq.11\n\n # >>> eq.10\n lambda_d_hat = alpha_d_hat * lambda_d_hat + (\n 1 - alpha_d_hat) * obs_power[t]\n # <<< eq.10\n\n s_min_sw.append(var_s_min_sw)\n s_min_sw_hat.append(var_s_min_sw_hat)\n\n if (t + 1) % self.V == 0:\n # U x F\n u_s_min_sw = np.stack(s_min_sw[-self.U:])\n u_s_min_sw_hat = np.stack(s_min_sw_hat[-self.U:])\n var_s_min = np.min(u_s_min_sw, 0)\n var_s_min_hat = np.min(u_s_min_sw_hat, 0)\n var_s_min_sw = var_s\n var_s_min_sw_hat = var_s_hat\n\n # >>> gain function\n gain = gh1**p_hat * self.gain_min**(1 - p_hat)\n G.append(gain)\n # <<< gain function\n\n return np.stack(G)"
] | [
[
"numpy.convolve",
"numpy.maximum",
"numpy.abs",
"scipy.signal.get_window",
"numpy.minimum",
"numpy.min",
"numpy.stack",
"numpy.ones",
"numpy.vectorize",
"numpy.mean",
"numpy.log10",
"numpy.exp",
"numpy.logical_and",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
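A smoke test for the iMCRA estimator in the row above on random complex STFT frames; it assumes ns.py is importable as `ns`, and keeps T x F small because the gain loop evaluates an exponential integral per frequency bin.

import numpy as np
from ns import iMCRA   # assumes the row's scripts/sptk/libs/ns.py is importable as `ns`

rng = np.random.default_rng(0)
T, F = 32, 65                                   # frames x frequency bins, kept small on purpose
stft = rng.standard_normal((T, F)) + 1j * rng.standard_normal((T, F))

gain = iMCRA().run(stft)                        # per-bin OM-LSA spectral gain, shape (T, F)
print(gain.shape)                               # (32, 65)
enhanced = gain * stft                          # apply the gain to the noisy spectrum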
GLaDOS-root/chatviz | [
"439802be808320476c2db98c31935c8de87e86bf"
] | [
"src/data_parser.py"
] | [
"import helperfunctions\nimport importer\nimport numpy as np\nimport re \nimport pandas as pd\n#segregate raw .txt file into a nested dict structure with JSON-like referencing\ndef parse_chat(chat):\n arr = []\n for lines in chat.readlines():\n if(re.search(\"([0-9]{1,}\\/[0-9]{1,}\\/[0-9]{1,}\\, [0-9]{1,}\\:[0-9]{1,} (AM|PM) \\- )\", lines)):\n arr.append(lines)\n else:\n arr[len(arr)-1] = arr[len(arr)-1] + lines\n chat_data = np.asarray(arr)\n raw_data = pd.DataFrame(data = chat_data, columns = [\"text\"]) \n return raw_data\n\nif __name__ == \"__main__\":\n chat = importer.chat_import(\"/Users/glados/Documents/test_data.txt\")\n \n"
] | [
[
"numpy.asarray",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
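The parser in the row above depends on local helper modules, so this sketch re-creates only the parse_chat logic on a hypothetical in-memory transcript; the sample lines are invented for illustration.

import io
import re
import numpy as np
import pandas as pd

def parse_chat(chat):
    # Same logic as the row: start a new record on a "date, time AM/PM - " prefix,
    # otherwise append the continuation line to the previous record.
    arr = []
    for line in chat.readlines():
        if re.search(r"([0-9]{1,}\/[0-9]{1,}\/[0-9]{1,}\, [0-9]{1,}\:[0-9]{1,} (AM|PM) \- )", line):
            arr.append(line)
        else:
            arr[-1] = arr[-1] + line
    return pd.DataFrame(data=np.asarray(arr), columns=["text"])

sample = io.StringIO(
    "12/31/21, 9:15 PM - Alice: happy new year\n"
    "and a second line of the same message\n"
    "1/1/22, 12:01 AM - Bob: same to you\n"
)
print(parse_chat(sample))   # two rows; the continuation line is folded into Alice's message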
SDGraph/Hacktoberfest2k21 | [
"8f8aead15afa10ea12e1b23ece515a10a882de28"
] | [
"Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/ML/1_linear_reg/linearReg.py"
] | [
"# Data Preprocessing Template\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Data.csv')\n\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 3].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\n"
] | [
[
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
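One detail of the preprocessing row above worth noting: recent scikit-learn versions require 2-D input to StandardScaler, so scaling a 1-D target needs a reshape. A hedged sketch of that step with invented target values:

import numpy as np
from sklearn.preprocessing import StandardScaler

y_train = np.array([3.5, 1.2, 4.8, 2.1])           # illustrative 1-D target values
sc_y = StandardScaler()
# StandardScaler expects a 2-D array, so reshape the target to a column first,
# then flatten back after scaling.
y_scaled = sc_y.fit_transform(y_train.reshape(-1, 1)).ravel()
print(y_scaled.mean(), y_scaled.std())             # approximately 0.0 and 1.0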
Diffblue-benchmarks/WeBankFinTech-FATE | [
"7f4a3e7ca50f24e49090e6c117bfabd1785603f2"
] | [
"federatedml/logistic_regression/hetero_logistic_regression/hetero_lr_guest.py"
] | [
"#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nfrom arch.api import federation\nfrom arch.api.utils import log_utils\nfrom federatedml.logistic_regression.base_logistic_regression import BaseLogisticRegression\nfrom federatedml.model_selection import MiniBatch\nfrom federatedml.optim import activation\nfrom federatedml.optim.gradient import HeteroLogisticGradient\nfrom federatedml.secureprotol import EncryptModeCalculator\nfrom federatedml.statistic.data_overview import rubbish_clear\nfrom federatedml.util import consts\nfrom federatedml.util.transfer_variable import HeteroLRTransferVariable\n\nLOGGER = log_utils.getLogger()\n\n\nclass HeteroLRGuest(BaseLogisticRegression):\n def __init__(self, logistic_params):\n super(HeteroLRGuest, self).__init__(logistic_params)\n self.transfer_variable = HeteroLRTransferVariable()\n self.data_batch_count = []\n\n self.encrypted_calculator = None\n\n self.guest_forward = None\n\n def compute_forward(self, data_instances, coef_, intercept_, batch_index=-1):\n \"\"\"\n Compute W * X + b and (W * X + b)^2, where X is the input data, W is the coefficient of lr,\n and b is the interception\n Parameters\n ----------\n data_instance: DTable of Instance, input data\n coef_: list, coefficient of lr\n intercept_: float, the interception of lr\n \"\"\"\n wx = self.compute_wx(data_instances, coef_, intercept_)\n\n en_wx = self.encrypted_calculator[batch_index].encrypt(wx)\n wx_square = wx.mapValues(lambda v: np.square(v))\n en_wx_square = self.encrypted_calculator[batch_index].encrypt(wx_square)\n\n en_wx_join_en_wx_square = en_wx.join(en_wx_square, lambda wx, wx_square: (wx, wx_square))\n self.guest_forward = en_wx_join_en_wx_square.join(wx, lambda e, wx: (e[0], e[1], wx))\n\n # temporary resource recovery and will be removed in the future\n rubbish_list = [wx, en_wx, wx_square, en_wx_square, en_wx_join_en_wx_square]\n rubbish_clear(rubbish_list)\n\n def aggregate_forward(self, host_forward):\n \"\"\"\n Compute (en_wx_g + en_wx_h)^2 = en_wx_g^2 + en_wx_h^2 + 2 * wx_g * en_wx_h , where en_wx_g is the encrypted W * X + b of guest, wx_g is unencrypted W * X + b,\n and en_wx_h is the encrypted W * X + b of host.\n Parameters\n ----------\n host_forward: DTable, include encrypted W * X and (W * X)^2\n\n Returns\n ----------\n aggregate_forward_res\n list\n include W * X and (W * X)^2 federate with guest and host\n \"\"\"\n aggregate_forward_res = self.guest_forward.join(host_forward,\n lambda g, h: (g[0] + h[0], g[1] + h[1] + 2 * g[2] * h[0]))\n return aggregate_forward_res\n\n @staticmethod\n def load_data(data_instance):\n \"\"\"\n set the negative label to -1\n Parameters\n ----------\n data_instance: DTable of Instance, input data\n \"\"\"\n if data_instance.label != 1:\n data_instance.label = -1\n return data_instance\n\n def fit(self, data_instances):\n \"\"\"\n Train lr model of role guest\n Parameters\n ----------\n data_instances: DTable of Instance, input data\n 
\"\"\"\n\n LOGGER.info(\"Enter hetero_lr_guest fit\")\n self._abnormal_detection(data_instances)\n\n self.header = self.get_header(data_instances)\n data_instances = data_instances.mapValues(HeteroLRGuest.load_data)\n\n public_key = federation.get(name=self.transfer_variable.paillier_pubkey.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.paillier_pubkey),\n idx=0)\n LOGGER.info(\"Get public_key from arbiter:{}\".format(public_key))\n self.encrypt_operator.set_public_key(public_key)\n\n LOGGER.info(\"Generate mini-batch from input data\")\n mini_batch_obj = MiniBatch(data_instances, batch_size=self.batch_size)\n batch_num = mini_batch_obj.batch_nums\n if self.batch_size == -1:\n LOGGER.info(\"batch size is -1, set it to the number of data in data_instances\")\n self.batch_size = data_instances.count()\n\n batch_info = {\"batch_size\": self.batch_size, \"batch_num\": batch_num}\n LOGGER.info(\"batch_info:{}\".format(batch_info))\n federation.remote(batch_info,\n name=self.transfer_variable.batch_info.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.batch_info),\n role=consts.HOST,\n idx=0)\n LOGGER.info(\"Remote batch_info to Host\")\n federation.remote(batch_info,\n name=self.transfer_variable.batch_info.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.batch_info),\n role=consts.ARBITER,\n idx=0)\n LOGGER.info(\"Remote batch_info to Arbiter\")\n\n self.encrypted_calculator = [EncryptModeCalculator(self.encrypt_operator,\n self.encrypted_mode_calculator_param.mode,\n self.encrypted_mode_calculator_param.re_encrypted_rate) for _\n in range(batch_num)]\n\n LOGGER.info(\"Start initialize model.\")\n LOGGER.info(\"fit_intercept:{}\".format(self.init_param_obj.fit_intercept))\n model_shape = self.get_features_shape(data_instances)\n weight = self.initializer.init_model(model_shape, init_params=self.init_param_obj)\n if self.init_param_obj.fit_intercept is True:\n self.coef_ = weight[:-1]\n self.intercept_ = weight[-1]\n else:\n self.coef_ = weight\n\n is_send_all_batch_index = False\n self.n_iter_ = 0\n index_data_inst_map = {}\n\n while self.n_iter_ < self.max_iter:\n LOGGER.info(\"iter:{}\".format(self.n_iter_))\n # each iter will get the same batch_data_generator\n batch_data_generator = mini_batch_obj.mini_batch_data_generator(result='index')\n\n batch_index = 0\n for batch_data_index in batch_data_generator:\n LOGGER.info(\"batch:{}\".format(batch_index))\n if not is_send_all_batch_index:\n LOGGER.info(\"remote mini-batch index to Host\")\n federation.remote(batch_data_index,\n name=self.transfer_variable.batch_data_index.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.batch_data_index,\n self.n_iter_,\n batch_index),\n role=consts.HOST,\n idx=0)\n if batch_index >= mini_batch_obj.batch_nums - 1:\n is_send_all_batch_index = True\n\n # Get mini-batch train data\n if len(index_data_inst_map) < batch_num:\n batch_data_inst = data_instances.join(batch_data_index, lambda data_inst, index: data_inst)\n index_data_inst_map[batch_index] = batch_data_inst\n else:\n batch_data_inst = index_data_inst_map[batch_index]\n\n # transforms features of raw input 'batch_data_inst' into more representative features 'batch_feat_inst'\n batch_feat_inst = self.transform(batch_data_inst)\n\n # guest/host forward\n self.compute_forward(batch_feat_inst, self.coef_, self.intercept_, batch_index)\n host_forward = federation.get(name=self.transfer_variable.host_forward_dict.name,\n 
tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.host_forward_dict, self.n_iter_, batch_index),\n idx=0)\n LOGGER.info(\"Get host_forward from host\")\n aggregate_forward_res = self.aggregate_forward(host_forward)\n en_aggregate_wx = aggregate_forward_res.mapValues(lambda v: v[0])\n en_aggregate_wx_square = aggregate_forward_res.mapValues(lambda v: v[1])\n\n # compute [[d]]\n if self.gradient_operator is None:\n self.gradient_operator = HeteroLogisticGradient(self.encrypt_operator)\n fore_gradient = self.gradient_operator.compute_fore_gradient(batch_feat_inst, en_aggregate_wx)\n federation.remote(fore_gradient,\n name=self.transfer_variable.fore_gradient.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.fore_gradient,\n self.n_iter_,\n batch_index),\n role=consts.HOST,\n idx=0)\n\n LOGGER.info(\"Remote fore_gradient to Host\")\n # compute guest gradient and loss\n guest_gradient, loss = self.gradient_operator.compute_gradient_and_loss(batch_feat_inst,\n fore_gradient,\n en_aggregate_wx,\n en_aggregate_wx_square,\n self.fit_intercept)\n\n # loss regulation if necessary\n if self.updater is not None:\n guest_loss_regular = self.updater.loss_norm(self.coef_)\n loss += self.encrypt_operator.encrypt(guest_loss_regular)\n\n federation.remote(guest_gradient,\n name=self.transfer_variable.guest_gradient.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_gradient,\n self.n_iter_,\n batch_index),\n role=consts.ARBITER,\n idx=0)\n LOGGER.info(\"Remote guest_gradient to arbiter\")\n\n optim_guest_gradient = federation.get(name=self.transfer_variable.guest_optim_gradient.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_optim_gradient, self.n_iter_,\n batch_index),\n idx=0)\n LOGGER.info(\"Get optim_guest_gradient from arbiter\")\n\n # update model\n LOGGER.info(\"update_model\")\n self.update_model(optim_guest_gradient)\n\n # update local model that transforms features of raw input 'batch_data_inst'\n training_info = {\"iteration\": self.n_iter_, \"batch_index\": batch_index}\n self.update_local_model(fore_gradient, batch_data_inst, self.coef_, **training_info)\n\n # Get loss regulation from Host if regulation is set\n if self.updater is not None:\n en_host_loss_regular = federation.get(name=self.transfer_variable.host_loss_regular.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.host_loss_regular, self.n_iter_,\n batch_index),\n idx=0)\n LOGGER.info(\"Get host_loss_regular from Host\")\n loss += en_host_loss_regular\n\n federation.remote(loss,\n name=self.transfer_variable.loss.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.loss,\n self.n_iter_,\n batch_index),\n role=consts.ARBITER,\n idx=0)\n LOGGER.info(\"Remote loss to arbiter\")\n\n # is converge of loss in arbiter\n batch_index += 1\n\n # temporary resource recovery and will be removed in the future\n rubbish_list = [host_forward,\n aggregate_forward_res,\n en_aggregate_wx,\n en_aggregate_wx_square,\n fore_gradient,\n self.guest_forward\n ]\n rubbish_clear(rubbish_list)\n\n is_stopped = federation.get(name=self.transfer_variable.is_stopped.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.is_stopped, self.n_iter_, batch_index),\n idx=0)\n LOGGER.info(\"Get is_stop flag from arbiter:{}\".format(is_stopped))\n\n self.n_iter_ += 1\n if is_stopped:\n LOGGER.info(\"Get stop signal from arbiter, model is converged, 
iter:{}\".format(self.n_iter_))\n break\n\n LOGGER.info(\"Reach max iter {}, train model finish!\".format(self.max_iter))\n\n def predict(self, data_instances, predict_param):\n \"\"\"\n Prediction of lr\n Parameters\n ----------\n data_instance:DTable of Instance, input data\n predict_param: PredictParam, the setting of prediction.\n\n Returns\n ----------\n DTable\n include input data label, predict probably, label\n \"\"\"\n LOGGER.info(\"Start predict ...\")\n\n data_features = self.transform(data_instances)\n\n prob_guest = self.compute_wx(data_features, self.coef_, self.intercept_)\n prob_host = federation.get(name=self.transfer_variable.host_prob.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.host_prob),\n idx=0)\n LOGGER.info(\"Get probability from Host\")\n\n # guest probability\n pred_prob = prob_guest.join(prob_host, lambda g, h: activation.sigmoid(g + h))\n pred_label = self.classified(pred_prob, predict_param.threshold)\n if predict_param.with_proba:\n labels = data_instances.mapValues(lambda v: v.label)\n predict_result = labels.join(pred_prob, lambda label, prob: (label, prob))\n else:\n predict_result = data_instances.mapValues(lambda v: (v.label, None))\n\n predict_result = predict_result.join(pred_label, lambda r, p: (r[0], r[1], p))\n return predict_result\n"
] | [
[
"numpy.square"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hongfz16/HCMoCo | [
"140968c66b72034ee2dff610a69be464d8e5866b"
] | [
"A2J/hrnet/official_hrnet.py"
] | [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Ke Sun ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\nimport functools\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch._utils\nimport torch.nn.functional as F\n\nBatchNorm2d = nn.BatchNorm2d\nBN_MOMENTUM = 0.01\nlogger = logging.getLogger(__name__)\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass HighResolutionModule(nn.Module):\n def __init__(self, num_branches, blocks, num_blocks, num_inchannels,\n num_channels, fuse_method, multi_scale_output=True):\n super(HighResolutionModule, self).__init__()\n self._check_branches(\n num_branches, blocks, num_blocks, num_inchannels, num_channels)\n\n self.num_inchannels = num_inchannels\n self.fuse_method = fuse_method\n self.num_branches = num_branches\n\n self.multi_scale_output = multi_scale_output\n\n self.branches = self._make_branches(\n num_branches, blocks, num_blocks, num_channels)\n self.fuse_layers = self._make_fuse_layers()\n self.relu = nn.ReLU(inplace=True)\n\n def _check_branches(self, num_branches, blocks, num_blocks,\n num_inchannels, num_channels):\n if num_branches != len(num_blocks):\n error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(\n num_branches, len(num_blocks))\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n if num_branches != len(num_channels):\n 
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(\n num_branches, len(num_channels))\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n if num_branches != len(num_inchannels):\n error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(\n num_branches, len(num_inchannels))\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n def _make_one_branch(self, branch_index, block, num_blocks, num_channels,\n stride=1):\n downsample = None\n if stride != 1 or \\\n self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.num_inchannels[branch_index],\n num_channels[branch_index] * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm2d(num_channels[branch_index] * block.expansion,\n momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.num_inchannels[branch_index],\n num_channels[branch_index], stride, downsample))\n self.num_inchannels[branch_index] = \\\n num_channels[branch_index] * block.expansion\n for i in range(1, num_blocks[branch_index]):\n layers.append(block(self.num_inchannels[branch_index],\n num_channels[branch_index]))\n\n return nn.Sequential(*layers)\n\n def _make_branches(self, num_branches, block, num_blocks, num_channels):\n branches = []\n\n for i in range(num_branches):\n branches.append(\n self._make_one_branch(i, block, num_blocks, num_channels))\n\n return nn.ModuleList(branches)\n\n def _make_fuse_layers(self):\n if self.num_branches == 1:\n return None\n\n num_branches = self.num_branches\n num_inchannels = self.num_inchannels\n fuse_layers = []\n for i in range(num_branches if self.multi_scale_output else 1):\n fuse_layer = []\n for j in range(num_branches):\n if j > i:\n fuse_layer.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_inchannels[i],\n 1,\n 1,\n 0,\n bias=False),\n BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))\n elif j == i:\n fuse_layer.append(None)\n else:\n conv3x3s = []\n for k in range(i-j):\n if k == i - j - 1:\n num_outchannels_conv3x3 = num_inchannels[i]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n BatchNorm2d(num_outchannels_conv3x3, \n momentum=BN_MOMENTUM)))\n else:\n num_outchannels_conv3x3 = num_inchannels[j]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n BatchNorm2d(num_outchannels_conv3x3,\n momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)))\n fuse_layer.append(nn.Sequential(*conv3x3s))\n fuse_layers.append(nn.ModuleList(fuse_layer))\n\n return nn.ModuleList(fuse_layers)\n\n def get_num_inchannels(self):\n return self.num_inchannels\n\n def forward(self, x):\n if self.num_branches == 1:\n return [self.branches[0](x[0])]\n\n for i in range(self.num_branches):\n x[i] = self.branches[i](x[i])\n\n x_fuse = []\n for i in range(len(self.fuse_layers)):\n y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])\n for j in range(1, self.num_branches):\n if i == j:\n y = y + x[j]\n elif j > i:\n width_output = x[i].shape[-1]\n height_output = x[i].shape[-2]\n y = y + F.interpolate(\n self.fuse_layers[i][j](x[j]),\n size=[height_output, width_output],\n mode='bilinear')\n else:\n y = y + self.fuse_layers[i][j](x[j])\n x_fuse.append(self.relu(y))\n\n return x_fuse\n\n\nblocks_dict = {\n 'BASIC': BasicBlock,\n 'BOTTLENECK': Bottleneck\n}\n\n\nclass HighResolutionNet(nn.Module):\n\n def __init__(self, config, **kwargs):\n extra = config.MODEL.EXTRA\n super(HighResolutionNet, 
self).__init__()\n\n # stem net\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n \n self.stage1_cfg = extra['STAGE1']\n num_channels = self.stage1_cfg['NUM_CHANNELS'][0]\n block = blocks_dict[self.stage1_cfg['BLOCK']]\n num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]\n self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)\n stage1_out_channel = block.expansion*num_channels\n\n self.stage2_cfg = extra['STAGE2']\n num_channels = self.stage2_cfg['NUM_CHANNELS']\n block = blocks_dict[self.stage2_cfg['BLOCK']]\n num_channels = [\n num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition1 = self._make_transition_layer(\n [stage1_out_channel], num_channels)\n self.stage2, pre_stage_channels = self._make_stage(\n self.stage2_cfg, num_channels)\n\n self.stage3_cfg = extra['STAGE3']\n num_channels = self.stage3_cfg['NUM_CHANNELS']\n block = blocks_dict[self.stage3_cfg['BLOCK']]\n num_channels = [\n num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition2 = self._make_transition_layer(\n pre_stage_channels, num_channels)\n self.stage3, pre_stage_channels = self._make_stage(\n self.stage3_cfg, num_channels)\n\n self.stage4_cfg = extra['STAGE4']\n num_channels = self.stage4_cfg['NUM_CHANNELS']\n block = blocks_dict[self.stage4_cfg['BLOCK']]\n num_channels = [\n num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition3 = self._make_transition_layer(\n pre_stage_channels, num_channels)\n self.stage4, pre_stage_channels = self._make_stage(\n self.stage4_cfg, num_channels, multi_scale_output=True)\n \n # last_inp_channels = np.int(np.sum(pre_stage_channels))\n\n # self.last_layer = nn.Sequential(\n # nn.Conv2d(\n # in_channels=last_inp_channels,\n # out_channels=last_inp_channels,\n # kernel_size=1,\n # stride=1,\n # padding=0),\n # BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(\n # in_channels=last_inp_channels,\n # out_channels=config.DATASET.NUM_CLASSES,\n # kernel_size=extra.FINAL_CONV_KERNEL,\n # stride=1,\n # padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0)\n # )\n\n def _make_transition_layer(\n self, num_channels_pre_layer, num_channels_cur_layer):\n num_branches_cur = len(num_channels_cur_layer)\n num_branches_pre = len(num_channels_pre_layer)\n\n transition_layers = []\n for i in range(num_branches_cur):\n if i < num_branches_pre:\n if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\n transition_layers.append(nn.Sequential(\n nn.Conv2d(num_channels_pre_layer[i],\n num_channels_cur_layer[i],\n 3,\n 1,\n 1,\n bias=False),\n BatchNorm2d(\n num_channels_cur_layer[i], momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)))\n else:\n transition_layers.append(None)\n else:\n conv3x3s = []\n for j in range(i+1-num_branches_pre):\n inchannels = num_channels_pre_layer[-1]\n outchannels = num_channels_cur_layer[i] \\\n if j == i-num_branches_pre else inchannels\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(\n inchannels, outchannels, 3, 2, 1, bias=False),\n BatchNorm2d(outchannels, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)))\n transition_layers.append(nn.Sequential(*conv3x3s))\n\n return nn.ModuleList(transition_layers)\n\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n downsample 
= None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(inplanes, planes, stride, downsample))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _make_stage(self, layer_config, num_inchannels,\n multi_scale_output=True):\n num_modules = layer_config['NUM_MODULES']\n num_branches = layer_config['NUM_BRANCHES']\n num_blocks = layer_config['NUM_BLOCKS']\n num_channels = layer_config['NUM_CHANNELS']\n block = blocks_dict[layer_config['BLOCK']]\n fuse_method = layer_config['FUSE_METHOD']\n\n modules = []\n for i in range(num_modules):\n # multi_scale_output is only used last module\n if not multi_scale_output and i == num_modules - 1:\n reset_multi_scale_output = False\n else:\n reset_multi_scale_output = True\n modules.append(\n HighResolutionModule(num_branches,\n block,\n num_blocks,\n num_inchannels,\n num_channels,\n fuse_method,\n reset_multi_scale_output)\n )\n num_inchannels = modules[-1].get_num_inchannels()\n\n return nn.Sequential(*modules), num_inchannels\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.layer1(x)\n\n x_list = []\n for i in range(self.stage2_cfg['NUM_BRANCHES']):\n if self.transition1[i] is not None:\n x_list.append(self.transition1[i](x))\n else:\n x_list.append(x)\n y_list = self.stage2(x_list)\n\n x_list = []\n for i in range(self.stage3_cfg['NUM_BRANCHES']):\n if self.transition2[i] is not None:\n x_list.append(self.transition2[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n y_list = self.stage3(x_list)\n\n x_list = []\n for i in range(self.stage4_cfg['NUM_BRANCHES']):\n if self.transition3[i] is not None:\n x_list.append(self.transition3[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n x = self.stage4(x_list)\n\n # # Upsampling\n # x0_h, x0_w = x[0].size(2), x[0].size(3)\n # x1 = F.upsample(x[1], size=(x0_h, x0_w), mode='bilinear')\n # x2 = F.upsample(x[2], size=(x0_h, x0_w), mode='bilinear')\n # x3 = F.upsample(x[3], size=(x0_h, x0_w), mode='bilinear')\n\n # x = torch.cat([x[0], x1, x2, x3], 1)\n\n # x = self.last_layer(x)\n\n return x\n\n def init_weights(self, pretrained='',):\n logger.info('=> init weights from normal distribution')\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n if os.path.isfile(pretrained):\n pretrained_dict = torch.load(pretrained, map_location='cpu')\n logger.info('=> loading pretrained model {}'.format(pretrained))\n print('=> loading pretrained model {}'.format(pretrained))\n model_dict = self.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items()\n if k in model_dict.keys()}\n #for k, _ in pretrained_dict.items():\n # logger.info(\n # '=> loading {} pretrained model {}'.format(k, pretrained))\n model_dict.update(pretrained_dict)\n self.load_state_dict(model_dict)\n\ndef get_seg_model(cfg, **kwargs):\n model = HighResolutionNet(cfg, **kwargs)\n model.init_weights(cfg.MODEL.PRETRAINED)\n\n return model\n\nfrom .default_config import _C, update_from_yaml\n\ndef get_hrnet_w48_backbone(pretrain_pth=None):\n update_from_yaml(_C, 
'./hrnet/seg_hrnet_w48_473x473_sgd_lr7e-3_wd5e-4_bs_40_epoch150.yaml')\n model = HighResolutionNet(_C)\n if pretrain_pth is None:\n model.init_weights(_C.MODEL.PRETRAINED)\n else:\n model.init_weights(pretrain_pth)\n # model.init_weights('')\n return model\n\ndef get_hrnet_w32_backbone(pretrain_pth=None):\n update_from_yaml(_C, './hrnet/seg_hrnet_w32_473x473_sgd_lr7e-3_wd5e-4_bs_40_epoch150.yaml')\n model = HighResolutionNet(_C)\n if pretrain_pth is None:\n model.init_weights(_C.MODEL.PRETRAINED)\n else:\n model.init_weights(pretrain_pth)\n # model.init_weights('')\n return model\n\ndef get_hrnet_w18_backbone(pretrain_pth=None):\n update_from_yaml(_C, './hrnet/seg_hrnet_w18_473x473_sgd_lr7e-3_wd5e-4_bs_40_epoch150.yaml')\n model = HighResolutionNet(_C)\n if pretrain_pth is None:\n model.init_weights('')\n elif pretrain_pth == 'IN':\n model.init_weights(_C.MODEL.PRETRAINED)\n else:\n model.init_weights(pretrain_pth)\n # model.init_weights('')\n return model\n"
] | [
[
"torch.nn.Sequential",
"torch.load",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
IndyMPO/IndyGeoprocessingTools | [
"968f9befc37252e065e8d8085c0d10f17a871152"
] | [
"AccessibilityCalculator/AccessibilityCalculator.py"
] | [
"#This script copyright 2017 Indianapolis Metropolitan Planning Organization\nimport arcpy\nimport numpy as np\n\n#Read in parameters\nauto_skim_file = arcpy.GetParameterAsText(0)\ntransit_skim_file = arcpy.GetParameterAsText(1)\nauto_time_threshold = arcpy.GetParameter(2)\nauto_function_decay = arcpy.GetParameter(3)\ntransit_time_threshold = arcpy.GetParameter(4)\ntransit_function_decay = arcpy.GetParameter(5)\ntaz_file = arcpy.GetParameterAsText(6)\ntaz_field = arcpy.GetParameterAsText(7)\npop_field = arcpy.GetParameterAsText(8)\nemp_field = arcpy.GetParameterAsText(9)\nret_field = arcpy.GetParameterAsText(10)\naccpop_name = 'ACC_POP'\naccret_name = 'ACC_RET'\naccnre_name = 'ACC_NRE'\nattpop_name = 'ATT_POP'\nattret_name = 'ATT_RET'\nattnre_name = 'ATT_NRE'\ntrnacc_name = 'TRN_ACC'\n\nif auto_function_decay <= 0:\n auto_function_decay = np.inf\nif transit_function_decay <= 0:\n transit_function_decay = np.inf\n\ndef extract_skim_from_csv(csv_file):\n '''\n Reads in a skim with labels as a csv and returns a 2-dimensional numpy array and a dictionary mapping zone to array index\n\n Parameters\n ----------\n csv_file (str):\n Filepath of the csv to read in\n '''\n data = np.genfromtxt(csv_file, delimiter = ',', filling_values = np.inf) #Read in data\n zones = data[0, :]\n zone_map = {zones[i+1]: i for i in range(len(zones)-1)} #Create dictionary for mapping zones\n skim = data[1:, 1:] #Define actual skim data\n return skim, zone_map\n\n##def under_auto_threshold(time):\n## '''\n## Checks if a time is less than or equal to the auto threshold\n##\n## Parameters\n## ----------\n## time (numeric):\n## Travel time value to be tested\n##\n## Returns\n## -------\n## is_under (bool):\n## True if the time is less than or equal to the threshold, False otherwise\n## '''\n## global auto_time_threshold\n## return time <= auto_time_threshold\n##\n##create_auto_bool_skim = np.vectorize(under_auto_threshold)\n##\n##def under_transit_threshold(time):\n## '''\n## Checks if a time is less than or equal to the transit threshold\n##\n## Parameters\n## ----------\n## time (numeric):\n## Travel time value to be tested\n##\n## Returns\n## -------\n## is_under (bool):\n## True if the time is less than or equal to the threshold, False otherwise\n## '''\n## global transit_time_threshold\n## return time <= transit_time_threshold\n##\n##create_transit_bool_skim = np.vectorize(under_transit_threshold)\n##\n##def create_bool_skim(skim, threshold):\n## '''\n## Creates a boolean \"skim\" that is True if an origin-destination pair is less than or equal to a specified threshold and False otherwise\n##\n## Parameters\n## ----------\n## skim (ndarray):\n## 2-dimensional skim array\n## threshold (numeric):\n## Travel time threshold\n##\n## Returns\n## -------\n## out (ndarray):\n## Array with same dimensions as input skim full of ones and zeros indicating if a pair is less than or equal to `threshold`\n## '''\n## out = np.empty_like(skim)\n## #Iterate over rows and columns, checking each skim individually\n## for i in range(skim.shape[0]):\n## for j in range(skim.shape[1]):\n## out[i, j] = skim[i, j] <= threshold\n## return out\n\ndef apply_auto_decay_function(skim):\n '''\n Applies a decay function to each element of an auto skim matrix\n\n Parameters\n ----------\n skim (ndarray):\n Skim matrix\n\n Returns\n -------\n weight_skim (ndarray):\n A matrix of weights with the same shape as `skim`\n '''\n global auto_time_threshold, auto_function_decay\n return np.exp(-0.5*np.power(skim/auto_time_threshold, 
auto_function_decay))\n\ndef apply_transit_decay_function(skim):\n '''\n Applies a decay function to each element of an auto skim matrix\n\n Parameters\n ----------\n skim (ndarray):\n Skim matrix\n\n Returns\n -------\n weight_skim (ndarray):\n A matrix of weights with the same shape as `skim`\n '''\n global transit_time_threshold, transit_function_decay\n return np.exp(-0.5*np.power(skim/transit_time_threshold, transit_function_decay))\n\ndef calc_auto_acc(weight_skim, zone_map, taz_file):\n '''\n Calculates the number of people, retail, and non-retail jobs within a threshold of each zone. Accessibilities in the input shapefile are updated.\n\n Parameters\n ----------\n weight_skim (ndarray):\n Weight skim\n zone_map (dict):\n Dictionary mapping zone number to index in the skim\n taz_file (str):\n Filepath for a TAZ shapefile\n '''\n global taz_field, pop_field, emp_field, ret_field, accpop_name, accret_name, accnre_name\n\n pop = np.empty_like(weight_skim, dtype = int)\n ret = np.empty_like(weight_skim, dtype = int)\n nre = np.empty_like(weight_skim, dtype = int)\n\n #For each zone, create ndarrays that represent the number of people, retail, and non-retail jobs that can be reached within a specific time\n zones = arcpy.da.SearchCursor(taz_file, field_names = [taz_field, pop_field, emp_field, ret_field])\n for zone in zones:\n pop[:, zone_map[zone[0]]] = zone[1]*weight_skim[:, zone_map[zone[0]]]\n ret[:, zone_map[zone[0]]] = zone[3]*weight_skim[:, zone_map[zone[0]]]\n nre[:, zone_map[zone[0]]] = (zone[2] - zone[3])*weight_skim[:, zone_map[zone[0]]]\n\n #Calculate the row sums to get the total number of people, retail, and non-retail jobs that can be reached within a specific time for each origin zone\n acc_pop = pop.sum(1)\n acc_ret = ret.sum(1)\n acc_nre = nre.sum(1)\n\n #Update the shapefile with the calculated accessibilities\n zones = arcpy.da.UpdateCursor(taz_file, field_names = [taz_field, accpop_name, accret_name, accnre_name])\n for zone in zones:\n zone[1] = acc_pop[zone_map[zone[0]]]\n zone[2] = acc_ret[zone_map[zone[0]]]\n zone[3] = acc_nre[zone_map[zone[0]]]\n zones.updateRow(zone)\n\n #Unlock the shapefile\n del zone\n del zones\n\ndef calc_auto_att(weight_skim, zone_map, taz_file):\n '''\n Calculates the number of people, retail, and non-retail jobs that can reach each zone within a specific time. 
Attractivenesses in the input shapefile are updated.\n\n Parameters\n ----------\n weight_skim (ndarray):\n Weight skim\n zone_map (dict):\n Dictionary mapping zone number to index in the skim\n taz_file (str):\n Filepath for a TAZ shapefile\n '''\n global taz_field, pop_field, emp_field, ret_field, attpop_name, attret_name, attnre_name\n\n pop = np.empty_like(weight_skim, dtype = int)\n ret = np.empty_like(weight_skim, dtype = int)\n nre = np.empty_like(weight_skim, dtype = int)\n\n #For each zone, create ndarrays that represent the number of people, retail, and non-retail jobs that can reach a zone within a specific time\n zones = arcpy.da.SearchCursor(taz_file, field_names = [taz_field, pop_field, emp_field, ret_field])\n for zone in zones:\n pop[zone_map[zone[0]], :] = zone[1]*weight_skim[zone_map[zone[0]], :]\n ret[zone_map[zone[0]], :] = zone[3]*weight_skim[zone_map[zone[0]], :]\n nre[zone_map[zone[0]], :] = (zone[2] - zone[3])*weight_skim[zone_map[zone[0]], :]\n\n #Calculate the column sums to get the total number of people, retail, and non-retail jobs that can reach each destination zone within a specific time\n att_pop = pop.sum(0)\n att_ret = ret.sum(0)\n att_nre = nre.sum(0)\n\n #Update the shapefile with the calculated attractivenesses\n zones = arcpy.da.UpdateCursor(taz_file, field_names = [taz_field, attpop_name, attret_name, attnre_name])\n for zone in zones:\n zone[1] = att_pop[zone_map[zone[0]]]\n zone[2] = att_ret[zone_map[zone[0]]]\n zone[3] = att_nre[zone_map[zone[0]]]\n zones.updateRow(zone)\n\n #Unlock the shapefile\n del zone\n del zones\n\ndef calc_transit_acc(weight_skim, zone_map, taz_file):\n '''\n Calculates the number of jobs that can be accessed by transit within a specified time for each zone. Accessibilities in the input shapefile are updated.\n\n Parameters\n ----------\n weight_skim (ndarray):\n Weight skim\n zone_map (dict):\n Dictionary mapping zone number to index in the skim\n taz_file (str):\n Filepath for a TAZ shapefile\n '''\n global taz_field, emp_field, trnacc_name\n\n emp = np.empty_like(weight_skim, dtype = int)\n\n #Calculate number of jobs that can be reached by each origin zone in each destination zone within a specific time\n zones = arcpy.da.SearchCursor(taz_file, field_names = [taz_field, emp_field])\n for zone in zones:\n emp[:, zone_map[zone[0]]] = zone[1]*weight_skim[:, zone_map[zone[0]]]\n\n #Calculate the row sums to get the total number of jobs that each origin zone can reach within a specific time\n trn_acc = emp.sum(1)\n\n #Update the shapefile with the calculated accessbilities\n zones = arcpy.da.UpdateCursor(taz_file, field_names = [taz_field, trnacc_name])\n for zone in zones:\n zone[1] = trn_acc[zone_map[zone[0]]]\n zones.updateRow(zone)\n\n #Unlock the shapefiles\n del zone\n del zones\n\n\narcpy.AddMessage('Adding Fields if they are absent')\ncurrent_fields = [field.aliasName for field in arcpy.ListFields(taz_file)]\nnew_fields = [accpop_name, accret_name, accnre_name, attpop_name, attret_name, attnre_name, trnacc_name]\nfor field in new_fields:\n if field not in current_fields:\n try:\n arcpy.AddField_management(taz_file, field, 'LONG')\n except Exception:\n arcpy.AddMessage(\"Adding a new field didn't work\")\n continue\n\narcpy.AddMessage('Reading in Auto Skim')\n(auto, auto_zones) = extract_skim_from_csv(auto_skim_file)\n\narcpy.AddMessage('Creating auto weight skim')\nauto_weight = apply_auto_decay_function(auto)\n\narcpy.AddMessage('Reading in Transit Skim')\n(transit, transit_zones) = 
extract_skim_from_csv(transit_skim_file)\n\narcpy.AddMessage('Creating transit weight skim')\ntransit_weight = apply_transit_decay_function(transit)\n\narcpy.AddMessage('Calculating Auto Accessibility')\ncalc_auto_acc(auto_weight, auto_zones, taz_file)\n\narcpy.AddMessage('Calculating Auto Attractiveness')\ncalc_auto_att(auto_weight, auto_zones, taz_file)\n\narcpy.AddMessage('Calculating Transit Accessibility')\ncalc_transit_acc(transit_weight, transit_zones, taz_file)\n"
] | [
[
"numpy.empty_like",
"numpy.power",
"numpy.genfromtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JCSDA/MAOOAM | [
"aadb43bc6ca2245ab79b9f8278e598ca975c94a4"
] | [
"nuopc/test/plot_evol.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\n\nndim = 36\n\n# Read in data\n\n#(a) add single coupled model (black)\ninfile='../SingleModelProto/evol_field.dat'\nwith open(infile, 'r') as f:\n x0 = f.read().splitlines()\n\nS0 = np.zeros((len(x0),ndim))\nfor i,s0 in enumerate(x0):\n s0_ = s0.split()\n s = list(map(float,s0_[1:]))\n print('s = ')\n print (s)\n\n S0[i,:] = s\n# plt.plot(S0[i,:],'ko')\n\n#plt.axis([0, len(s), min(s), max(s)])\n#plt.show()\n \n\n#(b) add AtmOcnProto\ninfile='../AtmOcnProto/evol_atmos.dat'\nwith open(infile, 'r') as f:\n x1a = f.read().splitlines()\n\nS1a = np.zeros((len(x1a),ndim))\nfor i,s1a in enumerate(x1a):\n s1a_ = s1a.split()\n s = list(map(float,s1a_[1:]))\n print('s = ')\n print (s)\n\n S1a[i,:] = s\n s = S1a[i,:] - S0[i,:]\n print('s = ')\n print (s)\n\n plt.plot(s,'bo')\n\nplt.axis([0, len(s), min(s), max(s)])\nplt.show()\n \n\ninfile='../AtmOcnProto/evol_ocean.dat'\nwith open(infile, 'r') as f:\n x1o = f.read().splitlines()\n\nS1o = np.zeros((len(x1o),ndim))\nfor i,s1o in enumerate(x1o):\n s1o_ = s1o.split()\n s = list(map(float,s1o_[1:]))\n print('s = ')\n print (s)\n\n S1o[i,:] = s\n s = S1o[i,:] - S0[i,:]\n print('s = ')\n print (s)\n\n plt.plot(s,'go')\n\nplt.axis([0, len(s), min(s), max(s)])\nplt.show()\n\n#(c) add AtmOcnMedProto\ninfile='../AtmOcnMedProto/evol_atmos.dat'\nwith open(infile, 'r') as f:\n x2a = f.read().splitlines()\n\nS2a = np.zeros((len(x2a),ndim))\nfor i,s2a in enumerate(x2a):\n s2a_ = s2a.split()\n s = list(map(float,s2a_[1:]))\n print('s = ')\n print (s)\n\n S2a[i,:] = s\n s = S2a[i,:] - S0[i,:]\n print('s = ')\n print (s)\n\n plt.plot(s,'bo')\n\nplt.axis([0, len(s), min(s), max(s)])\nplt.show()\n \ninfile='../AtmOcnMedProto/evol_ocean.dat'\nwith open(infile, 'r') as f:\n x2o = f.read().splitlines()\n\nS2o = np.zeros((len(x2o),ndim))\nfor i,s2o in enumerate(x2o):\n s2o_ = s2o.split()\n s = list(map(float,s2o_[1:]))\n print('s = ')\n print (s)\n\n S2o[i,:] = s\n s = S2o[i,:] - S0[i,:]\n print('s = ')\n print (s)\n\n plt.plot(s,'go')\n\nplt.axis([0, len(s), min(s), max(s)])\nplt.show()\n\n#(d) add ingest proto\ninfile='../AtmOcnMedIngestFromConfigProto/evol_atmos.dat'\nwith open(infile, 'r') as f:\n x3a = f.read().splitlines()\n\nS3a = np.zeros((len(x3a),ndim))\nfor i,s3a in enumerate(x3a):\n s3a_ = s3a.split()\n s = list(map(float,s3a_[1:]))\n print('s = ')\n print (s)\n\n S3a[i,:] = s\n s = S3a[i,:] - S0[i,:]\n print('s = ')\n print (s)\n\n plt.plot(s,'bo')\n\nplt.axis([0, len(s), min(s), max(s)])\nplt.show()\n \ninfile='../AtmOcnMedIngestFromConfigProto/evol_ocean.dat'\nwith open(infile, 'r') as f:\n x3o = f.read().splitlines()\n\nS3o = np.zeros((len(x3o),ndim))\nfor i,s3o in enumerate(x3o):\n s3o_ = s3o.split()\n s = list(map(float,s3o_[1:]))\n print('s = ')\n print (s)\n\n S3o[i,:] = s\n s = S3o[i,:] - S0[i,:]\n print('s = ')\n print (s)\n\n plt.plot(s,'go')\n\nplt.axis([0, len(s), min(s), max(s)])\nplt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
uber/orb | [
"b329326b8fd9382310645927846315714386de50"
] | [
"orbit/utils/kernels.py"
] | [
"import numpy as np\n\n\ndef reduce_by_max(x, n=2):\n out = x.copy()\n out[np.argsort(x)[:-n]] = 0\n return out\n\n\n# Gaussian-Kernel\n# https://en.wikipedia.org/wiki/Kernel_smoother\ndef gauss_kernel(x, x_i, rho=0.1, alpha=1.0, n_reduce=-1, point_to_flatten=1):\n \"\"\"\n Parameters\n ----------\n x : array-like\n points required to compute kernel weight\n x_i : array-like\n reference points location used to compute correspondent distance of each entry points\n rho : float\n smoothing parameter known as \"length-scale\" in gaussian process\n alpha : float\n marginal standard deviation parameter in gaussian process; one should use 1 in kernel regression\n n_reduce : int\n if greater 0 (default=-1), reduce number of positive weights to such input\n point_to_flatten : float\n the time point starting to flatten the weights; default is 1 for normalized time points\n\n Returns\n -------\n np.ndarray\n 2D array with size N x M such that\n N as the number of entry points\n M as the number of reference points\n matrix entries hold the value of weight of each element\n\n See Also\n --------\n 1. https://mc-stan.org/docs/2_24/stan-users-guide/gaussian-process-regression.html\n 2. https://en.wikipedia.org/wiki/Local_regression\n \"\"\"\n N = len(x)\n M = len(x_i)\n k = np.zeros((N, M), np.double)\n alpha_sq = alpha ** 2\n rho_sq_t2 = 2 * rho ** 2\n for n in range(N):\n if x[n] <= point_to_flatten:\n k[n, :] = alpha_sq * np.exp(-1 * (x[n] - x_i) ** 2 / rho_sq_t2)\n else:\n # last weights carried forward for future time points\n k[n, :] = alpha_sq * np.exp(-1 * (point_to_flatten - x_i) ** 2 / rho_sq_t2)\n\n if n_reduce > 0:\n k = np.apply_along_axis(reduce_by_max, axis=1, arr=k, n=n_reduce)\n\n k = k / np.sum(k, axis=1, keepdims=True)\n\n return k\n\n\ndef sandwich_kernel(x, x_i):\n \"\"\"\n Parameters\n ----------\n x : array-like\n points required to compute kernel weight\n x_i : array-like\n reference points location used to compute correspondent distance of each entry points\n\n Returns\n -------\n np.ndarray :\n 2D array with size N x M such that\n N as the number of entry points\n M as the number of reference points\n matrix entries hold the value of weight of each element\n\n See Also\n --------\n 1. https://mc-stan.org/docs/2_24/stan-users-guide/gaussian-process-regression.html\n 2. 
https://en.wikipedia.org/wiki/Local_regression\n \"\"\"\n N = len(x)\n M = len(x_i)\n k = np.zeros((N, M), dtype=np.double)\n\n np_idx = np.where(x < x_i[0])\n k[np_idx, 0] = 1\n\n for m in range(M - 1):\n np_idx = np.where(np.logical_and(x >= x_i[m], x < x_i[m + 1]))\n total_dist = x_i[m + 1] - x_i[m]\n backward_dist = x[np_idx] - x_i[m]\n forward_dist = x_i[m + 1] - x[np_idx]\n k[np_idx, m] = forward_dist / total_dist\n k[np_idx, m + 1] = backward_dist / total_dist\n\n np_idx = np.where(x >= x_i[M - 1])\n k[np_idx, M - 1] = 1\n\n # TODO: it is probably not needed\n k = k / np.sum(k, axis=1, keepdims=True)\n\n return k\n\n\ndef parabolic_kernel(x, x_i):\n # TODO: docstring\n N = len(x)\n M = len(x_i)\n k = np.zeros((N, M), dtype=np.double)\n\n # boundary case\n np_idx = np.where(x < x_i[0])\n if len(np_idx) > 0:\n k[np_idx, 0] = 1\n\n for m in range(M - 1):\n np_idx = np.where(np.logical_and(x >= x_i[m], x < x_i[m + 1]))\n total_dist = x_i[m + 1] - x_i[m]\n backward_dist = x[np_idx] - x_i[m]\n forward_dist = x_i[m + 1] - x[np_idx]\n k[np_idx, m] = 0.75 * (1 - (backward_dist / total_dist) ** 2)\n k[np_idx, m + 1] = 0.75 * (1 - (forward_dist / total_dist) ** 2)\n\n # boundary case\n np_idx = np.where(x >= x_i[M - 1])\n if len(np_idx) > 0:\n k[np_idx, M - 1] = 1\n\n # TODO: it is probably not needed\n k = k / np.sum(k, axis=1, keepdims=True)\n\n return k\n"
] | [
[
"numpy.apply_along_axis",
"numpy.where",
"numpy.exp",
"numpy.argsort",
"numpy.logical_and",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yiqian-wang/eyepy | [
"0523e8cea78c23a9c1bcf2d5b47a8f0fb59712e5"
] | [
"eyepy/core/eyedata.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom eyepy import config\nfrom skimage.transform._geometric import GeometricTransform\n\n\nclass EyeData:\n def __init__(\n self,\n volume: \"EyeVolume\",\n localizer: \"EyeEnface\",\n transformation: GeometricTransform,\n ):\n self.volume = volume\n self.localizer = localizer\n self.localizer_transformation = (\n transformation # Localizer to OCT transformation\n )\n\n def save(self, path):\n pass\n\n @classmethod\n def load(cls, path):\n pass\n\n @property\n def drusen_projection(self):\n # Sum the all B-Scans along their first axis (B-Scan height)\n # Swap axis such that the volume depth becomes the projections height not width\n # We want the first B-Scan to be located at the bottom hence flip along axis 0\n return np.flip(np.swapaxes(np.sum(self.drusen, axis=0), 0, 1), axis=0)\n\n @property\n def drusen_enface(self):\n \"\"\"Drusen projection warped into the localizer space.\"\"\"\n return transform.warp(\n self.drusen_projection.astype(float),\n self.tform_oct_to_localizer,\n output_shape=self.localizer_shape,\n order=0,\n )\n\n # Data Access:\n # Bscans r\n # Projections r(w)\n # Shadows r(w)\n # Annotations rw\n # Registrations rw\n # Meta rw\n\n # Bscan View into the volume\n\n # Projection views of volume in enface space\n\n # Projection views of volume annotation in enface space\n\n # Save and load data\n\n # Export annotations only\n"
] | [
[
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lipucky/nlp-gym | [
"d6d0175f038a777f0ed07586053e89fa9b1b6d2d"
] | [
"nlp_gym/envs/multi_label/featurizer.py"
] | [
"import re\nimport string\nfrom typing import List, Union\n\nimport flair\nimport torch\nfrom flair.data import Sentence\nfrom flair.embeddings import (BytePairEmbeddings, DocumentPoolEmbeddings,\n Embeddings, WordEmbeddings)\nfrom nltk import SnowballStemmer\nfrom nltk.corpus import stopwords\nfrom nlp_gym.envs.common.action_space import ActionSpace\nfrom nlp_gym.envs.multi_label.observation import ObservationFeaturizer, Observation\n\n\nclass TextPreProcessor:\n def __init__(self, language: str):\n self.language = language\n\n def _remove_digits(self, text: str) -> str:\n text = re.sub(r\"\\d+\", \"\", text)\n return text\n\n def _remove_punctuation(self, text: str) -> str:\n text = text.translate(str.maketrans('', '', string.punctuation))\n return text\n\n def process(self, text: str) -> str:\n text = text.lower()\n text = self._remove_punctuation(text)\n text = self._remove_digits(text)\n text = self._remove_stop_words(text)\n text = self._stem(text)\n return text\n\n def _remove_stop_words(self, text: str) -> str:\n stop_words_list = stopwords.words(self.language)\n return ' '.join([word for word in text.split() if word not in stop_words_list])\n\n def _stem(self, text: str) -> str:\n stemmer = SnowballStemmer(language=self.language)\n return ' '.join([stemmer.stem(word) for word in text.split()])\n\n def get_id(self) -> str:\n return f\"advanced_{self.language}\"\n\n\nclass EmbeddingRegistry:\n _registry_mapping = {\n \"byte_pair\": {\n \"cls\": [BytePairEmbeddings],\n \"params\": [\"en\"]\n },\n \"fasttext\": {\n \"cls\": [WordEmbeddings],\n \"params\": [\"en-crawl\"]\n },\n \"stacked\": {\n \"cls\": [WordEmbeddings, BytePairEmbeddings],\n \"params\": [\"en-crawl\", \"en\"]\n }\n }\n\n @staticmethod\n def get_embedding(embedding_type: str) -> List[Embeddings]:\n cls_ = EmbeddingRegistry._registry_mapping[embedding_type][\"cls\"]\n params_ = EmbeddingRegistry._registry_mapping[embedding_type][\"params\"]\n embeddings = [embedding_cls(embedding_param) for embedding_cls, embedding_param in zip(cls_, params_)]\n return embeddings\n\n\nclass DefaultFeaturizerForMultiLabelRank(ObservationFeaturizer):\n def __init__(self, action_space: ActionSpace, embedding_type: str = \"fasttext\", pre_process: bool = False,\n device: str = \"cpu\"):\n self.device = device\n self.pre_process = pre_process\n self.text_pre_processor = TextPreProcessor(language=\"english\")\n self._setup_device()\n embeddings = EmbeddingRegistry.get_embedding(embedding_type)\n self.doc_embeddings = DocumentPoolEmbeddings(embeddings).to(torch.device(self.device))\n self.action_space = action_space\n self._current_input_embeddings = None\n\n def _setup_device(self):\n flair.device = torch.device(self.device)\n\n def init_on_reset(self, input_text: Union[List[str], str]):\n # pooled document embeddings\n text = self.text_pre_processor.process(input_text) if self.pre_process else input_text\n sent = Sentence(text)\n self.doc_embeddings.embed(sent)\n self._current_input_embeddings = torch.tensor(sent.embedding.cpu().detach().numpy())\n\n def featurize(self, observation: Observation) -> torch.Tensor:\n input_vector = self._current_input_embeddings\n context_vector = self._featurize_context(observation.get_current_action_history())\n concatenated = torch.cat((input_vector, context_vector), dim=0)\n return concatenated\n\n def get_observation_dim(self) -> int:\n return self._get_input_dim() + self._get_context_dim()\n\n def _featurize_input(self, input_index: int) -> torch.Tensor:\n # the input does not change on each step\n return 
self._current_input_embeddings\n\n def _featurize_context(self, context: List[str]) -> torch.Tensor:\n # bag of actions representation\n context_vector = torch.zeros(self.action_space.size())\n action_indices = [self.action_space.action_to_ix(action) for action in context]\n context_vector[action_indices] = 1.0\n return context_vector\n\n def _get_input_dim(self):\n sent = Sentence(\"A random text to get the embedding dimension\")\n self.doc_embeddings.embed(sent)\n dim = sent[0].embedding.shape[0]\n sent.clear_embeddings()\n return dim\n\n def _get_context_dim(self):\n return self.action_space.size()\n\n\nif __name__ == \"__main__\":\n embeddings = EmbeddingRegistry.get_embedding(\"stacked\")\n print(embeddings)\n"
] | [
[
"torch.device",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PrajaktaSathe/HacktoberFest2020 | [
"e84fc7a513afe3dd75c7c28db1866d7f5e6a8147"
] | [
"Machine_Learning/KNN.py"
] | [
"import math\r\n\r\n\r\ndef EDistance(Instace1, Instance2, length):\r\n Distance = 0\r\n for i in range(length):\r\n Distance += pow((Instace1[i] - Instance2[i]), 2)\r\n return math.sqrt(Distance)\r\n\r\n\r\ndef getNeighbour(trainingfeature, trainingLabel, testInstance, k):\r\n if k > len(trainingfeature):\r\n k = len(trainingfeature)\r\n distance = []\r\n length = len(testInstance)\r\n for i in range(len(trainingfeature)):\r\n Dist = EDistance(trainingfeature[i], testInstance, length)\r\n distance.append([Dist, trainingLabel[i]])\r\n distance.sort(key=lambda x: x[0])\r\n Neighbour = []\r\n for i in range(k):\r\n Neighbour.append(distance[i][1])\r\n return Neighbour\r\n\r\n\r\ndef getResponse(Neighbour):\r\n ClassVotes = {}\r\n MaxVote = 0\r\n for vote in Neighbour:\r\n if vote in ClassVotes:\r\n ClassVotes[vote] += 1\r\n else:\r\n ClassVotes[vote] = 1\r\n if ClassVotes[vote] > MaxVote:\r\n MaxVote = ClassVotes[vote]\r\n for Class, vote in ClassVotes.items():\r\n if vote == MaxVote:\r\n return Class\r\n\r\n\r\ndef getAccuracy(TestingData, Predection):\r\n correct = 0\r\n for i in range(len(TestingData)):\r\n if TestingData[i] == Predection[i]:\r\n correct += 1\r\n return (correct / float(len(TestingData))) * 100\r\n\r\n\r\ndef main():\r\n from sklearn import datasets\r\n from sklearn.model_selection import train_test_split\r\n Iris = datasets.load_iris()\r\n feature = Iris['data']\r\n Label = Iris['target']\r\n X_train, X_test, y_train, y_test = train_test_split(\r\n feature,\r\n Label,\r\n test_size=0.33,\r\n )\r\n Predections = []\r\n k = 5\r\n for i in range(len(X_test)):\r\n Neighbour = getNeighbour(X_train, y_train, X_test[i], k)\r\n result = getResponse(Neighbour)\r\n Predections.append(result)\r\n print('> Predicted : ' + repr(result) +\r\n ' > Actual :' + repr(y_test[i]))\r\n accuracy = getAccuracy(y_test, Predections)\r\n print(\" Accuracy : \", accuracy)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n exit(0)\r\n"
] | [
[
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anthony-walker/me499 | [
"1ec5761a822956b4e18f83b3e0cda93715b74b3e"
] | [
"cacheing/fibonacci.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\" This script illustrates the following concepts:\n Caching - how to save calculations in a variable to speed up computation\n Timing comparisons - how to use time() to figure out how long\n a computation took\n Try-except to catch parameter errors and to determine if\n the caching variables exist\n Associating attributes (variables) with functions\n - using default function attributes, like __name__\n Four versions: Caching y/n and instrumented for timing y/n\n\n Note: If you were to do this \"cleanly\" you would put all this in a\n class.\n \"\"\"\n\n\ndef fib_basic(n):\n \"\"\"\n Simple Fibonacci function, defined recursively.\n :param n: A non-negative integer.\n :return: The Fibonacci number for n.\n \"\"\"\n\n # Check to make sure the argument is valid.\n if n < 0:\n raise ValueError('Argument must be non-negative')\n\n # If n is 0 or 1, just return the Fibonacci number. Otherwise return the sum of the previous two\n # Fibonacci numbers.\n if n < 2:\n return n\n else:\n return fib_basic(n - 1) + fib_basic(n - 2)\n\n\ndef fib_cached(n):\n \"\"\"\n Adding caching to fib_basic\n :param n: The Fibonacci number to calculate.\n :return: The Fibonacci number\n \"\"\"\n\n # Make sure the inputs are valid\n if n < 0:\n raise ValueError('Argument must be non-negative')\n\n # Try to return the value from the cache.\n try:\n # Yes, you can do this - remember, functions are just dictionaries (stored\n # in a dictionary of functions).\n # So you can add an entry (variable) to that dictionary\n # Note - we have to actually *declare* the cacge variable below, or it won't\n # exist when we try to access it.\n return fib_cached.cache[n]\n except KeyError:\n # Calculate the new value\n retval = fib_cached(n - 1) + fib_cached(n - 2)\n\n # Add it to the cache\n fib_cached.cache[n] = retval\n\n # Return the value.\n return retval\n\n\n# This attribute, associated with the function above,\n# is a casche. Initialize the cache with the base case values.\nfib_cached.cache = {0: 0, 1: 1}\n\n\ndef fib_instrumented(n):\n \"\"\"\n Simple Fibonacci function, defined recursively, instrumented to count calls.\n :param n: A non-negative integer.\n :return: The Fibonacci number for n.\n \"\"\"\n\n # Increment the call count\n # Yes, you can do this - remember, functions are just dictionaries (stored\n # in a dictionary of functions).\n # So you can add an entry (variable) to that dictionary\n # Note - we have to actually *declare* this variable below, or it won't\n # exist when we try to add 1 to it.\n fib_instrumented.call_count += 1\n\n # Check to make sure the argument is valid.\n if n < 0:\n raise ValueError('Argument must be non-negative')\n\n # If n is 0 or 1, just return the Fibonacci number. Otherwise return the sum of the previous two\n # Fibonacci numbers.\n if n < 2:\n return n\n else:\n return fib_instrumented(n - 1) + fib_instrumented(n - 2)\n\n\n# This attribute, associated with the function above, will let us keep track of the number of calls.\nfib_instrumented.call_count = 0\n\n\ndef fib_cached_instrumented(n):\n \"\"\"\n This Fibonacci function uses caching to speed things up.\n It's also instrumented, to show how many function calls are taken\n :param n:\n :return:\n \"\"\"\n # Increment the call count - see fib_instrumented for explanation\n fib_cached_instrumented.call_count += 1\n\n # Check to make sure the argument is valid.\n if n < 0:\n raise ValueError('Argument must be non-negative')\n\n # Try to return a number from the cache. 
If that fails, then we have to calculate it (recursively) from scratch,\n # add it to the cache, and then return it.\n try:\n return fib_cached_instrumented.cache[n] # Again, this is a variable defined on the function\n except KeyError:\n # Calculate the value recursively\n retval = fib_cached_instrumented(n - 1) + fib_cached_instrumented(n - 2)\n\n # Store it in the cache\n fib_cached_instrumented.cache[n] = retval\n\n # And, finally, return it\n return retval\n\n\n# Initialize the cache with the two base cases and the counter with 0.\nfib_cached_instrumented.call_count = 0\nfib_cached_instrumented.cache = {0: 0, 1: 1}\n\n\ndef time_fib(f, n):\n \"\"\"\n A wrapper function to time the execution of a Fibonacci function.\n :param f: The function to time.\n :param n: The Fibonacci number to evaluate.\n :return: The Fibonacci number.\n \"\"\"\n # You can put the import here - it will only happen if time_fib is called\n from time import time\n\n # Reset the call count\n # For the functions that *don't* have instrumentation, this will\n # create that variable\n f.call_count = 0\n\n # Time a call to the Fibonacci function\n start_time = time()\n f(n)\n stop_time = time()\n\n # Return the time and the call count.\n return stop_time - start_time, f.call_count\n\n\nif __name__ == '__main__':\n # You can put the imports here\n import matplotlib.pyplot as plt\n\n # Do some tests to make sure fibonacci functions are working as expected\n # This is a hard-coded set of correct answers to test against\n fib_tests = {0: 0, 1: 1, 2: 1, 3: 2, 4: 3, 5: 5, 6: 8, 7: 13}\n # This is an array of the functions we're going to test\n # Notice we're not calling the functions (no ()) - we're just putting the\n # function variables in the list\n functions = [fib_basic, fib_cached, fib_instrumented, fib_cached_instrumented]\n\n # Test each of the functions on all of the test cases.\n for func in functions: # Yes, this works - func is one of the fib_* functions\n print('{0}():'.format(func.__name__)) # .__name__ is defined on every function\n\n # Since fib_tests is a dictionary, and we want both the key and\n # the value, we use the .items() iteration method\n for number, value in fib_tests.items():\n # Call the function and compare it to the test value\n if func(number) != value:\n print(' failed for {0}: expected {1}, got {2}'.format(number, value, func(number)))\n\n # Make sure we catch the invalid cases.\n try:\n func(-1)\n print(' failed for negative value')\n except ValueError:\n # This is the error the functions should throw if given a -1 - so it\n # passed the test, don't do anything\n pass\n\n print(' tests complete')\n\n # Do some timing tests to see how long things take. We're going to look at wall clock time and also\n # at the number of calls we make to the fib() function. 
To do that, we're going to make a new version\n # of the function that's instrumented to count the number of calls, which we will call fib2().\n N = 15 # test on 1 to 15\n times, counts = zip(*(time_fib(fib_instrumented, n) for n in range(N)))\n cached_times, cached_counts = zip(*(time_fib(fib_cached_instrumented, n) for n in range(N)))\n\n # Let's plot the times and the call counts\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\n ax1.set_title('Fibonacci performance')\n\n # Plot the times\n time_plot, = ax1.plot(times)\n cached_time_plot, = ax1.plot(cached_times)\n ax1.set_ylabel('time (seconds)')\n ax1.legend([time_plot, cached_time_plot], ['Basic', 'Cached'])\n\n # Plot the call counts\n call_plot, = ax2.plot(counts)\n cached_call_plot, = ax2.plot(cached_counts)\n ax2.set_xlabel('n')\n ax2.set_ylabel('function calls')\n ax2.legend([call_plot, cached_call_plot], ['Basic', 'Cached'])\n\n # Display the graphs.\n plt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zloben69/DBGLOGReader | [
"62d3fd2090f86be5aaa49856911941246ea4818f"
] | [
"DBGLOGReader.py"
] | [
"import struct, binascii, os, time\nimport pandas as pd\n# import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nFloat = os.getcwd() + '/log/2018 01 22 0000 (Float).DAT'\n\ndef Get_tag_names():\n Tagname = Float[0:-11]+'(Tagname).DAT'\n try:\n with open(Tagname, 'r') as fi:\n fi.read(10)\n names = tuple(fi.read().split()[1:][::3])\n return names\n except:\n return tuple(x for x in xrange(10000))\n\ndef read_data(file):\n data_list_temp=[]\n flag = False\n print ('read_data')\n with open(os.getcwd() + '/' + Float[-27:-12] + '.csv', \"a\") as filik:\n while True:\n read = binascii.hexlify(file.read(1))\n if read.lower() == '0D'.lower():\n flag = True\n elif read.lower() == '20'.lower() and flag==True:\n # frm_reader('<19s3s2sdcci', , temp_names)\n data_list_temp.append(list(struct.unpack('<19s3s2sdcci', file.read(38))))\n elif read.lower() == \"1A\".lower():\n break\n # a = max(x[2] for x in (data_list_temp[:500]))\n print ('read_data DONE!')\n return data_list_temp#, a\n\ndef conver_date(listik):\n names = Get_tag_names()\n listik = list(listik)\n for x in xrange(len(listik)):\n listik[x][2] = names[int(listik[x][2])]\n # listik[x][0] = time.strptime(listik[x][0][0:16], '%Y%m%d%H:%M:%S')\n listik[x][0] = '%s-%s-%s %s:%s:%s' % (listik[x][0][:4],\n listik[x][0][4:6],\n listik[x][0][6:8],\n listik[x][0][8:10],\n listik[x][0][11:13],\n listik[x][0][14:16])\n return listik\n\ndef printer(s, name):\n with open(os.getcwd() + '/' + name + '.csv', \"w\") as filik:\n print >> filik, 'Datetime, Milisecond, Tag, Value, Index'\n for t in xrange(len(s)):\n print >> filik, '%s-%s-%s %s:%s:%s, ms:%s, %s, %.3f, %s' % (s[t][0][:4], # year\n s[t][0][4:6], # month\n s[t][0][6:8], # day\n s[t][0][8:10], # hour\n s[t][0][11:13], # minut\n s[t][0][14:16], # second\n s[t][0][16:], # msecond\n s[t][2], # Tagname\n s[t][3], # Value\n s[t][6]) #Index\n\ndef getkey(item):\n return item[2]\n\ndef get_dick(datalist):\n d = {'Datetime':[x[0] for x in datalist],\n 'Tag':[x[2] for x in datalist],\n 'Value':[x[3] for x in datalist],\n 'Index':[x[-1] for x in datalist]}\n return d\n\ndef get_min_max_tag_index(tags, name):\n unic = [x for x in xrange(len(tags)) if name in tags[x]]\n return unic[0], unic[-1]\n\nstart_time = time.time()\nprint (\"Start: %s\" % start_time)\nwith open(Float, 'rb') as fi:\n data_list = read_data(fi)#, maximum = read_data(fi)\nsortedlist = sorted(conver_date(data_list), key=getkey)\ndata = get_dick(sortedlist)\nprint ('<Reading time %.3f seconds. 
Drop to disk>' % (time.time() - start_time))\n# print set(data['Tag'])\nmin, max = get_min_max_tag_index(data['Tag'], '[N3]VKT[3].TEMPERATURE')\ndf = pd.DataFrame(data['Value'][min:max], index=[pd.Timestamp(x) for x in data['Datetime'][min:max]])\ndf.plot(kind='line')\nplt.show()\nprinter(sortedlist, Float[-27:-12])\nprint ('<Total execution time %.3f seconds>' % (time.time() - start_time))\n\n\n\n#~~~~~~~~~~~ 3.5 - 4 sec for t in xrange(len(s)):~~~~~~~~~~~~\n# def printer(s, name, maximus , part='', z=0, sep=0):\n# print \"Drop to disk \" + str(z)\n# temp_names = Get_tag_names()\n# with open(os.getcwd() + '/' + name + part +'.csv', \"w\") as filik:\n# for t in xrange(len(s)):\n# print >> filik, '%s-%s-%s %s:%s:%s, ms:%s, %s, %.3f, %s' % (s[t+sep][0][:4], # year\n# s[t+sep][0][4:6], # month\n# s[t+sep][0][6:8], # day\n# s[t+sep][0][8:10], # hour\n# s[t+sep][0][11:13], # minut\n# s[t+sep][0][14:16], # second\n# s[t+sep][0][16:], # msecond\n# temp_names[int(s[t+sep][2])], # Tagname\n# int(s[t+sep][3]), # Value\n# s[t+sep][6])\n# if t > 999999 and int(s[t][2]) == 0:\n# z+=1\n# printer(s, name, maximus, ' Part ' + str(z), z, t)\n\n#~~~~~~~~~~~ 3.2 - 3.5 sec for t in s:~~~~~~~~~~~~\n# def printer(s, name , i='', j=0):\n# x = 0\n# print \"Drop to disk\"\n# temp_names = Get_tag_names()\n# with open(os.getcwd() + '/' + name + i[:-1] +'.csv', \"w\") as filik:\n# for t in s:\n# print >> filik, '%s-%s-%s %s:%s:%s, ms:%s, %s, %.3f, %s' % (t[0][:4], # year\n# t[0][4:6], # month\n# t[0][6:8], # day\n# t[0][8:10], # hour\n# t[0][11:13], # minut\n# t[0][14:16], # second\n# t[0][16:], # msecond\n# temp_names[int(t[2])], # Tagname\n# int(t[3]), # Value\n# t[6])\n# x += 1\n# if x > 999999:\n# i = ' part ' + str((len(s) / 1000000) * 10)\n# printer(s, name, i, x + j)\n\n# listik[x][0] = time.strptime(listik[x][0][0:16], '%Y%m%d%H:%M:%S'"
] | [
[
"matplotlib.pyplot.show",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lyndonchan/wsss-analysis | [
"75d534894a1c9e033c43346d63de27d83c95cd1e"
] | [
"scripts/extract_eval.py"
] | [
"import os\nimport pandas as pd\nimport numpy as np\n\nSETS = ['ADP-morph_tuning_VGG16', 'ADP-morph_tuning_X1.7', 'ADP-morph_segtest_VGG16', 'ADP-morph_segtest_X1.7',\n 'ADP-func_tuning_VGG16', 'ADP-func_tuning_X1.7', 'ADP-func_segtest_VGG16', 'ADP-func_segtest_X1.7',\n 'VOC2012_VGG16', 'VOC2012_M7',\n 'DeepGlobe_VGG16', 'DeepGlobe_M7',\n 'DeepGlobe_balanced_VGG16', 'DeepGlobe_balanced_M7']\n\n# SEC/DSRG\ndef to_underscore(x):\n return x.replace('-VGG16', '_VGG16').replace('-X1.7', '_X1.7').replace('-M7', '_M7')\n\ndef to_dash(x):\n return x.replace('_VGG16', '-VGG16').replace('_X1.7', '-X1.7').replace('_M7', '-M7')\n\nDIR = '../03a_sec-dsrg/eval'\neval_sec_dsrg = {'SEC': {}, 'DSRG': {}}\ndef get_miou(fpath):\n if not os.path.exists(fpath):\n return np.nan\n else:\n df = pd.read_excel(fpath)\n return df['IoU'][df['Class'] == 'Mean'].values[0]\nfor method in ['SEC', 'DSRG']:\n folders = os.listdir(os.path.join(DIR, method))\n for folder in folders:\n if 'ADP' in folder:\n for s in [to_dash(folder.replace('train', 'tuning')), to_dash(folder.replace('train', 'segtest'))]:\n fpath = os.path.join(DIR, method, folder, 'metrics_%s.xlsx' % s)\n key = to_underscore(s)\n eval_sec_dsrg[method][key] = get_miou(fpath)\n elif 'DeepGlobe' in folder:\n s = to_dash(folder.replace('train_', 'test_'))\n fpath = os.path.join(DIR, method, folder, 'metrics_%s.xlsx' % s)\n key = folder.replace('_train_', '_')\n eval_sec_dsrg[method][key] = get_miou(fpath)\n else:\n s = to_dash(folder.replace('train_', 'val_'))\n fpath = os.path.join(DIR, method, folder, 'metrics_%s.xlsx' % s)\n key = to_underscore(s).replace('val_', '')\n eval_sec_dsrg[method][key] = get_miou(fpath)\n\n# Grad-CAM/IRNet\nDIR = '../03b_irn/eval'\nfolders = os.listdir(DIR)\neval_cam = {}\neval_irn = {}\ndef irn_folder_to_key(folder):\n if folder.startswith('adp_morph'):\n key = 'ADP-morph'\n elif folder.startswith('adp_func'):\n key = 'ADP-func'\n elif folder.startswith('voc12'):\n key = 'VOC2012'\n elif folder.startswith('deepglobe_balanced'):\n key = 'DeepGlobe_balanced'\n elif folder.startswith('deepglobe'):\n key = 'DeepGlobe'\n if folder.endswith('tuning'):\n key += '_tuning'\n elif folder.endswith('evaluation'):\n key += '_segtest'\n if 'vgg16' in folder:\n key += '_VGG16'\n elif 'x1.7' in folder:\n key += '_X1.7'\n elif 'm7' in folder:\n key += '_M7'\n return key\nfor folder in folders:\n key = irn_folder_to_key(folder)\n if 'cam' in folder:\n fname = folder + '_cam_iou.csv'\n df = pd.read_csv(os.path.join(DIR, folder, fname))\n eval_cam[key] = df[df['Unnamed: 0'] == 'mean']['iou'].values[0]\n else:\n fname = folder + '_iou.csv'\n df = pd.read_csv(os.path.join(DIR, folder, fname))\n eval_irn[key] = df[df['Unnamed: 0'] == 'miou']['iou'].values[0]\n\n# HistoSegNet\nDIR = '../03c_hsn/eval'\nfolders = os.listdir(DIR)\neval_hsn = {}\n\nfor folder in folders:\n assert folder in SETS\n fnames = [x for x in os.listdir(os.path.join(DIR, folder)) if x.endswith('.xlsx') and not x.startswith('~')]\n assert len(fnames) == 1\n fname = fnames[0]\n df = pd.read_excel(os.path.join(DIR, folder, fname))\n eval_hsn[folder] = df['IoU'][df['Class'] == 'Mean'].values[0]\n\ndf_eval = pd.DataFrame({'Grad-CAM': eval_cam, 'SEC': eval_sec_dsrg['SEC'], 'DSRG': eval_sec_dsrg['DSRG'],\n 'IRNet': eval_irn, 'HistoSegNet': eval_hsn})\npd.set_option('display.max_columns', None)\nprint(df_eval)\na=1"
] | [
[
"pandas.set_option",
"pandas.read_excel",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
drgriffis/ctakes-utils | [
"d5da557ee68918fc33326a029f37a24d39acb10a"
] | [
"python/ctakes/format/common.py"
] | [
"'''\nShared methods for cTAKES output file format processing.\n\nNot included in from ctakes.format import *\n'''\n\nimport re\nimport codecs\nimport numpy as np\nfrom bs4 import BeautifulSoup\nfrom ..exceptions import *\nfrom ..annotations import *\n\ndef getAttributeValue(line, attr_name):\n '''Return the value of the specified attribute in the input line\n '''\n match = re.findall('%s=\".+\"' % attr_name, line)\n if len(match) != 1:\n raise AttributeNotFoundException(attr_name)\n match = match[0]\n\n opn = match.index('\"')\n cls = match[opn+1:].index('\"')\n return match[opn+1:opn+cls+1]\n\ndef getTokens(fpath, mentions=None, get_POS_tags=False, by_sentence=False, _token_types=[], _sentence_type=None):\n '''Get the ordered list of tokens from the document, as\n tokenized by cTAKES.\n\n If list of Mentions is provided, the token list will consist\n of the following types:\n - str : non-mention token\n - Mention : non-overlapping entity mention\n - list : overlapping entity mentions within the same text span;\n each item consists of non-mention tokens and a single Mention\n e.g. [(Mention:\"weight loss\"), (\"weight\", Mention:\"loss\")]\n\n Parameters:\n fpath :: path to XMI file to process\n mentions :: (optional) list of Mention objects to include\n in place of the appropriate token\n get_POS_tags :: Boolean flag to return POS tag information along\n with token strings\n by_sentence :: return lists of tokens, where each corresponds to a\n single sentence as partitioned by cTAKES\n '''\n\n # if including mentions, index them by beginning index\n # (may be multiple beginning at same index)\n if mentions != None:\n indexed_mentions = {}\n for m in mentions:\n if indexed_mentions.get(m.begin, None) == None: indexed_mentions[m.begin] = []\n indexed_mentions[m.begin].append(m)\n\n # storage for instances of each token type\n tokens = [[] for _ in _token_types]\n # storage for starting bound of token type instances\n starts = [[] for _ in _token_types]\n \n # parse the XML file\n hook = codecs.open(fpath, 'r', 'utf-8')\n soup = BeautifulSoup(hook.read(), 'lxml-xml')\n hook.close()\n \n # iterate over the types of token nodes we're looking for\n for (node_type, regex) in _token_types:\n # get valid child nodes and sort by beginning index\n validated_nodes = _get_and_validate_children(soup, node_type, regex)\n sorted_nodes = _sort_by_position(validated_nodes, attr='begin')\n # pull out token and beginning index, store separately\n typed_tokens, typed_starts = [], []\n for node in sorted_nodes:\n token_string = node['normalizedForm']\n if get_POS_tags:\n try: token_pos = node['partOfSpeech']\n except KeyError: token_pos = None\n typed_tokens.append( (token_string, token_pos) )\n else:\n typed_tokens.append(token_string)\n typed_starts.append(int(node['begin']))\n tokens.append(typed_tokens)\n starts.append(typed_starts)\n\n # if getting by sentence, fetch and order all the tagged sentences\n if by_sentence:\n (sentence_node_type, sentence_regex) = _sentence_type\n # get sentence nodes and sort by beginning index\n validated_nodes = _get_and_validate_children(soup, sentence_node_type, sentence_regex)\n sorted_sentences = _sort_by_position(validated_nodes, attr='begin')\n # cast to Sentence objects\n sorted_sentences = [Sentence(bounds=(int(s['begin']), int(s['end']))) for s in sorted_sentences]\n \n ordered_tokens = []\n\n # starts_remaining tracks the start indices for the token types left\n # to empty; contains pairs of token type (index) and start indices\n starts_remaining = []\n for i in 
range(len(starts)):\n if len(starts[i]) > 0: starts_remaining.append((i,starts[i]))\n \n cur_mentions = [] # current list of overlapping mentions\n overlap_sofar = [] # tokens in the current overlap preceding the next mention\n in_mention = False # are we currently in >0 mentions?\n\n if by_sentence:\n current_sentence = None\n\n while len(starts_remaining) > 0:\n # find the next token type from the text\n next_starts = [start[0] for (_,start) in starts_remaining]\n next_starts_ix = np.argmin(next_starts)\n next_tokentype = starts_remaining[next_starts_ix][0]\n # get the next token\n next_token = tokens[next_tokentype].pop(0)\n next_token_start = starts_remaining[next_starts_ix][1][0]\n\n # if still in one or more mentions, try to resolve them\n if in_mention:\n # check for completed mentions and add token appropriately\n in_mention = False\n for (before, m, after) in cur_mentions:\n if m.end > next_token_start:\n m.text.append(next_token)\n in_mention = True\n else:\n after.append(next_token)\n\n # make sure no mentions are going outside a sentence\n if by_sentence and not current_sentence is None:\n assert m.end <= current_sentence.end\n\n # if all mentions completed, flush them as a block\n if not in_mention:\n for (before, m, after) in cur_mentions:\n m.text = ' '.join(m.text)\n after.pop(-1) # the last token is spurious\n if by_sentence and not current_sentence is None:\n current_sentence.tokens.append(cur_mentions)\n else:\n ordered_tokens.append(cur_mentions)\n # and reset the overlap trackers\n cur_mentions, overlap_sofar = [], []\n\n # if in a sentence, try to resolve it\n if by_sentence and not current_sentence is None:\n if current_sentence.end <= next_token_start:\n ordered_tokens.append(current_sentence)\n current_sentence = None\n # if at the start of a sentence, load it in\n if by_sentence and len(sorted_sentences) > 0 and next_token_start >= sorted_sentences[0].begin:\n current_sentence = sorted_sentences.pop(0)\n\n # if starting a mention\n if mentions != None and indexed_mentions.get(next_token_start, None) != None:\n new_mentions = []\n for m in indexed_mentions[next_token_start]:\n m.text = [next_token]\n cur_mentions.append([ overlap_sofar.copy(), m, [] ])\n overlap_sofar.append(next_token)\n in_mention = True\n\n # otherwise, just add the word\n if not in_mention:\n if by_sentence and not current_sentence is None:\n current_sentence.tokens.append(next_token)\n else:\n ordered_tokens.append(next_token)\n\n # remove the starting index\n starts_remaining[next_starts_ix][1].pop(0)\n # check if any token types are complete\n new_starts_remaining = []\n for (i,start) in starts_remaining:\n if len(start) > 0: new_starts_remaining.append((i,start))\n starts_remaining = new_starts_remaining\n\n # check if still have a mention buffered\n if in_mention:\n for (before, m, after) in cur_mentions:\n m.text = ' '.join(m.text)\n if by_sentence and not current_sentence is None:\n current_sentence.tokens.append(cur_mentions)\n else:\n ordered_tokens.append(cur_mentions)\n\n # check if still have a sentence buffered\n if by_sentence and not current_sentence is None:\n ordered_tokens.append(current_sentence)\n current_sentence = None\n\n # flatten mentions and contexts\n output_tokens = _flatten_mention_spans(ordered_tokens)\n\n return output_tokens\n\ndef _get_and_validate_children(soup, node_type, validation_regex):\n candidate_nodes, validated_nodes = soup.findChildren(node_type), []\n for node in candidate_nodes:\n if matchesRegex(validation_regex, repr(node)): 
validated_nodes.append(node)\n return validated_nodes\n\ndef _sort_by_position(nodes, attr='begin'):\n node_sorter = { int(node[attr]): node for node in nodes }\n assert len(node_sorter) == len(nodes) # no duplicate 'begin' indices\n indices, sorted_nodes = list(node_sorter.keys()), []\n indices.sort()\n for index in indices:\n sorted_nodes.append(node_sorter[index])\n return sorted_nodes\n\ndef _flatten_mention_spans(token_list):\n output_tokens = []\n for t in token_list:\n if type(t) == Sentence:\n t.tokens = _flatten_mention_spans(t.tokens)\n output_tokens.append(t)\n elif type(t) == list:\n if len(t) == 1:\n (before, m, after) = t[0]\n output_tokens.append(m)\n else:\n output_tokens.append([\n flatten([before, m, after])\n for (before, m, after) in t\n ])\n else: output_tokens.append(t)\n return output_tokens\n\n\n### Utility methods #####################\n\ndef matchesRegex(regex, string):\n '''Returns Boolean indicating if the input regex found a positive (non-zero)\n match in the input string.\n '''\n mtch = re.match(regex, string)\n return mtch != None and mtch.span() != (0,0)\n\ndef flatten(arr):\n '''Given an array of N-dimensional objects (N can vary), returns 1-dimensional\n list of the contained objects.\n '''\n results = []\n for el in arr:\n if type(el) == list or type(el) == tuple: results.extend(flatten(el))\n else: results.append(el)\n return results\n\n\n### Inheritance methods #################\n\ndef inheritDocstring(local, src):\n local.__doc__ = src.__doc__\n"
] | [
[
"numpy.argmin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HiEST/AI4DL | [
"620d78dc0dafca1fa0bd77e2a138149de54076c9"
] | [
"package/AI4DL/crbm_tools.py"
] | [
"\nfrom timeit import default_timer as timer\nimport numpy as np\nfrom numpy import outer as np_outer\nimport time\nimport matplotlib.pyplot as plt\nimport numexpr as ne\nfrom numexpr import evaluate \nimport sys\nimport os\nimport random\nimport inspect\nimport json\nimport pickle\n\nclass CRBM:\n \n def __init__(self, n_vis, n_hid, n_his,\n sigma=0.2, monitor_time=True, scale_factor = 0,\n n_epochs=100, learning_rate=0.005, momentum=0.0, verbose=1, random_state=1234,\n patience=3, dtype=\"Float32\"):\n\n self.n_vis = n_vis\n self.n_hid = n_hid\n self.n_his = n_his\n self.sigma = sigma\n self.monitor_time = monitor_time\n self.scale_factor = scale_factor\n self.dtype = dtype\n self.random_state = random_state\n\n ## values relevant for the fit method\n self.n_epochs = n_epochs\n self.learning_rate = learning_rate\n self.momentum = momentum\n self.verbose = verbose\n self.num_epochs_trained = 0\n self.patience = patience\n\n ## Initialize the random value sof the parameters\n\n if scale_factor == 0: #scale factor for the random initialization of the weights\n scale_factor = 1./(n_vis * n_his)\n \n if dtype == \"Float32\":\n dtype = np.float32\n elif dtype == \"Float64\":\n dtype = np.float64\n\n np.random.seed(self.random_state) \n self.W = scale_factor * np.random.normal(0, sigma, [n_hid, n_vis]).astype(dtype) # vis to hid\n self.A = scale_factor * np.random.normal(0, sigma, [n_vis, n_vis * n_his]).astype(dtype) # cond to vis\n self.B = scale_factor * np.random.normal(0, sigma, [n_hid, n_vis * n_his]).astype(dtype) # cond to hid\n self.v_bias = np.zeros([n_vis, 1]).astype(dtype)\n self.h_bias = np.zeros([n_hid, 1]).astype(dtype)\n self.dy_v_bias = np.zeros([n_vis, 1]).astype(dtype)\n self.dy_h_bias = np.zeros([n_hid, 1]).astype(dtype) \n\n \n def save(self, model_path, model_name):\n \"\"\"\n Function to save the information contained in the class in a folder.\n The folder will contain 2 `.json` files.\n \"\"\"\n \n ### Create a folder where to save models (if it does not exist) \n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n ### Create a folder for the current model with name `model_name`\n model_path = os.path.join(model_path, model_name)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n else:\n print(\"The model {} inside folder {} already exists!\".format(model_name, model_path))\n return 0\n\n ### Save all the information to instanciate the same model again\n arguments_init = inspect.signature(CRBM)\n init_ctx = {k:self.__dict__[k] for k in arguments_init.parameters.keys()} \n \n with open( os.path.join(model_path, \"model_initializer\") + '.json', 'w') as outfile:\n json.dump(init_ctx, outfile, ensure_ascii=False)\n \n with open( os.path.join(model_path, \"model_dict\") + '.pickle', 'wb') as outfile:\n pickle.dump(self.__dict__, outfile, protocol=pickle.HIGHEST_PROTOCOL)\n \n @classmethod\n def load(self, model_path):\n\n if not os.path.exists(model_path):\n print(\"The model {} does not exist!\".format(model_path))\n return\n \n if not os.path.exists( os.path.join(model_path, \"model_initializer.json\")):\n print( \"File {} is not found.\".format(os.path.join(model_path, \"model_initializer.json\")))\n return\n \n if not os.path.exists( os.path.join(model_path, \"model_dict.pickle\")):\n print( \"File {} is not found.\".format(os.path.join(model_path, \"model_dict.pickle\")))\n return\n \n with open( os.path.join(model_path, \"model_initializer\") + '.json', 'rb') as file:\n model_initializer = json.load(file)\n \n with open( 
os.path.join(model_path, \"model_dict\") + '.pickle', 'rb') as file:\n model_dict = pickle.load(file)\n \n crbm = CRBM(**model_initializer)\n crbm.__dict__ = model_dict\n\n return crbm\n\n def fit(self, X_slices: list):\n \"\"\"\n Train a CRBM given a list of slices of the time-series.\n \n - x in X_slices should be a np.ndarray of ndim=2 containing features as columns\n \"\"\"\n\n if self.momentum >0:\n ctx = { \"W_vel\" : np.zeros(self.W.shape), \n \"A_vel\" : np.zeros(self.A.shape),\n \"B_vel\" : np.zeros(self.B.shape), \n \"v_bias_vel\" : np.zeros(self.v_bias.shape), \n \"h_bias_vel\" : np.zeros(self.h_bias.shape)}\n\n self.rec_error_per_epoch = []\n self.patience_counter =0\n\n n_samples = len(X_slices)\n t_start = time.time()\n previous_rec_error = np.inf\n for n in range(self.n_epochs):\n t_iter = time.time()\n err_epoch = 0\n random.shuffle(X_slices)\n for X_curr in X_slices:\n \n dW, dA, dB, dv_bias, dh_bias, rec_error = self.compute_gradient(X_curr)\n grads = (dW, dA, dB, dv_bias, dh_bias)\n\n if self.momentum >0:\n #update_weights_sgd_momentum(crbm, grads, learning_rate, ctx, momentum=0.)\n raise NotImplementedError(\"Momentum learning To be implemented\")\n else:\n self.update_weights_sgd(grads, self.learning_rate)\n \n err_epoch += rec_error\n \n self.num_epochs_trained +=1\n current_rec_error = err_epoch/n_samples\n self.rec_error_per_epoch.append(current_rec_error)\n\n if current_rec_error >= previous_rec_error:\n self.patience_counter +=1\n else:\n self.patience_counter = 0\n\n if self.verbose ==1:\n print(\"ep {:04} | min {:.2f} sec => rec error: {:.4f}\".format(n, \n int(time.time() - t_start)/60, \n current_rec_error))#,end=\"\\r\")\n\n\n if self.patience_counter == self.patience:\n print(f\"Maximum patience={self.patience} achieved\")\n break \n \n\n def split_vis(self, vis: np.ndarray):\n\n n_his = vis.shape[0]\n cond = vis[0:(n_his-1), :].T\n x = vis[[n_his-1],:].T\n \n assert self.n_vis == x.shape[0] and self.n_vis == cond.shape[0], \\\n \"crbm.n_vis = {}, is different from x.shape[0] = {} or cond.shape[0] = {}\".format(self.n_vis,\n x.shape[0],\n cond.shape[0])\n return x, cond\n\n\n def history_mat_to_vec(self, cond):\n return np.array([cond.flatten('F')]).T\n\n\n def sample_hiddens(self, v: np.ndarray, cond: np.ndarray):\n h_mean = sig( np.dot(self.W, v) + np.dot(self.B, cond) + self.h_bias)\n h_sample = h_mean > np.random.random(h_mean.shape).astype(np.float32)\n return h_sample, h_mean\n\n def sample_visibles(self, h: np.ndarray, cond: np.ndarray):\n \"\"\"\n Notice we don't sample or put the sigmoid here since visible units are Gaussian\n \"\"\"\n v_mean = np.dot(self.W.T, h) + np.dot(self.A, cond) + self.v_bias \n return v_mean\n\n def CDK(self, vis,cond, K=1):\n v_pos_mean = vis\n h_pos_sample, h_pos_mean = self.sample_hiddens(v_pos_mean, cond)\n v_neg_mean = self.sample_visibles(h_pos_mean, cond)\n h_neg_sample, h_neg_mean = self.sample_hiddens(v_neg_mean, cond)\n\n for i in range(K-1):\n v_neg_mean = self.sample_visibles(h_neg_mean, cond)\n h_neg, h_neg_mean = self.sample_hiddens(v_neg_mean, cond)\n \n return v_pos_mean, h_pos_mean , v_neg_mean, h_neg_mean\n\n def compute_gradient(self, X):\n \"\"\"\n Computes an approximated gradient of the likelihod (for a given minibatch X) with\n respect to the parameters. 
\n \"\"\"\n vis, cond = self.split_vis(X)\n cond = self.history_mat_to_vec(cond)\n \n v_pos, h_pos, v_neg, h_neg = self.CDK(vis, cond)\n n_obs = vis.shape[1]\n \n # for a sigle observation: dW = h * v^T - h_hat * v_hat^T\n dW = ( np.dot(h_pos, v_pos.T) - np.dot(h_neg, v_neg.T) ) * (1./n_obs)\n dA = ( np.dot(v_pos, cond.T) - np.dot(v_neg, cond.T) ) * (1./n_obs)\n dB = ( np.dot(h_pos, cond.T) - np.dot(h_neg, cond.T) ) * (1./n_obs) \n \n dv_bias = np.mean(v_pos - v_neg, axis=1, keepdims=True)\n dh_bias = np.mean(h_pos - h_neg, axis=1, keepdims=True)\n #print(\"n_obs:\", n_obs)\n\n rec_error = np.linalg.norm(v_pos - v_neg)\n #print( np.sqrt(np.sum((v_pos - v_neg)**2)))\n \n return dW, dA, dB, dv_bias, dh_bias, rec_error\n\n\n def update_weights_sgd(self, grads, learning_rate):\n \n dW, dA, dB, dv_bias, dh_bias = grads #rec_error = compute_gradient(crbm, X)\n self.W += dW * learning_rate\n self.A += dA * learning_rate\n self.B += dB * learning_rate\n \n self.v_bias += dv_bias * learning_rate\n self.h_bias += dh_bias * learning_rate\n\n\n def update_weights_sgd_momentum(self, grads, learning_rate, ctx, momentum=0.9):\n \n dW, dA, dB, dv_bias, dh_bias = grads \n \n ctx[\"W_vel\"] = ctx[\"W_vel\"] * self.momentum + dW * learning_rate\n ctx[\"A_vel\"] = ctx[\"A_vel\"] * self.momentum + dA * learning_rate\n ctx[\"B_vel\"] = ctx[\"B_vel\"] * self.momentum + dB * learning_rate\n ctx[\"v_bias_vel\"] = ctx[\"v_bias_vel\"] * self.momentum + dv_bias * learning_rate\n ctx[\"h_bias_vel\"] = ctx[\"h_bias_vel\"] * self.momentum + dh_bias * learning_rate\n \n self.W += ctx[\"W_vel\"]\n self.A += ctx[\"A_vel\"]\n self.B += ctx[\"B_vel\"]\n \n self.v_bias += ctx[\"v_bias_vel\"]\n self.h_bias += ctx[\"h_bias_vel\"]\n\n\n def generate(self, vis, cond_as_vec, n_gibbs=10):\n \"\"\" \n Given initialization(s) of visibles and matching history, generate a sample in the future.\n \n vis: n_vis * 1 array\n \n cond_as_vec: n_hist * n_vis array\n \n n_gibbs : int\n number of alternating Gibbs steps per iteration\n \"\"\"\n \n assert cond_as_vec.shape[1] ==1, \"cond_as_vec has to be a column vector\"\n \n n_seq = vis.shape[0]\n v_pos, h_pos, v_neg, h_neg = CDK_sa(self, vis, cond_as_vec, n_gibbs)\n \n return v_neg\n\n def persistentCDK(self, presistent_vis, persistent_his, K=1):\n \n vis_sample = presistent_vis\n his = persistent_his\n \n h_pos_sample, h_pos_mean = self.sample_hiddens(vis_sample, his)\n v_neg_mean = self.sample_visibles(h_pos_sample, his)\n h_neg_sample, h_neg_mean = self.sample_hiddens(v_neg_mean, his)\n \n for i in range(K-1):\n v_neg_mean = self.sample_visibles(h_neg_sample, his)\n h_neg, h_neg_mean = self.sample_hiddens(v_neg_mean, his)\n\n return vis_sample, h_pos_mean , v_neg_mean, h_neg_mean\n\n\n def CDK_sa(self, vis,cond, K=1):\n \n v_pos_mean = vis\n h_pos_sample, h_pos_mean = self.sample_hiddens(v_pos_mean, cond)\n v_neg_mean = self.sample_visibles(h_pos_sample, cond)\n h_neg_sample, h_neg_mean = self.sample_hiddens(v_neg_mean, cond)\n \n for i in range(K-1):\n v_neg_mean = self.sample_visibles(h_neg_sample, cond)\n h_neg, h_neg_mean = self.sample_hiddens(v_neg_mean, cond)\n\n return v_pos_mean, h_pos_mean , v_neg_mean, h_neg_mean\n\n def generate_given_chain(self, persistent_vis, persistent_his_as_vec, n_gibbs=10):\n \"\"\" \n Given initialization: visible vector and current history, generate a sample in the future.\n \n persistent_vis: (n_vis, 1) array\n \n persistent_his_as_vec: (n_hist, n_vis) array \n \n n_gibbs : int \n number of alternating Gibbs steps per iteration\n \"\"\"\n \n assert 
persistent_his_as_vec.shape[1] ==1, \"his_as_vec has to be a column vector\"\n \n n_seq = persistent_vis.shape[0]\n v_pos, h_pos, v_neg, h_neg = self.persistentCDK(persistent_vis, persistent_his_as_vec, n_gibbs)\n \n return v_neg\n\n\n def update_history_as_vec(self, current_hist_vec, v_new):\n n_feat = v_new.shape[0]\n current_hist_vec[0:-n_feat] = current_hist_vec[n_feat:] \n current_hist_vec[-n_feat:] = v_new\n return current_hist_vec\n\n\n def generate_n_samples(self, vis, his_as_vec, n_samples, n_gibbs=30, persitent_chain=False):\n \"\"\" \n Given initialization(s) of visibles and matching history, generate a n_samples in the future.\n \n \n persistent_chain=True\n In the positive phase, PCD does not differ from CD training. \n In the negative phase, however, instead of running a new chain for each parameter update, \n PCD maintains a single per- sistent chain. The update at time t takes the state of the\n Gibbs chain at time t − 1, performs one round of Gibbs sampling, and uses this state \n in the negative gradient estimates. \n \n \"\"\"\n \n assert his_as_vec.shape[1] ==1, \"his_as_vec has to be a column vector\"\n samples = []\n \n if persitent_chain is False:\n for i in range(n_samples):\n v_new = self.generate(vis, his_as_vec, n_gibbs)\n self.update_history_as_vec(his_as_vec, v_new)\n samples.append(v_new) \n\n else:\n persistent_vis_chain = vis\n persistent_his_as_vec = his_as_vec\n \n for i in range(n_samples):\n persistent_vis_chain = self.generate_given_chain( persistent_vis_chain, persistent_his_as_vec, n_gibbs)\n self.update_history_as_vec(persistent_his_as_vec, persistent_vis_chain)\n samples.append(persistent_vis_chain)\n\n return samples\n\n\n def predict(self, seq):\n\n n_seq = len(seq)\n\n if n_seq < self.n_his + 1:\n if self.verbose:\n print(f\"Warning, input sequence has len {n_seq} but history has len {self.n_his}\")\n\n activations = np.zeros((n_seq, self.n_hid))\n \n for k in range(self.n_his+1, n_seq):\n X_slice = seq[(k-self.n_his-1):k, :]\n history_vec = self.history_mat_to_vec(X_slice[0:-1,:])\n vis = X_slice[[-1],:].T\n h_preact, h_activations = self.sample_hiddens(vis, history_vec)\n activations[k,:] = h_activations.T\n\n return activations\n\ndef sig(v):\n return ne.evaluate(\"1/(1 + exp(-v))\")\n\ndef split_vis(crbm: CRBM, vis: np.ndarray):\n n_his = vis.shape[0]\n cond = vis[0:(n_his-1), :].T\n x = vis[[n_his-1],:].T\n \n assert crbm.n_vis == x.shape[0] and crbm.n_vis == cond.shape[0], \\\n \"crbm.n_vis = {}, is different from x.shape[0] = {} or cond.shape[0] = {}\".format(crbm.n_vis,\n x.shape[0],\n cond.shape[0])\n return x, cond\n\ndef dynamic_biases_up(crbm: CRBM, cond: np.ndarray):\n crbm.dy_v_bias = np.dot(crbm.A, cond) + crbm.v_bias \n crbm.dy_h_bias = np.dot(crbm.B, cond) + crbm.h_bias \n \ndef hid_means(crbm: CRBM, vis: np.ndarray):\n p = np.dot(crbm.W, vis) + crbm.dy_h_bias\n return sig(p)\n \ndef vis_means(crbm: CRBM, hid: np.ndarray): \n p = np.dot(crbm.W.T, hid) + crbm.dy_v_bias\n return sig(p)\n\ndef sample_hiddens(crbm: CRBM, v: np.ndarray, cond: np.ndarray):\n h_mean = sig( np.dot(crbm.W, v) + np.dot(crbm.B, cond) + crbm.h_bias)\n h_sample = h_mean > np.random.random(h_mean.shape).astype(np.float32)\n return h_sample, h_mean\n\ndef sample_visibles(crbm: CRBM, h: np.ndarray, cond: np.ndarray):\n \"\"\"\n Notice we don't sample or put the sigmoid here since visible units are Gaussian\n \"\"\"\n v_mean = np.dot(crbm.W.T, h) + np.dot(crbm.A, cond) + crbm.v_bias \n return v_mean\n\ndef CDK(crbm, vis,cond, K=1):\n v_pos_mean = vis\n 
h_pos_sample, h_pos_mean = sample_hiddens(crbm, v_pos_mean, cond)\n v_neg_mean = sample_visibles(crbm, h_pos_mean, cond)\n h_neg_sample, h_neg_mean = sample_hiddens(crbm, v_neg_mean, cond)\n\n for i in range(K-1):\n v_neg_mean = sample_visibles(crbm, h_neg_mean, cond)\n h_neg, h_neg_mean = sample_hiddens(crbm, v_neg_mean, cond)\n \n return v_pos_mean, h_pos_mean , v_neg_mean, h_neg_mean\n\ndef update_history_as_vec(current_hist_vec, v_new):\n n_feat = v_new.shape[0]\n current_hist_vec[0:-n_feat] = current_hist_vec[n_feat:] \n current_hist_vec[-n_feat:] = v_new\n return current_hist_vec\n\ndef history_mat_to_vec(cond):\n return np.array([cond.flatten('F')]).T\n\n\ndef compute_gradient(crbm, X):\n \"\"\"\n Computes an approximated gradient of the likelihod (for a given minibatch X) with\n respect to the parameters. \n \"\"\"\n vis, cond = split_vis(crbm, X)\n cond = history_mat_to_vec(cond)\n \n v_pos, h_pos, v_neg, h_neg = CDK(crbm, vis, cond)\n n_obs = vis.shape[1]\n \n # for a sigle observation: dW = h * v^T - h_hat * v_hat^T\n dW = ( np.dot(h_pos, v_pos.T) - np.dot(h_neg, v_neg.T) ) * (1./n_obs)\n dA = ( np.dot(v_pos, cond.T) - np.dot(v_neg, cond.T) ) * (1./n_obs)\n dB = ( np.dot(h_pos, cond.T) - np.dot(h_neg, cond.T) ) * (1./n_obs) \n \n dv_bias = np.mean(v_pos - v_neg, axis=1, keepdims=True)\n dh_bias = np.mean(h_pos - h_neg, axis=1, keepdims=True)\n #print(\"n_obs:\", n_obs)\n\n rec_error = np.linalg.norm(v_pos - v_neg)\n #print( np.sqrt(np.sum((v_pos - v_neg)**2)))\n \n return dW, dA, dB, dv_bias, dh_bias, rec_error\n\ndef update_weights_sgd(crbm, grads, learning_rate):\n \n dW, dA, dB, dv_bias, dh_bias = grads #rec_error = compute_gradient(crbm, X)\n crbm.W += dW * learning_rate\n crbm.A += dA * learning_rate\n crbm.B += dB * learning_rate\n \n crbm.v_bias += dv_bias * learning_rate\n crbm.h_bias += dh_bias * learning_rate\n\ndef update_weights_sgd_momentum(crbm, grads, learning_rate, ctx, momentum=0.9):\n \n dW, dA, dB, dv_bias, dh_bias = grads \n \n ctx[\"W_vel\"] = ctx[\"W_vel\"] * momentum + dW * learning_rate\n ctx[\"A_vel\"] = ctx[\"A_vel\"] * momentum + dA * learning_rate\n ctx[\"B_vel\"] = ctx[\"B_vel\"] * momentum + dB * learning_rate\n ctx[\"v_bias_vel\"] = ctx[\"v_bias_vel\"] * momentum + dv_bias * learning_rate\n ctx[\"h_bias_vel\"] = ctx[\"h_bias_vel\"] * momentum + dh_bias * learning_rate\n \n crbm.W += ctx[\"W_vel\"]\n crbm.A += ctx[\"A_vel\"]\n crbm.B += ctx[\"B_vel\"]\n \n crbm.v_bias += ctx[\"v_bias_vel\"]\n crbm.h_bias += ctx[\"h_bias_vel\"]\n\ndef get_slice_at_position_k(X, k, n_his):\n \"\"\"\n Returns a slice of shape `(n_his + 1)` with the last column beeing the visible\n vector at the current time step `k`.\n \"\"\"\n assert k > n_his, \"Position k = {} is lower than n_his = {}\".format(k, n_his)\n assert k <= X.shape[1], \"Position k = {} is bigger than number of timesteps of X.shape[1] = {}\".format(k, X.shape[0])\n return X[:, (k-(n_his+1)):k]\n\ndef build_slices_from_list_of_arrays(list_of_arrays, n_his, n_feat, verbose=0):\n \"\"\"\n This function creates a list of slices of shape (n_his + 1, n_feat)\n \"\"\"\n assert list_of_arrays[0].shape[1] == n_feat, \"list_of_arrays[0].shape[1]={} but n_feat={}\".format( list_of_arrays[0].shape[1], n_feat)\n \n X_slices = []\n \n for m, arr in enumerate(list_of_arrays):\n if arr.shape[0] < n_his + 1:\n if verbose>0:\n print(\"Sequence {} has length {}\".format(m, arr.shape[0])) \n else:\n for k in range(n_his+1, arr.shape[0] + 1):\n X_slice = arr[(k-n_his-1):k, :]\n if X_slice.shape[0] != n_his+1:\n if verbose>0:\n 
print(\"error!\")\n X_slices.append(X_slice)\n \n return X_slices\n\ndef CDK_sa(crbm, vis,cond, K=1):\n \n v_pos_mean = vis\n h_pos_sample, h_pos_mean = sample_hiddens(crbm, v_pos_mean, cond)\n v_neg_mean = sample_visibles(crbm, h_pos_sample, cond)\n h_neg_sample, h_neg_mean = sample_hiddens(crbm, v_neg_mean, cond)\n \n \n for i in range(K-1):\n v_neg_mean = sample_visibles(crbm, h_neg_sample, cond)\n h_neg, h_neg_mean = sample_hiddens(crbm, v_neg_mean, cond)\n\n return v_pos_mean, h_pos_mean , v_neg_mean, h_neg_mean\n\ndef generate(crbm, vis, cond_as_vec, n_gibbs=10):\n \"\"\" \n Given initialization(s) of visibles and matching history, generate a sample in the future.\n \n vis: n_vis * 1 array\n \n cond_as_vec: n_hist * n_vis array\n \n n_gibbs : int\n number of alternating Gibbs steps per iteration\n \"\"\"\n \n assert cond_as_vec.shape[1] ==1, \"cond_as_vec has to be a column vector\"\n \n n_seq = vis.shape[0]\n #import pdb; pdb.set_trace()\n #v_pos, h_pos, v_neg, h_neg = CDK(crbm, vis, cond_as_vec, n_gibbs)\n v_pos, h_pos, v_neg, h_neg = CDK_sa(crbm, vis, cond_as_vec, n_gibbs)\n \n return v_neg\n \n\ndef generate_n_samples(crbm, vis, cond_as_vec, n_samples, n_gibbs=100):\n \"\"\" \n Given initialization(s) of visibles and matching history, generate a n_samples in the future.\n \"\"\"\n \n assert cond_as_vec.shape[1] ==1, \"cond_as_vec has to be a column vector\"\n \n samples = []\n for i in range(n_samples):\n v_new = generate(crbm, vis, cond_as_vec, n_gibbs)\n \n # This should not be here\n #v_new = v_new/np.linalg.norm(v_new) \n #print(\"i:\", i, \"\\tv_new:\", v_new.T)\n #print(\"cond_as_vec:\", cond_as_vec[-8:].T, \"\\n\\n\")\n #v_new[v_new<0] = 0\n \n update_history_as_vec(cond_as_vec, v_new)\n \n samples.append(v_new)\n\n return samples\n"
] | [
[
"numpy.dot",
"numpy.random.random",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.mean",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
code6levels/Adaptive-Spatial-Feature-Pooling | [
"200e10ae5fd99a1a13d7525beedc10912fdb2397"
] | [
"Adaptive-Spatial-Feature-Pooling/train_2.py"
] | [
"\r\nimport torch\r\nimport torch.optim as optim\r\nfrom network import PB_atrous,PB_resnet\r\nfrom tool.dataset import VOC_Dataset\r\nimport argparse\r\nfrom torchvision import transforms\r\nfrom torch.utils.data import DataLoader\r\nimport torch.nn as nn\r\nfrom sklearn.metrics import average_precision_score\r\nimport torch.nn.functional as F\r\nfrom tool.utils import save_cam, save_mask\r\nimport numpy as np\r\nfrom evaluation import do_python_eval\r\nfrom tqdm import tqdm\r\nimport yaml\r\n\r\nvoc_root = '/input/VOC2012/VOCdevkit/VOC2012'\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--batch_size', default=64, type=int)\r\n parser.add_argument('--lr', default=1e-4, type=float)\r\n parser.add_argument('--gpu_index', default='1', type=str)\r\n parser.add_argument('--crop_size', default=224, type=int)\r\n parser.add_argument('--num_epochs', default=7, type=int)\r\n parser.add_argument('--session_name', required=True, type=str)\r\n parser.add_argument(\"--train_list\", default=\"voc12/train_aug.txt\", type=str)\r\n parser.add_argument(\"--val_list\", default=\"voc12/val.txt\", type=str)\r\n parser.add_argument('--gpu_num', default=1, type=int)\r\n parser.add_argument('--COEFF', default=12.0, type=float)\r\n # parser.add_argument('--drop', action='store_true', default=False)\r\n parser.add_argument('--drop_rate', type=float, default=0.5)\r\n\r\n\r\n args = parser.parse_args()\r\n print(args)\r\n import os\r\n\r\n os.makedirs(args.session_name, exist_ok=True)\r\n os.makedirs(f'{args.session_name}/model_weights', exist_ok=True)\r\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index\r\n\r\n train_dataset = VOC_Dataset(args.train_list, voc_root, transform=transforms.Compose([\r\n # transforms.RandomResizedCrop(model_ft.input_size),\r\n transforms.RandomCrop(args.crop_size, pad_if_needed=True),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ]))\r\n\r\n train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8,\r\n pin_memory=True, drop_last=True)\r\n val_dataset = VOC_Dataset(args.val_list, voc_root, transform=transforms.Compose([\r\n transforms.Resize(args.crop_size),\r\n transforms.CenterCrop(args.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ]))\r\n # loglist = do_python_eval(f'baseline_3_5/pred_dir', args, 0)\r\n\r\n val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=False, num_workers=4, pin_memory=False,\r\n drop_last=True)\r\n\r\n test_dataset = VOC_Dataset(args.val_list, voc_root, transform=transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ]))\r\n test_dataloader = DataLoader(test_dataset, shuffle=False, pin_memory=True)\r\n\r\n model = PB_resnet.net( args.COEFF).cuda()\r\n # model.load_state_dict(torch.load(\"/users4/mxtuo/zhanghan/DBnet/Lip_pool_cf=17.5_bn=128_init_baseline/model_weights/res50-bs=128-cf=17.5-mIoU=47.140.pth\"))\r\n if (args.gpu_num > 1):\r\n model = nn.DataParallel(model)\r\n params = filter(lambda x: x.requires_grad, model.parameters())\r\n\r\n # optim = optim.Adam(params,args.lr,weight_decay=2e-4)\r\n optim = optim.Adam(params, args.lr)\r\n criterion = nn.MultiLabelSoftMarginLoss()\r\n best_mAP = 0.0\r\n best_mIoU = 0.0\r\n\r\n for epoch in range(args.num_epochs):\r\n model.train()\r\n for batch, pack in 
enumerate(train_dataloader):\r\n img = pack[1].cuda()\r\n label = pack[2].cuda()\r\n _, output = model(img)\r\n output = output.view_as(label)\r\n\r\n loss = criterion(output, label)\r\n\r\n optim.zero_grad()\r\n loss.backward()\r\n optim.step()\r\n print(f'epoch: {epoch+1}/{args.num_epochs} batch: {batch}/{len(train_dataloader)} batch_loss: {loss:.3f} ')\r\n print(f'epoch: {epoch+1}/{args.num_epochs} epoch_loss: {loss:.3f} ')\r\n\r\n model.eval()\r\n groud_truth = []\r\n pred_scores = []\r\n\r\n for name, img, label in val_dataloader:\r\n img = img.cuda()\r\n label = label.cuda()\r\n with torch.set_grad_enabled(False):\r\n cam, output = model(img)\r\n output = output.view_as(label)\r\n\r\n scores = torch.sigmoid(output).cpu()\r\n pred_scores.append(scores)\r\n groud_truth.append(label.cpu())\r\n\r\n b, c, h, w = cam.shape\r\n cam = F.relu(cam)\r\n cam_max = torch.max(cam.view(b, c, -1), dim=-1)[0].view(b, c, 1, 1) + 1e-5\r\n cam = F.relu(cam - 1e-5, inplace=True) / cam_max\r\n cam = cam * (label.view(b, c, 1, 1))\r\n\r\n save_cam(img[0], label[0], cam[0], scores[0], name[0], epoch, args)\r\n\r\n pred_scores = torch.cat(tuple(pred_scores))\r\n groud_truth = torch.cat(tuple(groud_truth))\r\n mAP = average_precision_score(groud_truth, pred_scores)\r\n f = open(f'{args.session_name}/results_mAP.txt', 'a')\r\n print(f'epoch: {epoch}/{args.num_epochs} mAP: {mAP:.3f} ')\r\n print(f'epoch: {epoch}/{args.num_epochs} mAP: {mAP:.3f}', file=f)\r\n f.close()\r\n if (mAP > best_mAP):\r\n best_mAP = mAP\r\n\r\n if (mAP > 0.845):\r\n for name, img, label in tqdm(test_dataloader):\r\n img = img.cuda()\r\n with torch.set_grad_enabled(False):\r\n cam, _, = model(img, is_eval=True)\r\n b, c, h, w = cam.shape\r\n cam = F.relu(cam).cpu()\r\n cam_max = torch.max(cam.view(b, c, -1), dim=-1)[0].view(b, c, 1, 1) + 1e-5\r\n cam = F.relu(cam - 1e-5, inplace=True) / cam_max\r\n cam = cam * (label.view(b, c, 1, 1))\r\n cam = F.interpolate(cam, img.shape[2:], mode='bilinear', align_corners=False).squeeze()\r\n bg_socre = (torch.ones(img.shape[2:]) * 0.2).unsqueeze(0)\r\n pred = torch.argmax(torch.cat((bg_socre, cam), dim=0), dim=0).numpy().astype(np.uint8)\r\n save_mask(pred, args, name[0])\r\n loglist = do_python_eval(f'{args.session_name}/mask_results', args, epoch)\r\n mIoU = loglist['mIoU']\r\n\r\n if (best_mIoU < mIoU):\r\n best_mIoU = mIoU\r\n torch.save(model.state_dict(),\r\n f'/root/zh/Adpnet/{args.session_name}/model_weights/res50-bs={args.batch_size}-cf=cf={args.COEFF}-{mIoU:.3f}.pth')\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"torch.sigmoid",
"torch.ones",
"torch.cat",
"torch.nn.MultiLabelSoftMarginLoss",
"torch.utils.data.DataLoader",
"torch.nn.functional.relu",
"torch.set_grad_enabled",
"sklearn.metrics.average_precision_score",
"torch.nn.functional.interpolate",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cambel/ur_openai_gym | [
"bed90641e24b06fdb36df35eb64418dc15e96292"
] | [
"ur_rl/scripts/tf2rl_sac.py"
] | [
"#!/usr/bin/env python\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #tensorflow logging disabled\nfrom shutil import copyfile\nimport rospy\nimport timeit\nimport argparse\nfrom gym.envs.registration import register\n\nfrom tf2rl.algos.sac import SAC\nfrom tf2rl.experiments.cb_trainer import Trainer\n\nimport ur_control.utils as utils\n\nfrom ur_openai.common import load_environment, log_ros_params, clear_gym_params, load_ros_params\nimport numpy as np\nnp.set_printoptions(suppress=True)\n\nimport sys\nimport signal\ndef signal_handler(sig, frame):\n print('You pressed Ctrl+C!')\n sys.exit(0)\nsignal.signal(signal.SIGINT, signal_handler)\n\nif __name__ == '__main__':\n\n parser = Trainer.get_argument()\n parser.add_argument('-e', '--env-id', type=int, help='environment ID', default=None)\n parser.set_defaults(batch_size=2048)\n parser.set_defaults(n_warmup=0) # still don't know what it this for\n parser.set_defaults(max_steps=10000) # 10000 for training 200 for evaluation\n parser.set_defaults(save_model_interval=10000)\n parser.set_defaults(test_interval=1e10) # 1e4 for training 200 for evaluation\n parser.set_defaults(test_episodes=1)\n parser.set_defaults(normalize_obs=False)\n parser.set_defaults(auto_alpha=False)\n parser.set_defaults(use_prioritized_rb=True)\n parser.set_defaults(lr=3e-4)\n\n args = parser.parse_args(rospy.myargv()[1:])\n\n\n rospy.init_node('ur3e_tf2rl',\n anonymous=True,\n log_level=rospy.ERROR)\n\n clear_gym_params('ur_gym')\n\n start_time = timeit.default_timer()\n\n args = parser.parse_args()\n param_file = None\n \n if args.evaluate:\n args.n_warmup = 0\n args.max_steps = 150\n args.test_interval = 1\n args.test_episodes = 10\n\n if args.env_id == 0:\n args.dir_suffix = \"task_space\"\n param_file = \"simulation/task_space.yaml\"\n elif args.env_id == 1:\n args.dir_suffix = \"joint_space\"\n param_file = \"simulation/joint_space.yaml\"\n else:\n raise Exception(\"invalid env_id\")\n\n p = utils.TextColors()\n p.error(\"GYM Environment:{} \".format(param_file))\n\n ros_param_path = load_ros_params(rospackage_name=\"ur_rl\",\n rel_path_from_package_to_file=\"config\",\n yaml_file_name=param_file)\n\n args.episode_max_steps = rospy.get_param(\"ur_gym/steps_per_episode\", 200)\n\n env = load_environment(\n rospy.get_param('ur_gym/env_id'),\n max_episode_steps=args.episode_max_steps)\n actor_class = rospy.get_param(\"ur_gym/actor_class\", \"default\")\n\n policy = SAC(\n state_shape=env.observation_space.shape,\n action_dim=env.action_space.high.size,\n max_action=env.action_space.high[0],\n actor_class=actor_class,\n batch_size=args.batch_size,\n n_warmup=args.n_warmup,\n auto_alpha=args.auto_alpha,\n lr=args.lr\n )\n trainer = Trainer(policy, env, args, test_env=None)\n outdir = trainer._output_dir\n rospy.set_param('ur_gym/output_dir', outdir)\n log_ros_params(outdir)\n copyfile(ros_param_path, outdir + \"/ros_gym_env_params.yaml\")\n trainer()\n\n print(\"duration\", (timeit.default_timer() - start_time)/60.,\"min\")\n\n# rosrun ur3e_rl tf2rl_sac.py --env-id=0"
] | [
[
"numpy.set_printoptions"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gongpx20069/mmRadar_for_HAR_VS | [
"ad40cf88f1fa425333108b6394a7379945300a6e"
] | [
"HAR/codes_v4/train.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport os\r\nfrom MyDataset import MyDataset\r\nfrom torch.utils.data import Dataset, DataLoader\r\n\r\nfrom PointGNN_boost import HAR_PointGNN\r\n\r\ndevice = torch.device(\"cuda:2\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\ndef test_acc(model, dataset, batch_size):\r\n dataloader = DataLoader(dataset = dataset,batch_size=batch_size,shuffle=False)\r\n model.eval()\r\n\r\n test_correct = 0\r\n for data in dataloader:\r\n inputs, states, targets = data[0].to(device), data[1].to(device), data[2].to(device)\r\n outputs = model(inputs, states)\r\n\r\n _, pred = torch.max(outputs, 1)\r\n test_correct += torch.sum(pred == targets)\r\n # print(test_correct)\r\n del dataloader\r\n print(\"Test Accuracy {:.4f}%\".format(100.0*test_correct/len(dataset)))\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n batch_size = 7\r\n test_batch = 14\r\n learning_rate = 0.001\r\n\r\n epoch_num = 100\r\n\r\n dataset_test = MyDataset('../Data/lmdbData_test', padding='zero')\r\n\r\n dataset = MyDataset('../Data/lmdbData_train',padding='zero')\r\n train_loader = DataLoader(dataset = dataset,batch_size=batch_size,shuffle=True)\r\n\r\n model = HAR_PointGNN(r = 0.0005, T=3, state_dim=8)\r\n model.to(device)\r\n\r\n if os.path.exists('./models/HAR_PointGNN.pkl'):\r\n model.load_state_dict(torch.load('./models/HAR_PointGNN.pkl',map_location = device))\r\n print(\"load model sucessfully\")\r\n\r\n adam = torch.optim.Adam(model.parameters(), lr=learning_rate)\r\n scheduler = torch.optim.lr_scheduler.StepLR(adam, step_size=1, gamma=0.98)\r\n\r\n crossloss = nn.CrossEntropyLoss()\r\n for epoch in range(1, epoch_num+1):\r\n test_acc(model,dataset_test,test_batch)\r\n model.train()\r\n epoch_loss = 0\r\n\r\n for batch, data in enumerate(train_loader):\r\n inputs, states, targets = data[0].to(device), data[1].to(device), data[2].to(device)\r\n outputs = model(inputs,states)\r\n loss = crossloss(outputs,targets)\r\n\r\n adam.zero_grad()\r\n loss.backward()\r\n adam.step()\r\n epoch_loss += loss\r\n\r\n # print('epoch:{}\\t batch:{}/{}\\t batch loss:{:.4f}'.format(epoch,batch,len(train_loader),loss))\r\n scheduler.step()\r\n print('epoch:{}\\t epoch loss:{:.4f} \\t learning rate:{}'.format(epoch, epoch_loss, adam.param_groups[0]['lr']))\r\n torch.save(model.state_dict(), \"./models/HAR_PointGNN.pkl\")\r\n\r\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.load",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
elifesciences-publications/Simpson_Barton_Nanopore_1 | [
"1b509454a9e25a8c81be5092f8e525ca00e7b5a5"
] | [
"pipeline/basecalling_pipeline/scripts/DRS_details/DRS_splitCanonicalSpliceReads.py"
] | [
"#!/usr/bin/env python\n'''\n--------------------------------------------\nONTdrstools.DRS_splitCanonicalSpliceReads.py\n--------------------------------------------\n\nThis script parses an aligned set of ONT DRS data and examines the splicing \ncharacteristics of the dataset. The parses the alignment of each read in the\nbam file looking for splices and, for each splice present, classifies the \nsplice as canonical or non canonical if it has the GU-AG splicing di-nucleotide\nmotif at the terminals of the intron.\n\nOptionally, it will also examine an annotation gtf/gff and will use this to \nclassify the splices as annotated, or not and record which transcripts the splice\nis found in (-a). Importantly, if splice sites are novel (i.e., no annotated), the\nscript examines a padded region (10bp by default) around the splice position for \nalternative annotated splice sites and novel alternative canonical splice sites.\n\nOptionally it will also use the position weight matricies from Sheth et al 2006\n(DOI:10.1093/nar/gkl556) to classify splice sites as either U2 or U2 splice sites\nif the species designation matches one of the wive spiecies they have position\nweight matrices for (human, mouse, fly, c elegans and arabidopsis).\n\nOptionally, the reads can be read out into separate man files for the different\ncategories.\n\nAll the read, splice and transcript classification information is output as json \nfiles, reads are split into new bam files, and several summary pltos are made.\n\n.. moduleauthor:: Nick Schurch <[email protected]>\n\n:module_version: 1.3\n:created_on: 2018-03-23\n\nCommand-line Arguments\n======================\n\n**usage\\:** \n DRS_splitCanonicalSpliceReads.py\n :param: <input bam file>\n :option:`-l|--log` *<file>*\n [:option:`-v|--verbose`] \n [:option:`--version`] \n [:option:`--help`]\n\nRequired Parameters\n-------------------\n\n:para: <input bam file>\n\n The input bam file\n\n:option:`--logfile|-l` \n\n The name (inc. path) of the log file from the wrapper.\n\nOptional Parameter\n------------------\n\n:option:`--help|-h`\n\n Print a basic description of the tool and its options to STDOUT.\n\n:option:`--version` \n\n Show program's version number and exit.\n \n:option:`--verbose|-v` \n\n Turn on verbose logging (recommended).\n\nOutput\n======\n'''\n\nver=1.2\n\n__scriptname__= \"DRS_splitCanonicalSpliceReads\"\n__version__ = str(ver)\n__usage__ = \"\\n\\t%s <input bam file> <input genome file> -l|--logfile\" \\\n \"[--version][-v|--verbose][--help]\"\n__progdesc__ = '''\nThis script parses an aligned set of ONT DRS data and examines the splicing \ncharacteristics of the dataset. The parses the alignment of each read in the\nbam file looking for splices and, for each splice present, classifies the \nsplice as canonical or non canonical if it has the GU-AG splicing di-nucleotide\nmotif at the terminals of the intron.\n\nOptionally, it will also examine an annotation gtf/gff and will use this to \nclassify the splices as annotated, or not and record which transcripts the splice\nis found in (-a). 
Importantly, if splice sites are novel (i.e., no annotated), the\nscript examines a padded region (10bp by default) around the splice position for \nalternative canonical splice sites, classifying them as annotated or not.\n\nOptionally it will also use the position weight matricies from Sheth et al 2006\n(DOI:10.1093/nar/gkl556) to classify splice sites as either U2 or U2 splice sites\nif the species designation matches one of the five species they have position\nweight matrices for (human, mouse, fly, c elegans and arabidopsis).\n\nOptionally, the reads can be read out into separate man files for the different\ncategories.\n\nAll the read, splice and transcript classification information is output as json \nfiles, reads are split into new bam files, and several summary pltos are made.\n\n'''\n\n__progepi__ = '''\n--------------------------------\nDRS_splitCanonicalSpliceReads.py\n--------------------------------\n'''\n\nimport os, sys, pysam, json, re, math, glob, matplotlib, numpy, itertools, time\nmatplotlib.use('Agg')\nimport script_options.standard_parsers as sp\nimport script_options.custom_callables as cc\nimport script_logging.standard_logging as sl\nfrom Bio import SeqIO, motifs\nfrom Bio.Seq import Seq\nfrom Bio.motifs import matrix\nfrom Bio.Alphabet.IUPAC import unambiguous_dna\nfrom parsing_routines.gff_gtf_tools import annotation\nfrom parsing_routines.general_classes_and_functions import computeIntrons\nfrom bisect import bisect_left\nimport matplotlib.pyplot as plt\n\ndef addScriptOptions(parser, pos_args, kw_args):\n \n \"\"\" add script-specific script options \"\"\"\n \n script_req_group = sp.get_std_req_group(parser)\n\n hlpstr = \"Input bamfile\"\n option_short_name = \"b\"\n option_name = \"bamfile\"\n \n script_req_group.add_argument('-%s' % option_short_name,\n '--%s' % option_name,\n action = 'store',\n dest = option_name,\n type = cc.input_file,\n help = hlpstr)\n kw_args.append((option_name, option_name, None))\n \n hlpstr = \"Input genome fasta\"\n option_short_name = \"g\"\n option_name = \"genomefile\"\n \n script_req_group.add_argument('-%s' % option_short_name,\n '--%s' % option_name,\n action = 'store',\n dest = option_name,\n type = cc.input_file,\n help = hlpstr)\n kw_args.append((option_name, option_name, None))\n\n \n script_options_group = parser.add_argument_group('Options')\n \n hlpstr = \"Path to gtf or gff annotation. If provided, novel splices \" \\\n \"will be identified by comparing the detected intron \" \\\n \"coordinates with the give annotation. Default is None\"\n option_short_name = \"a\"\n option_name = \"annotation\"\n \n script_options_group.add_argument('-%s' % option_short_name,\n '--%s' % option_name,\n action = 'store',\n type = cc.input_file,\n dest = option_name,\n help = hlpstr\n )\n kw_args.append((option_name, option_name, None))\n \n hlpstr = \"Annotation format. Options are 'gff3' or 'gft'. 
Default is 'gff3'.\"\n option_short_name = \"\"\n option_name = \"input_format\"\n option_default = \"gff3\"\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store',\n dest = option_name,\n type = str,\n help = hlpstr,\n default = option_default,\n choices=[\"gff3\",\"gtf\"]\n )\n kw_args.append((option_name, option_name, option_default))\n \n hlpstr = \"Strip 'Chr' from the beginning of annotation reference chromosomes?\"\n option_short_name = \"\"\n option_name = \"stripchr\"\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store_true',\n dest = option_name,\n help = hlpstr\n )\n kw_args.append((option_name, option_name, False))\n \n hlpstr = \"Comma-separated list of chromosome synonyms, delimited by ':'. \" \\\n \"For example: C:Pt,M:Mt\"\n option_short_name = \"\"\n option_name = \"chr_synonyms\"\n option_default = \"\"\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store',\n dest = option_name,\n type = str,\n help = hlpstr,\n default = option_default\n )\n kw_args.append((option_name, option_name, option_default))\n \n hlpstr = \"Use position weight matrices (default is false)\"\n option_short_name = \"\"\n option_name = \"pwm\"\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store_true',\n dest = option_name,\n help = hlpstr\n )\n kw_args.append((option_name, option_name, False))\n \n hlpstr = \"pwm species to use. Options are: A_thaliana, D_melanogaster, \" \\\n \"M_musculus, C_elegans, or H_sapiens. Default is A_thaliana.\"\n option_short_name = \"\"\n option_name = \"pwm_species\"\n option_default = \"A_thaliana\"\n option_choices = [\"A_thaliana\",\"C_elegans\",\"D_melagonaster\",\"M_musculus\",\"H_sapiens\"]\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store',\n dest = option_name,\n type=str,\n help = hlpstr,\n default = option_default,\n choices = option_choices\n )\n kw_args.append((option_name, option_name, option_default))\n \n hlpstr = \"pwm log-odds threshold to use. Default is 3.0\"\n option_short_name = \"\"\n option_name = \"pwm_thresh\"\n option_default = 3.0\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store',\n dest = option_name,\n help = hlpstr,\n default = option_default\n )\n kw_args.append((option_name, option_name, option_default))\n \n hlpstr = \"Path to pwm files. Default is ./position_weight_matricies\"\n option_short_name = \"\"\n option_name = \"pwm_path\"\n option_default = os.path.join(os.path.dirname(os.path.realpath(__file__)),\"position_weight_matricies\")\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store',\n dest = option_name,\n help = hlpstr,\n type = cc.input_path,\n default = option_default\n )\n kw_args.append((option_name, option_name, option_default))\n \n hlpstr = \"Alternative splicing search region size (+-bp). 
Default is 10\"\n option_short_name = \"\"\n option_name = \"altsplicepad\"\n option_default = 10\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store',\n dest = option_name,\n help = hlpstr,\n default = option_default\n )\n kw_args.append((option_name, option_name, option_default))\n \n hlpstr = \"Minimum skips in bam file to consider the skips and intron (bp).\" \\\n \"Default is 20\"\n option_short_name = \"\"\n option_name = \"minintronsize\"\n option_default = 20\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store',\n dest = option_name,\n help = hlpstr,\n default = option_default\n )\n kw_args.append((option_name, option_name, option_default))\n \n hlpstr = \"Prefix for the output files. Default is the prefex to the input bam file\"\n option_short_name = \"p\"\n option_name = \"prefix\"\n option_default = \"\"\n \n script_options_group.add_argument('-%s' % option_short_name,\n '--%s' % option_name,\n action = 'store',\n type = str,\n dest = option_name,\n help = hlpstr,\n default = option_default\n )\n kw_args.append((option_name, option_name, option_default))\n \n hlpstr = \"Split reads into new bam files?\"\n option_short_name = \"\"\n option_name = \"splitreads\"\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store_true',\n dest = option_name,\n help = hlpstr\n )\n kw_args.append((option_name, option_name, False))\n \n hlpstr = \"Which classifications to subset reads by (comma-separated values). Options are \" \\\n \"one or more of: annotated, canonical, U2. Default is annotated.\"\n option_short_name = \"\"\n option_name = \"spliton\"\n option_default = \"annotated\"\n \n script_options_group.add_argument('--%s' % option_name,\n action = 'store',\n dest = option_name,\n type=str,\n help = hlpstr,\n default = option_default\n )\n kw_args.append((option_name, option_name, option_default))\n \n return(parser, pos_args, kw_args)\n \ndef classifySplice(pwms, chrid, start, stop, strand, genome, donorpad=(3,10), acceptorpad=(14,3), minthresh=3.0,\n species_prefix=\"\"):\n \n donor_splice_site=None\n acceptor_splice_site=None\n if strand==\"-\":\n donor_splice_site = genome[chrid][stop-donorpad[1]:stop+donorpad[0]].reverse_complement()\n acceptor_splice_site = genome[chrid][start-acceptorpad[1]:start+acceptorpad[0]].reverse_complement()\n else:\n donor_splice_site = genome[chrid][start-donorpad[0]:start+donorpad[1]]\n acceptor_splice_site = genome[chrid][stop-acceptorpad[0]:stop+acceptorpad[1]]\n\n pwm_scores={\"acceptor\":{}, \"donor\":{}}\n \n for pwm in pwms.keys():\n thispwm = pwms[pwm]\n pssm = thispwm.log_odds()\n \n spliceend=None\n if \"acceptor\" in pwm:\n splice_site = Seq(str(acceptor_splice_site.seq), pssm.alphabet)\n spliceend = \"acceptor\"\n elif \"donor\" in pwm:\n splice_site = Seq(str(donor_splice_site.seq), pssm.alphabet)\n spliceend = \"donor\"\n \n splicetype = re.sub(\"_{}\".format(spliceend),\"\",pwm)\n \n for position, score in pssm.search(splice_site, threshold=minthresh):\n try:\n pwm_scores[spliceend][splicetype].append((position, score))\n except KeyError:\n pwm_scores[spliceend][splicetype]=[(position, score)]\n \n matching_end_classifications=None\n for key in pwm_scores[\"acceptor\"].keys():\n if key in pwm_scores[\"donor\"]:\n thistuple = (key, pwm_scores[\"donor\"][key], pwm_scores[\"acceptor\"][key])\n if matching_end_classifications is None:\n matching_end_classifications = thistuple\n else:\n if (key.startswith(\"U12\") and 
matching_end_classifications[0].startswith(\"U12\")) or (key.startswith(\"U2\") and matching_end_classifications[0].startswith(\"U2\")):\n ascore=None\n dscore=None\n for pos, score in thistuple[1]:\n if ascore is None or score > ascore:\n ascore = 2**score\n for pos, score in thistuple[2]:\n if dscore is None or score > dscore:\n dscore = 2**score\n thisscore = ascore+dscore\n prevscore = 2**(matching_end_classifications[1][0][1])+2**(matching_end_classifications[2][0][1])\n if prevscore<thisscore:\n matching_end_classifications = thistuple\n else:\n U12score = None\n U2score=None\n if key.startswith(\"U12\"):\n for pos, score in thistuple[1]:\n if U12score is None or score > U12score:\n U12score = 2**score\n for pos, score in matching_end_classifications[1]:\n if U2score is None or score > U2score:\n U2score = 2**score \n if U12score>25*U2score:\n matching_end_classifications = thistuple\n elif matching_end_classifications[0].startswith(\"U12\"):\n for pos, score in matching_end_classifications[1]:\n if U12score is None or score > U12score:\n U12score = 2**score\n for pos, score in thistuple[1]:\n if U2score is None or score > U2score:\n U2score = 2**score\n if U12score<25*U2score:\n matching_end_classifications = thistuple\n \n return(matching_end_classifications, pwm_scores)\n\ndef charcterizeIntrons(read_iterator, genome, annotation_splices=None, splicepad=0,\n min_intron_length=20, pwms=None, pwmscorethreshold=3.0, logger=None, LOG_EVERY_N=10000,\n pwm_species=\"\"):\n \n \"\"\"Return a dictionary {readID:[(start, stop)...]}\n Listing the intronic sites for each read (identified by 'N' in the cigar strings).\n \n read_iterator can be the result of a .fetch(...) call.\n Or it can be a generator filtering such reads. Example\n samfile.find_introns((read for read in samfile.fetch(...) 
if read.is_reverse)\n \"\"\"\n \n def locNearestCanonicals(donor, acceptor, padsize, refname, start, stop):\n \n \"\"\" given donor and acceptor regions, locate the nearest canonical splice and \n return the details \"\"\"\n \n import re, numpy, itertools\n \n # we're going to find all instances of GA in the donor region, and AG int he acceptor region and \n # then work out the positions relative to the actual mapped splice\n donor_canonical_matches = numpy.array([m.start() for m in re.finditer('(?=GT)', str(donor))])-padsize\n acceptor_canonical_matches = numpy.array([m.start() for m in re.finditer('(?=AG)', str(acceptor))])-padsize\n possible_canonical_pairs = list(itertools.product(donor_canonical_matches, acceptor_canonical_matches))\n possible_canonical_splices = [\"{}:{}-{}\".format(refname, x[0]+start, x[1]+stop) for x in possible_canonical_pairs if x!=(0,0)]\n return(possible_canonical_splices)\n \n def locNearestAnnotated(keystr, annotation_splices_dict, padsize, regex_match = re.compile(\"^(.+?):(.+?)-(.+?)$\")):\n \n \"\"\" locates any nearby annotated splice sites based on their positions using a \n pre-seperated dictionary and binary search \"\"\"\n \n keyvals = regex_match.match(keystr).groups()\n target_start = int(keyvals[1])\n target_stop = int(keyvals[2])\n \n nearest_splices=[]\n if keyvals[0] in annotation_splices_dict.keys():\n starts = annotation_splices_dict[keyvals[0]][\"starts\"]\n stops = annotation_splices_dict[keyvals[0]][\"stops\"]\n \n i = bisect_left(starts, target_start-padsize)\n \n while i<1E10:\n if i==len(starts):\n break\n if starts[i]>target_start+padsize:\n break\n if starts[i]>target_start-padsize and starts[i]<target_start+padsize and stops[i]>target_stop-padsize and stops[i]<target_stop+padsize:\n nearest_splices.append(\"{}:{}-{}\".format(keyvals[0], starts[i], stops[i]))\n i+=1\n return(nearest_splices)\n \n # setup the output data structures\n read_details={}\n splice_details={}\n splice_summary_numbers={\"canonical_splices\":0, \"non_canonical_splices\":0}\n if annotation_splices is not None and pwms is not None:\n splice_summary_numbers={\"canonical_splices\":0,\n \"non_canonical_splices\":0,\n \"annotated_canonical_splices\":0,\n \"annotated_canonical_undef_splices\":0,\n \"annotated_canonical_U2_splices\":0,\n \"annotated_canonical_U12_splices\":0,\n \"annotated_non_canonical_splices\":0,\n \"annotated_non_canonical_undef_splices\":0,\n \"annotated_non_canonical_U2_splices\":0,\n \"annotated_non_canonical_U12_splices\":0,\n \"novel_canonical_splices\":0,\n \"novel_canonical_undef_splices\":0,\n \"novel_canonical_U2_splices\":0,\n \"novel_canonical_U12_splices\":0,\n \"novel_canonical_splices_with_nearby_annotated_canonical\":0,\n \"novel_canonical_splices_with_nearby_annotated_non_canonical\":0,\n \"novel_non_canonical_splices\":0,\n \"novel_non_canonical_undef_splices\":0,\n \"novel_non_canonical_U2_splices\":0,\n \"novel_non_canonical_U12_splices\":0,\n \"novel_non_canonical_splices_with_nearby_annotated_canonical\":0,\n \"novel_non_canonical_splices_with_nearby_annotated_non_canonical\":0,\n \"novel_non_canonical_splices_with_nearby_novel_canonical\":0\n }\n elif annotation_splices is not None and pwms is None:\n splice_summary_numbers={\"canonical_splices\":0,\n \"non_canonical_splices\":0,\n \"annotated_canonical_splices\":0,\n \"annotated_non_canonical_splices\":0,\n \"novel_canonical_splices\":0,\n \"novel_canonical_splices_with_nearby_annotated_canonical\":0,\n \"novel_canonical_splices_with_nearby_annotated_non_canonical\":0,\n 
\"novel_non_canonical_splices\":0,\n \"novel_non_canonical_splices_with_nearby_annotated_canonical\":0,\n \"novel_non_canonical_splices_with_nearby_annotated_non_canonical\":0,\n \"novel_non_canonical_splices_with_nearby_novel_canonical\":0,\n }\n elif annotation_splices is None and pwms is not None:\n splice_summary_numbers={\"canonical_splices\":0,\n \"non_canonical_splices\":0,\n \"canonical_undef_splices\":0,\n \"canonical_U2_splices\":0,\n \"canonical_U12_splices\":0,\n \"non_canonical_undef_splices\":0,\n \"non_canonical_U2_splices\":0,\n \"non_canonical_U12_splices\":0}\n \n if annotation_splices is not None:\n # split the annotation information by chromosome and position for efficient searching\n regex_match = re.compile(\"^(.+?):(.+?)-(.+?)$\")\n split_annotation_splices = [regex_match.match(x).groups() for x in sorted(annotation_splices.keys())]\n annotation_splices_dict = {}\n for x,y,z in split_annotation_splices:\n try:\n annotation_splices_dict[x][\"values\"].append((int(y),int(z)))\n except KeyError:\n annotation_splices_dict[x]={\"values\":[]}\n annotation_splices_dict[x][\"values\"].append((int(y),int(z)))\n\n for key in annotation_splices_dict.keys():\n annotation_splices_dict[key][\"values\"].sort(key=lambda r: r[0])\n annotation_splices_dict[key][\"starts\"] = [r[0] for r in annotation_splices_dict[key][\"values\"]]\n annotation_splices_dict[key][\"stops\"] = [r[1] for r in annotation_splices_dict[key][\"values\"]]\n \n # Process the aligned reads looking for splices and classifying them\n nlogs=1\n counter=0\n ts = time.time()\n t0 = time.time()\n for read in read_iterator:\n \n current_read_pos = read.reference_start\n thisread_details={\"canonical_splices\":[],\n \"non_canonical_splices\":[],\n \"is_reverse\":False}\n \n if annotation_splices is not None:\n annot_details={\"annotated_splices\":[],\n \"novel_splices\":[],\n \"transcripts_matching\":{}}\n thisread_details.update(annot_details)\n \n if pwms is not None:\n classification_details={\"unclassified_splices\":[]}\n for key in pwms.keys():\n pwm = pwms[key]\n spliceend=None\n if \"acceptor\" in key:\n spliceend = \"acceptor\"\n elif \"donor\" in key:\n spliceend = \"donor\"\n splicetype = re.sub(\"_{}\".format(spliceend),\"\",key)\n classification_details[\"{}_splices\".format(splicetype)]=[]\n thisread_details.update(classification_details)\n \n # identify and process each splice in the read\n for tup in read.cigartuples:\n \n if tup[0]==3 and tup[1]>min_intron_length:\n \n # define the donor and acceptor splice regions in which we will search for alternative splices.\n donor_splice_site=None\n acceptor_splice_site=None\n strand = \"+\"\n if read.is_reverse:\n strand = \"-\"\n thisread_details[\"is_reverse\"]=True\n donor_splice_site = genome[read.reference_name][current_read_pos+tup[1]-2-splicepad:current_read_pos+tup[1]+splicepad].reverse_complement()\n acceptor_splice_site = genome[read.reference_name][current_read_pos-splicepad:current_read_pos+2+splicepad].reverse_complement()\n else:\n acceptor_splice_site = genome[read.reference_name][current_read_pos+tup[1]-2-splicepad:current_read_pos+tup[1]+splicepad]\n donor_splice_site = genome[read.reference_name][current_read_pos-splicepad:current_read_pos+2+splicepad]\n \n # define the splice genomic coordinates and the terminal dinucleotides\n keystr = \"{}:{}-{}\".format(read.reference_name,\n current_read_pos,\n current_read_pos+tup[1])\n donor_splice_string = donor_splice_site.seq[splicepad:splicepad+2]\n acceptor_splice_string = 
acceptor_splice_site.seq[splicepad:splicepad+2]\n \n # if the splice has been seen before then we can just record that its been seen in a new read\n # otherwise we have to classify it.\n if keystr in splice_details:\n splice_details[keystr][\"reads\"].append(read.query_name)\n else:\n splice_details[keystr]={\"reads\":[read.query_name],\n \"sites\":(donor_splice_string, acceptor_splice_string)}\n \n # classify splice as cannonical or not.\n if donor_splice_string!=\"GT\" or acceptor_splice_string!=\"AG\":\n splice_details[keystr][\"is_canonical\"]=False\n else:\n splice_details[keystr][\"is_canonical\"]=True\n \n # classify the mapped site as U2/U12 - we only need to do this the first time this splice is seen\n if pwms is not None:\n classification, options = classifySplice(pwms, read.reference_name, current_read_pos,\n current_read_pos+tup[1], strand, genome,\n minthresh=pwmscorethreshold)\n splice_details[keystr][\"U2/U12_classification\"]=classification\n splice_details[keystr][\"U2/U12_scores\"]=options\n \n # classify if splice is annotated or not\n if annotation_splices is not None:\n if keystr in annotation_splices.keys():\n splice_details[keystr][\"is_annotated\"]=True\n else:\n splice_details[keystr][\"is_annotated\"]=False\n \n # locate nearby annotated splice sites - the +1 here is so that we include the dinucleotide\n # motif and then the pad region around it...\n nearby_annotated_splices = locNearestAnnotated(keystr, annotation_splices_dict, splicepad+1)\n \n # if the splice is not annotated, search for nearby splice sites\n nearby_canonical_splices = locNearestCanonicals(donor_splice_site.seq, acceptor_splice_site.seq,\n splicepad, read.reference_name, current_read_pos,\n current_read_pos+tup[1])\n annot_alt_cannonical=[]\n annot_alt_cannonical_classification=[]\n annot_alt_cannonical_scores=[]\n novel_alt_cannonical=[]\n novel_alt_cannonical_classification=[]\n novel_alt_cannonical_scores=[]\n \n for alt in nearby_canonical_splices:\n annotated=False\n \n if alt in annotation_splices.keys():\n nearby_annotated_splices.remove(alt)\n annotated=True\n \n # classify the alternative splices as U2/U12\n if pwms is not None:\n match = re.match(\"(.+):([0-9]+)-([0-9]+)\", alt)\n classification, options = classifySplice(pwms, match.group(1), int(match.group(2)),\n int(match.group(3)), strand, genome,\n minthresh=pwmscorethreshold)\n if annotated:\n annot_alt_cannonical.append(alt)\n annot_alt_cannonical_classification.append(classification)\n annot_alt_cannonical_scores.append(options)\n else:\n novel_alt_cannonical.append(alt)\n novel_alt_cannonical_classification.append(classification)\n novel_alt_cannonical_scores.append(options)\n \n splice_details[keystr][\"annotated_alt_canonical\"]=annot_alt_cannonical\n splice_details[keystr][\"annotated_alt_non_canonical\"]=nearby_annotated_splices\n splice_details[keystr][\"annotated_alt_canonical_U2/U12_classification\"]=annot_alt_cannonical_classification\n splice_details[keystr][\"annotated_alt_canonical_U2/U12_scores\"]=annot_alt_cannonical_scores\n splice_details[keystr][\"novel_alt_canonical\"]=novel_alt_cannonical\n splice_details[keystr][\"novel_alt_canonical_U2/U12_classification\"]=novel_alt_cannonical_classification\n splice_details[keystr][\"novel_alt_canonical_U2/U12_scores\"]=novel_alt_cannonical_scores\n \n # build summary information\n try:\n \n if splice_details[keystr][\"is_canonical\"]:\n splice_summary_numbers[\"canonical_splices\"]+=1\n else:\n splice_summary_numbers[\"non_canonical_splices\"]+=1\n \n if 
\"is_annotated\" in splice_details[keystr].keys() and \"U2/U12_classification\" in splice_details[keystr].keys():\n \n if splice_details[keystr][\"is_annotated\"] and splice_details[keystr][\"is_canonical\"]:\n splice_summary_numbers[\"annotated_canonical_splices\"]+=1\n if splice_details[keystr][\"U2/U12_classification\"] is None:\n splice_summary_numbers[\"annotated_canonical_undef_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U2\"):\n splice_summary_numbers[\"annotated_canonical_U2_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U12\"):\n splice_summary_numbers[\"annotated_canonical_U12_splices\"]+=1\n \n elif splice_details[keystr][\"is_annotated\"] and not splice_details[keystr][\"is_canonical\"]:\n splice_summary_numbers[\"annotated_non_canonical_splices\"]+=1\n if splice_details[keystr][\"U2/U12_classification\"] is None:\n splice_summary_numbers[\"annotated_non_canonical_undef_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U2\"):\n splice_summary_numbers[\"annotated_non_canonical_U2_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U12\"):\n splice_summary_numbers[\"annotated_non_canonical_U12_splices\"]+=1\n \n elif not splice_details[keystr][\"is_annotated\"] and splice_details[keystr][\"is_canonical\"]:\n splice_summary_numbers[\"novel_canonical_splices\"]+=1\n if len(splice_details[keystr][\"annotated_alt_canonical\"])>0:\n splice_summary_numbers[\"novel_canonical_splices_with_nearby_annotated_canonical\"]+=1\n if len(splice_details[keystr][\"annotated_alt_non_canonical\"])>0:\n splice_summary_numbers[\"novel_canonical_splices_with_nearby_annotated_non_canonical\"]+=1\n if splice_details[keystr][\"U2/U12_classification\"] is None:\n splice_summary_numbers[\"novel_canonical_undef_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U2\"):\n splice_summary_numbers[\"novel_canonical_U2_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U12\"):\n splice_summary_numbers[\"novel_canonical_U12_splices\"]+=1\n \n elif not splice_details[keystr][\"is_annotated\"] and not splice_details[keystr][\"is_canonical\"]:\n splice_summary_numbers[\"novel_non_canonical_splices\"]+=1\n if len(splice_details[keystr][\"annotated_alt_canonical\"])>0:\n splice_summary_numbers[\"novel_non_canonical_splices_with_nearby_annotated_canonical\"]+=1\n if len(splice_details[keystr][\"annotated_alt_non_canonical\"])>0:\n splice_summary_numbers[\"novel_non_canonical_splices_with_nearby_annotated_non_canonical\"]+=1\n if len(splice_details[keystr][\"novel_alt_canonical\"])>0:\n splice_summary_numbers[\"novel_non_canonical_splices_with_nearby_novel_canonical\"]+=1\n if splice_details[keystr][\"U2/U12_classification\"] is None:\n splice_summary_numbers[\"novel_non_canonical_undef_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U2\"):\n splice_summary_numbers[\"novel_non_canonical_U2_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U12\"):\n splice_summary_numbers[\"novel_non_canonical_U12_splices\"]+=1\n \n elif \"annotated\" in splice_details[keystr].keys() and not \"U2/U12_classification\" in splice_details[keystr].keys():\n \n if splice_details[keystr][\"is_annotated\"] and splice_details[keystr][\"is_canonical\"]:\n splice_summary_numbers[\"annotated_canonical_splices\"]+=1\n elif 
splice_details[keystr][\"is_annotated\"] and not splice_details[keystr][\"is_canonical\"]:\n splice_summary_numbers[\"annotated_non_canonical_splices\"]+=1\n elif not splice_details[keystr][\"is_annotated\"] and splice_details[keystr][\"is_canonical\"]:\n splice_summary_numbers[\"novel_canonical_splices\"]+=1\n if len(splice_details[keystr][\"annotated_alt_canonical\"])>0:\n splice_summary_numbers[\"novel_canonical_splices_with_nearby_annotated_canonical\"]+=1\n if len(splice_details[keystr][\"annotated_alt_non_canonical\"])>0:\n splice_summary_numbers[\"novel_canonical_splices_with_nearby_annotated_non_canonical\"]+=1\n elif not splice_details[keystr][\"is_annotated\"] and not splice_details[keystr][\"is_canonical\"]:\n splice_summary_numbers[\"novel_non_canonical_splices\"]+=1\n if len(splice_details[keystr][\"annotated_alt_canonical\"])>0:\n splice_summary_numbers[\"novel_non_canonical_splices_with_nearby_annotated_canonical\"]+=1\n if len(splice_details[keystr][\"annotated_alt_non_canonical\"])>0:\n splice_summary_numbers[\"novel_non_canonical_splices_with_nearby_annotated_non_canonical\"]+=1\n if len(splice_details[keystr][\"novel_alt_canonical\"])>0:\n splice_summary_numbers[\"novel_non_canonical_splices_with_nearby_novel_canonical\"]+=1\n \n elif \"annotated\" not in splice_details[keystr].keys() and \"U2/U12_classification\" in splice_details[keystr].keys():\n \n if splice_details[keystr][\"is_canonical\"]:\n if splice_details[keystr][\"U2/U12_classification\"] is None:\n splice_summary_numbers[\"canonical_undef_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U2\"):\n splice_summary_numbers[\"canonical_U2_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U12\"):\n splice_summary_numbers[\"canonical_U12_splices\"]+=1\n \n elif not splice_details[keystr][\"is_canonical\"]:\n if splice_details[keystr][\"U2/U12_classification\"] is None:\n splice_summary_numbers[\"non_canonical_undef_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U2\"):\n splice_summary_numbers[\"non_canonical_U2_splices\"]+=1\n elif splice_details[keystr][\"U2/U12_classification\"][0].startswith(\"U12\"):\n splice_summary_numbers[\"non_canonical_U12_splices\"]+=1\n\n except:\n print(\"splice_details\")\n for key in sorted(splice_details.keys()):\n print(key, splice_details[key])\n print(\"splice_summary_numbers\")\n for key in sorted(splice_summary_numbers.keys()):\n print(key, splice_summary_numbers[key])\n raise\n \n if splice_details[keystr][\"is_canonical\"] :\n thisread_details[\"canonical_splices\"].append(keystr)\n else:\n thisread_details[\"non_canonical_splices\"].append(keystr)\n \n if annotation_splices is not None:\n if splice_details[keystr][\"is_annotated\"]:\n thisread_details[\"annotated_splices\"].append(keystr)\n thisread_details[\"transcripts_matching\"][keystr] = annotation_splices[keystr][\"transcripts\"]\n else:\n thisread_details[\"novel_splices\"].append(keystr)\n \n if pwms is not None:\n if splice_details[keystr][\"U2/U12_classification\"] is not None:\n thisread_details[\"{}_splices\".format(splice_details[keystr][\"U2/U12_classification\"][0])].append(keystr)\n else:\n thisread_details[\"unclassified_splices\"].append(keystr)\n \n #print(read.query_name, current_read_pos, current_read_pos+tup[1], donor_splice_site.seq, acceptor_splice_site.seq, read_is_all_canonical)\n current_read_pos += tup[1]\n elif tup[0]==0 or tup[0]==2:\n current_read_pos += tup[1]\n \n 
read_details[read.query_name] = thisread_details\n \n counter+=1\n \n if (counter % LOG_EVERY_N)==0:\n msg=\"processed {these} reads (dt = {sec:.2f}s) ...\".format(these=(nlogs*LOG_EVERY_N), sec=time.time()-t0)\n if logger is not None:\n logger.info(msg)\n else:\n print(msg)\n nlogs+=1\n t0=time.time()\n \n msg = \"Finished processed {these} reads (dt = {sec:.2f}s).\".format(these=(nlogs*LOG_EVERY_N)+counter,\n sec=time.time()-t0)\n \n return(read_details, splice_details, splice_summary_numbers)\n\ndef getAnnotationIntrons(annot, genome, chr_synonym_dic={}, logger=None, LOG_EVERY_N=10000):\n \n \"\"\" return a dictionary with all the introns in a given annotation \"\"\"\n \n if logger is not None:\n logger.info(\"parsing transcript exon structures....\")\n \n annot.clear_all()\n annot.set_feature(\"exons\")\n exons = annot.get_selection()\n tx_exons={}\n for exon in exons:\n if type(exon.desc[\"parent\"]) is list:\n for parent in exon.desc[\"parent\"]:\n try:\n tx_exons[parent].append(exon)\n except KeyError:\n tx_exons[parent]=[exon]\n else:\n try:\n tx_exons[exon.desc[\"parent\"]].append(exon)\n except KeyError:\n tx_exons[exon.desc[\"parent\"]]=[exon]\n \n splice_details={}\n nlogs=1\n counter=0\n for transcript in tx_exons.keys():\n these_introns = computeIntrons(tx_exons[transcript], logger=logger)\n for intron in these_introns:\n donor_splice_site=None\n acceptor_splice_site=None\n \n this_chrid = intron.chrid\n if intron.chrid not in genome.keys():\n if intron.chrid in chr_synonym_dic.keys() and chr_synonym_dic[intron.chrid] in genome.keys():\n this_chrid = chr_synonym_dic[intron.chrid]\n else:\n msg = \"There is a mismatch between the annotation and \" \\\n \"genome chromosome IDs that is not accounted for \" \\\n \"in the provided chromosome synonyms list. 
Details: \" \\\n \"Annotation ID: {}, genome IDs: {}, synonyms: {} \" \\\n \"\".format(intron.chrid, genome.keys(), chr_synonym_dic)\n raise ValueError(msg)\n \n if intron.strand==\"-\":\n donor_splice_site = genome[this_chrid][intron.stop-2:intron.stop].reverse_complement()\n acceptor_splice_site = genome[this_chrid][intron.start-1:intron.start+1].reverse_complement()\n else:\n acceptor_splice_site = genome[this_chrid][intron.stop-2:intron.stop]\n donor_splice_site = genome[this_chrid][intron.start-1:intron.start+1]\n \n keystr = \"{}:{}-{}\".format(intron.chrid,\n intron.start-1,\n intron.stop)\n \n try:\n splice_details[keystr][\"transcripts\"].append(transcript)\n except:\n splice_details[keystr]={\"transcripts\":[transcript],\n \"sites\":(str(donor_splice_site.seq),\n str(acceptor_splice_site.seq))}\n if donor_splice_site.seq!=\"GT\" or acceptor_splice_site.seq!=\"AG\":\n splice_details[keystr][\"is_canonical\"]=False\n else:\n splice_details[keystr][\"is_canonical\"]=True\n \n counter+=1\n \n if (counter % LOG_EVERY_N)==0:\n msg=\"processed {these} transcripts...\".format(these=(nlogs*LOG_EVERY_N))\n if logger is not None:\n logger.info(msg)\n else:\n print(msg)\n nlogs+=1\n \n return(splice_details)\n\ndef writeBamFiles(thisbam, splice_read_details, prefix, splits, pwms=None, logger=None):\n \n \"\"\" write bam files separating out the reads into their categories \"\"\"\n \n annot_opts = [\"annotated\",\"novel\"]\n cann_opts = [\"canonical\",\"non_canonical\"]\n \n pwm_opts=[\"unclassified\"]\n if pwms is not None:\n for key in pwms.keys():\n spliceend=None\n if \"acceptor\" in key:\n spliceend = \"acceptor\"\n splicetype = re.sub(\"_{}\".format(spliceend),\"\",key)\n pwm_opts.append(splicetype)\n \n bamfiles = {}\n if \"annotated\" in splits and \"U2\" in splits and \"canonical\" in splits:\n combos = list(itertools.product(annot_opts, pwm_opts, cann_opts))\n for combo in combos:\n pwmcombo = re.sub(\"_\",\"\",combo[1])\n bamfile = \"{}{}_{}_{}_splices.bam\".format(prefix, combo[0], pwmcombo, combo[2])\n bamfiles[combo]={\"filename\":bamfile,\n \"filehandle\":pysam.AlignmentFile(bamfile, \"wb\", template=thisbam)}\n elif \"annotated\" in splits and \"U2\" in splits and \"canonical\" not in splits:\n combos = list(itertools.product(annot_opts, pwm_opts))\n for combo in combos:\n pwmcombo = re.sub(\"_\",\"\",combo[1])\n bamfile = \"{}{}_{}_splices.bam\".format(prefix, combo[0], pwmcombo)\n bamfiles[combo]={\"filename\":bamfile,\n \"filehandle\":pysam.AlignmentFile(bamfile, \"wb\", template=thisbam)}\n elif \"annotated\" in splits and \"U2\" not in splits and \"canonical\" in splits:\n combos = list(itertools.product(annot_opts, cann_opts))\n for combo in combos:\n bamfile = \"{}{}_{}_splices.bam\".format(prefix, combo[0], combo[1])\n bamfiles[combo]={\"filename\":bamfile,\n \"filehandle\":pysam.AlignmentFile(bamfile, \"wb\", template=thisbam)}\n elif \"annotated\" not in splits and \"U2\" in splits and \"canonical\" in splits:\n combos = list(itertools.product(pwm_opts, cann_opts))\n for combo in combos:\n pwmcombo = re.sub(\"_\",\"\",combo[0])\n bamfile = \"{}{}_{}_splices.bam\".format(prefix, pwmcombo, combo[1])\n bamfiles[combo]={\"filename\":bamfile,\n \"filehandle\":pysam.AlignmentFile(bamfile, \"wb\", template=thisbam)}\n elif \"annotated\" in splits and \"U2\" not in splits and \"canonical\" not in splits:\n combos = annot_opts\n for combo in combos:\n bamfile = \"{}{}_splices.bam\".format(prefix, combo[0])\n bamfiles[combo]={\"filename\":bamfile,\n 
\"filehandle\":pysam.AlignmentFile(bamfile, \"wb\", template=thisbam)}\n elif \"annotated\" not in splits and \"U2\" in splits and \"canonical\" not in splits:\n combos = pwm_opts\n for combo in combos:\n pwmcombo = re.sub(\"_\",\"\",combo[0])\n bamfile = \"{}{}_splices.bam\".format(prefix, pwmcombo)\n bamfiles[combo]={\"filename\":bamfile,\n \"filehandle\":pysam.AlignmentFile(bamfile, \"wb\", template=thisbam)}\n elif \"annotated\" not in splits and \"U2\" not in splits and \"canonical\" in splits:\n combos = cann_opts\n for combo in combos:\n bamfile = \"{}{}_splices.bam\".format(prefix, combo[0])\n bamfiles[combo]={\"filename\":bamfile,\n \"filehandle\":pysam.AlignmentFile(bamfile, \"wb\", template=thisbam)}\n \n read_summary_numbers={}\n \n for read in thisbam.fetch():\n if read.query_name in splice_read_details.keys():\n this_read = splice_read_details[read.query_name]\n for combo in combos:\n writetothiscombo=True\n for val in combo:\n if len(this_read[\"{}_splices\".format(val)])==0:\n writetothiscombo=False\n if writetothiscombo:\n bamfiles[combo][\"filehandle\"].write(read)\n previouslywritten = combo\n try:\n read_summary_numbers[bamfiles[combo][\"filename\"]]+=1\n except KeyError:\n read_summary_numbers[bamfiles[combo][\"filename\"]]=1\n \n return(read_summary_numbers)\n\ndef calAltDists(mapsplice, altsplices):\n mapsplicematch = re.match(\".+:([0-9]+)-([0-9]+)\", mapsplice)\n dists=[]\n for val in altsplices:\n altsplicematch = re.match(\".+:([0-9]+)-([0-9]+)\", val)\n thisdist = abs(int(altsplicematch.group(1))-(int(mapsplicematch.group(1))))+abs(int(altsplicematch.group(2))-(int(mapsplicematch.group(2))))\n dists.append(thisdist)\n dists=numpy.array(dists)\n return(dists)\n\ndef mkAnnotPWMBoxPlots(splice_details, plotfile, pointsize=4, title=\"\", legend_fontsize=10, legendloc=(0.2,0.7),\n logger=None):\n \n \"\"\" plot the results of the analysis as a set of cool box plots...\"\"\"\n \n if logger is not None:\n logger.info(\"Plotting classified annotated splicing details boxplots...\")\n \n annotated_splice_exprs_data={}\n novel_splice_exprs_data={}\n alt_splice_dists = {}\n alt_splice_annotated = {}\n \n for splice in splice_details.keys():\n this_splice = splice_details[splice]\n exprs = len(this_splice[\"reads\"])\n splice_class = \"None\"\n if this_splice['U2/U12_classification'] is not None:\n splice_class = this_splice['U2/U12_classification'][0]\n if this_splice['is_annotated']:\n try:\n annotated_splice_exprs_data[splice_class].append(exprs)\n except KeyError:\n annotated_splice_exprs_data[splice_class]=[exprs]\n else:\n dist=0\n annot=False\n \n if \"annotated_alt_canonical\" in splice_details[splice].keys():\n if len(splice_details[splice][\"annotated_alt_canonical\"])>0:\n dist = calAltDists(splice, splice_details[splice][\"annotated_alt_canonical\"]).min()\n annot=True\n \n if not annot and \"novel_alt_canonical\" in splice_details[splice].keys():\n if len(splice_details[splice][\"novel_alt_canonical\"])>0:\n dist = calAltDists(splice, splice_details[splice][\"novel_alt_canonical\"]).min()\n \n try:\n novel_splice_exprs_data[splice_class].append(exprs)\n alt_splice_dists[splice_class].append(dist)\n alt_splice_annotated[splice_class].append(annot)\n except KeyError:\n novel_splice_exprs_data[splice_class]=[exprs]\n alt_splice_dists[splice_class]=[dist]\n alt_splice_annotated[splice_class]=[annot]\n \n fig=plt.figure(figsize=(15,10))\n \n data = []\n labels = list(set(sorted(annotated_splice_exprs_data.keys())+sorted(novel_splice_exprs_data.keys())))\n for label 
in labels:\n if label in annotated_splice_exprs_data.keys():\n data.append(numpy.log10(numpy.array(annotated_splice_exprs_data[label])))\n else:\n data.append(numpy.array([]))\n if label in novel_splice_exprs_data.keys():\n data.append(numpy.log10(numpy.array(novel_splice_exprs_data[label])))\n else:\n data.append(numpy.array([]))\n \n plt.ylim((-0.1,5))\n axtickspos = (numpy.arange(len(labels))*2)+1.5\n boxpos=[]\n for val in axtickspos:\n boxpos.append(val-0.25)\n boxpos.append(val+0.25)\n \n plotaxs=[]\n plotays=[]\n plotuaxs=[]\n plotuays=[]\n plotann=[]\n plotzerodist=[]\n \n i=0\n while i<len(boxpos):\n ay = data[i]\n uay = data[i+1]\n ax = numpy.random.normal(boxpos[i], 0.05, len(ay))\n \n if len(uay)==0:\n xdata = []\n else:\n xdata = numpy.array(alt_splice_dists[labels[int((i+1)/2)]])\n for val in xdata:\n if val==0:\n plotzerodist.append(True)\n else:\n plotzerodist.append(False)\n \n scalewidth=0.4\n if len(xdata)>0:\n if xdata.max()==0:\n uax=xdata\n else:\n uax = boxpos[i+1]+((xdata/xdata.max())*scalewidth)-(scalewidth/2)\n else:\n uax=xdata\n \n for xval in ax:\n plotaxs.append(xval)\n for xval in uax:\n plotuaxs.append(xval)\n for yval in ay:\n plotays.append(yval)\n for yval in uay:\n plotuays.append(yval)\n \n if len(uay)>0:\n for val in alt_splice_annotated[labels[int((i+1)/2)]]:\n plotann.append(val)\n i+=2\n \n plotaxs=numpy.array(plotaxs)\n plotays=numpy.array(plotays)\n plotuaxs=numpy.array(plotuaxs)\n plotuays=numpy.array(plotuays)\n plotann=numpy.array(plotann)\n plotzerodist=numpy.array(plotzerodist)\n \n i=0\n j=0\n while i<len(labels):\n if labels[i] in annotated_splice_exprs_data.keys():\n plt.text(boxpos[j], 4.6, sum(numpy.array(annotated_splice_exprs_data[labels[i]])), size=legend_fontsize, ha=\"center\", color=\"indianred\")\n plt.text(boxpos[j], 4.8, len(numpy.array(annotated_splice_exprs_data[labels[i]])), size=legend_fontsize, ha=\"center\", color=\"indianred\")\n if labels[i] in novel_splice_exprs_data.keys():\n plt.text(boxpos[j+1], 4.6, sum(numpy.array(novel_splice_exprs_data[labels[i]])), size=legend_fontsize, ha=\"center\", color=\"steelblue\")\n plt.text(boxpos[j+1], 4.8, len(numpy.array(novel_splice_exprs_data[labels[i]])), size=legend_fontsize, ha=\"center\", color=\"steelblue\")\n i+=1\n j+=2\n\n bp = plt.boxplot(data, sym=\"\", widths=0.2, positions=boxpos)\n acaregories = plt.scatter(plotaxs, plotays, s=pointsize, c=\"indianred\", alpha=0.3, label=\"annotated splices\")\n if len(plotann)>0:\n uacaregories_uanearby = plt.scatter(plotuaxs[numpy.invert(plotann)], plotuays[numpy.invert(plotann)], s=pointsize, c='y', alpha=1.0, label=\"novel splices with novel canonical splices nearby\")\n uacaregories_anearby = plt.scatter(plotuaxs[plotann], plotuays[plotann], s=pointsize, c='darkcyan', alpha=0.3, label=\"novel splices with annotated canonical splices nearby\")\n if len(plotzerodist)>0:\n uacaregories_nonearby = plt.scatter(plotuaxs[plotzerodist], plotuays[plotzerodist], s=pointsize, c='black', alpha=1.0, label=\"novel splices with no canonical splices nearby\")\n \n lw=1.5\n for key in bp.keys():\n for box in bp[key]:\n box.set(linewidth=lw, color='0.4')\n \n plt.ylabel(r\"$log_{10}(counts)$\")\n ax = plt.gca()\n ax.set_xticks(axtickspos)\n xtixks = ax.set_xticklabels([re.sub(\"_\",\" \",x) for x in labels], rotation=20, ha=\"right\", size=10)\n plt.title(title)\n plt.legend(loc=legendloc, fontsize=legend_fontsize)\n plt.tight_layout()\n \n plt.savefig(plotfile, dpi=300, format=\"svg\")\n\ndef mkPWMBoxPlots(splice_details, plotfile, pointsize=4, 
title=\"\", logger=None):\n \n \"\"\" plot the results of the analysis as a set of cool box plots...\"\"\"\n \n if logger is not None:\n logger.info(\"Plotting classified splicing details boxplots...\")\n \n splice_exprs_data = {}\n \n for splice in splice_details.keys():\n this_splice = splice_details[splice]\n exprs = len(this_splice[\"reads\"])\n splice_class = \"None\"\n if this_splice['U2/U12_classification'] is not None:\n splice_class = this_splice['U2/U12_classification'][0]\n try:\n splice_exprs_data[splice_class].append(exprs)\n except KeyError:\n splice_exprs_data[splice_class]=[exprs]\n \n fig=plt.figure(figsize=(15,10))\n \n data = []\n labels = list(sorted(splice_exprs_data.keys()))\n for label in labels:\n data.append(numpy.log10(numpy.array(splice_exprs_data[label])))\n \n plt.ylim((-0.1,5))\n boxpos = (numpy.arange(len(labels))*2)+1.5\n \n plotxs=[]\n plotys=[]\n \n i=0\n while i<len(boxpos):\n y = data[i]\n x = numpy.random.normal(boxpos[i], 0.05, len(y))\n \n for xval in x:\n plotxs.append(xval)\n for yval in y:\n plotys.append(yval)\n i+=1\n \n plotxs=numpy.array(plotxs)\n plotys=numpy.array(plotys)\n \n i=0\n j=0\n while i<len(labels):\n if labels[i] in splice_exprs_data.keys():\n plt.text(boxpos[j], 4.6, sum(numpy.array(splice_exprs_data[labels[i]])), size=10, ha=\"center\", color=\"indianred\")\n plt.text(boxpos[j], 4.8, len(numpy.array(splice_exprs_data[labels[i]])), size=10, ha=\"center\", color=\"indianred\")\n i+=1\n j+=1\n\n bp = plt.boxplot(data, sym=\"\", widths=0.2, positions=boxpos)\n caregories = plt.scatter(plotxs, plotys, s=pointsize, c=\"indianred\", alpha=0.3)\n \n lw=1.5\n for key in bp.keys():\n for box in bp[key]:\n box.set(linewidth=lw, color='0.4')\n \n plt.ylabel(r\"$log_{10}(counts)$\")\n ax = plt.gca()\n ax.set_xticks(boxpos)\n xtixks = ax.set_xticklabels([re.sub(\"_\",\" \",x) for x in labels], rotation=20, ha=\"right\", size=10)\n plt.title(title)\n plt.tight_layout()\n \n plt.savefig(plotfile, dpi=300, format=\"svg\")\n\ndef mkAnnotBoxPlots(splice_details, plotfile, pointsize=4, title=\"\", legend_fontsize=8, logger=None):\n \n \"\"\" plot the results of the analysis as a set of cool box plots...\"\"\"\n \n if logger is not None:\n logger.info(\"Plotting annotated splicing details boxplots...\")\n \n splice_exprs_data={\"annotated_canonical\":[],\n \"annotated_non_canonical\":[],\n \"novel_canonical_with_nearby_annotated_canonical\":[],\n \"novel_canonical_no_nearby_annotated_canonical\":[],\n \"novel_non_canonical_with_nearby_annotated_canonical\":[],\n \"novel_non_canonical_with_nearby_novel_canonical\":[],\n \"novel_non_canonical_no_nearby_canonical\":[]}\n \n alt_splice_distances = {\"novel_canonical_with_nearby_annotated_canonical\":[],\n \"novel_non_canonical_with_nearby_annotated_canonical\":[],\n \"novel_non_canonical_with_nearby_novel_canonical\":[]} \n \n for splice in splice_details.keys():\n exprs = len(splice_details[splice][\"reads\"])\n \n if splice_details[splice]['is_annotated'] and splice_details[splice]['is_canonical']:\n splice_exprs_data[\"annotated_canonical\"].append(exprs)\n \n elif splice_details[splice]['is_annotated'] and not splice_details[splice]['is_canonical']:\n splice_exprs_data[\"annotated_non_canonical\"].append(exprs)\n \n elif splice_details[splice]['is_canonical'] and not splice_details[splice]['is_annotated']:\n if \"annotated_alt_canonical\" in splice_details[splice].keys():\n if len(splice_details[splice][\"annotated_alt_canonical\"])>0:\n 
splice_exprs_data[\"novel_canonical_with_nearby_annotated_canonical\"].append(exprs)\n dists = calAltDists(splice, splice_details[splice][\"annotated_alt_canonical\"])\n alt_splice_distances[\"novel_canonical_with_nearby_annotated_canonical\"].append(dists.min())\n else:\n splice_exprs_data[\"novel_canonical_no_nearby_annotated_canonical\"].append(exprs)\n else:\n splice_exprs_data[\"novel_canonical_no_nearby_annotated_canonical\"].append(exprs)\n \n elif not splice_details[splice]['is_canonical'] and not splice_details[splice]['is_annotated']:\n if \"annotated_alt_canonical\" in splice_details[splice].keys():\n if len(splice_details[splice][\"annotated_alt_canonical\"])>0:\n splice_exprs_data[\"novel_non_canonical_with_nearby_annotated_canonical\"].append(exprs)\n dists = calAltDists(splice, splice_details[splice][\"annotated_alt_canonical\"])\n alt_splice_distances[\"novel_non_canonical_with_nearby_annotated_canonical\"].append(dists.min())\n elif \"novel_alt_canonical\" in splice_details[splice].keys():\n if len(splice_details[splice][\"novel_alt_canonical\"])>0:\n splice_exprs_data[\"novel_non_canonical_with_nearby_novel_canonical\"].append(exprs)\n dists = calAltDists(splice, splice_details[splice][\"novel_alt_canonical\"])\n alt_splice_distances[\"novel_non_canonical_with_nearby_novel_canonical\"].append(dists.min())\n else:\n splice_exprs_data[\"novel_non_canonical_no_nearby_canonical\"].append(exprs)\n elif \"novel_alt_canonical\" in splice_details[splice].keys():\n if len(splice_details[splice][\"novel_alt_canonical\"])>0:\n splice_exprs_data[\"novel_non_canonical_with_nearby_novel_canonical\"].append(exprs)\n dists = calAltDists(splice, splice_details[splice][\"novel_alt_canonical\"])\n alt_splice_distances[\"novel_non_canonical_with_nearby_novel_canonical\"].append(dists.min())\n else:\n splice_exprs_data[\"novel_non_canonical_no_nearby_canonical\"].append(exprs)\n \n fig=plt.figure(figsize=(15,10))\n \n data = []\n labels = list(splice_exprs_data.keys())\n for label in labels:\n data.append(numpy.log10(numpy.array(splice_exprs_data[label])))\n \n plt.ylim((-0.1,5))\n \n plotxs=[]\n plotys=[]\n cplotxs=[]\n cplotys=[]\n cplotcol=[]\n for i in numpy.arange(len(labels)):\n y = data[i]\n if labels[i] in alt_splice_distances.keys() and len(y)>0:\n #xvals = numpy.random.normal(i+1, 0.05, len(y))\n xdata = numpy.array(alt_splice_distances[labels[i]])\n scalewidth=0.4\n xvals = (i+1)+((xdata/xdata.max())*scalewidth)-(scalewidth/2)\n for xval in xvals:\n cplotxs.append(xval)\n for yval in y:\n cplotys.append(yval)\n for cval in alt_splice_distances[labels[i]]:\n cplotcol.append(cval)\n else:\n x = numpy.random.normal(i+1, 0.05, len(y))\n for xval in x:\n plotxs.append(xval)\n for yval in y:\n plotys.append(yval)\n plt.text(i+1, 4.8, len(numpy.array(splice_exprs_data[labels[i]])), size=10, ha=\"center\")\n plt.text(i+1, 4.6, sum(numpy.array(splice_exprs_data[labels[i]])), size=10, ha=\"center\")\n \n nearbys = plt.scatter(cplotxs, cplotys, s=pointsize, c=cplotcol, alpha=0.3, cmap=\"viridis_r\")\n cbar = plt.colorbar(shrink=0.4, pad=0.02)\n cbar.ax.tick_params(labelsize=legend_fontsize)\n cbar.set_label('distance to nearest alternative (bp)', rotation=270, labelpad=13, fontsize=legend_fontsize)\n caregories = plt.scatter(plotxs, plotys, s=pointsize, c=\"indianred\", alpha=0.3)\n bp = plt.boxplot(data, sym=\"\")\n \n lw=1.5\n for key in bp.keys():\n for box in bp[key]:\n box.set(linewidth=lw, color='0.4')\n \n plt.ylabel(r\"$log_{10}(counts)$\")\n ax = plt.gca()\n xticks = 
ax.set_xticklabels([re.sub(\"_\",\" \",x) for x in labels], rotation=20, ha=\"right\", size=10)\n plt.title(title)\n plt.tight_layout()\n \n plt.savefig(plotfile, dpi=300, format=\"svg\")\n\ndef mkBoxPlots(splice_details, plotfile, pointsize=4, title=\"\", logger=None):\n \n \"\"\" plot the results of the analysis as a set of cool box plots...\"\"\"\n \n if logger is not None:\n logger.info(\"Plotting basic splicing details boxplots...\")\n \n splice_exprs_data = {\"canonical\":[],\n \"non_canonical\":[]}\n \n for splice in splice_details.keys():\n this_splice = splice_details[splice]\n exprs = len(this_splice[\"reads\"])\n splice_class = \"None\"\n try:\n if this_splice['is_canonical']:\n splice_exprs_data[\"canonical\"].append(exprs)\n else:\n splice_exprs_data[\"non_canonical\"].append(exprs)\n except KeyError:\n if this_splice['is_canonical']:\n splice_exprs_data[\"canonical\"]=[exprs]\n else:\n splice_exprs_data[\"non_canonical\"]=[exprs]\n \n fig=plt.figure(figsize=(15,10))\n \n data = []\n labels = list(sorted(splice_exprs_data.keys()))\n for label in labels:\n data.append(numpy.log10(numpy.array(splice_exprs_data[label])))\n \n plt.ylim((-0.1,5))\n boxpos = (numpy.arange(len(labels))*2)+1.5\n \n plotxs=[]\n plotys=[]\n \n i=0\n while i<len(boxpos):\n y = data[i]\n x = numpy.random.normal(boxpos[i], 0.05, len(y))\n \n for xval in x:\n plotxs.append(xval)\n for yval in y:\n plotys.append(yval)\n i+=1\n \n plotxs=numpy.array(plotxs)\n plotys=numpy.array(plotys)\n \n i=0\n j=0\n while i<len(labels):\n if labels[i] in splice_exprs_data.keys():\n plt.text(boxpos[j], 4.6, sum(numpy.array(splice_exprs_data[labels[i]])), size=10, ha=\"center\", color=\"indianred\")\n plt.text(boxpos[j], 4.8, len(numpy.array(splice_exprs_data[labels[i]])), size=10, ha=\"center\", color=\"indianred\")\n i+=1\n j+=1\n\n bp = plt.boxplot(data, sym=\"\", widths=0.2, positions=boxpos)\n caregories = plt.scatter(plotxs, plotys, s=pointsize, c=\"indianred\", alpha=0.3)\n \n lw=1.5\n for key in bp.keys():\n for box in bp[key]:\n box.set(linewidth=lw, color='0.4')\n \n plt.ylabel(r\"$log_{10}(counts)$\")\n ax = plt.gca()\n ax.set_xticks(boxpos)\n xtixks = ax.set_xticklabels([re.sub(\"_\",\" \",x) for x in labels], rotation=20, ha=\"right\", size=10)\n plt.title(title)\n plt.tight_layout()\n \n plt.savefig(plotfile, dpi=300, format=\"svg\")\n\ndef makePWM(pwmtext):\n \n nreg = '\\s-?[1-9]+[0-9]*.?[0-9]*E-?\\+?[0-9]+\\s?'\n pwmdict={\"A\":[], \"T\":[], \"G\":[], \"C\":[]}\n alphorder = None\n for line in pwmtext:\n if alphorder is not None:\n vals = [float(x) for x in line.strip().split(\"\\t\")]\n i=0\n while i<len(alphorder):\n pwmdict[alphorder[i]].append(vals[i])\n i+=1\n\n alphmatch = re.match(\"(A|T|G|C)\\t(A|T|G|C)\\t(A|T|G|C)\\t(A|T|G|C)\\n\", line)\n if alphmatch:\n alphorder = alphmatch.groups()\n \n pwm = motifs.matrix.PositionWeightMatrix(unambiguous_dna, pwmdict)\n return(pwm)\n\nclass Encoder(json.JSONEncoder):\n \"\"\" Special json encoder for numpy types \"\"\"\n def default(self, obj):\n if isinstance(obj, (numpy.int_, numpy.intc, numpy.intp, numpy.int8,\n numpy.int16, numpy.int32, numpy.int64, numpy.uint8,\n numpy.uint16, numpy.uint32, numpy.uint64)):\n return int(obj)\n elif isinstance(obj, (numpy.float_, numpy.float16, numpy.float32, \n numpy.float64)):\n return float(obj)\n elif isinstance(obj,(numpy.ndarray,)): #### This is the fix\n return obj.tolist()\n elif isinstance(obj, Seq):\n return(str(obj))\n return json.JSONEncoder.default(self, obj)\n\nif __name__ == '__main__':\n\n # parse command 
line options\n # Set standard parser\n parser, pos_args, kw_args = sp.standard_parser(__version__,\n prog = __scriptname__, \n usage = __usage__,\n description = __progdesc__,\n epilog = __progepi__,\n infile = False,\n outfile = False,\n tmpdir = False)\n \n parser, pos_args, kw_args = addScriptOptions(parser, pos_args, kw_args)\n \n args = parser.parse_args()\n \n # setup standard logging information\n script_logger = sl.standard_logger(__version__, sys.argv, args, pos_args, \n kw_args, script_name=__scriptname__)\n \n splits = args.spliton.split(\",\")\n \n # ok, first lets check that the parameters specified are consistent.\n if args.annotation is None and args.splitreads and \"annotated\" in splits:\n script_logger.warn(\"Warning: cannot split on annotation because no annotation was provided.\")\n splits.remove(\"annotated\")\n \n if not args.pwm and \"U2\" in splits:\n script_logger.warn(\"Warning: cannot split on U2/U12 classification because no \" \\\n \"position weight matrices were provided.\")\n splits.remove(\"U2\")\n \n # load the genome\n script_logger.info(\"loading genome....\")\n genome = {}\n for seq_record in SeqIO.parse(args.genomefile, \"fasta\"):\n genome[seq_record.name] = seq_record\n \n # load the annotation and enable novel splicing detection\n annot_details = None\n if args.annotation is not None:\n script_logger.info(\"Novel splicing detection enabled, loading annotation...\")\n annot = annotation(args.annotation, filetype=args.input_format, stripChr=args.stripchr)\n \n # build a quick dictionary of chromosome synonyms to check for....\n chr_synonym_dic = {}\n for val in args.chr_synonyms.split(\",\"):\n vals = val.split(\":\")\n if len(vals)==2:\n chr_synonym_dic[vals[0]]=vals[1]\n chr_synonym_dic[vals[1]]=vals[0]\n \n annot_details = getAnnotationIntrons(annot, genome, chr_synonym_dic,\n logger=script_logger)\n \n # load the annotation and enable novel splicing detection\n pwm_details = None\n if args.pwm:\n script_logger.info(\"U2/U12 calssification detection enabled, loading pwms...\")\n PWMfiles = glob.glob(\"{}*.pwm\".format(os.path.join(args.pwm_path,args.pwm_species)))\n pwm_details={}\n for PWMfile in PWMfiles:\n with open(PWMfile,\"r\") as fh:\n pwmkey = re.sub(\"{}_\".format(args.pwm_species), \"\", os.path.splitext(os.path.basename(PWMfile))[0])\n pwm_details[pwmkey] = makePWM(fh.readlines())\n \n # classify intron splices \n script_logger.info(\"loading bamfile and classifying data....\")\n thisbam = pysam.AlignmentFile(args.bamfile, \"rb\")\n\n read_details, splice_details, splice_summary_numbers = charcterizeIntrons((read for read in thisbam.fetch()),\n genome,\n annotation_splices = annot_details,\n splicepad = args.altsplicepad,\n min_intron_length = args.minintronsize,\n pwms = pwm_details,\n pwmscorethreshold = args.pwm_thresh,\n logger=script_logger)\n \n # get output file prefix sorted\n prefix = args.prefix\n if prefix==\"\":\n prefix = os.path.splitext(args.bamfile)[0]\n \n plotfile = \"{}spliceplot.svg\".format(prefix)\n if args.pwm and args.annotation is not None:\n mkAnnotPWMBoxPlots(splice_details, plotfile, pointsize=4, title=os.path.basename(args.bamfile),\n legend_fontsize=10, legendloc=(0.2,0.7), logger=script_logger)\n elif args.pwm and args.annotation is None:\n mkPWMBoxPlots(splice_details, plotfile, pointsize=4, title=os.path.basename(args.bamfile),\n logger=script_logger)\n elif not args.pwm and args.annotation is not None:\n mkAnnotBoxPlots(splice_details, plotfile, pointsize=4, title=os.path.basename(args.bamfile),\n 
legend_fontsize=8, logger=script_logger)\n elif not args.pwm and args.annotation is None:\n mkBoxPlots(splice_details, plotfile, pointsize=4, title=os.path.basename(args.bamfile)\n , logger=script_logger)\n \n # log some details\n script_logger.info(\"Dataset {bamfile} contains {nsplices} detected splicing event \" \\\n \"(from {nreads} reads encompassing one or more splicing events).\" \\\n \"\".format(bamfile = args.bamfile, nsplices = len(splice_details.keys()),\n nreads = len(read_details.keys())))\n \n strs=[\"The breakdown of the splicing events is:\"]\n \n for key in sorted(splice_summary_numbers.keys()):\n thisstr = \"\\t{}:\\t{} ({:.2%})\".format(re.sub(\"_\",\" \",key),\n splice_summary_numbers[key],\n splice_summary_numbers[key]/len(splice_details.keys()))\n strs.append(thisstr)\n \n script_logger.info(\"\\n\".join(strs))\n \n strs=[\"Splices have been split into different alignment files:\"]\n if args.splitreads:\n read_summary_numbers = writeBamFiles(thisbam, read_details, prefix, splits, pwms=pwm_details, logger=script_logger)\n for key in sorted(read_summary_numbers.keys()):\n thisstr = \"\\t{}:\\t{} ({:.2%})\".format(key, read_summary_numbers[key],\n read_summary_numbers[key]/len(read_details.keys()))\n strs.append(thisstr)\n \n script_logger.info(\"\\n\".join(strs))\n \n script_logger.info(\"Graphical summary of the splicing details has been written to {}\".format(plotfile))\n \n nc_stats_file = \"{}splice_stats.json\".format(prefix)\n script_logger.info(\"writing details of all detected splices to {}...\".format(nc_stats_file))\n fh = open(nc_stats_file,\"w\")\n json.dump(splice_details, fh, sort_keys=True, indent=4, cls=Encoder)\n fh.close()\n \n nc_stats_file = \"{}read_stats.json\".format(prefix)\n script_logger.info(\"writing details of the splices in each read to {}...\".format(nc_stats_file))\n fh = open(nc_stats_file,\"w\")\n json.dump(read_details, fh, sort_keys=True, indent=4, cls=Encoder)\n fh.close()\n \n script_logger.info(\"Finished. Have a nice day and don't forget to index the new bam files! ;)\")\n"
] | [
[
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.invert",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TomAugspurger/dota | [
"38f4021370bb41a94d3edfd8e844e0ed43f4c9a8"
] | [
"dota/helpers.py"
] | [
"# -*- coding: utf-8 -*-\nimport re\nimport pathlib\nfrom itertools import chain\ntry:\n from io import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\nimport numpy as np\nimport pandas as pd\n\nimport dota.api as a\n\n\ndef cached_games(directory, regex=r\"[\\w\\/]*?(\\d+)\\.json\"):\n \"\"\"\n Return the match ids of all games.\n\n Parameters\n ----------\n directory : str or pathlib.Path\n regex : str. Alternative regex. Used to match games\n\n Returns\n -------\n\n match_ids : iterable of Paths\n\n \"\"\"\n if not isinstance(directory, (pathlib.Path, pathlib.PosixPath,\n pathlib.WindowsPath)):\n directory = pathlib.Path(directory)\n\n regex = re.compile(regex)\n match_ids = filter(lambda x: regex.match(str(x)), directory.iterdir())\n return match_ids\n\n\ndef open_or_stringIO(f, as_string=False):\n \"\"\"\n Useful for testing, but not sure how good it actually is.\n \"\"\"\n try:\n p = pathlib.Path(f)\n if p.exists() and not as_string:\n return open(f)\n else:\n return StringIO(f)\n except OSError:\n return StringIO(f)\n\n\ndef pb_team_id(df, order=0):\n return df.team_id_f.iloc[order]\n\n\ndef pb_opponent_id(df, order=0):\n \"\"\"\n Get the opponent id from a pick / ban Frame.\n\n Parameters\n ----------\n df : DataFrame\n formatted like a pick / ban frame\n order : int\n pick / ban order (1 .. 19)\n\n Returns\n -------\n opponent_id : int\n \"\"\"\n x = df['team_id_f'].unique()\n other_team = {x[0]: x[1], x[1]: x[0]}\n return df.team_id_f.map(other_team).iloc[order]\n\n\ndef pb_previous_pbs(df, order=0):\n \"\"\"\n Get the hero id's for all prior picks and bans.\n\n Parameters\n ----------\n df : DataFrame\n formatted like a pick / ban frame\n order : int\n pick / ban order (1 .. 19)\n\n Returns\n -------\n prior_pbs : Series\n index labels are pick0, b0 ... or just order?\n values are hero_id_f\n \"\"\"\n pbs = pd.DataFrame(df.hero_id_f.iloc[:order].values,\n index=df.order.iloc[:order].values).T\n pbs = pbs.rename(columns=lambda x: 'pb_' + str(x))\n return pbs\n\n\ndef pb_only_complete_drafts(df):\n \"\"\"\n Remove any matches where at least one team_id is NaN.\n Or where the draft has fewer that 20 picks / bans.\n \"\"\"\n good_ids = (~pd.isnull(df['team_id'])).groupby(df['match_id']).all()\n good_ids = good_ids[good_ids].index\n\n full_drafts = df.groupby('match_id').apply(len)\n full_drafts = full_drafts[full_drafts == 20].index\n good_ids = good_ids & full_drafts\n return df.query('match_id in @good_ids')\n\n#-----------------------------------------------------------------------------\n# Feature extraction\n\n\ndef extract_hero_role():\n \"\"\"\n An array [n_heros x n_roles] with 1's if that hero is that role.\n\n\n Notes\n -----\n I'm creating role_id to be an int from the roles in\n\n roles = set(list(chain(*api._hero_roles.values())))\n\n \"\"\"\n # need to persist this to disk I think.\n # then update as neeeded.\n by_hero = a._hero_roles\n all_heroes = sorted(a._hero_names_to_id.keys())\n n_heros = len(all_heroes)\n roles = sorted(set(list(chain(*by_hero.values()))))\n n_roles = len(roles)\n\n df = pd.DataFrame(np.zeros(shape=(n_heros, n_roles)),\n index=all_heroes,\n columns=roles)\n\n for hero, hero_roles in by_hero.items():\n for role in hero_roles:\n df.loc[hero, role] = 1\n return df\n"
] | [
[
"pandas.isnull",
"numpy.zeros",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
psc-g/Psc2 | [
"6676fc67263c9268ff65784d583cb838cfd42c28"
] | [
"Psc2/modes/mlsplainer.py"
] | [
"\"\"\"A MLSplainer mode for hijacking human notes with machine learning ones.\"\"\"\n\nimport os\nimport OSC\nimport tensorflow as tf\nimport threading\n\nimport magenta\nfrom magenta.models.melody_rnn import melody_rnn_model\nfrom magenta.models.melody_rnn import melody_rnn_generate\nfrom magenta.models.melody_rnn import melody_rnn_sequence_generator\nfrom magenta.music import sequences_lib\nfrom magenta.protobuf import generator_pb2\nfrom magenta.protobuf import music_pb2\nfrom Psc2 import ascii_arts\n\n\nfrom Psc2.modes import mode\n\nclass MLSplainer(mode.Mode):\n \"\"\"A MLSplainer mode for hijacking human notes with machine learning ones.\n\n It will fill a buffer with human notes, send those over to PerformanceRNN,\n fill a buffer with machine-learning generated notes, and then hijack the\n human notes (but not the rhythm) until the buffer is empty. Then repeat.\n \"\"\"\n\n def __init__(self,\n client,\n base_models_path='/home/psc/Psc2/magenta_models',\n model_name='attention_rnn.mag',\n min_primer_length=20,\n max_robot_length=20,\n temperature=1.0,\n print_ascii_arts=False):\n tf.logging.set_verbosity(tf.logging.ERROR)\n self.client = client\n self.min_primer_length = min_primer_length\n self.max_robot_length = max_robot_length\n self.accumulated_primer_melody = []\n self.generated_melody = []\n # Mapping of notes (defaults to identity).\n self.note_mapping = {i:i for i in range(21, 109)}\n self.improv_status = 'human' # One of 'human' or 'robot'.\n melody_model_path = '{}/{}'.format(base_models_path, model_name)\n self.melody_bundle = magenta.music.read_bundle_file(melody_model_path)\n self.temperature = temperature\n self.print_ascii_arts = print_ascii_arts\n os.system('clear')\n print(ascii_arts.arts['psc'])\n\n def reset(self):\n self.accumulated_primer_melody = []\n self.generated_melody = []\n\n def _generate_melody(self):\n melody_config_id = self.melody_bundle.generator_details.id\n melody_config = melody_rnn_model.default_configs[melody_config_id]\n generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(\n model=melody_rnn_model.MelodyRnnModel(melody_config),\n details=melody_config.details,\n steps_per_quarter=melody_config.steps_per_quarter,\n checkpoint=melody_rnn_generate.get_checkpoint(),\n bundle=self.melody_bundle)\n generator_options = generator_pb2.GeneratorOptions()\n generator_options.args['temperature'].float_value = self.temperature\n generator_options.args['beam_size'].int_value = 1\n generator_options.args['branch_factor'].int_value = 1\n generator_options.args['steps_per_iteration'].int_value = 1\n primer_melody = magenta.music.Melody(self.accumulated_primer_melody)\n qpm = magenta.music.DEFAULT_QUARTERS_PER_MINUTE\n primer_sequence = primer_melody.to_sequence(qpm=qpm)\n seconds_per_step = 60.0 / qpm / generator.steps_per_quarter\n # Set the start time to begin on the next step after the last note ends.\n last_end_time = (max(n.end_time for n in primer_sequence.notes)\n if primer_sequence.notes else 0)\n melody_total_seconds = last_end_time * 3\n generate_section = generator_options.generate_sections.add(\n start_time=last_end_time + seconds_per_step,\n end_time=melody_total_seconds)\n generated_sequence = generator.generate(primer_sequence, generator_options)\n self.generated_melody = [n.pitch for n in generated_sequence.notes]\n # Get rid of primer melody.\n self.generated_melody = self.generated_melody[\n len(self.accumulated_primer_melody):]\n # Make sure generated melody is not too long.\n self.generated_melody = 
self.generated_melody[:self.max_robot_length]\n self.accumulated_primer_melody = []\n\n def _send_stopnote(self, note, velocity):\n msg = OSC.OSCMessage()\n msg.setAddress('/stopthru')\n msg.append([note, velocity])\n self.client.send(msg)\n\n def process_note_off(self, note, velocity):\n mapped_note = self.note_mapping[note]\n self._send_stopnote(mapped_note, velocity)\n self._send_stopnote(note, velocity)\n\n def _send_playnote(self, note, velocity):\n msg = OSC.OSCMessage()\n msg.setAddress('/playthru')\n msg.append([note, velocity])\n self.client.send(msg)\n\n def process_note(self, note, velocity):\n \"\"\"Receive a new note to process.\n\n Args:\n note: int, pitch to check.\n velocity: int, possibly used for playback.\n\n Returns:\n False, no pattern to match.\n \"\"\"\n if len(self.generated_melody):\n if self.improv_status != 'robot':\n self.improv_status = 'robot'\n os.system('clear')\n print(ascii_arts.arts[self.improv_status])\n # To avoid stuck notes, send a note off for previous mapped note.\n prev_note = self.note_mapping[note]\n self.process_note_off(prev_note, velocity)\n self.note_mapping[note] = self.generated_melody[0]\n note = self.generated_melody[0]\n self.generated_melody = self.generated_melody[1:]\n else:\n if self.improv_status != 'human':\n self.improv_status = 'human'\n os.system('clear')\n print(ascii_arts.arts['psc'])\n self.accumulated_primer_melody.append(note)\n self._send_playnote(note, velocity)\n if len(self.accumulated_primer_melody) >= self.min_primer_length:\n magenta_thread = threading.Thread(target = self._generate_melody)\n magenta_thread.start()\n"
] | [
[
"tensorflow.logging.set_verbosity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
zhkmxx9302013/SoftwarePilot | [
"826098465b800085774946c20a7a283f369f1d21"
] | [
"externalModels/Cracks/CrackDetect/dataset.py"
] | [
"import numpy as np\r\nimport os\r\nimport pickle\r\n########################################################################\r\ndef cache(cache_path, fn, *args, **kwargs):\r\n \"\"\"\r\n Cache-wrapper for a function or class. If the cache-file exists\r\n then the data is reloaded and returned, otherwise the function\r\n is called and the result is saved to cache. The fn-argument can\r\n also be a class instead, in which case an object-instance is\r\n created and saved to the cache-file.\r\n :param cache_path:\r\n File-path for the cache-file.\r\n :param fn:\r\n Function or class to be called.\r\n :param args:\r\n Arguments to the function or class-init.\r\n :param kwargs:\r\n Keyword arguments to the function or class-init.\r\n :return:\r\n The result of calling the function or creating the object-instance.\r\n \"\"\"\r\n\r\n # If the cache-file exists.\r\n if os.path.exists(cache_path):\r\n # Load the cached data from the file.\r\n with open(cache_path, mode='rb') as file:\r\n obj = pickle.load(file)\r\n\r\n print(\"- Data loaded from cache-file: \" + cache_path)\r\n else:\r\n # The cache-file does not exist.\r\n\r\n # Call the function / class-init with the supplied arguments.\r\n obj = fn(*args, **kwargs)\r\n\r\n # Save the data to a cache-file.\r\n with open(cache_path, mode='wb') as file:\r\n pickle.dump(obj, file)\r\n\r\n print(\"- Data saved to cache-file: \" + cache_path)\r\n\r\n return obj\r\n########################################################################\r\n\r\ndef load_cached(cache_path, in_dir):\r\n \"\"\"\r\n Wrapper-function for creating a DataSet-object, which will be\r\n loaded from a cache-file if it already exists, otherwise a new\r\n object will be created and saved to the cache-file.\r\n This is useful if you need to ensure the ordering of the\r\n filenames is consistent every time you load the data-set,\r\n :param cache_path:\r\n File-path for the cache-file.\r\n :param in_dir:\r\n Root-dir for the files in the data-set.\r\n This is an argument for the DataSet-init function.\r\n :return:\r\n The DataSet-object.\r\n \"\"\"\r\n\r\n print(\"Creating dataset from the files in: \" + in_dir)\r\n\r\n dataset = cache(cache_path=cache_path,\r\n fn=DataSet,\r\n in_dir=in_dir)\r\n\r\n return dataset\r\n\r\n########################################################################\r\n\r\ndef one_hot_encoded(class_numbers, num_classes=None):\r\n \"\"\"\r\n Generate the One-Hot encoded class-labels from an array of integers.\r\n For example, if class_number=2 and num_classes=4 then\r\n the one-hot encoded label is the float array: [0. 0. 1. 0.]\r\n :param class_numbers:\r\n Array of integers with class-numbers.\r\n Assume the integers are from zero to num_classes-1 inclusive.\r\n :param num_classes:\r\n Number of classes. 
If None then use max(class_numbers)+1.\r\n :return:\r\n 2-dim array of shape: [len(class_numbers), num_classes]\r\n \"\"\"\r\n\r\n # Find the number of classes if None is provided.\r\n # Assumes the lowest class-number is zero.\r\n if num_classes is None:\r\n num_classes = np.max(class_numbers) + 1\r\n\r\n return np.eye(num_classes, dtype=float)[class_numbers]\r\n\r\n\r\n########################################################################\r\n\r\n\r\nclass DataSet:\r\n def __init__(self, in_dir, exts='.jpg'):\r\n \"\"\"\r\n This code automatically detects how many classes depending the directory structure.\r\n Please adhere to the following dir-structure:(if \"in_dir = Master/\") -\r\n Master/class1/ - Contains all the training images for class 1\r\n Master/class2/ - Contains all the training images for class 2\r\n Master/class3/ - Contains all the training images for class 3\r\n Master/class1/test/ - Contains all the validation images for class 1\r\n Master/class2/test/ - Contains all the validation images for class 2\r\n Master/class3/test/ - Contains all the validation images for class 3\r\n This means there are 3 classes called: class1, class2 and class3.\r\n The number of folders in \"Masters\" will correspond to the number of classes\r\n :param in_dir:\r\n Root-dir for the files in the data-set.\r\n This would be 'Master/' in the example above.\r\n :param exts:\r\n String or tuple of strings with valid filename-extensions.\r\n Not case-sensitive.\r\n :return:\r\n Object instance.\r\n \"\"\"\r\n\r\n # Extend the input directory to the full path.\r\n in_dir = os.path.abspath(in_dir)\r\n\r\n # Input directory.\r\n self.in_dir = in_dir\r\n\r\n # Convert all file-extensions to lower-case.\r\n self.exts = tuple(ext.lower() for ext in exts)\r\n\r\n # Names for the classes.\r\n self.class_names = []\r\n\r\n # Filenames for all the files in the training-set.\r\n self.filenames = []\r\n\r\n # Filenames for all the files in the test-set.\r\n self.filenames_test = []\r\n\r\n # Class-number for each file in the training-set.\r\n self.class_numbers = []\r\n\r\n # Class-number for each file in the test-set.\r\n self.class_numbers_test = []\r\n\r\n # Total number of classes in the data-set.\r\n self.num_classes = 0\r\n\r\n # For all files/dirs in the input directory.\r\n for name in os.listdir(in_dir):\r\n # Full path for the file / dir.\r\n current_dir = os.path.join(in_dir, name)\r\n\r\n # If it is a directory.\r\n if os.path.isdir(current_dir):\r\n # Add the dir-name to the list of class-names.\r\n self.class_names.append(name)\r\n\r\n # Training-set.\r\n\r\n # Get all the valid filenames in the dir (not sub-dirs).\r\n filenames = self._get_filenames(current_dir)\r\n\r\n # Append them to the list of all filenames for the training-set.\r\n self.filenames.extend(filenames)\r\n\r\n # The class-number for this class.\r\n class_number = self.num_classes\r\n\r\n # Create an array of class-numbers.\r\n class_numbers = [class_number] * len(filenames)\r\n\r\n # Append them to the list of all class-numbers for the training-set.\r\n self.class_numbers.extend(class_numbers)\r\n\r\n # Test-set\r\n\r\n # Get all the valid filenames in the sub-dir named 'test'.\r\n filenames_test = self._get_filenames(os.path.join(current_dir, 'test'))\r\n\r\n # Append them to the list of all filenames for the test-set.\r\n self.filenames_test.extend(filenames_test)\r\n\r\n # Create an array of class-numbers.\r\n class_numbers = [class_number] * len(filenames_test)\r\n\r\n # Append them to the list of all class-numbers for 
the test-set.\r\n self.class_numbers_test.extend(class_numbers)\r\n\r\n # Increase the total number of classes in the data-set.\r\n self.num_classes += 1\r\n print(\"Number of Classes : {}\".format(self.num_classes))\r\n\r\n def _get_filenames(self, dir):\r\n \"\"\"\r\n Create and return a list of filenames with matching extensions in the given directory.\r\n :param dir:\r\n Directory to scan for files. Sub-dirs are not scanned.\r\n :return:\r\n List of filenames. Only filenames. Does not include the directory.\r\n \"\"\"\r\n\r\n # Initialize empty list.\r\n filenames = []\r\n\r\n # If the directory exists.\r\n if os.path.exists(dir):\r\n # Get all the filenames with matching extensions.\r\n for filename in os.listdir(dir):\r\n if filename.lower().endswith(self.exts):\r\n filenames.append(filename)\r\n\r\n return filenames\r\n\r\n def get_paths(self, test=False):\r\n \"\"\"\r\n Get the full paths for the files in the data-set.\r\n :param test:\r\n Boolean. Return the paths for the test-set (True) or training-set (False).\r\n :return:\r\n Iterator with strings for the path-names.\r\n \"\"\"\r\n\r\n if test:\r\n # Use the filenames and class-numbers for the test-set.\r\n filenames = self.filenames_test\r\n class_numbers = self.class_numbers_test\r\n\r\n # Sub-dir for test-set.\r\n test_dir = \"test/\"\r\n else:\r\n # Use the filenames and class-numbers for the training-set.\r\n filenames = self.filenames\r\n class_numbers = self.class_numbers\r\n\r\n # Don't use a sub-dir for test-set.\r\n test_dir = \"\"\r\n\r\n for filename, cls in zip(filenames, class_numbers):\r\n # Full path-name for the file.\r\n path = os.path.join(self.in_dir, self.class_names[cls], test_dir, filename)\r\n\r\n yield path\r\n\r\n def get_training_set(self):\r\n \"\"\"\r\n Return the list of paths for the files in the training-set,\r\n and the list of class-numbers as integers,\r\n and the class-numbers as one-hot encoded arrays.\r\n \"\"\"\r\n\r\n return list(self.get_paths()), \\\r\n np.asarray(self.class_numbers), \\\r\n one_hot_encoded(class_numbers=self.class_numbers,\r\n num_classes=self.num_classes)\r\n\r\n def get_test_set(self):\r\n \"\"\"\r\n Return the list of paths for the files in the test-set,\r\n and the list of class-numbers as integers,\r\n and the class-numbers as one-hot encoded arrays.\r\n \"\"\"\r\n\r\n return list(self.get_paths(test=True)), \\\r\n np.asarray(self.class_numbers_test), \\\r\n one_hot_encoded(class_numbers=self.class_numbers_test,\r\n num_classes=self.num_classes)\r\n\r\n\r\n"
] | [
[
"numpy.asarray",
"numpy.max",
"numpy.eye"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
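The numpy calls recorded for this entry (numpy.asarray, numpy.max, numpy.eye) come together in the one_hot_encoded helper shown in the code cell above. A minimal, self-contained sketch of that same pattern, with a made-up label array used purely for illustration:

    import numpy as np

    def one_hot_encoded(class_numbers, num_classes=None):
        # Infer the class count when it is not given; assumes the lowest class is 0.
        if num_classes is None:
            num_classes = np.max(class_numbers) + 1
        # Row i of the identity matrix is the one-hot vector for class i.
        return np.eye(num_classes, dtype=float)[class_numbers]

    labels = np.asarray([0, 2, 1, 2])   # example class numbers, not from the dataset
    print(one_hot_encoded(labels))      # 4 x 3 one-hot matrix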
lcx1997213/sbp | [
"cfd8f4e1f971065344e8d03b764e18f36fb71c7f"
] | [
"SBP_ANN_RBM/gui.py"
] | [
"from Tkinter import *\nfrom model import Network\nimport numpy as np\nfrom PIL import Image, ImageTk\nimport sys\nfrom threading import Thread\nimport time\n\nclass GUI(Tk):\n\n def __init__(self,name):\n\n Tk.__init__(self, None)\n\n self.title('Equilibrium Propagation')\n self.net = Network(name=name,hyperparameters={\"batch_size\":1})\n self.hidden_sizes = self.net.hyperparameters[\"hidden_sizes\"]\n self.n_layers = len(self.hidden_sizes) + 2\n\n self.canvas = Canvas(self, width=600, height=(self.n_layers+1)*100)\n self.canvas.pack(side=BOTTOM)\n\n # INDEX OF TEST EXAMPLE (IN THE TEST SET)\n Label(self, text=\"image\").pack(side=LEFT)\n self.index = StringVar()\n self.index.set(\"0\")\n Entry(self, textvariable=self.index, width=5).pack(side=LEFT)\n\n self.update_canvas(first_time=True)\n\n Thread(target = self.run).start() \n\n def update_canvas(self, first_time = False):\n\n units = [(28,28)] +[(10,n/10) for n in self.hidden_sizes]+[(1,10)]\n pixels = [(140,140)]+ [(n/2,50) for n in self.hidden_sizes]+[(250,25)]\n\n arrays = [256*layer.eval().reshape(dimensions) for layer,dimensions in zip(self.net.layers,units)]\n images = [Image.fromarray(array).resize(dimensions) for array,dimensions in zip(arrays,pixels)]\n self.imgTks = [ImageTk.PhotoImage(image) for image in images]\n\n [energy, cost, _] = self.net.measure()\n\n if first_time:\n self.img_canvas = [self.canvas.create_image(400, (self.n_layers-k)*100, image=imgTk) for k,imgTk in enumerate(self.imgTks)]\n self.energy_canvas = self.canvas.create_text( 20, 100, anchor=W, font=\"Purisa\", text=\"Energy = %.1f\" % (energy))\n self.cost_canvas = self.canvas.create_text( 20, 200, anchor=W, font=\"Purisa\", text=\"Cost = %.4f\" % (cost))\n else:\n for img_canvas, imgTk in zip(self.img_canvas,self.imgTks):\n self.canvas.itemconfig(img_canvas, image=imgTk)\n self.canvas.itemconfig(self.energy_canvas, text=\"Energy = %.1f\" % (energy))\n self.canvas.itemconfig(self.cost_canvas, text=\"Cost = %.4f\" % (cost))\n\n def run(self):\n\n while True:\n\n index = self.index.get() # index of the test example in the test set\n if index.isdigit():\n index = int(index)\n index = (hash(index) % 10000) + 60000\n self.net.change_mini_batch_index(index)\n\n self.net.sbp_phase(n_iterations=1, epsilon=np.float32(.1))\n \n self.update_canvas()\n\n # FREQUENCY OF UPDATES (IN SECONDS)\n time.sleep(.1)\n\nif __name__ == \"__main__\":\n\n name = sys.argv[1]\n GUI(name).mainloop()"
] | [
[
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dzubke/speech-lite | [
"65f83ac2b7551650820f079ce5152741f2a6fdb8"
] | [
"convert_model/export.py"
] | [
"import torch\nimport argparse\nimport pickle\nimport matplotlib\n\ndef preproc_pickle():\n with open('/Users/dustin/CS/consulting/firstlayerai/phoneme_classification/src/awni_speech/speech/examples/librispeech/models/ctc_models/20200121/20200127/best_preproc.pyc', 'rb') as fid:\n preproc = pickle.load(fid)\n print(f\"self.mean, self.std: {preproc.mean}, {preproc.std}\")\n preproc_dict = {'mean':preproc.mean, \n 'std': preproc.std, \n \"_input_dim\": preproc._input_dim, \n \"start_and_end\": preproc.start_and_end, \n \"int_to_char\": preproc.int_to_char,\n \"char_to_int\": preproc.char_to_int\n }\n\n\n with open('./20200121-0127_preproc_dict_pickle', 'wb') as fid:\n pickle.dump(preproc_dict, fid)\n\n\n with open('./20200121-0127_preproc_dict_pickle', 'rb') as fid:\n preproc = pickle.load(fid) \n print(preproc)\n\n\ndef export_state_dict(model_in_path, params_out_path):\n model = torch.load(model_in_path, map_location=torch.device('cpu'))\n torch.save(model.state_dict(), params_out_path)\n\ndef main(model_path, params_path):\n \n export_state_dict(model_path, params_path)\n preproc_pickle()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"model_path\")\n parser.add_argument(\"params_path\")\n args = parser.parse_args()\n\n main(args.model_path, args.params_path)\n\n"
] | [
[
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
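The torch.device usage recorded here is the common CPU-export pattern: load a pickled model with map_location so no GPU is needed, then save only its state_dict. A hedged usage sketch with placeholder file names (model.pth and params.pth are not paths from the repository):

    import torch

    def export_state_dict(model_in_path, params_out_path):
        # map_location forces tensors onto the CPU even if they were saved from a GPU.
        model = torch.load(model_in_path, map_location=torch.device('cpu'))
        # Persist only the parameters, not the whole pickled module object.
        torch.save(model.state_dict(), params_out_path)

    # export_state_dict("model.pth", "params.pth")   # placeholder paths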
mmaguero/classic-text-classification | [
"ddc1c9dbc9b8fb6e255572e7aeac6d94e022c548"
] | [
"training.py"
] | [
"import sys\nimport pandas as pd\npd.options.display.max_columns = 30\nimport numpy as np\nfrom time import time\n#\nimport warnings \nwarnings.filterwarnings('ignore')\n#\nimport nltk\nfrom nltk.corpus import stopwords\nstop_words = set(stopwords.words('english'))\n#\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer#, HashingVectorizer\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report, ConfusionMatrixDisplay, plot_confusion_matrix\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.utils import parallel_backend\n#\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC#, SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import Pipeline\nfrom xgboost import XGBClassifier\n#\nfrom joblib import dump, load\n# custom\nfrom preprocessing import load_data, split_data, preprocessing, clean_text\n\n\n# prepare models pipeline\ndef benchmark(frame, x, y, models, test_size=0.20, train_model=True, model_target='all', report=True):\n \n # 1.\n X_train, Y_train, X_test, Y_test, X_evalua, Y_evalua = split_data(frame, x, y, test_size)\n \n # 2.\n pipeline = {}\n # iter\n for name, model in models.items():\n # specific model train/test\n if model_target not in [name,'all']:\n continue\n \n # Define a pipeline combining a text feature extractor with classifier\n pipeline[name] = Pipeline([\n ('vect', CountVectorizer(stop_words=stop_words,preprocessor=clean_text)),\n ('tfidf', TfidfVectorizer(preprocessor=str)),\n ('clf', model),\n ], verbose=1)\n \n print('... Processing')\n # train the model \n with parallel_backend('threading'):\n if train_model:\n print('Init train {}'.format(name))\n pipeline[name].fit(X_train, Y_train)\n print('End train {}'.format(name))\n \n # save or load model\n if train_model:\n dump(pipeline[name], 'models/{}_{}.joblib'.format(y,name), compress=7 if name=='RFC' else 0) # compress 1 low 9 high, RFC is too big\n else:\n pipeline[name] = load('models/{}_{}.joblib'.format(y,name)) \n print('Save/load model {}_{}'.format(y,name))\n \n # test the model \n with parallel_backend('threading'):\n if not report: # print metrics...?\n continue\n pred = pipeline[name].predict(X_test)\n score = accuracy_score(Y_test, pred)\n print(\"accuracy_test: %0.3f\" % score)\n eval_ = pipeline[name].predict(X_evalua)\n score = accuracy_score(Y_evalua, eval_)\n print(\"accuracy_eval: %0.3f\" % score)\n \n # \n print(\"classification report:\")\n print('TEST\\n',classification_report(Y_test, pred))\n print('EVAL\\n',classification_report(Y_evalua, eval_))\n \n print(\"confusion matrix:\")\n cm = confusion_matrix(Y_test, pred)\n print('TEST\\n',cm)\n ConfusionMatrixDisplay(cm).plot()\n cm = confusion_matrix(Y_evalua, eval_)\n print('EVAL\\n',cm)\n ConfusionMatrixDisplay(cm).plot()\n \n return pipeline\n\n# train models\ndef train_codes(frame, x, y, models, test_size=0.20, train_model=True, model_target='all',report=True):\n \n # preprocessing\n df_code = (preprocessing(frame,x,y))\n \n # train benchmark\n benchmark(df_code,x,y,models, test_size=0.20, train_model=True, model_target='all',report=True)\n \n return\n \n# call from main \ndef run_train(path, x, y, test_size=0.20, train_model=True, model_target='all',report=True):\n df = load_data(path=path, extension=\"\",output=\"data/corpus.csv\")\n\n # define models\n models = {\"MNB\": MultinomialNB(fit_prior=True, class_prior=None),\n 
\"SVC\":LinearSVC(),\n \"LogReg\":LogisticRegression(solver='sag',n_jobs=-1),\n \"XGB\":XGBClassifier(n_jobs=-1,eval_metric='merror'),\n \"RFC\":RandomForestClassifier(n_jobs=-1),\n \"KNN\":KNeighborsClassifier(n_neighbors=10,n_jobs=-1)} # slow, use 10 neighbors\n\n # target\n train_codes(df, x, y, models, test_size=0.20, train_model=True, model_target='all',report=True)\n \n"
] | [
[
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.ConfusionMatrixDisplay",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.metrics.confusion_matrix",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.utils.parallel_backend",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.svm.LinearSVC",
"sklearn.metrics.classification_report",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
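The sklearn APIs listed for this entry form a vectorizer-plus-classifier Pipeline. The sketch below is a simplified stand-in, not the exact pipeline from the source: it uses a single TfidfVectorizer instead of the source's CountVectorizer/TfidfVectorizer chain, and the tiny corpus and labels are invented:

    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.naive_bayes import MultinomialNB
    from sklearn.pipeline import Pipeline

    texts = ["good product", "bad service", "great support", "terrible quality"]
    labels = [1, 0, 1, 0]

    clf = Pipeline([
        ("tfidf", TfidfVectorizer()),            # tokenize and tf-idf weight in one step
        ("clf", MultinomialNB(fit_prior=True)),  # same classifier family as the source
    ])
    clf.fit(texts, labels)
    print(clf.predict(["good support"]))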
key06/AShareData | [
"2112fe1cbd2d5f963f31f8c4b5e40e368ac063e0"
] | [
"AShareData/data_source/TDXData.py"
] | [
"import datetime as dt\nfrom collections import OrderedDict\n\nimport pandas as pd\nfrom pytdx.hq import TdxHq_API\nfrom tqdm import tqdm\n\nfrom .DataSource import DataSource\nfrom .. import utils\nfrom ..config import get_global_config\nfrom ..DBInterface import DBInterface\nfrom ..Tickers import StockTickers\n\n\nclass TDXData(DataSource):\n def __init__(self, db_interface: DBInterface = None, host: str = None, port: int = None):\n super().__init__(db_interface)\n if host is None:\n conf = get_global_config()\n host = conf['tdx_server']['host']\n port = conf['tdx_server']['port']\n self.api = TdxHq_API()\n self.host = host\n self.port = port\n self._factor_param = utils.load_param('tdx_param.json')\n self.stock_ticker = StockTickers(db_interface)\n\n def connect(self):\n self.api.connect(self.host, self.port)\n\n def __enter__(self):\n self.connect()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.api.disconnect()\n\n def update_stock_minute(self):\n \"\"\"更新股票分钟行情\"\"\"\n table_name = '股票分钟行情'\n db_timestamp = self.db_interface.get_latest_timestamp(table_name, dt.datetime(2015, 1, 1))\n start_date = self.calendar.offset(db_timestamp.date(), 1)\n end_date = dt.datetime.today()\n dates = self.calendar.select_dates(start_date, end_date)\n for date in dates:\n self.get_stock_minute(date)\n\n def get_stock_minute(self, date: dt.datetime) -> None:\n \"\"\"获取 ``date`` 的股票分钟行情\"\"\"\n minute_data = self._get_stock_minute(date)\n auction_time = date + dt.timedelta(hours=9, minutes=25)\n auction_db_data = self.db_interface.read_table('股票集合竞价数据', columns=['成交价', '成交量', '成交额'], dates=auction_time)\n df = self.left_shift_minute_data(minute_data=minute_data, auction_db_data=auction_db_data)\n\n self.db_interface.insert_df(df, '股票分钟行情')\n\n def _get_stock_minute(self, date: dt.datetime) -> pd.DataFrame:\n num_days = self.calendar.days_count(date, dt.date.today())\n start_index = num_days * 60 * 4\n tickers = self.stock_ticker.ticker(date)\n\n storage = []\n with tqdm(tickers) as pbar:\n for ticker in tickers:\n pbar.set_description(f'下载 {ticker} 在 {date} 的分钟数据')\n code, market = self._split_ticker(ticker)\n data = self.api.get_security_bars(category=8, market=market, code=code, start=start_index, count=240)\n data = self._formatting_data(data, ticker)\n storage.append(data)\n pbar.update()\n\n df = pd.concat(storage)\n return df\n\n def _formatting_data(self, info: OrderedDict, ticker: str) -> pd.DataFrame:\n df = pd.DataFrame(info)\n df['datetime'] = df['datetime'].apply(self.str2datetime)\n df = df.drop(['year', 'month', 'day', 'hour', 'minute'], axis=1).rename(self._factor_param['行情数据'], axis=1)\n df['ID'] = ticker\n\n df = df.set_index(['DateTime', 'ID'], drop=True)\n return df\n\n @staticmethod\n def _split_ticker(ticker: str) -> [str, int]:\n code, market_str = ticker.split('.')\n market = 0 if market_str == 'SZ' else 1\n return code, market\n\n @staticmethod\n def str2datetime(date: str) -> dt.datetime:\n return dt.datetime.strptime(date, '%Y-%m-%d %H:%M')\n"
] | [
[
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
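The pandas usage recorded here (pandas.DataFrame, pandas.concat) is the per-ticker assemble-then-concatenate pattern from _get_stock_minute and _formatting_data. A standalone sketch with fabricated tickers and prices:

    import pandas as pd

    frames = []
    for ticker in ["000001.SZ", "600000.SH"]:   # placeholder tickers
        df = pd.DataFrame({
            "DateTime": pd.date_range("2021-01-04 09:30", periods=3, freq="min"),
            "close": [10.0, 10.1, 10.05],
        })
        df["ID"] = ticker
        frames.append(df.set_index(["DateTime", "ID"]))

    minute_panel = pd.concat(frames)            # one frame indexed by (DateTime, ID)
    print(minute_panel)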
dirkmcpherson/gym-novel-gridworlds | [
"5cd04fdf65e20cee51a9a6ed8eead662aad39259"
] | [
"tests/keyboard_interface.py"
] | [
"import os\nimport time\n\nimport gym\nimport gym_novel_gridworlds\nfrom gym_novel_gridworlds.constant import env_key\nfrom gym_novel_gridworlds.wrappers import SaveTrajectories, LimitActions\nfrom gym_novel_gridworlds.observation_wrappers import LidarInFront, AgentMap\nfrom gym_novel_gridworlds.novelty_wrappers import *\n\nimport keyboard\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams['keymap.quit'].pop(plt.rcParams['keymap.quit'].index('q'))\n\n\ndef assign_keys(env_):\n\n if hasattr(env_, 'limited_actions_id'):\n actions_id = env_.limited_actions_id\n else:\n if hasattr(env_, 'actions_id'):\n actions_id = env_.actions_id\n else:\n key_action_id_dict = env_key[env_id]\n return key_action_id_dict\n\n actions_key = {'Forward': 'w', 'Left': 'a', 'Right': 'd', 'Break': 'e', 'Chop': 'q', 'Jump': 'space',\n 'Place_tree_tap': 'z', 'Extract_rubber': 'x', 'Extract_string': 'x'}\n\n if env_.env_id in ['NovelGridworld-v6', 'NovelGridworld-Bow-v0', 'NovelGridworld-Bow-v1', 'NovelGridworld-Pogostick-v0', 'NovelGridworld-Pogostick-v1']:\n key_action_id_dict = {}\n for action in actions_key:\n if action in actions_id:\n key_action_id_dict[actions_key[action]] = actions_id[action]\n\n action_count = 1\n for action in sorted(actions_id):\n if action.startswith('Craft'):\n key_action_id_dict[str(action_count)] = actions_id[action]\n action_count += 1\n\n alpha_keys = 'abcdefghijklmnopqrstuvwxyz'\n alpha_keys_idx = 0\n for action in sorted(env_.select_actions_id):\n if action not in actions_id:\n continue\n while True:\n if alpha_keys_idx < len(alpha_keys):\n if alpha_keys[alpha_keys_idx] not in key_action_id_dict:\n key_action_id_dict[alpha_keys[alpha_keys_idx]] = actions_id[action]\n alpha_keys_idx += 1\n break\n else:\n alpha_keys_idx += 1\n else:\n print(\"No keys left to assign\")\n break\n else:\n key_action_id_dict = env_key[env_id]\n\n return key_action_id_dict\n\ndef print_play_keys(env_, key_action_dict):\n\n print(\"Press a key to play: \")\n if hasattr(env_, 'limited_actions_id'):\n actions_id = env_.limited_actions_id\n else:\n if hasattr(env_, 'actions_id'):\n actions_id = env_.actions_id\n else:\n for key, key_id in key_action_dict.items():\n print(key, \": \", env_.action_str[key_id])\n return\n\n for key, action_id in key_action_dict.items():\n print(key, \": \", list(actions_id.keys())[list(actions_id.values()).index(action_id)])\n\ndef get_action_id_from_keyboard(key_action_dict):\n while True:\n key_pressed = keyboard.read_key()\n # return index of action if valid key is pressed\n if key_pressed:\n if key_pressed in key_action_dict:\n return key_action_dict[key_pressed]\n elif key_pressed == \"esc\":\n print(\"You pressed esc, exiting!!\")\n break\n else:\n print(\"You pressed wrong key. 
Press Esc key to exit.\")\n\ndef fix_item_location(item, location):\n result = np.where(env.map == env.items_id[item])\n if len(result) > 0:\n r, c = result[0][0], result[1][0]\n env.map[r][c] = 0\n env.map[location[0]][location[1]] = env.items_id[item]\n else:\n env.map[location[0]][location[1]] = env.items_id[item]\n\n\nif __name__ == \"__main__\":\n env_id = 'NovelGridworld-Pogostick-v1' # NovelGridworld-v6, NovelGridworld-Bow-v0, NovelGridworld-Pogostick-v0\n env = gym.make(env_id)\n\n # env.map_size = 12 # np.random.randint(low=10, high=20, size=1)[0]\n # env.reward_done = 1000\n # env.reward_intermediate = 50\n\n # wrappers\n # env = SaveTrajectories(env, save_path=\"saved_trajectories\")\n # env = LimitActions(env, {'Forward', 'Left', 'Right', 'Break', 'Craft_bow', 'Craft_iron_axe', 'Select_iron_axe'})\n\n # observation_wrappers\n # env = LidarInFront(env, num_beams=8)\n # env = AgentMap(env)\n\n # novelty_wrappers\n # novelty_name:\n # addchop, additem, addjump, axe, axetobreak, breakincrease, crate, extractincdec, fence, fencerestriction, firewall\n # remapaction, replaceitem\n novelty_name = ''\n # novelty_arg1:\n # additem - any item name (e.g. arrow, spring) | axe & axetobreak - iron, wooden |\n # breakincrease - optional: any existing item (e.g. tree_log) | extractincdec - increase or decrease |\n # fence & fencerestriction - oak, jungle | replaceitem - any existing item (e.g. wall) |\n novelty_arg1 = ''\n # novelty_arg2:\n # axe - optional: true, false (default) | replaceitem - any item name (e.g. brick)\n novelty_arg2 = ''\n # difficulty\n # Only used for: additem, axe, axetobreak, crate, fence, fencerestriction, firewall, remapaction, replaceitem\n difficulty = 'medium' # easy, medium, hard\n\n if novelty_name:\n env = inject_novelty(env, novelty_name, difficulty, novelty_arg1, novelty_arg2)\n\n # env = BlockItem(env)\n # env = ReplaceItem(env, 'easy', 'wall', 'brick')\n\n KEY_ACTION_DICT = assign_keys(env)\n print(\"KEY_ACTION_DICT: \", KEY_ACTION_DICT)\n print(\"action_space:\", env.action_space)\n if hasattr(env, 'actions_id'):\n print(\"actions_id:\", len(env.actions_id), env.actions_id)\n else:\n print(\"actions_id:\", len(env.action_str), env.action_str)\n print(\"observation_space:\", env.observation_space)\n print(\"items_id: \", len(env.items_id), env.items_id)\n print(\"inventory_items_quantity: \", len(env.inventory_items_quantity), env.inventory_items_quantity)\n\n # fix_item_location('crafting_table', (3, 2))\n\n obs = env.reset()\n env.render()\n for i in range(1000):\n print_play_keys(env, KEY_ACTION_DICT)\n action_id = get_action_id_from_keyboard(KEY_ACTION_DICT) # take action from keyboard\n observation, reward, done, info = env.step(action_id)\n\n if hasattr(env, 'limited_actions_id'):\n print(\"action: \", action_id, list(env.limited_actions_id.keys())[list(env.limited_actions_id.values()).index(action_id)])\n else:\n if hasattr(env, 'actions_id'):\n print(\"action: \", action_id, list(env.actions_id.keys())[list(env.actions_id.values()).index(action_id)])\n else:\n print(\"action: \", action_id, env.action_str[action_id])\n print(\"Step: \" + str(i) + \", reward: \", reward)\n print(\"observation: \", len(observation), observation)\n\n print(\"inventory_items_quantity: \", len(env.inventory_items_quantity), env.inventory_items_quantity)\n\n try:\n print(\"step_cost, message: \", info['step_cost'], info['message'])\n print(\"selected_item: \", env.selected_item)\n except:\n pass\n\n time.sleep(0.2)\n print(\"\")\n\n if i == 2:\n # 
env.add_new_items({'rock': 3, 'axe': 1})\n # env.block_item(item_to_block='crafting_table', item_to_block_from='tree_log')\n pass\n\n env.render()\n if done:\n print(\"Finished after \" + str(i) + \" timesteps\\n\")\n time.sleep(2)\n obs = env.reset()\n env.render()\n\n # env.save()\n env.close()\n"
] | [
[
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
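numpy.where appears in fix_item_location above to find an item id in the 2-D world map and clear it before re-placing. A toy reproduction of that lookup, with a made-up grid and item id:

    import numpy as np

    grid = np.zeros((5, 5), dtype=int)
    grid[3, 2] = 7                      # pretend item id 7 occupies this cell
    rows, cols = np.where(grid == 7)    # all (row, col) positions holding that id
    if len(rows) > 0:
        grid[rows[0], cols[0]] = 0      # clear the old location
    grid[1, 1] = 7                      # place the item at the new location
    print(np.where(grid == 7))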
hjorvardr/RL_Library | [
"26eb576f7a8e33bf9c21ced5b8a5ce08dfc9abd9"
] | [
"CartPole/dqn_cart_pole.py"
] | [
"import os\nimport time\nimport gym\nimport keras.optimizers \nfrom keras import backend as K\nfrom keras.layers import Dense\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom dqn_lib import DQNAgent\n\nos.environ['PYTHONHASHSEED'] = '0'\nseed = 73\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n\n\ndef accuracy(results):\n \"\"\"\n Evaluate the accuracy of results, considering victories and defeats.\n\n Args:\n results: List of 2 elements representing the number of victories and defeats\n\n Returns:\n results accuracy\n \"\"\"\n return results[1] / (results[0] + results[1]) * 100\n\n\ndef experiment(n_episodes, default_policy=False, policy=None, render = False):\n \"\"\"\n Run a RL experiment that can be either training or testing\n\n Args:\n n_episodes: number of train/test episodes\n default_policy: boolean to enable testing/training phase\n policy: numpy tensor with a trained policy\n render: enable OpenAI environment graphical rendering\n agent_config: DQNAgent object\n\n Returns:\n Dictionary with:\n cumulative experiments outcomes\n list of steps per episode\n list of cumulative rewards\n trained policy\n \"\"\"\n res = [0, 0] # array of results accumulator: {[0]: Loss, [1]: Victory}\n scores = [] # Cumulative rewards\n steps = [] # steps per episode\n \n env = gym.make('CartPole-v0')\n env = env.unwrapped\n env.seed(seed)\n\n input_dim = env.observation_space.shape[0]\n output_dim = env.action_space.n\n \n if default_policy:\n agent = DQNAgent(output_dim, None, use_ddqn=True, default_policy=True, model_filename=policy,\n epsilon=0, epsilon_lower_bound=0, learn_thresh=0, tb_dir=None)\n else:\n layer1 = Dense(10, input_dim=input_dim, activation='relu')\n layer2 = Dense(output_dim)\n agent = DQNAgent(output_dim, [layer1, layer2], use_ddqn=True, learn_thresh=2000, update_rate=100,\n epsilon_decay_function=lambda e: e - 0.001, epsilon_lower_bound=0.1,\n optimizer=keras.optimizers.RMSprop(0.001), memory_size=2000, tb_dir=None)\n\n for _ in tqdm(range(n_episodes), desc=\"Episode\"):\n state = env.reset()\n cumulative_reward = 0\n\n state = np.reshape(state, [1, 4])\n \n t = 0\n while True:\n if (render):\n env.render()\n time.sleep(0.1)\n\n next_action = agent.act(state)\n new_state, reward, end, _ = env.step(next_action)\n\n x, x_dot, theta, theta_dot = new_state\n new_state = np.reshape(new_state, [1, 4])\n \n # Reward shaping\n r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8\n r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5\n r3 = -abs(theta_dot)\n reward = r1 + r2 + r3\n \n agent.memoise((state, next_action, reward, new_state, end))\n\n if end or t > 199:\n if t < 195:\n res[0] += 1\n else:\n res[1] += 1\n # print(\"ENTRATO!,\", t, \"steps\",\"reward: \",cumulative_reward)\n\n steps.append(t)\n break\n else:\n state = new_state\n cumulative_reward += reward\n\n agent.learn()\n t += 1\n\n cumulative_reward += reward\n scores.append(cumulative_reward)\n env.close()\n return {\"results\": np.array(res), \"steps\": np.array(steps), \"scores\": np.array(scores), \"agent\": agent }\n\n\n# Training\ntrain_res = experiment(500)\ntrain_res[\"agent\"].save_model(\"ddqn\")\ntraining_mean_steps = train_res[\"steps\"].mean()\ntraining_mean_score = train_res[\"scores\"].mean()\n\n# np.savetxt(\"results/training/ddqn.csv\", train_res[\"steps\"], delimiter=',')\n\n# Testing\ntest_res = experiment(500, default_policy=True, policy=\"ddqn\")\ntesting_accuracy = accuracy(test_res[\"results\"])\ntesting_mean_steps = 
test_res[\"steps\"].mean()\ntesting_mean_score = test_res[\"scores\"].mean()\n\n# np.savetxt(\"results/testing/ddqn.csv\", test_res[\"steps\"], delimiter=',')\n\nprint(\"Training episodes:\", len(train_res[\"steps\"]), \"Training mean score:\", training_mean_score, \\\n\"Training mean steps\", training_mean_steps, \"\\nAccuracy:\", testing_accuracy, \"Test mean score:\", testing_mean_score, \"Test mean steps:\", testing_mean_steps)\n\n# Rendering\n# experiment(1, render=True, default_policy=True, policy=\"model_cp\")\n"
] | [
[
"tensorflow.set_random_seed",
"numpy.array",
"numpy.reshape",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
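Two small details in this entry are worth noting: tensorflow.set_random_seed is the TensorFlow 1.x seeding API (the listed versions are all 1.x; TF 2.x renamed it tf.random.set_seed), and numpy.reshape adds the batch dimension the DQN expects. A seeding-and-reshape sketch with a fabricated CartPole observation:

    import numpy as np

    seed = 73
    np.random.seed(seed)                          # numpy side of the seeding in the source
    # tf.set_random_seed(seed)                    # TF 1.x call; tf.random.set_seed on TF 2.x

    state = np.array([0.01, -0.02, 0.03, 0.04])   # fake 4-dimensional CartPole state
    state = np.reshape(state, [1, 4])             # shape (1, 4): batch of one for the network
    print(state.shape)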
fastboardAI/linguisticFeatureExtractor | [
"6cb2b6e7133e0f42acde1ce6e0344ffcbb578a7a"
] | [
"build/lib/fling/tfidfModule.py"
] | [
"from imp import reload\nfrom nltk.corpus import stopwords\nfrom collections import Counter\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport nltk,re,pprint\nimport sys,glob,os\nimport operator, string, argparse, math\n\n# class to read and preprocess data\nclass dataProcessor:\n def __init__(self, fname, keep_factors = ['Job Description', 'Company Name', 'Industry'], group_column = 'Industry'):\n self.dataInitial = pd.read_csv(fname, encoding=\"latin\")\n self.dataInitialSmall = self.dataInitial[['Job Description', 'Company Name', 'Industry']]\n self.swords = set(stopwords.words('english'))\n #print(len(self.swords),\"stopwords present!\")\n self.dataInitialGrouped = self.dataInitialSmall.groupby([group_column]).count()\n pd.set_option('display.max_rows', 50)\n print(self.dataInitialGrouped.sort_values(by=['Job Description'], ascending=False))\n\n # pipeline for purifying the text, write-pipeline, so just output filename can be provided\n def rem_stop_punct(self,originalText, ofilename):\n splittedText = originalText.split()\n lenl = len(splittedText)\n print(\"Length is: \",lenl, splittedText[:5])\n ofile = open(ofilename,'a')\n \n for r in range(lenl):\n linex = splittedText[r]\n linex2 = \"\".join(c for c in linex if c not in ('!','.',':',',','?',';','``','&','-','\"','(',')','[',']','0','1','2','3','4','5','6','7','8','9'))\n linex3 = linex2.split()\n #prog=(r+1)/len(rawlines)\n for s in range(len(linex3)):\n noword = linex3[s].lower()\n if noword not in self.swords:\n ofile.write(noword)\n ofile.write(\" \")\n\n# primary tf-idf class\nclass flingTFIDF:\n def __init__(self,data,cname):\n self.idfMatrix = {}\n self.distanceMatrix = {}\n self.termsforIDF = []\n self.cname = cname\n self.data = data\n self.lenv = len(self.data)\n self.swords = set(stopwords.words('english'))\n\n def drawProgressBar(self,percent, barLen = 50):\t\t\t#just a progress bar so that you dont lose patience\n sys.stdout.write(\"\\r\")\n progress = \"\"\n for i in range(barLen):\n if i<int(barLen * percent):\n progress += \"=\"\n else:\n progress += \" \"\n sys.stdout.write(\"[ %s ] %.2f%%\" % (progress, percent * 100))\n sys.stdout.flush()\n\n def rem_stop_punct(self,originalText):\n splittedText = originalText.split()\n lenl = len(splittedText)\n wordFiltered = []\n tSent = []\n for r in range(lenl):\n wordx_1 = splittedText[r]\n wordx_2 = \"\".join(c for c in wordx_1 if c not in ('!','.',':',',','?',';','``','&','-','\"','(',')','[',']','0','1','2','3','4','5','6','7','8','9')) \n sWord = wordx_2.lower()\n if sWord not in self.swords:\n tSent.append(sWord)\n return \" \".join(tSent)\n \n def smartTokenizeColumn(self):\n self.stopsRemoved = []\n for index, row in self.data.iterrows():\n prog=(index+1)/self.lenv\n originText = row[self.cname]\n sentx = self.rem_stop_punct(originText)\n self.drawProgressBar(prog)\n self.data.loc[index,'stopsRemoved'] = sentx\n self.cname = 'stopsRemoved'\n \n def getTF(self):\n print(\"\\nAdding term frequency column based on\",self.cname)\n tfMatrixList = []\n for index, row in self.data.iterrows():\n words_in_column = row[self.cname].split()\n if len(words_in_column)!=0:\n counts_all = Counter(words_in_column)\n words, count_values = zip(*counts_all.items())\n values_sorted, words_sorted = zip(*sorted(zip(count_values, words), key=operator.itemgetter(0), reverse=True))\n tfMatrixList.append(pd.DataFrame({'word': words_sorted, 'tf': values_sorted}))\n #self.data.loc[index,'tfMatrix'] = countdf\n else:\n #self.data.loc[index,'tfMatrix'] = 
pd.DataFrame(columns = ['word','tf'])\n tfMatrixList.append(pd.DataFrame(columns = ['word','tf']))\n prog=(index+1)/self.lenv\n self.drawProgressBar(prog)\n self.data['tfMatrix'] = tfMatrixList\n \n def getTFIDF(self):\n print(\"\\nComputing and adding TF-IDF column based on\",self.cname)\n for index, row in self.data.iterrows():\n tfmatrixThisrow = row['tfMatrix']\n tempTFIDF = []\n for indx, rwx in tfmatrixThisrow.iterrows():\n trmx = rwx['word']\n tfx = rwx['tf']\n idfx = self.idfMatrix[trmx]\n tfidfx = tfx*idfx\n tempTFIDF.append(tfidfx)\n #tfmatrixThisrow.loc[index,'tf-idf'] = tfidfx\n tfmatrixThisrow['tf-idf'] = tempTFIDF\n #sumtfidf = tfmatrixThisrow['tf-idf'].sum() \n prog=(index+1)/self.lenv\n self.drawProgressBar(prog)\n \n def computeIDFlistofterms(self):\n totalwords = 0\n print(\"\\nComputing list of words for IDF...\\n\")\n for index, row in self.data.iterrows():\n words_in_column = set(row[self.cname].split()) \n for word in words_in_column:\n if word not in self.termsforIDF:\n self.termsforIDF.append(word)\n totalwords+=1\n print(\"Created list of terms for IDF matrix with\", totalwords,\" terms.\") \n \n def getIdf(self,term):\n countPresentDocs = 0\n lenidf = len(self.termsforIDF)\n for i in range(lenidf):\n tfx = self.getTermFreq(i,term)\n if tfx>0:\n countPresentDocs+=1\n prog=(i+1)/lenidf\n self.drawProgressBar(prog)\n return countPresentDocs\n \n def computeIDFmatrix(self):\n self.computeIDFlistofterms()\n print(\"\\nComputing global IDF matrix...\\n\")\n for term in self.termsforIDF:\n self.idfMatrix[term]=0\n for index, row in self.data.iterrows():\n listofterms = list(self.data['tfMatrix'][index]['word'])\n for term in listofterms:\n self.idfMatrix[term]=self.idfMatrix[term]+1\n prog=(index+1)/self.lenv\n self.drawProgressBar(prog)\n for term in self.termsforIDF:\n idfx = self.idfMatrix[term] \n idfy = self.lenv/float(1+idfx)\n idfz = math.log(idfy,10)\n self.idfMatrix[term] = idfz\n \n def showData(self):\n print(self.data['tfMatrix'])\n \n def createDistanceMetadata(self):\n #sumList = []\n for index, row in self.data.iterrows():\n tfmatrixThisrow = row['tfMatrix']\n sumTFIDF = tfmatrixThisrow['tf-idf'].sum()\n #sumList.append({'sumTFIDF':sumTFIDF})\n self.data.loc[index,'sumTFIDF'] = sumTFIDF\n prog=(index+1)/self.lenv\n self.drawProgressBar(prog)\n \n def distanceBtnTwoDocs(self, docId_1, docId_2):\n listWords_1 = set(list(self.data['tfMatrix'][docId_1]['word']))\n listWords_2 = set(list(self.data['tfMatrix'][docId_2]['word']))\n common = listWords_1.intersection(listWords_2)\n diff1_2 = listWords_1.difference(listWords_2)\n diff2_1 = listWords_2.difference(listWords_1)\n sumwt1 = self.data['sumTFIDF'][docId_1]\n sumwt2 = self.data['sumTFIDF'][docId_2]\n score_common, score_doc1, score_doc2 = 0,0,0\n for word_c in common:\n score_1 = float(self.data['tfMatrix'][docId_1].loc[self.data['tfMatrix'][docId_1]['word'] == word_c]['tf-idf'])\n score_2 = float(self.data['tfMatrix'][docId_2].loc[self.data['tfMatrix'][docId_2]['word'] == word_c]['tf-idf'])\n score_common += abs(score_1/float(sumwt1) - score_2/float(sumwt2))\n for word_d12 in diff1_2:\n score_1 = float(self.data['tfMatrix'][docId_1].loc[self.data['tfMatrix'][docId_1]['word'] == word_d12]['tf-idf'])\n score_doc1 += score_1/float(sumwt1)\n for word_d21 in diff2_1:\n score_2 = float(self.data['tfMatrix'][docId_2].loc[self.data['tfMatrix'][docId_2]['word'] == word_d21]['tf-idf'])\n score_doc2 += score_2/float(sumwt2)\n score_total = score_common + score_doc1 + score_doc2\n return(score_total)\n \n def 
computeDistanceBtnAllDocs(self):\n for j in range(100):\n for k in range(10):\n numx = j*10+k\n dist = self.distanceBtnTwoDocs(j,k)\n self.distanceMatrix[(j,k)] = dist\n prog=(numx+1)/1000\n self.drawProgressBar(prog)\n \n print(self.distanceMatrix[:10])\n \n def writeToFile(self,fname):\n self.data.to_csv(fname)"
] | [
[
"pandas.set_option",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
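The getTF method in the entry above builds a per-document term-frequency table by feeding a Counter into pandas.DataFrame. An isolated sketch of that step, using an invented sentence:

    from collections import Counter
    import pandas as pd

    words = "data science jobs need data skills".split()
    counts = Counter(words)
    # Sort (word, count) pairs by count, highest first, as getTF does.
    words_sorted, tf_sorted = zip(*sorted(counts.items(), key=lambda kv: kv[1], reverse=True))
    tf_df = pd.DataFrame({"word": words_sorted, "tf": tf_sorted})
    print(tf_df)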
radiasoft/rsopt | [
"6d4d123dd61e30c7f562b2f5a28c3ccbbcddbde3"
] | [
"rsopt/libe_tools/generator_functions/sobal.py"
] | [
"import numpy as np\n\n\ndef uniform_random_sample(H, persis_info, gen_specs, _):\n \"\"\"\n Generates ``gen_specs['user']['gen_batch_size']`` points uniformly over the domain\n defined by ``gen_specs['user']['ub']`` and ``gen_specs['user']['lb']``.\n\n .. seealso::\n `test_uniform_sampling.py <https://github.com/Libensemble/libensemble/blob/develop/libensemble/tests/regression_tests/test_uniform_sampling.py>`_ # noqa\n \"\"\"\n ub = gen_specs['user']['ub']\n lb = gen_specs['user']['lb']\n\n n = len(lb)\n b = gen_specs['user']['gen_batch_size']\n\n H_o = np.zeros(b, dtype=gen_specs['out'])\n\n H_o['x'] = persis_info['rand_stream'].uniform(lb, ub, (b, n))\n\n return H_o, persis_info"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
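numpy.zeros is used above to allocate libEnsemble's structured output array before filling its 'x' field with uniform samples. A standalone sketch with the gen_specs plumbing replaced by plain values (bounds and batch size are made up):

    import numpy as np

    lb = np.array([0.0, -1.0])                       # lower bounds, placeholder values
    ub = np.array([1.0, 1.0])                        # upper bounds, placeholder values
    b, n = 4, len(lb)                                # batch size and dimension

    H_o = np.zeros(b, dtype=[("x", float, (n,))])    # structured array, like gen_specs['out']
    rng = np.random.default_rng(0)
    H_o["x"] = rng.uniform(lb, ub, (b, n))           # one uniform point per row
    print(H_o["x"])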
gitjeff05/quality-control | [
"873e2bb8a2655d80e0f0a7a38fbd834b562a5726"
] | [
"app/data/data_source.py"
] | [
"#\n# Manages all the data needed for checks:\n#\n# 1. The DEV worksheet in Google sheets\n# 2. The historical data pulled from the API\n# 3. The current data pulled from the API (redundant w/the historical data)\n#\n# This module is responsible for type conversion and renaming the fields for consistency.\n#\n\nfrom typing import List, Dict\nfrom loguru import logger\nimport pandas as pd\nfrom urllib.request import urlopen\nimport json\nimport numpy as np\nimport re\nimport requests\nimport socket\nimport io\n\nfrom app.util import state_abbrevs\nimport app.util.udatetime as udatetime\nfrom app.data.worksheet_wrapper import WorksheetWrapper\nfrom app.log.error_log import ErrorLog\n\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets']\nKEY_PATH = \"credentials-scanner.json\"\n\ndef get_remote_csv(xurl: str) -> pd.DataFrame:\n r = requests.get(xurl, timeout=1)\n if r.status_code >= 300: \n raise Exception(f\"Could not get {xurl}, status={r.status_code}\")\n f = io.StringIO(r.text)\n df = pd.read_csv(f)\n return df\n\n\n\nclass DataSource:\n\n def __init__(self):\n\n self._target_date = None\n self.log = ErrorLog()\n\n self.failed = {}\n\n # worksheet dates\n self.last_publish_time = \"\"\n self.last_push_time = \"\"\n self.current_time = \"\"\n\n # internal datasources\n self._working: pd.DataFrame = None\n self._history: pd.DataFrame = None\n self._current: pd.DataFrame = None\n\n # external datasources\n self._cds_counties: pd.DataFrame = None\n self._csbs_counties: pd.DataFrame = None\n self._nyt_counties: pd.DataFrame = None\n self._county_rollup: pd.DataFrame = None\n\n @property\n def working(self) -> pd.DataFrame:\n \" the working dataset\"\n if self._working is None:\n if self.failed.get(\"working\"): return None\n try:\n self._working = self.load_working()\n except socket.timeout:\n self.failed[\"working\"] = True\n self.log.error(f\"Could not fetch working\")\n except Exception as ex:\n logger.exception(ex) \n self.failed[\"working\"] = True\n self.log.error(f\"Could not load working\", exception=ex)\n return self._working\n\n @property\n def history(self) -> pd.DataFrame:\n \" the daily history dataset\"\n if self._history is None:\n if self.failed.get(\"history\"): return None\n try:\n self._history = self.load_history()\n except socket.timeout:\n self.failed[\"history\"] = True\n self.log.error(f\"Could not fetch history\")\n except Exception as ex:\n self.failed[\"history\"] = True\n self.log.error(f\"Could not load history\", exception=ex)\n return self._history\n\n @property\n def current(self) -> pd.DataFrame:\n \" today's dataset\"\n if self._current is None:\n if self.failed.get(\"current\"): return None\n try:\n self._current = self.load_current()\n except socket.timeout:\n self.failed[\"current\"] = True\n self.log.error(f\"Could not fetch current\")\n except Exception as ex:\n self.failed[\"current\"] = True\n self.log.error(\"Could not load current\", exception=ex)\n return self._current\n\n @property\n def cds_counties(self) -> pd.DataFrame:\n \" the CDS counties dataset\"\n if self._cds_counties is None:\n if self.failed.get(\"CDS\"): return None\n try:\n self._cds_counties = self.load_cds_counties()\n except socket.timeout:\n self.failed[\"CDS\"] = True\n self.log.warning(f\"Could not fetch CDS counties\")\n except Exception as ex:\n self.failed[\"CDS\"] = True\n self.log.warning(\"Could not load CDS counties\", exception=ex)\n return self._cds_counties\n\n @property\n def csbs_counties(self) -> pd.DataFrame:\n \" the CSBS counties dataset\"\n if 
self._csbs_counties is None:\n if self.failed.get(\"CSBS\"): return None\n try:\n self._csbs_counties = self.load_csbs_counties()\n except socket.timeout:\n self.failed[\"CSBS\"] = True\n self.log.warning(f\"Could not fetch CSBS counties\")\n except Exception as ex:\n self.failed[\"CSBS\"] = True\n self.log.warning(f\"Could not load CSBS counties\", exception=ex)\n return self._csbs_counties\n\n @property\n def nyt_counties(self) -> pd.DataFrame:\n \" the NYT counties dataset\"\n if self._nyt_counties is None:\n if self.failed.get(\"NYT\"): return None\n try:\n self._nyt_counties = self.load_nyt_counties()\n except socket.timeout:\n self.failed[\"NYT\"] = True\n self.log.warning(f\"Could not fetch NYT counties\")\n except Exception as ex:\n self.failed[\"NYT\"] = True\n self.log.warning(f\"Could not load NYT counties\", exception=ex)\n return self._nyt_counties\n\n @property\n def county_rollup(self) -> pd.DataFrame:\n \"\"\" return a single county dataset of select metrics \"\"\"\n\n metrics = [\"cases\", \"deaths\",\"recovered\"]\n\n if self._county_rollup is None:\n if len(self.failed) > 0: return None\n\n frames = [self.cds_counties, self.csbs_counties, self.nyt_counties]\n if self.log.has_error:\n self.failed[\"counties\"] = True\n logger.warning(\"Could not load datasets for \" + \",\".join(self.failed))\n return None\n\n try:\n long_df = pd.concat(frames, axis=0, sort=False)\n\n self._county_rollup = long_df \\\n .groupby([\"state\", \"source\"])[metrics] \\\n .sum() \\\n .fillna(0) \\\n .astype(int) \\\n .reset_index()\n except Exception as ex:\n self.log.warning(f\"Could not combine counties datasets: {ex}\")\n\n return self._county_rollup\n\n def safe_convert_to_int(self, df: pd.DataFrame, col_name: str) -> pd.Series:\n \" convert a series to int even if it contains bad data\"\n s = df[col_name].str.strip().replace(re.compile(\",\"), \"\")\n\n is_blank = (s == \"\")\n is_bad = (~s.str.isnumeric()) & (~is_blank)\n\n df.loc[is_blank, col_name] = \"-1000\"\n s = df[col_name]\n\n df_errs = df[is_bad]\n if df_errs.shape[0] == 0: return s.astype(np.int)\n\n df_errs = df_errs[[\"state\", col_name]]\n logger.error(f\"invalid input values for {col_name}:\\n{df_errs}\")\n for _, e_row in df_errs.iterrows():\n v = e_row[col_name]\n self.log.error(f\"Invalid {col_name} value ({v}) for {e_row.state}\")\n\n s = s.where(is_bad, other=\"-1001\")\n return s.astype(np.int)\n\n def parse_dates(self, dates: List):\n if len(dates) != 5:\n raise Exception(\"First row layout (containing dates) changed\")\n last_publish_label, last_publish_value, last_push_label, \\\n last_push_value, current_time_field = dates\n\n if last_publish_label != \"Last Publish Time:\":\n raise Exception(\"Last Publish Time (cells V1:U1) moved\")\n if last_push_label != \"Last Push Time:\":\n raise Exception(\"Last Push Time (cells Z1:AA1) moved\")\n if not current_time_field.startswith(\"CURRENT TIME: \"):\n raise Exception(\"CURRENT TIME (cell AG1) moved\")\n\n self.last_publish_time = last_publish_value\n self.last_push_time = last_push_value\n self.current_time = current_time_field[current_time_field.index(\":\")+1:].strip()\n\n\n def load_working(self) -> pd.DataFrame:\n \"\"\"Load the working (unpublished) data from google sheets\"\"\"\n\n # make dev columns match api columns so quality\n # checks run with both inputs\n column_map = {\n 'State':'state',\n\n 'Dashboard': '',\n 'State Name': '',\n 'State COVID-19 Page': '',\n 'State Social Media': '',\n 'State Social Media': '',\n 'Press Conferences': '',\n 'GIS Query': 
'',\n 'Other': '',\n '#Reporting': '',\n 'URL Watch': '',\n 'Status': '',\n 'URL Watch Diff': '',\n 'Alerted': '',\n 'Last Alert': '',\n 'Error': '',\n 'Prev Last Check (ET)': '',\n 'Freshness': '',\n 'Flagged': '',\n 'Time zone +/–': '',\n 'Public': '',\n '': '',\n # 'Private': '',\n\n 'Local Time':'localTime',\n 'Positive':'positive',\n 'Negative':'negative',\n 'Pending':'pending',\n 'Currently Hospitalized':'hospitalized',\n 'Cumulative Hospitalized':'hospitalizedCumulative',\n 'Currently in ICU':'inIcu',\n 'Cumulative in ICU':'inIcuCumulative',\n 'Currently on Ventilator':'onVentilator',\n 'Cumulative on Ventilator':'onVentilatorCumulative',\n 'Recovered':'recovered',\n 'Deaths':'death',\n 'Total':'total',\n 'Last Update (ET)': 'lastUpdateEt',\n 'Last Check (ET)': 'lastCheckEt',\n 'Checker':'checker',\n 'Doublechecker':'doubleChecker'\n }\n\n gs = WorksheetWrapper()\n dev_id = gs.get_sheet_id_by_name(\"dev\")\n\n dates = gs.read_as_list(dev_id, \"Worksheet 2!V1:AJ1\", ignore_blank_cells=True, single_row=True)\n self.parse_dates(dates)\n\n df = gs.read_as_frame(dev_id, \"Worksheet 2!A2:AL60\", header_rows=1)\n\n # clean up names\n cols = []\n for n in df.columns: \n n1 = n.replace(\"\\r\", \"\").replace(\"\\n\", \" \").replace(\" \", \" \")\n n1 = n1.strip()\n cols.append(n1)\n df.columns = cols\n\n\n # check names and rename/suppress columns\n has_error = False\n names = []\n to_delete = []\n for n in df.columns.values:\n n2 = column_map.get(n)\n if n2 == None:\n has_error = True\n logger.error(f\" Unexpected column: [{n1}] in google sheet\")\n elif n2 == '':\n to_delete.append(n)\n else:\n names.append(n2)\n for n in column_map:\n if not (n1 in df.columns):\n has_error = True\n logger.error(f\" Missing column: [{n}] in google sheet\")\n\n if has_error:\n raise Exception(\"Columns in google have changed\")\n\n for n in to_delete:\n del df[n]\n\n df.columns = names\n\n idx = df.columns.get_loc(\"localTime\")\n eidx = df.columns.get_loc(\"lastUpdateEt\")\n\n for c in df.columns[idx+1:eidx]:\n df[c] = self.safe_convert_to_int(df, c)\n\n def standardize(d: str) -> str:\n sd, err_num = udatetime.standardize_date(d)\n return str(err_num) + sd\n\n def convert_date(df: pd.DataFrame, name: str, as_eastern: bool):\n s = df[name]\n s_date = s.apply(standardize)\n\n s_idx = s_date.str[0].astype(np.int)\n names = [\"\", \"changed\", \"blank\", \"missing date\", \"missing time\", \"bad date\", \"bad time\"]\n s_msg = s_idx.map(lambda x: names[x])\n\n s_date = s_date.str[1:]\n\n #print(pd.DataFrame({ \"before\": s, \"after\": s_date, \"changed\": s_changed}))\n\n s_date = pd.to_datetime(s_date, format=\"%m/%d/%Y %H:%M\")\n if as_eastern:\n s_date = s_date.apply(udatetime.pandas_timestamp_as_eastern)\n\n df[name] = s_date\n df[name + \"_msg\"] = s_msg\n\n # remove current time from first row\n #current_time = df.loc[0, \"lastCheckEt\"].replace(\"CURRENT NAME: \", \"\")\n #df.loc[0, \"lastCheckEt\"] = \"\"\n\n convert_date(df, \"localTime\", as_eastern=False)\n convert_date(df, \"lastUpdateEt\", as_eastern=True)\n convert_date(df, \"lastCheckEt\", as_eastern=True)\n\n df = df[ df.state != \"\"]\n return df\n\n def load_current(self) -> pd.DataFrame:\n \"\"\" load the current values from the API \"\"\"\n\n df = get_remote_csv(\"https://covidtracking.com/api/states.csv\")\n\n df = df.fillna(0)\n df[\"lastUpdateEt\"] = pd.to_datetime(df[\"lastUpdateEt\"].str.replace(\" \", \"/2020 \"), format=\"%m/%d/%Y %H:%M\") \\\n .apply(udatetime.pandas_timestamp_as_eastern)\n df[\"checkTimeEt\"] = 
pd.to_datetime(df[\"checkTimeEt\"].str.replace(\" \", \"/2020 \"), format=\"%m/%d/%Y %H:%M\") \\\n .apply(udatetime.pandas_timestamp_as_eastern)\n df[\"dateModified\"] = pd.to_datetime(df[\"dateModified\"])\n df[\"dateChecked\"] = pd.to_datetime(df[\"dateChecked\"])\n\n df.fillna(0.0, inplace=True)\n\n # counts\n for c in [\"positive\", \"negative\", \"pending\", \"hospitalized\", \"death\", \"recovered\", \"total\", \"totalTestResults\"]:\n df[c] = df[c].astype(np.int)\n for c in ['hospitalizedCumulative', 'inIcuCumulative', 'onVentilatorCumulative']:\n df[c] = df[c].astype(np.int)\n\n # 0 or 1. score = sum of others so it is 0-4\n for c in [\"positiveScore\", \"negativeScore\", \"negativeRegularScore\", \"commercialScore\", \"score\"]:\n df[c] = df[c].astype(np.int)\n return df\n\n\n def load_history(self) -> pd.DataFrame:\n \"\"\" load daily values over time from the API \"\"\"\n\n df = get_remote_csv(\"https://covidtracking.com/api/states/daily.csv\")\n df.fillna(0.0, inplace=True)\n\n # counts\n for c in [\"positive\", \"negative\", \"pending\", \"hospitalized\", \"death\", \"recovered\", \"total\", \"totalTestResults\"]:\n df[c] = df[c].astype(np.int)\n for c in [\"positiveIncrease\", \"negativeIncrease\", \"hospitalizedIncrease\", \"deathIncrease\", \"totalTestResultsIncrease\"]:\n df[c] = df[c].astype(np.int)\n for c in ['hospitalizedCumulative', 'inIcuCumulative', 'onVentilatorCumulative']:\n df[c] = df[c].astype(np.int)\n\n\n df[\"dateChecked\"] = pd.to_datetime(df[\"dateChecked\"])\n return df\n\n def load_cds_counties(self) -> pd.DataFrame:\n \"\"\" load the CDS county dataset \"\"\"\n\n cds = get_remote_csv(\"https://coronadatascraper.com/data.csv\")\n\n cds = cds \\\n .loc[(cds[\"country\"] == \"USA\") & (~cds[\"county\"].isnull())]\n\n cds[\"county\"] = cds[\"county\"].apply(lambda x: x.replace(\"County\", \"\").strip())\n cds[\"source\"] = \"cds\"\n return cds\n\n def load_csbs_counties(self) -> pd.DataFrame:\n \"\"\" load the CSBS county dataset \"\"\"\n\n xurl = \"http://coronavirus-tracker-api.herokuapp.com/v2/locations?source=csbs\"\n response = urlopen(xurl, timeout=1)\n json_data = response.read().decode('utf-8', 'replace')\n d = json.loads(json_data)\n csbs = pd.json_normalize(d['locations'])\n\n # remove \"extras\"\n csbs = csbs \\\n .loc[csbs[\"country\"] == \"US\"] \\\n .rename(columns={\n \"province\":\"state\",\n \"latest.confirmed\":\"cases\",\n \"latest.deaths\":\"deaths\",\n \"latest.recovered\":\"recovered\",\n \"coordinates.latitude\":\"lat\",\n \"coordinates.longitude\":\"long\"})\n csbs[\"state\"] = csbs[\"state\"].map(state_abbrevs)\n csbs[\"source\"] = \"csbs\"\n return csbs\n\n def load_nyt_counties(self) -> pd.DataFrame:\n\n df = get_remote_csv(\"https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv\")\n\n \"\"\" load the NYT county dataset \"\"\"\n nyt = df.rename(columns={\n \"date\":\"last_updated\"\n })\n nyt = nyt.loc[nyt[\"last_updated\"] == nyt[\"last_updated\"].max()]\n nyt[\"state\"] = nyt[\"state\"].map(state_abbrevs)\n nyt[\"source\"] = \"nyt\"\n return nyt\n\n# ------------------------------------------------------------\n\n# --- simple tests\ndef main():\n\n ds = DataSource()\n logger.info(f\"working\\n{ds.working.info()}\")\n logger.info(f\"history\\n{ds.history.info()}\")\n logger.info(f\"current\\n{ds.current.info()}\")\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.json_normalize",
"pandas.concat",
"pandas.read_csv",
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0"
],
"scipy": [],
"tensorflow": []
}
] |
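load_current in the entry above parses timestamps like "3/29 14:20" by splicing in the year before calling pandas.to_datetime with an explicit format. A small reproduction of just that step, with invented sample strings:

    import pandas as pd

    s = pd.Series(["3/29 14:20", "3/30 09:05"])      # placeholder timestamps
    parsed = pd.to_datetime(s.str.replace(" ", "/2020 "), format="%m/%d/%Y %H:%M")
    print(parsed)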
Arnie0426/NVTabular | [
"76e63d9df7b90433d552606e9cf87bd61d7eee3b"
] | [
"tests/unit/test_io.py"
] | [
"#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport glob\nimport json\nimport math\nimport os\nimport warnings\nfrom distutils.version import LooseVersion\n\nimport cudf\nimport dask\nimport dask.dataframe as dd\nimport dask_cudf\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom dask.dataframe import assert_eq\nfrom dask.dataframe.io.demo import names as name_list\n\nimport nvtabular as nvt\nimport nvtabular.io\nfrom nvtabular import ops\nfrom nvtabular.io.parquet import GPUParquetWriter\nfrom tests.conftest import allcols_csv, mycols_csv, mycols_pq\n\n\[email protected](\"engine\", [\"csv\", \"parquet\", \"csv-no-header\"])\ndef test_shuffle_gpu(tmpdir, datasets, engine):\n num_files = 2\n paths = glob.glob(str(datasets[engine]) + \"/*.\" + engine.split(\"-\")[0])\n if engine == \"parquet\":\n df1 = cudf.read_parquet(paths[0])[mycols_pq]\n else:\n df1 = cudf.read_csv(paths[0], header=False, names=allcols_csv)[mycols_csv]\n shuf = GPUParquetWriter(tmpdir, num_out_files=num_files, shuffle=nvt.io.Shuffle.PER_PARTITION)\n shuf.add_data(df1)\n writer_files = shuf.data_paths\n shuf.close()\n if engine == \"parquet\":\n df3 = cudf.read_parquet(writer_files[0])[mycols_pq]\n df4 = cudf.read_parquet(writer_files[1])[mycols_pq]\n else:\n df3 = cudf.read_parquet(writer_files[0])[mycols_csv]\n df4 = cudf.read_parquet(writer_files[1])[mycols_csv]\n assert df1.shape[0] == df3.shape[0] + df4.shape[0]\n\n\[email protected](\"gpu_memory_frac\", [0.01, 0.1])\[email protected](\"engine\", [\"csv\", \"parquet\"])\ndef test_dask_dataset_itr(tmpdir, datasets, engine, gpu_memory_frac):\n paths = glob.glob(str(datasets[engine]) + \"/*.\" + engine.split(\"-\")[0])\n if engine == \"parquet\":\n df1 = cudf.read_parquet(paths[0])[mycols_pq]\n else:\n df1 = cudf.read_csv(paths[0], header=0, names=allcols_csv)[mycols_csv]\n dtypes = {\"id\": np.int32}\n if engine == \"parquet\":\n columns = mycols_pq\n else:\n columns = mycols_csv\n\n size = 0\n ds = nvtabular.io.Dataset(\n paths[0], engine=engine, part_mem_fraction=gpu_memory_frac, dtypes=dtypes\n )\n my_iter = ds.to_iter(columns=columns)\n for chunk in my_iter:\n size += chunk.shape[0]\n assert chunk[\"id\"].dtype == np.int32\n\n assert size == df1.shape[0]\n assert len(my_iter) == size\n\n\[email protected](\"engine\", [\"csv\", \"parquet\", \"csv-no-header\"])\[email protected](\"num_files\", [1, 2])\[email protected](\"cpu\", [None, True])\ndef test_dask_dataset(datasets, engine, num_files, cpu):\n paths = glob.glob(str(datasets[engine]) + \"/*.\" + engine.split(\"-\")[0])\n paths = paths[:num_files]\n if engine == \"parquet\":\n ddf0 = dask_cudf.read_parquet(paths)[mycols_pq]\n dataset = nvtabular.io.Dataset(paths, cpu=cpu)\n result = dataset.to_ddf(columns=mycols_pq)\n else:\n ddf0 = dask_cudf.read_csv(paths, header=None, names=allcols_csv)[mycols_csv]\n dataset = nvtabular.io.Dataset(paths, cpu=cpu, header=None, names=allcols_csv)\n result = dataset.to_ddf(columns=mycols_csv)\n\n # We do not 
preserve the index in NVTabular\n if engine == \"parquet\":\n assert_eq(ddf0, result, check_index=False)\n else:\n assert_eq(ddf0, result)\n\n # Check that the cpu kwarg is working correctly\n if cpu:\n assert isinstance(result.compute(), pd.DataFrame)\n\n # Should still work if we move to the GPU\n # (test behavior after repetitive conversion)\n dataset.to_gpu()\n dataset.to_cpu()\n dataset.to_cpu()\n dataset.to_gpu()\n result = dataset.to_ddf()\n assert isinstance(result.compute(), cudf.DataFrame)\n else:\n assert isinstance(result.compute(), cudf.DataFrame)\n\n # Should still work if we move to the CPU\n # (test behavior after repetitive conversion)\n dataset.to_cpu()\n dataset.to_gpu()\n dataset.to_gpu()\n dataset.to_cpu()\n result = dataset.to_ddf()\n assert isinstance(result.compute(), pd.DataFrame)\n\n\[email protected](\"origin\", [\"cudf\", \"dask_cudf\", \"pd\", \"dd\"])\[email protected](\"cpu\", [None, True])\ndef test_dask_dataset_from_dataframe(tmpdir, origin, cpu):\n\n # Generate a DataFrame-based input\n if origin in (\"pd\", \"dd\"):\n df = pd.DataFrame({\"a\": range(100)})\n if origin == \"dd\":\n df = dask.dataframe.from_pandas(df, npartitions=4)\n elif origin in (\"cudf\", \"dask_cudf\"):\n df = cudf.DataFrame({\"a\": range(100)})\n if origin == \"dask_cudf\":\n df = dask_cudf.from_cudf(df, npartitions=4)\n\n # Convert to an NVTabular Dataset and back to a ddf\n dataset = nvtabular.io.Dataset(df, cpu=cpu)\n result = dataset.to_ddf()\n\n # Check resulting data\n assert_eq(df, result)\n\n # Check that the cpu kwarg is working correctly\n if cpu:\n assert isinstance(result.compute(), pd.DataFrame)\n\n # Should still work if we move to the GPU\n # (test behavior after repetitive conversion)\n dataset.to_gpu()\n dataset.to_cpu()\n dataset.to_cpu()\n dataset.to_gpu()\n result = dataset.to_ddf()\n assert isinstance(result.compute(), cudf.DataFrame)\n dataset.to_cpu()\n else:\n assert isinstance(result.compute(), cudf.DataFrame)\n\n # Should still work if we move to the CPU\n # (test behavior after repetitive conversion)\n dataset.to_cpu()\n dataset.to_gpu()\n dataset.to_gpu()\n dataset.to_cpu()\n result = dataset.to_ddf()\n assert isinstance(result.compute(), pd.DataFrame)\n dataset.to_gpu()\n\n # Write to disk and read back\n path = str(tmpdir)\n dataset.to_parquet(path, out_files_per_proc=1, shuffle=None)\n ddf_check = dask_cudf.read_parquet(path).compute()\n if origin in (\"dd\", \"dask_cudf\"):\n # Multiple partitions are not guarenteed the same\n # order in output file.\n ddf_check = ddf_check.sort_values(\"a\")\n assert_eq(df, ddf_check, check_index=False)\n\n\[email protected](\"cpu\", [None, True])\ndef test_dask_datframe_methods(tmpdir, cpu):\n # Input DataFrame objects\n df1 = cudf.datasets.timeseries(seed=7)[[\"id\", \"y\"]].iloc[:200]\n df2 = cudf.datasets.timeseries(seed=42)[[\"id\", \"x\"]].iloc[:100]\n\n # Initialize and merge Dataset objects\n ds1 = nvtabular.io.Dataset(df1, npartitions=3, cpu=cpu)\n ds2 = nvtabular.io.Dataset(df2, npartitions=2, cpu=not cpu)\n ds3 = nvtabular.io.Dataset.merge(ds1, ds2, on=\"id\", how=\"inner\")\n\n # Check repartitioning\n ds3 = ds3.repartition(npartitions=4)\n assert ds3.npartitions == 4\n\n # Check that head, tail, and persist are recognized\n ds1.head()\n ds1.tail()\n ds1.persist()\n\n # Check merge result\n result = ds3.compute().sort_values([\"id\", \"x\", \"y\"])\n expect = cudf.DataFrame.merge(df1, df2, on=\"id\", how=\"inner\").sort_values([\"id\", \"x\", \"y\"])\n assert_eq(result, expect, 
check_index=False)\n\n\[email protected](\"output_format\", [\"hugectr\", \"parquet\"])\[email protected](\"engine\", [\"parquet\", \"csv\", \"csv-no-header\"])\[email protected](\"op_columns\", [[\"x\"], None])\[email protected](\"num_io_threads\", [0, 2])\[email protected](\"use_client\", [True, False])\ndef test_hugectr(\n tmpdir, client, df, dataset, output_format, engine, op_columns, num_io_threads, use_client\n):\n client = client if use_client else None\n\n cat_names = [\"name-cat\", \"name-string\"] if engine == \"parquet\" else [\"name-string\"]\n cont_names = [\"x\", \"y\"]\n label_names = [\"label\"]\n\n # set variables\n nfiles = 10\n ext = \"\"\n outdir = tmpdir + \"/hugectr\"\n os.mkdir(outdir)\n outdir = str(outdir)\n\n conts = nvt.ColumnGroup(cont_names) >> ops.Normalize\n cats = nvt.ColumnGroup(cat_names) >> ops.Categorify\n\n workflow = nvt.Workflow(conts + cats + label_names)\n transformed = workflow.fit_transform(dataset)\n\n if output_format == \"hugectr\":\n transformed.to_hugectr(\n cats=cat_names,\n conts=cont_names,\n labels=label_names,\n output_path=outdir,\n out_files_per_proc=nfiles,\n num_threads=num_io_threads,\n )\n else:\n transformed.to_parquet(\n output_path=outdir,\n out_files_per_proc=nfiles,\n num_threads=num_io_threads,\n )\n\n # Check for _file_list.txt\n assert os.path.isfile(outdir + \"/_file_list.txt\")\n\n # Check for _metadata.json\n assert os.path.isfile(outdir + \"/_metadata.json\")\n\n # Check contents of _metadata.json\n data = {}\n col_summary = {}\n with open(outdir + \"/_metadata.json\", \"r\") as fil:\n for k, v in json.load(fil).items():\n data[k] = v\n assert \"cats\" in data\n assert \"conts\" in data\n assert \"labels\" in data\n assert \"file_stats\" in data\n assert len(data[\"file_stats\"]) == nfiles if not client else nfiles * len(client.cluster.workers)\n for cdata in data[\"cats\"] + data[\"conts\"] + data[\"labels\"]:\n col_summary[cdata[\"index\"]] = cdata[\"col_name\"]\n\n # Check that data files exist\n ext = \"\"\n if output_format == \"parquet\":\n ext = \"parquet\"\n elif output_format == \"hugectr\":\n ext = \"data\"\n\n data_files = [\n os.path.join(outdir, filename) for filename in os.listdir(outdir) if filename.endswith(ext)\n ]\n\n # Make sure the columns in \"_metadata.json\" make sense\n if output_format == \"parquet\":\n df_check = cudf.read_parquet(os.path.join(outdir, data_files[0]))\n for i, name in enumerate(df_check.columns):\n if i in col_summary:\n assert col_summary[i] == name\n\n\[email protected](\"inp_format\", [\"dask\", \"dask_cudf\", \"cudf\", \"pandas\"])\ndef test_ddf_dataset_itr(tmpdir, datasets, inp_format):\n paths = glob.glob(str(datasets[\"parquet\"]) + \"/*.\" + \"parquet\".split(\"-\")[0])\n ddf1 = dask_cudf.read_parquet(paths)[mycols_pq]\n df1 = ddf1.compute()\n if inp_format == \"dask\":\n ds = nvtabular.io.Dataset(ddf1.to_dask_dataframe())\n elif inp_format == \"dask_cudf\":\n ds = nvtabular.io.Dataset(ddf1)\n elif inp_format == \"cudf\":\n ds = nvtabular.io.Dataset(df1)\n elif inp_format == \"pandas\":\n ds = nvtabular.io.Dataset(df1.to_pandas())\n assert_eq(df1, cudf.concat(list(ds.to_iter(columns=mycols_pq))))\n\n\ndef test_dataset_partition_shuffle(tmpdir):\n ddf1 = dask.datasets.timeseries(\n start=\"2000-01-01\", end=\"2000-01-21\", freq=\"1H\", dtypes={\"name\": str, \"id\": int}\n )\n # Make sure we have enough partitions to ensure\n # random failure is VERY unlikely (prob ~4e-19)\n assert ddf1.npartitions == 20\n columns = list(ddf1.columns)\n ds = nvt.Dataset(ddf1)\n ddf1 = 
ds.to_ddf()\n\n # Shuffle\n df1 = ddf1.compute().reset_index(drop=True)\n df2_to_ddf = ds.to_ddf(shuffle=True).compute().reset_index(drop=True)\n df2_to_iter = cudf.concat(list(ds.to_iter(columns=columns, shuffle=True))).reset_index(\n drop=True\n )\n\n # If we successfully shuffled partitions,\n # our data should not be in the same order\n df3 = df2_to_ddf[[\"id\"]]\n df3[\"id\"] -= df1[\"id\"]\n assert df3[\"id\"].abs().sum() > 0\n\n # Re-Sort\n df1 = df1.sort_values(columns, ignore_index=True)\n df2_to_ddf = df2_to_ddf.sort_values(columns, ignore_index=True)\n df2_to_iter = df2_to_iter.sort_values(columns, ignore_index=True)\n\n # Check that the shuffle didn't change the data after re-sorting\n assert_eq(df1, df2_to_ddf)\n assert_eq(df1, df2_to_iter)\n\n\[email protected](\"engine\", [\"csv\"])\[email protected](\"num_io_threads\", [0, 2])\[email protected](\"nfiles\", [0, 1, 5]) # Use 5 to test repartition in to_parquet\[email protected](\"shuffle\", [nvt.io.Shuffle.PER_WORKER, None])\[email protected](\"file_map\", [True, False])\ndef test_multifile_parquet(tmpdir, dataset, df, engine, num_io_threads, nfiles, shuffle, file_map):\n\n cat_names = [\"name-cat\", \"name-string\"] if engine == \"parquet\" else [\"name-string\"]\n cont_names = [\"x\", \"y\"]\n label_names = [\"label\"]\n columns = cat_names + cont_names + label_names\n workflow = nvt.Workflow(nvt.ColumnGroup(columns))\n\n outdir = str(tmpdir.mkdir(\"out\"))\n transformed = workflow.transform(nvt.Dataset(dask_cudf.from_cudf(df, 2)))\n if file_map and nfiles:\n transformed.to_parquet(\n output_path=outdir, num_threads=num_io_threads, shuffle=shuffle, output_files=nfiles\n )\n out_paths = glob.glob(os.path.join(outdir, \"part_*\"))\n assert len(out_paths) == nfiles\n else:\n transformed.to_parquet(\n output_path=outdir,\n num_threads=num_io_threads,\n shuffle=shuffle,\n out_files_per_proc=nfiles,\n )\n out_paths = glob.glob(os.path.join(outdir, \"*.parquet\"))\n\n # Check that our output data is exactly the same\n df_check = cudf.read_parquet(out_paths)\n assert_eq(\n df_check[columns].sort_values([\"x\", \"y\"]),\n df[columns].sort_values([\"x\", \"y\"]),\n check_index=False,\n )\n\n\[email protected](\"freq_threshold\", [0, 1, 2])\[email protected](\"shuffle\", [nvt.io.Shuffle.PER_PARTITION, None])\[email protected](\"out_files_per_proc\", [None, 2])\ndef test_parquet_lists(tmpdir, freq_threshold, shuffle, out_files_per_proc):\n # the cudf 0.17 dev container returns a '0+untagged.1.ga6296e3' version for cudf\n # (which is tough to parse correctly with LooseVersion et al). 
This also fails\n # to run this test frequently, whereas it works with later versions of cudf.\n # skip if we are running this specific version of cudf (and lets remove this\n # check entirely after we've upgraded the CI container)\n if cudf.__version__.startswith(\"0+untagged\"):\n pytest.skip(\"parquet lists support is flakey here without cudf0.18\")\n\n df = cudf.DataFrame(\n {\n \"Authors\": [[\"User_A\"], [\"User_A\", \"User_E\"], [\"User_B\", \"User_C\"], [\"User_C\"]],\n \"Engaging User\": [\"User_B\", \"User_B\", \"User_A\", \"User_D\"],\n \"Post\": [1, 2, 3, 4],\n }\n )\n\n input_dir = str(tmpdir.mkdir(\"input\"))\n output_dir = str(tmpdir.mkdir(\"output\"))\n filename = os.path.join(input_dir, \"test.parquet\")\n df.to_parquet(filename)\n\n cat_names = [\"Authors\", \"Engaging User\"]\n cats = cat_names >> ops.Categorify(out_path=str(output_dir))\n workflow = nvt.Workflow(cats + \"Post\")\n\n transformed = workflow.fit_transform(nvt.Dataset(filename))\n transformed.to_parquet(\n output_path=output_dir,\n shuffle=shuffle,\n out_files_per_proc=out_files_per_proc,\n )\n\n out_paths = glob.glob(os.path.join(output_dir, \"*.parquet\"))\n df_out = cudf.read_parquet(out_paths)\n df_out = df_out.sort_values(by=\"Post\", ascending=True)\n assert df_out[\"Authors\"].to_arrow().to_pylist() == [[1], [1, 4], [2, 3], [3]]\n\n\[email protected](\"part_size\", [None, \"1KB\"])\[email protected](\"size\", [100, 5000])\[email protected](\"nfiles\", [1, 2])\ndef test_avro_basic(tmpdir, part_size, size, nfiles):\n\n # Require uavro and fastavro library.\n # Note that fastavro is only required to write\n # avro files for testing, while uavro is actually\n # used by AvroDatasetEngine.\n fa = pytest.importorskip(\"fastavro\")\n pytest.importorskip(\"uavro\")\n\n # Define avro schema\n schema = fa.parse_schema(\n {\n \"name\": \"avro.example.User\",\n \"type\": \"record\",\n \"fields\": [\n {\"name\": \"name\", \"type\": \"string\"},\n {\"name\": \"age\", \"type\": \"int\"},\n ],\n }\n )\n\n # Write avro dataset with two files.\n # Collect block and record (row) count while writing.\n nblocks = 0\n nrecords = 0\n paths = [os.path.join(str(tmpdir), f\"test.{i}.avro\") for i in range(nfiles)]\n records = []\n for path in paths:\n names = np.random.choice(name_list, size)\n ages = np.random.randint(18, 100, size)\n data = [{\"name\": names[i], \"age\": ages[i]} for i in range(size)]\n with open(path, \"wb\") as f:\n fa.writer(f, schema, data)\n with open(path, \"rb\") as fo:\n avro_reader = fa.block_reader(fo)\n for block in avro_reader:\n nrecords += block.num_records\n nblocks += 1\n records += list(block)\n if nfiles == 1:\n paths = paths[0]\n\n # Read back with dask.dataframe\n df = nvt.Dataset(paths, part_size=part_size, engine=\"avro\").to_ddf()\n\n # Check basic length and partition count\n if part_size == \"1KB\":\n assert df.npartitions == nblocks\n assert len(df) == nrecords\n\n # Full comparison\n expect = pd.DataFrame.from_records(records)\n expect[\"age\"] = expect[\"age\"].astype(\"int32\")\n assert_eq(df.compute().reset_index(drop=True), expect)\n\n\[email protected](\"engine\", [\"csv\", \"parquet\"])\ndef test_validate_dataset(datasets, engine):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n paths = glob.glob(str(datasets[engine]) + \"/*.\" + engine.split(\"-\")[0])\n if engine == \"parquet\":\n dataset = nvtabular.io.Dataset(str(datasets[engine]), engine=engine)\n\n # Default file_min_size should result in failed validation\n assert not 
dataset.validate_dataset()\n assert dataset.validate_dataset(file_min_size=1, require_metadata_file=False)\n else:\n dataset = nvtabular.io.Dataset(paths, header=False, names=allcols_csv)\n\n # CSV format should always fail validation\n assert not dataset.validate_dataset()\n\n\ndef test_validate_dataset_bad_schema(tmpdir):\n if LooseVersion(dask.__version__) <= \"2.30.0\":\n # Older versions of Dask will not handle schema mismatch\n pytest.skip(\"Test requires newer version of Dask.\")\n\n path = str(tmpdir)\n for (fn, df) in [\n (\"part.0.parquet\", pd.DataFrame({\"a\": range(10), \"b\": range(10)})),\n (\"part.1.parquet\", pd.DataFrame({\"a\": [None] * 10, \"b\": range(10)})),\n ]:\n df.to_parquet(os.path.join(path, fn))\n\n # Initial dataset has mismatched schema and is missing a _metadata file.\n dataset = nvtabular.io.Dataset(path, engine=\"parquet\")\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # Schema issue should cause validation failure, even if _metadata is ignored\n assert not dataset.validate_dataset(require_metadata_file=False)\n # File size should cause validation error, even if _metadata is generated\n assert not dataset.validate_dataset(add_metadata_file=True)\n # Make sure the last call added a `_metadata` file\n assert len(glob.glob(os.path.join(path, \"_metadata\")))\n\n # New datset has a _metadata file, but the file size is still too small\n dataset = nvtabular.io.Dataset(path, engine=\"parquet\")\n assert not dataset.validate_dataset()\n # Ignore file size to get validation success\n assert dataset.validate_dataset(file_min_size=1, row_group_max_size=\"1GB\")\n\n\ndef test_validate_and_regenerate_dataset(tmpdir):\n\n # Initial timeseries dataset (in cpu memory)\n ddf = dask.datasets.timeseries(\n start=\"2000-01-01\",\n end=\"2000-01-05\",\n freq=\"60s\",\n partition_freq=\"1d\",\n seed=42,\n )\n ds = nvt.Dataset(ddf)\n\n # Regenerate dataset on disk\n path = str(tmpdir)\n ds.regenerate_dataset(path, part_size=\"50KiB\", file_size=\"150KiB\")\n\n # Check that the regenerated dataset makes sense.\n # Dataset is ~544KiB - Expect 4 data files\n N = math.ceil(ddf.compute().memory_usage(deep=True).sum() / 150000)\n file_list = glob.glob(os.path.join(path, \"*\"))\n assert os.path.join(path, \"_metadata\") in file_list\n assert os.path.join(path, \"_file_list.txt\") in file_list\n assert os.path.join(path, \"_metadata.json\") in file_list\n assert len(file_list) == N + 3 # N data files + 3 metadata files\n\n # Check new dataset validation\n ds2 = nvt.Dataset(path, engine=\"parquet\", part_size=\"64KiB\")\n ds2.validate_dataset(file_min_size=1)\n\n # Check that dataset content is correct\n assert_eq(ddf, ds2.to_ddf().compute())\n\n # Check cpu version of `to_ddf`\n assert_eq(ddf, ds2.engine.to_ddf(cpu=True).compute())\n\n\[email protected](\"preserve_files\", [True, False])\[email protected](\"cpu\", [True, False])\ndef test_dataset_conversion(tmpdir, cpu, preserve_files):\n\n # Generate toy dataset.\n # Include \"hex\" strings to mimic Criteo.\n size = 100\n npartitions = 4\n hex_vals = [\n \"62770d79\",\n \"e21f5d58\",\n \"afea442f\",\n \"945c7fcf\",\n \"38b02748\",\n \"6fcd6dcb\",\n \"3580aa21\",\n \"46dedfa6\",\n ]\n df = pd.DataFrame(\n {\n \"C0\": np.random.choice(hex_vals, size),\n \"I0\": np.random.randint(1_000_000_000, high=10_000_000_000, size=size),\n \"F0\": np.random.uniform(size=size),\n }\n )\n ddf = dd.from_pandas(df, npartitions=npartitions)\n\n # Write to csv dataset\n csv_path = os.path.join(str(tmpdir), \"csv_dataset\")\n 
ddf.to_csv(csv_path, header=False, sep=\"\\t\", index=False)\n\n # Create NVT Dataset\n dtypes = {\"F0\": np.float64, \"I0\": np.int64, \"C0\": \"hex\"}\n ds = nvt.Dataset(\n csv_path,\n cpu=cpu,\n engine=\"csv\",\n dtypes=dtypes,\n sep=\"\\t\",\n names=[\"C0\", \"I0\", \"F0\"],\n )\n\n # Convert csv dataset to parquet.\n # Adding extra ds -> ds2 step to test `base_dataset` usage.\n pq_path = os.path.join(str(tmpdir), \"pq_dataset\")\n ds2 = nvt.Dataset(ds.to_ddf(), base_dataset=ds)\n ds2.to_parquet(pq_path, preserve_files=preserve_files, suffix=\".pq\")\n\n # Check output.\n # Note that we are converting the inital hex strings to int32.\n ds_check = nvt.Dataset(pq_path, engine=\"parquet\")\n df[\"C0\"] = df[\"C0\"].apply(int, base=16).astype(\"int32\")\n assert_eq(ds_check.to_ddf().compute(), df, check_index=False)\n\n # Check that the `suffix=\".pq\"` argument was successful\n assert glob.glob(os.path.join(pq_path, \"*.pq\"))\n assert not glob.glob(os.path.join(pq_path, \"*.parquet\"))\n\n\[email protected](\"use_file_metadata\", [True, None])\[email protected](\"shuffle\", [True, False])\ndef test_parquet_iterator_len(tmpdir, shuffle, use_file_metadata):\n\n ddf1 = dask.datasets.timeseries(\n start=\"2000-01-01\",\n end=\"2000-01-6\",\n freq=\"600s\",\n partition_freq=\"1d\",\n id_lam=10,\n seed=42,\n ).shuffle(\"id\")\n\n # Write to parquet dataset\n ddf1.to_parquet(str(tmpdir))\n\n # Initialize Dataset\n ds = nvt.Dataset(str(tmpdir), engine=\"parquet\")\n\n # Convert ds -> ds2\n ds2 = nvt.Dataset(ds.to_ddf())\n\n # Check that iterator lengths match the partition lengths\n ddf2 = ds2.to_ddf(shuffle=shuffle, seed=42)\n for i in range(ddf2.npartitions):\n _iter = ds2.to_iter(\n shuffle=shuffle,\n seed=42,\n indices=[i],\n use_file_metadata=use_file_metadata,\n )\n assert len(ddf2.partitions[i]) == len(_iter)\n\n\[email protected](\"cpu\", [True, False])\ndef test_hive_partitioned_data(tmpdir, cpu):\n\n # Initial timeseries dataset (in cpu memory).\n # Round the full \"timestamp\" to the hour for partitioning.\n ddf = dask.datasets.timeseries(\n start=\"2000-01-01\",\n end=\"2000-01-03\",\n freq=\"600s\",\n partition_freq=\"6h\",\n seed=42,\n ).reset_index()\n ddf[\"timestamp\"] = ddf[\"timestamp\"].dt.round(\"D\").dt.day\n ds = nvt.Dataset(ddf, engine=\"parquet\")\n\n # Write the dataset to disk\n path = str(tmpdir)\n partition_keys = [\"timestamp\", \"name\"]\n ds.to_parquet(path, partition_on=partition_keys)\n\n # Make sure the directory structure is hive-like\n df_expect = ddf.compute()\n df_expect = df_expect.sort_values([\"id\", \"x\", \"y\"]).reset_index(drop=True)\n timestamp_check = df_expect[\"timestamp\"].iloc[0]\n name_check = df_expect[\"name\"].iloc[0]\n assert glob.glob(\n os.path.join(\n path,\n f\"timestamp={timestamp_check}/name={name_check}/*\",\n )\n )\n\n # Read back with dask.dataframe and check the data\n df_check = dd.read_parquet(path).compute()\n df_check[\"name\"] = df_check[\"name\"].astype(\"object\")\n df_check[\"timestamp\"] = df_check[\"timestamp\"].astype(\"int64\")\n df_check = df_check.sort_values([\"id\", \"x\", \"y\"]).reset_index(drop=True)\n for col in df_expect:\n # Order of columns can change after round-trip partitioning\n assert_eq(df_expect[col], df_check[col], check_index=False)\n\n # Read back with NVT and check the data\n df_check = nvt.Dataset(path, engine=\"parquet\").to_ddf().compute()\n df_check[\"name\"] = df_check[\"name\"].astype(\"object\")\n df_check[\"timestamp\"] = df_check[\"timestamp\"].astype(\"int64\")\n df_check = 
df_check.sort_values([\"id\", \"x\", \"y\"]).reset_index(drop=True)\n for col in df_expect:\n # Order of columns can change after round-trip partitioning\n assert_eq(df_expect[col], df_check[col], check_index=False)\n\n\[email protected](\"cpu\", [True, False])\[email protected](\"partition_on\", [None, [\"name\", \"id\"], [\"name\"]])\[email protected](\"keys\", [[\"name\"], [\"id\"], [\"name\", \"id\"]])\[email protected](\"npartitions\", [None, 2])\ndef test_dataset_shuffle_on_keys(tmpdir, cpu, partition_on, keys, npartitions):\n\n # Initial timeseries dataset\n size = 60\n df1 = pd.DataFrame(\n {\n \"name\": np.random.choice([\"Dave\", \"Zelda\"], size=size),\n \"id\": np.random.choice([0, 1], size=size),\n \"x\": np.random.uniform(low=0.0, high=10.0, size=size),\n \"y\": np.random.uniform(low=0.0, high=10.0, size=size),\n }\n )\n ddf1 = dd.from_pandas(df1, npartitions=3)\n\n # Write the dataset to disk\n path = str(tmpdir)\n ddf1.to_parquet(str(tmpdir), partition_on=partition_on)\n\n # Construct NVT Dataset\n ds = nvt.Dataset(path, engine=\"parquet\")\n\n # Shuffle the dataset by `keys`\n ds2 = ds.shuffle_by_keys(keys, npartitions=npartitions)\n\n # Inspect the result\n ddf2 = ds2.to_ddf()\n if npartitions:\n assert ddf2.npartitions == npartitions\n\n # A successful shuffle will return the same unique-value\n # count for both the full dask algorithm and a partition-wise sum\n n1 = sum([len(p[keys].drop_duplicates()) for p in ddf2.partitions])\n n2 = len(ddf2[keys].drop_duplicates())\n assert n1 == n2\n\n # Check that none of the rows was changed\n df1 = df1.sort_values([\"id\", \"x\", \"y\"]).reset_index(drop=True)\n df2 = ddf2.compute().sort_values([\"id\", \"x\", \"y\"]).reset_index(drop=True)\n if partition_on:\n # Dask will convert partitioned columns to Categorical\n df2[\"name\"] = df2[\"name\"].astype(\"object\")\n df2[\"id\"] = df2[\"id\"].astype(\"int64\")\n for col in df1:\n # Order of columns can change after round-trip partitioning\n assert_eq(df1[col], df2[col], check_index=False)\n"
] | [
[
"pandas.DataFrame.from_records",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kruda/DetectAndTrack | [
"d66734498a4331cd6fde87d8269499b8577a2842"
] | [
"lib/core/mpii_eval_engine.py"
] | [
"##############################################################\n# Copyright (c) 2018-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n##############################################################\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport sys\nimport numpy as np\nimport os\nimport os.path as osp\nimport cPickle as pkl\nimport json\nfrom tqdm import tqdm\nimport scipy.io as sio\nimport shutil\nimport tempfile\nfrom functools import partial\nimport time\n\nfrom core.config import cfg\nimport utils.general as gen_utils\nfrom utils.image import get_image_path\n\nnp.random.seed(cfg.RNG_SEED)\nFORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)\nlogger = logging.getLogger(__name__)\n\ncoco_src_keypoints = [\n 'nose',\n 'left_eye',\n 'right_eye',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',\n 'left_wrist',\n 'right_wrist',\n 'left_hip',\n 'right_hip',\n 'left_knee',\n 'right_knee',\n 'left_ankle',\n 'right_ankle']\nposetrack_src_keypoints = [\n 'nose',\n 'head_bottom',\n 'head_top',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',\n 'left_wrist',\n 'right_wrist',\n 'left_hip',\n 'right_hip',\n 'left_knee',\n 'right_knee',\n 'left_ankle',\n 'right_ankle']\ndst_keypoints = [\n 'right_ankle',\n 'right_knee',\n 'right_hip',\n 'left_hip',\n 'left_knee',\n 'left_ankle',\n 'right_wrist',\n 'right_elbow',\n 'right_shoulder',\n 'left_shoulder',\n 'left_elbow',\n 'left_wrist',\n 'neck',\n 'nose',\n 'head_top']\n\n\ndef _compute_score(conf, global_conf):\n kp_conf_type = cfg.TRACKING.KP_CONF_TYPE\n if kp_conf_type == 'global':\n return global_conf\n elif kp_conf_type == 'local':\n return conf\n elif kp_conf_type == 'scaled':\n return conf * global_conf\n else:\n raise NotImplementedError('Uknown type {}'.format(kp_conf_type))\n\n\ndef coco2posetrack(preds, src_kps, dst_kps, global_score,\n kp_conf_type=cfg.TRACKING.KP_CONF_TYPE):\n data = []\n global_score = float(global_score)\n dstK = len(dst_kps)\n for k in range(dstK):\n if dst_kps[k] in src_kps:\n ind = src_kps.index(dst_kps[k])\n local_score = (preds[2, ind] + preds[2, ind]) / 2.0\n conf = _compute_score(local_score, global_score)\n if local_score >= cfg.EVAL.EVAL_MPII_KPT_THRESHOLD:\n data.append({'id': [k],\n 'x': [float(preds[0, ind])],\n 'y': [float(preds[1, ind])],\n 'score': [conf]})\n elif dst_kps[k] == 'neck':\n rsho = src_kps.index('right_shoulder')\n lsho = src_kps.index('left_shoulder')\n x_msho = (preds[0, rsho] + preds[0, lsho]) / 2.0\n y_msho = (preds[1, rsho] + preds[1, lsho]) / 2.0\n local_score = (preds[2, rsho] + preds[2, lsho]) / 2.0\n conf_msho = _compute_score(local_score, global_score)\n if local_score >= cfg.EVAL.EVAL_MPII_KPT_THRESHOLD:\n data.append({'id': [k],\n 'x': [float(x_msho)],\n 'y': [float(y_msho)],\n 'score': [conf_msho]})\n elif dst_kps[k] == 'head_top':\n rsho = src_kps.index('right_shoulder')\n lsho = src_kps.index('left_shoulder')\n x_msho = (preds[0, rsho] + preds[0, lsho]) / 2.0\n y_msho = (preds[1, rsho] + preds[1, lsho]) / 2.0\n nose = src_kps.index('nose')\n x_nose = preds[0, nose]\n y_nose = preds[1, nose]\n x_tophead = x_nose - (x_msho - x_nose)\n 
y_tophead = y_nose - (y_msho - y_nose)\n local_score = (preds[2, rsho] + preds[2, lsho]) / 2.0\n conf_htop = _compute_score(local_score, global_score)\n if local_score >= cfg.EVAL.EVAL_MPII_KPT_THRESHOLD:\n data.append({\n 'id': [k],\n 'x': [float(x_tophead)],\n 'y': [float(y_tophead)],\n 'score': [conf_htop]})\n return data\n\n\ndef _convert_data_to_annorect_struct(boxes, poses, tracks):\n \"\"\"\n Args:\n boxes (np.ndarray): Nx5 size matrix with boxes on this frame\n poses (list of np.ndarray): N length list with each element as 4x17 array\n tracks (list): N length list with track ID for each box/pose\n \"\"\"\n num_dets = boxes.shape[0]\n annorect = []\n for j in range(num_dets):\n score = boxes[j, -1]\n if score < cfg.EVAL.EVAL_MPII_DROP_DETECTION_THRESHOLD:\n continue\n point = coco2posetrack(\n poses[j], posetrack_src_keypoints, dst_keypoints, score)\n annorect.append({'annopoints': [{'point': point}],\n 'score': [float(score)],\n 'track_id': [tracks[j]]})\n if num_dets == 0:\n # MOTA requires each image to have at least one detection! So, adding\n # a dummy prediction.\n annorect.append({\n 'annopoints': [{'point': [{\n 'id': [0],\n 'x': [0],\n 'y': [0],\n 'score': [-100.0],\n }]}],\n 'score': [0],\n 'track_id': [0]})\n return annorect\n\n\ndef video2filenames(pathtodir):\n ext_types = '.mat' # .mat/.json\n output = {}\n files = [f for f in os.listdir(pathtodir) if\n osp.isfile(osp.join(pathtodir, f)) and ext_types in f]\n for fname in files:\n if ext_types == '.mat':\n out_fname = fname.replace('.mat', '.json')\n data = sio.loadmat(\n osp.join(pathtodir, fname), squeeze_me=True,\n struct_as_record=False)\n temp = data['annolist'][0].image.name\n elif ext_types == '.json':\n out_fname = fname\n with open(osp.join(pathtodir, fname), 'r') as fin:\n data = json.load(fin)\n temp = data['annolist'][0]['image'][0]['name']\n else:\n raise NotImplementedError()\n video = osp.dirname(temp)\n output[video] = out_fname\n return output\n\n\ndef _run_eval(annot_dir, output_dir, eval_tracking=False, eval_pose=True):\n \"\"\"\n Runs the evaluation, and returns the \"total mAP\" and \"total MOTA\"\n \"\"\"\n from datasets.posetrack.poseval.py import evaluate_simple\n (apAll, _, _), mota = evaluate_simple.evaluate(\n annot_dir, output_dir, eval_pose, eval_tracking,\n cfg.TRACKING.DEBUG.UPPER_BOUND_4_EVAL_UPPER_BOUND)\n return apAll[-1][0], mota[-4][0]\n\n\ndef _run_eval_single_video(vname, out_filenames, output_dir, dataset, eval_tracking):\n per_vid_tmp_dir = tempfile.mkdtemp()\n gen_utils.mkdir_p(per_vid_tmp_dir)\n # in case it previously existed and has anything in it\n gen_utils.mkdir_p(osp.join(per_vid_tmp_dir, 'gt/'))\n gen_utils.mkdir_p(osp.join(per_vid_tmp_dir, 'pred/'))\n voutname = out_filenames[osp.join('images', vname)]\n pred_path = osp.join(\n output_dir, voutname)\n gt_path = osp.join(\n dataset.annotation_directory, voutname)\n shutil.copyfile(gt_path, osp.join(per_vid_tmp_dir, 'gt', voutname))\n shutil.copyfile(pred_path, osp.join(per_vid_tmp_dir, 'pred', voutname))\n try:\n score_ap, score_mot = _run_eval(\n osp.join(per_vid_tmp_dir, 'gt/'),\n osp.join(per_vid_tmp_dir, 'pred/'),\n eval_tracking)\n except Exception as e:\n logger.error('Unable to process video {} due to {}'.format(\n vname, e))\n score_ap = np.nan\n score_mot = np.nan\n gen_utils.run_cmd('rm -rf {}'.format(per_vid_tmp_dir), print_cmd=False)\n return (vname, score_ap, score_mot)\n\n\ndef _run_posetrack_eval(roidb, det_file, dataset, output_dir):\n with open(det_file, 'rb') as fin:\n dets = pkl.load(fin)\n assert 
len(roidb) == len(dets['all_boxes'][1]), \\\n 'Mismatch {} vs {}'.format(len(roidb), len(dets['all_boxes'][1]))\n gen_utils.mkdir_p(output_dir)\n out_filenames = video2filenames(dataset.annotation_directory)\n out_data = {} # each video to all predictions\n eval_tracking = False\n if 'all_tracks' in dets:\n eval_tracking = True\n for i, entry in enumerate(roidb):\n image_name = get_image_path(entry)[len(dataset.image_directory):]\n video_name = osp.dirname(image_name)\n frame_num = int(osp.basename(image_name).split('.')[0])\n boxes = dets['all_boxes'][1][i]\n kps = dets['all_keyps'][1][i]\n if eval_tracking: # means there is a \"all_tracks\" in the dets\n tracks = dets['all_tracks'][1][i]\n else:\n tracks = [1] * len(kps)\n data_el = {\n 'image': image_name,\n 'imagenum': [frame_num],\n 'annorect': _convert_data_to_annorect_struct(boxes, kps, tracks),\n }\n if video_name in out_data:\n out_data[video_name].append(data_el)\n else:\n out_data[video_name] = [data_el]\n\n logger.info('Saving the JSON files to {}'.format(output_dir))\n # clear out the previous predictions, if any\n gen_utils.run_cmd('rm -r {}/*'.format(output_dir), print_cmd=False)\n for vname in tqdm(out_data.keys(), desc='Writing JSON files for eval'):\n vdata = out_data[vname]\n outfpath = osp.join(\n output_dir, out_filenames[osp.join('images', vname)])\n with open(outfpath, 'w') as fout:\n json.dump({'annolist': vdata}, fout)\n logger.info('Wrote all predictions in JSON to {}'.format(output_dir))\n logger.info('Running dataset level evaluation...')\n st_time = time.time()\n logger.info(_run_eval(dataset.annotation_directory, output_dir, eval_tracking))\n logger.info('...Done in {}'.format(time.time() - st_time))\n # TODO(rgirdhar): Do this better\n if cfg.EVAL.EVAL_MPII_PER_VIDEO: # run the evaluation per-video\n res = []\n logger.info('Running per-video evaluation...')\n st_time = time.time()\n pervid_outpath = osp.join(\n osp.dirname(osp.normpath(output_dir)),\n osp.basename(det_file) + '_per_video_scores.txt')\n # Earlier I used multi-processing to compute the predictions in parallel\n # but now I've updated the eval code itself to use multiprocessing so\n # can not use multiprocessing here (else it gives an error that daemon\n # processes can not spawn children). Hense setting num processes to 0.\n res = map(partial(\n _run_eval_single_video,\n out_filenames=out_filenames,\n output_dir=output_dir,\n dataset=dataset,\n eval_tracking=eval_tracking), out_data.keys())\n logger.info('...Done in {} seconds'.format(time.time() - st_time))\n res = sorted(res, key=lambda x: x[1]) # sort on score\n logger.info('Writing per-video scores to {}'.format(pervid_outpath))\n with open(pervid_outpath, 'w') as fout:\n for el in res:\n fout.write('{} {} {}\\n'.format(el[0], el[1], el[2]))\n\n\ndef run_mpii_eval(test_output_dir, roidb, dataset):\n # Set include_gt True when using the roidb to evalute directly. 
Not doing\n # that currently\n # det_file = osp.join(test_output_dir, 'detections.pkl')\n tracking_det_file = osp.join(test_output_dir, 'detections_withTracks.pkl')\n ran_once = False\n # all_det_files = [tracking_det_file, det_file]\n all_det_files = [tracking_det_file]\n for file_path in all_det_files:\n json_out_dir = osp.join(\n test_output_dir, osp.basename(file_path) + '_json/')\n if not osp.exists(file_path):\n continue\n ran_once = True\n logger.info('Evaluating {}'.format(file_path))\n _run_posetrack_eval(roidb, file_path, dataset, json_out_dir)\n if not ran_once:\n logger.warning('No detection files found from {}'.format(all_det_files))\n"
] | [
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
QuantitativeBiology/crispy | [
"c7d5592555e845de0c61077be90ebe5125cca570"
] | [
"crispy/DimensionReduction.py"
] | [
"#!/usr/bin/env python\n# Copyright (C) 2019 Emanuel Goncalves\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nfrom crispy.CrispyPlot import CrispyPlot\nfrom sklearn.decomposition import PCA, FactorAnalysis\n\n\ndef pc_labels(n):\n return [f\"PC{i}\" for i in np.arange(1, n + 1)]\n\n\ndef dim_reduction_pca(df, pca_ncomps=10):\n df_pca = PCA(n_components=pca_ncomps).fit(df.T)\n\n df_pcs = df_pca.transform(df.T)\n df_pcs = pd.DataFrame(df_pcs, index=df.T.index, columns=pc_labels(pca_ncomps))\n df_loadings = pd.DataFrame(df_pca.components_, index=pc_labels(pca_ncomps), columns=df.T.columns)\n\n df_vexp = pd.Series(df_pca.explained_variance_ratio_, index=df_pcs.columns)\n\n return df_pcs, df_vexp, df_loadings\n\n\ndef dim_reduction_fa(df, pca_ncomps=10):\n df_pca = FactorAnalysis(n_components=pca_ncomps).fit(df.T)\n\n df_pcs = df_pca.transform(df.T)\n df_pcs = pd.DataFrame(df_pcs, index=df.T.index, columns=pc_labels(pca_ncomps))\n df_loadings = pd.DataFrame(df_pca.components_, index=pc_labels(pca_ncomps), columns=df.T.columns)\n\n return df_pcs, df_loadings\n\n\ndef dim_reduction(\n df,\n pca_ncomps=50,\n tsne_ncomps=2,\n perplexity=30.0,\n early_exaggeration=12.0,\n learning_rate=200.0,\n n_iter=1000,\n):\n # PCA\n df_pca = dim_reduction_pca(df, pca_ncomps)[0]\n\n # tSNE\n df_tsne = TSNE(\n n_components=tsne_ncomps,\n perplexity=perplexity,\n early_exaggeration=early_exaggeration,\n learning_rate=learning_rate,\n n_iter=n_iter,\n ).fit_transform(df_pca)\n df_tsne = pd.DataFrame(df_tsne, index=df_pca.index, columns=pc_labels(tsne_ncomps))\n\n return df_tsne, df_pca\n\n\ndef plot_dim_reduction(data, palette=None, ctype=\"tSNE\", hue_filed=\"tissue\"):\n if hue_filed not in data.columns:\n data = data.assign(tissue=\"All\")\n\n if palette is None:\n palette = dict(All=CrispyPlot.PAL_DBGD[0])\n\n fig, ax = plt.subplots(1, 1, figsize=(4.0, 4.0), dpi=600)\n\n for t, df in data.groupby(hue_filed):\n ax.scatter(\n df[\"PC1\"],\n df[\"PC2\"],\n c=palette[t],\n marker=\"o\",\n edgecolor=\"\",\n s=5,\n label=t,\n alpha=0.8,\n )\n ax.set_xlabel(\"Dimension 1\" if ctype == \"tSNE\" else \"PC 1\")\n ax.set_ylabel(\"Dimension 2\" if ctype == \"tSNE\" else \"PC 2\")\n ax.axis(\"off\" if ctype == \"tSNE\" else \"on\")\n\n if ctype == \"pca\":\n ax.grid(True, ls=\"-\", lw=0.1, alpha=1.0, zorder=0)\n\n ax.legend(\n loc=\"center left\",\n bbox_to_anchor=(1, 0.5),\n prop={\"size\": 4},\n frameon=False,\n title=hue_filed,\n ).get_title().set_fontsize(\"5\")\n\n return ax\n"
] | [
[
"pandas.Series",
"numpy.arange",
"matplotlib.pyplot.subplots",
"sklearn.manifold.TSNE",
"sklearn.decomposition.FactorAnalysis",
"sklearn.decomposition.PCA"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
angusll/scancer_pip | [
"0c490505f5bb7bccc316676e686ca9fcecf46d40"
] | [
"kubeflow/components/evaluate/src/evaluate.py"
] | [
"import tensorflow as tf\nimport os\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nimport skimage.io\nimport skimage.transform\nimport logging\nimport argparse\nimport json\n\n\ndef IoU_Score(y_true,y_pred):\n \n y_true = tf.cast(y_true,tf.int32) # must be int32 or float32 or above, uint8 simply cannot have long length of integer, it will clip to int8 length\n y_pred = tf.cast(y_pred,tf.int32) \n \n # check size of y true mask and pred mask, pred mask might be smaller as prediction process trimmed out non square tiles\n if y_true.shape[:2] != y_pred.shape[:2]:\n y_true = y_true[:y_pred.shape[0],:y_pred.shape[1]] # crop right and bottom boundaries of y_true to match the size of y pred\n \n # Flatten \n y_true_f = tf.reshape(y_true, [-1])\n y_pred_f = tf.reshape(y_pred, [-1])\n \n intersection = tf.reduce_sum(y_true_f * y_pred_f)\n union = tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) - intersection\n\n intersection = tf.cast(intersection,tf.float32)\n union = tf.cast(union,tf.float32)\n \n iou = intersection/(union+tf.keras.backend.epsilon())\n return tf.cast(iou,tf.float32).numpy()\n\n\ndef evaluate(tile_mask_uri,train_valid_csv_uri, model_uri, threshold, output_uri, experiment_id,slide_type):\n \n def decode_normalise_images(tile_fp):\n img = tf.io.read_file(tile_fp)\n img = tf.io.decode_png(img,channels = 3)\n img = tf.image.resize(img,[model.input_shape[1],model.input_shape[2]],preserve_aspect_ratio=True)\n img /= 255\n return img\n \n local_dir = \"/tmp\"\n\n local_tile_mask_dir = f'{local_dir}/eval_scn'\n os.makedirs(local_tile_mask_dir,exist_ok=True)\n \n local_ckpt_dir = f'{local_dir}/model_checkpoint/'\n os.makedirs(local_ckpt_dir,exist_ok=True)\n \n df = pd.read_csv(train_valid_csv_uri)\n eval_scns = [Path(scn).stem for scn in df.query('train_valid == \"test\"').scn.unique()]\n \n # copy tiles and npys from bucket\n for eval_scn in eval_scns:\n copy_cmd = f'gsutil -m cp -r \"{tile_mask_uri}/{eval_scn}/\" {local_tile_mask_dir}'\n os.system(copy_cmd)\n \n model_ckpt_URI = tf.io.gfile.glob(os.path.join(output_uri,f'checkpoint/{experiment_id}/*.h5'))[0]\n\n # copy model ckpt from bucket\n tf.io.gfile.copy(model_ckpt_URI,f'{local_ckpt_dir}/{Path(model_ckpt_URI).name}',overwrite=True)\n\n model = tf.keras.models.load_model(f'{local_ckpt_dir}/{Path(model_ckpt_URI).name}',compile=False)\n\n score_df_list = []\n for scn in eval_scns:\n \n tile_fps = tf.io.gfile.glob(f\"{local_tile_mask_dir}/{scn}/*tiles/*png\")\n npy_fps = [fp.replace('tiles','npy').replace('.png','_mask.npy') for fp in tile_fps]\n tile_size = skimage.io.imread(tile_fps[0]).shape\n \n batch_size = 32\n pred_ds = tf.data.Dataset.from_tensor_slices(tile_fps).map(decode_normalise_images,num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(batch_size)\n\n y_hat = model.predict(pred_ds,verbose=2)\n\n pred_masks = np.array([skimage.transform.resize(pred,(tile_size[0],tile_size[1])) for pred in y_hat])\n\n pred_binary_masks = np.where(pred_masks > threshold, 1, 0)\n\n zipped_npy_fp_y_hat = list(zip(npy_fps,pred_binary_masks))\n\n iou_with_tumor = []\n iou_stroma = []\n for fp, pred_mask in zipped_npy_fp_y_hat:\n y_true = np.load(fp)\n\n if (y_true.sum() == 0):\n score = IoU_Score(y_true,pred_mask)\n iou_stroma.append(score)\n\n else:\n score = IoU_Score(y_true,pred_mask)\n iou_with_tumor.append(score)\n\n iou_stroma = np.array(iou_stroma)\n iou_with_tumor = np.array(iou_with_tumor)\n\n if (iou_stroma.sum() == 0):\n iou_stroma = 1 # iou stroma are likely =0 as the intersection of 
0 and 0 = 0 \n else:\n iou_stroma = iou_stroma.mean()\n\n mean_iou_dict = {'iou_stroma':iou_stroma,\n 'iou_with_tumor':iou_with_tumor.mean()}\n\n score_df = pd.DataFrame({scn:mean_iou_dict}).T\n score_df_list.append(score_df )\n\n slides_score_df = pd.concat(score_df_list)\n slides_score_df['mean_iou'] = (slides_score_df.iou_stroma+slides_score_df.iou_with_tumor)/2\n slides_score_df.loc['overall_mean'] = slides_score_df.mean()\n \n score_df_save_uri = os.path.join(output_uri,f'evaluation/{slide_type}/id_{experiment_id}/iou.csv')\n slides_score_df.to_csv(score_df_save_uri)\n \n return score_df_save_uri\n \nif __name__ == \"__main__\":\n \n logging.basicConfig(level=logging.INFO)\n \n parser = argparse.ArgumentParser(description=\"AE13 evaluation pipeline\")\n # Input Output paths\n parser.add_argument('--tile_mask_uri', type = str, help= \"GCS bucket URI of tile and masks.\")\n parser.add_argument('--train_valid_csv_uri', type = str, help= \"GCS bucket URI of train valid csv.\")\n parser.add_argument('--output_uri', type = str, help = \"Output URI of bucket\")\n parser.add_argument('--model_uri', type = str, help = \"GCS URI of model\")\n parser.add_argument('--experiment_id', type = str, help = \"Unique id of experiment\")\n parser.add_argument('--slide_type', type = str, help = \"IHC or HE slides\")\n parser.add_argument('--threshold', type=float, default = 0.5, help= \"Threshold for predicted mask\")\n\n args = parser.parse_args()\n\n score_df_save_uri = evaluate(tile_mask_uri = args.tile_mask_uri,\n train_valid_csv_uri = args.train_valid_csv_uri, \n model_uri = args.model_uri,\n threshold = args.threshold,\n experiment_id = args.experiment_id, \n slide_type = args.slide_type,\n output_uri = args.output_uri)\n \n metadata = {\n 'outputs' : [{\n 'type': 'table',\n 'storage': 'gcs',\n 'format': 'csv',\n 'header': ['Unnamed: 0','iou_stroma','iou_with_tumor'],\n 'source': score_df_save_uri\n }]\n }\n with open('/mlpipeline-ui-metadata.json', 'w') as f:\n json.dump(metadata, f)"
] | [
[
"pandas.concat",
"pandas.read_csv",
"tensorflow.io.decode_png",
"tensorflow.reduce_sum",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.io.gfile.glob",
"pandas.DataFrame",
"tensorflow.image.resize",
"tensorflow.keras.backend.epsilon",
"tensorflow.io.read_file",
"numpy.load",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
thunlp/RSN | [
"a6754faded4645cd90b64fdfe7468fbc65006c8f"
] | [
"RSN/train_RSN.py"
] | [
"import tensorflow as tf\n\nimport numpy as np\nimport json\nimport sys\nimport os\nimport argparse\n\nsys.path.append(os.path.abspath('../lib/'))\nfrom dataloader.dataloader import dataloader\nfrom model.siamodel import RSN\nfrom module.clusters import *\nfrom evaluation.evaluation import ClusterEvaluation\nfrom kit.messager import messager\n\ndef train_SN(train_data_file,val_data_file,test_data_file,wordvec_file,load_model_name=None,save_model_name='SN',\n trainset_loss_type='cross',testset_loss_type='none',testset_loss_mask_epoch=3,p_cond=0.03,p_denoise=1.0,\n max_len=120, pos_emb_dim=5,same_ratio=0.06,batch_size=100,batch_num=100000,epoch_num=1,\n val_size=10000,select_cluster='Louvain',omit_relid=None,labeled_sample_num=None):\n\n # preparing saving files\n if load_model_name is not None:\n load_path = os.path.join('model_file',load_model_name).replace('\\\\','/')\n else:\n load_path = None\n save_path = os.path.join('model_file',save_model_name).replace('\\\\','/')\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n msger = messager(save_path=save_path,types=['train_data_file','val_data_file','test_data_file','load_model_name','save_model_name',\n 'trainset_loss_type','testset_loss_type','testset_loss_mask_epoch','p_cond','p_denoise','same_ratio','labeled_sample_num'],\n json_name='train_msg.json')\n msger.record_message([train_data_file,val_data_file,test_data_file,load_model_name,save_model_name,\n trainset_loss_type,testset_loss_type,testset_loss_mask_epoch,p_cond,p_denoise,same_ratio,labeled_sample_num])\n msger.save_json()\n\n # train data loading\n print('-----Data Loading-----')\n dataloader_train = dataloader(train_data_file, wordvec_file, max_len=max_len)\n if omit_relid is not None and omit_relid>=4:\n dataloader_train.select_relation(np.arange(2,omit_relid+1,1).tolist())\n if labeled_sample_num is not None:\n dataloader_train.select_sample_num(labeled_sample_num)\n dataloader_testset = dataloader(val_data_file, wordvec_file, max_len=max_len)\n dataloader_test = dataloader(test_data_file, wordvec_file, max_len=max_len)\n word_emb_dim = dataloader_train._word_emb_dim_()\n word_vec_mat = dataloader_train._word_vec_mat_()\n print('word_emb_dim is {}'.format(word_emb_dim))\n\n # compile model\n print('-----Model Intializing-----')\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config = config)\n SN = RSN(session=sess,word_vec_mat=word_vec_mat,max_len=max_len, pos_emb_dim=pos_emb_dim,dropout=0.2)\n SN.set_ph(batch_size=batch_size)\n SN.set_train_op(trainset_loss_type=trainset_loss_type,testset_loss_type=testset_loss_type,p_cond=p_cond,p_denoise=p_denoise,p_mult=0.02)\n SN.init_model(load_path)\n\n print('-----Testing Data Preparing-----')\n\n # preparing testing samples\n val_testset_left_input, val_testset_right_input, val_testset_data_label = \\\n dataloader_testset.next_batch(val_size,same_ratio=same_ratio)\n val_trainset_left_input, val_trainset_right_input, val_trainset_data_label = \\\n dataloader_train.next_batch(val_size,same_ratio=same_ratio)\n\n # intializing parameters\n batch_num_list = [batch_num]*epoch_num\n clustering_test_time = np.arange(19999,batch_num,20000).tolist()\n msger_cluster = messager(save_path=save_path,types=['method','temp_batch_num','F1','precision','recall','msg'],\n json_name='cluster_msg.json')\n\n for epoch in range(epoch_num):\n if epoch<testset_loss_mask_epoch:\n SN.set_train_op(trainset_loss_type=trainset_loss_type,testset_loss_type='none',p_cond=p_cond,p_denoise=p_denoise,p_mult=0.02)\n 
else:\n SN.set_train_op(trainset_loss_type=trainset_loss_type,testset_loss_type=testset_loss_type,p_cond=p_cond,p_denoise=p_denoise,p_mult=0.02)\n\n # preparing message lists\n msger = messager(save_path=save_path,types=['batch_num','train_tp','train_fp','train_fn','train_tn','train_l',\n 'test_tp','test_fp','test_fn','test_tn','test_l'], json_name='SNmsg'+str(epoch)+'.json')\n\n data_to_cluster, gt = dataloader_test._data_()\n\n print('------epoch {}------'.format(epoch))\n print('max batch num to train is {}'.format(batch_num_list[epoch]))\n for i in range(batch_num_list[epoch]):\n # training\n if omit_relid is not None and omit_relid == 0:\n SN.train_unsup(dataloader_train,dataloader_testset,batch_size=batch_size, same_ratio=same_ratio)\n else:\n SN.train(dataloader_train,dataloader_testset,batch_size=batch_size, same_ratio=same_ratio)\n\n # testing and saving\n if i % 100 == 0:\n print('temp_batch_num: ', i,' total_batch_num: ', batch_num_list[epoch])\n if i % 1000 == 0:\n print(save_model_name,'epoch:',epoch)\n print('trainset:')\n val_trainset_info = SN.validation(val_trainset_left_input, val_trainset_right_input, val_trainset_data_label)\n print('testset:')\n val_testset_info = SN.validation(val_testset_left_input, val_testset_right_input, val_testset_data_label)\n msger.record_message((i,)+val_trainset_info+val_testset_info)\n msger.save_json()\n SN.save_model(save_path=save_path,global_step=i)\n print('model and messages saved.')\n if i in clustering_test_time or i==batch_num_list[epoch]-1:\n if 'Louvain' in select_cluster:\n print('-----Louvain Clustering-----')\n cluster_result, cluster_msg = Louvain_no_isolation(dataset=data_to_cluster,edge_measure=SN.pred_X)\n cluster_eval = ClusterEvaluation(gt,cluster_result).printEvaluation()\n msger_cluster.record_message(['Louvain',i,cluster_eval['F1'],cluster_eval['precision'],\n cluster_eval['recall'],cluster_msg])\n msger_cluster.save_json()\n print(cluster_eval)\n print('clustering messages saved.')\n\n if 'HAC' in select_cluster:\n print('-----HAC Clustering-----')\n cluster_result, cluster_msg = complete_HAC(dataset=data_to_cluster,HAC_dist=SN.pred_X,k=len(list(set(gt))))\n cluster_eval = ClusterEvaluation(gt,cluster_result).printEvaluation()\n msger_cluster.record_message(['HAC',i,cluster_eval['F1'],cluster_eval['precision'],\n cluster_eval['recall'],cluster_msg])\n msger_cluster.save_json()\n print(cluster_eval)\n print('clustering messages saved.')\n\n print('End: The model is:',save_model_name, trainset_loss_type, testset_loss_type,'p_cond is:',p_cond)\n\n\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--gpu\",type=str,default='0')\n parser.add_argument(\"--dataset\",type=str,default='ori')\n parser.add_argument(\"--train_data_file\",type=str,default='../data-bin/fewrel_ori/fewrel80_train.json')\n parser.add_argument(\"--val_data_file\",type=str,default='../data-bin/fewrel_ori/fewrel80_test_train.json')\n parser.add_argument(\"--test_data_file\",type=str,default='../data-bin/fewrel_ori/fewrel80_test_test.json')\n parser.add_argument(\"--wordvec_file\",type=str,default='../data-bin/wordvec/word_vec.json')\n parser.add_argument(\"--load_model_name\",type=str,default=None)\n parser.add_argument(\"--save_model_name\",type=str,default='ori/')\n parser.add_argument(\"--select_cluster\",type=int,default=1)\n parser.add_argument(\"--trainset_loss_type\",type=str,default='v_adv')\n parser.add_argument(\"--testset_loss_type\",type=str,default='v_adv')\n 
parser.add_argument(\"--testset_loss_mask_epoch\",type=int,default=0)\n parser.add_argument(\"--p_cond\",type=float,default=0.03)\n parser.add_argument(\"--p_denoise\",type=float,default=1.0)\n parser.add_argument(\"--same_ratio\",type=float,default=0.06)\n parser.add_argument(\"--batch_num\",type=int,default=10000)\n parser.add_argument(\"--epoch_num\",type=int,default=5)\n parser.add_argument(\"--val_size\",type=int,default=10000)\n parser.add_argument(\"--omit_relid\",type=int,default=None,help=\n \"None means not omit; 0 means unsupervised mode; otherwise means reserving all the relations with relid<=omit_relid from trainset\")\n parser.add_argument(\"--labeled_sample_num\",type=int,default=None)\n args = parser.parse_args()\n cluster_dict={0:[],1:['Louvain'],2:['HAC'],3:['Louvain','HAC']}\n args.select_cluster=cluster_dict[args.select_cluster]\n\n os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu\n\n if args.dataset == 'ori':\n args.train_data_file = '../data-bin/fewrel_ori/fewrel80_train.json'\n args.val_data_file = '../data-bin/fewrel_ori/fewrel80_test_train.json'\n args.test_data_file = '../data-bin/fewrel_ori/fewrel80_test_test.json'\n # elif args.dataset =='distant':\n # args.train_data_file = '../data-bin/fewrel_distant/fewrel80_distant_train.json'\n # args.val_data_file = '../data-bin/fewrel_distant/fewrel80_distant_test_omit.json'\n # args.test_data_file = '../data-bin/fewrel_distant/fewrel80_test_test.json'\n else:\n raise Exception('currently only fewrel80 is available')\n\n\n train_SN(\n train_data_file = args.train_data_file,\n val_data_file = args.val_data_file,\n test_data_file = args.test_data_file,\n wordvec_file = args.wordvec_file,\n load_model_name=args.load_model_name,\n save_model_name = args.save_model_name,\n select_cluster=args.select_cluster,\n trainset_loss_type=args.trainset_loss_type,\n testset_loss_type=args.testset_loss_type,\n testset_loss_mask_epoch=args.testset_loss_mask_epoch,\n p_cond=args.p_cond,\n p_denoise=args.p_denoise,\n same_ratio=args.same_ratio,\n batch_num=args.batch_num,\n epoch_num=args.epoch_num,\n val_size=args.val_size,\n omit_relid=args.omit_relid,\n labeled_sample_num=args.labeled_sample_num)"
] | [
[
"tensorflow.ConfigProto",
"numpy.arange",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
thanhtrunghuynh93/holisticEmbeddingsNA | [
"d1bb58e879a9fb868729ea13c198e46c9c5f45c9"
] | [
"utils/random_clone_add.py"
] | [
"from __future__ import print_function, division\nimport numpy as np\nimport random\nimport json\nimport sys\nimport os\nimport argparse\nfrom shutil import copyfile\n\nimport networkx as nx\nfrom networkx.readwrite import json_graph\nimport pdb\n\ndef add_and_remove_edges(G, p_new_connection, num_add=10):\n '''\n for each node,\n add a new connection to random other node, with prob p_new_connection,\n remove a connection, with prob p_remove_connection\n\n operates on G in-place\n '''\n new_edges = []\n rem_edges = []\n count_rm = 0\n count_add = 0\n for node in G.nodes():\n # find the other nodes this one is connected to\n # connected = [to for (fr, to) in G.edges(node)]\n connected = G.neighbors(node)\n # and find the remainder of nodes, which are candidates for new edges\n # unconnected = [n for n in G.nodes() if not n in connected]\n unconnected = [n for n in nx.non_neighbors(G, node)]\n\n # probabilistically add a random edge\n if len(unconnected) and count_add <= num_add: # only try if new edge is possible\n if random.random() < p_new_connection:\n count_add += 1\n new = random.choice(unconnected)\n G.add_edge(node, new)\n new_edges.append( (node, new) )\n # book-keeping, in case both add and remove done in same cycle\n # unconnected.remove(new)\n # connected.append(new)\n if count_add % 1000 == 0:\n print(\"\\t{0}-th new edge:\\t {1} -- {2}\".format(count_add, node, new))\n\n if count_add > num_add:\n break\n return rem_edges, new_edges\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Randomly remove edges and generate dict.\")\n parser.add_argument('--input', default=\"/Users/tnguyen/dataspace/graph/ppi/graphsage/\", help='Path to load data')\n parser.add_argument('--output', default=\"/Users/tnguyen/dataspace/graph/ppi/random_clone/\", help='Path to save data')\n parser.add_argument('--prefix', default=\"ppi\", help='Dataset prefix')\n parser.add_argument('--padd', type=float, default=0.2, help='Probability of adding new edges')\n parser.add_argument('--nadd', type=float, default=0.2, help='Number of added edges')\n parser.add_argument('--seed', type=int, default=123, help='Random seed')\n return parser.parse_args() \n\ndef main(args):\n args.input += \"/\"\n G_data = json.load(open(args.input + \"G.json\"))\n G = json_graph.node_link_graph(G_data)\n print(nx.info(G))\n\n H = G.copy()\n n = len(G.nodes())\n rem_edges, new_edges = add_and_remove_edges(H, args.padd, int(args.nadd * n))\n print(\"Remove {0} and add {1} edges\".format(len(rem_edges), len(new_edges)))\n data = json_graph.node_link_data(H)\n s = json.dumps(data, indent=4, sort_keys=True)\n print(nx.info(H))\n\n args.output += \"/add_edge,p={0},n={1}\".format(args.padd, args.nadd)\n\n if not os.path.isdir(args.output):\n os.makedirs(args.output+'/edgelist')\n os.makedirs(args.output+'/graphsage')\n os.makedirs(args.output+'/dictionaries')\n\n edgelist_dir = args.output + \"/edgelist/\" + args.prefix + \".edgelist\"\n if not os.path.isdir(args.output): os.makedirs(args.output)\n nx.write_edgelist(H, path = edgelist_dir , delimiter=\" \", data=['weight'])\n args.output += \"/graphsage/\"\n with open(args.output + \"G.json\", 'w') as f:\n f.write(s)\n f.close()\n\n copyfile(args.input + \"id2idx.json\", args.output + \"id2idx.json\")\n if os.path.exists(args.input + \"class_map.json\"):\n copyfile(args.input + \"class_map.json\", args.output + \"class_map.json\")\n if os.path.exists(args.input + \"feats.npy\"):\n copyfile(args.input + \"feats.npy\", args.output + \"feats.npy\")\n # if 
os.path.exists(args.input + \"-walks.txt\"):\n # copyfile(args.input + \"-walks.txt\", args.output + \"-walks.txt\")\n\n return\n\nif __name__ == \"__main__\":\n args = parse_args()\n print(args)\n random.seed(args.seed)\n np.random.seed(args.seed)\n main(args)"
] | [
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rushic24/dm-haiku | [
"8ee1a2125587831783ae7ae1e74baacec23ae56d"
] | [
"haiku/_src/lift_test.py"
] | [
"# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Lifting parameters in Haiku.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom haiku._src import base\nfrom haiku._src import config\nfrom haiku._src import lift\nfrom haiku._src import module\nfrom haiku._src import test_utils\nfrom haiku._src import transform\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\nIGNORE = lambda u: u.ignore_update()\nUPDATE = lambda u: u.update({})\n\n\nclass Bias(module.Module):\n\n def __call__(self, x):\n b = base.get_parameter(\"b\", (), init=jnp.ones)\n return x + b\n\n\nclass LiftTest(parameterized.TestCase):\n\n def setUp(self):\n super().setUp()\n self._prev_check_jax_usage = config.check_jax_usage(True)\n\n def tearDown(self):\n super().tearDown()\n config.check_jax_usage(self._prev_check_jax_usage)\n\n def test_lift_with_vmap(self):\n def inner_fn(x):\n assert x.ndim == 1\n return Bias()(x)\n\n def outer_fn(x):\n assert x.ndim == 2\n x = Bias()(x)\n inner = transform.without_apply_rng(transform.transform(inner_fn))\n inner_p = lift.lift(inner.init)(base.next_rng_key(), x[0])\n vmap_inner = jax.vmap(inner.apply, in_axes=(None, 0))\n return vmap_inner(inner_p, x)\n\n key = jax.random.PRNGKey(428)\n init_key, apply_key = jax.random.split(key)\n data = np.zeros((3, 2))\n\n outer = transform.transform(outer_fn)\n outer_params = outer.init(init_key, data)\n self.assertEqual(outer_params, {\n \"bias\": {\"b\": np.ones(())},\n \"lifted/bias\": {\"b\": np.ones(())},\n })\n\n out = outer.apply(outer_params, apply_key, data)\n np.testing.assert_equal(out, 2 * np.ones((3, 2)))\n\n @parameterized.parameters((lift.lift, lambda: None),\n (lift.lift_with_state, lambda: (None, None)))\n def test_inside_transform(self, lift_fn, init_fn):\n with self.assertRaisesRegex(ValueError, \"must be .* part of .*transform\"):\n lift_fn(init_fn)\n\n @test_utils.transform_and_run\n def test_empty_lift(self):\n f = transform.transform(lambda: None)\n self.assertEmpty(lift.lift(f.init)(None))\n\n @parameterized.parameters(True, False)\n @test_utils.transform_and_run\n def test_empty_lift_with_state(self, ignore_update):\n f = transform.transform_with_state(lambda: None)\n init_fn, updater = lift.lift_with_state(f.init)\n params, state = init_fn(None)\n self.assertEmpty(params)\n self.assertEmpty(state)\n if ignore_update:\n updater.ignore_update()\n else:\n updater.update({})\n\n def test_unused_updater(self):\n def f() -> lift.LiftWithStateUpdater:\n f = transform.transform_with_state(lambda: None)\n return lift.lift_with_state(f.init)[1]\n\n f = transform.transform_with_state(f)\n\n with self.assertRaisesRegex(ValueError, \"StateUpdater.*must be used\"):\n f.init(None)\n\n @parameterized.named_parameters((\"ignore then ignore\", IGNORE, IGNORE),\n (\"update then update\", UPDATE, UPDATE),\n (\"ignore then update\", IGNORE, 
UPDATE),\n (\"update then ignore\", UPDATE, IGNORE))\n @test_utils.transform_and_run\n def test_used_multiple_times(self, update_fn1, update_fn2):\n f = transform.transform_with_state(lambda: None)\n updater = lift.lift_with_state(f.init)[1]\n update_fn1(updater)\n with self.assertRaisesRegex(ValueError, \"must only be used once\"):\n update_fn2(updater)\n\n @test_utils.transform_and_run(run_apply=False)\n def test_lift_raises_with_state(self):\n f = transform.transform_with_state(\n lambda: base.get_state(\"w\", [], init=jnp.zeros))\n lifted = lift.lift(f.init) # pytype: disable=wrong-arg-types\n with self.assertRaisesRegex(ValueError, \"use.*lift_with_state\"):\n lifted(None)\n\n def test_lift_with_state(self):\n def inner():\n w = base.get_state(\"w\", [], init=jnp.zeros)\n w += 1\n base.set_state(\"w\", w)\n return w\n\n inner = transform.transform_with_state(inner)\n\n def outer():\n lifted, updater = lift.lift_with_state(inner.init)\n params, state = lifted(None)\n self.assertEmpty(params)\n out, state = inner.apply(params, state, None)\n updater.update(state)\n return out, state\n\n outer = transform.transform_with_state(outer)\n params, state = outer.init(None)\n self.assertEmpty(params)\n self.assertEqual(jax.tree_map(int, state), {\"lifted/~\": {\"w\": 0}})\n\n for expected in (1, 2, 3):\n (w, inner_state), state = outer.apply(params, state, None)\n self.assertEqual(jax.tree_map(int, inner_state), {\"~\": {\"w\": expected}})\n self.assertEqual(w, expected)\n self.assertEmpty(params)\n self.assertEqual(state, {\"lifted/~\": {\"w\": expected}})\n\n @parameterized.parameters(IGNORE, UPDATE)\n def test_updater_used_in_different_inner_transform(self, updater_fn):\n def f():\n g = transform.transform_with_state(lambda: None)\n _, updater = lift.lift_with_state(g.init)\n transform.transform_with_state(lambda: updater_fn(updater)).init(None)\n\n f = transform.transform_with_state(f)\n\n with self.assertRaisesRegex(\n ValueError, \"must be used within the same call to init/apply\"):\n f.init(None)\n\n def test_transparent_lift(self):\n class OuterModule(module.Module):\n\n def __call__(self, x):\n x += base.get_parameter(\"a\", shape=[10, 10], init=jnp.zeros)\n\n def inner_fn(x):\n return InnerModule(name=\"inner\")(x)\n\n inner_transformed = transform.transform(inner_fn)\n inner_params = lift.transparent_lift(inner_transformed.init)(\n base.next_rng_key(), x)\n x = inner_transformed.apply(inner_params, base.next_rng_key(), x)\n return x\n\n class InnerModule(module.Module):\n\n def __call__(self, x):\n x += base.get_parameter(\"b\", shape=[10, 10], init=jnp.zeros)\n return x\n\n @transform.transform\n def fn(x):\n return OuterModule(name=\"outer\")(x)\n\n correct_weight_names = [\"outer/inner\", \"outer\"]\n rng = jax.random.PRNGKey(0)\n\n params = fn.init(rng, jnp.ones([10, 10]))\n\n self.assertCountEqual(list(params.keys()), correct_weight_names)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] | [
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mujizi/e2e_coref | [
"8f27b76067c01093f62f9de4c9acb5577ebd0ed1"
] | [
"analysis_score.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\nimport coref_model_sentence_span as cm\nimport util\n\nif __name__ == \"__main__\":\n os.environ[\"GPU\"] = \"0\"\n config = util.initialize_from_env()\n model = cm.CorefModel(config)\n with tf.Session() as session:\n model.restore(session)\n model.analysis_top_score(session, official_stdout=True)"
] | [
[
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
GiggleLiu/scipy | [
"b6eb8ada4e574f334d4d108cb877e91d2cc0ebcc"
] | [
"scipy/optimize/_lsq/least_squares.py"
] | [
"\"\"\"Generic interface for least-square minimization.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom warnings import warn\n\nimport numpy as np\nfrom numpy.linalg import norm\n\nfrom scipy.sparse import issparse, csr_matrix\nfrom scipy.sparse.linalg import LinearOperator\nfrom scipy.optimize import _minpack, OptimizeResult\nfrom scipy.optimize._numdiff import approx_derivative, group_columns\nfrom scipy._lib.six import string_types\n\nfrom .trf import trf\nfrom .dogbox import dogbox\nfrom .common import EPS, in_bounds, make_strictly_feasible\n\n\nTERMINATION_MESSAGES = {\n -1: \"Improper input parameters status returned from `leastsq`\",\n 0: \"The maximum number of function evaluations is exceeded.\",\n 1: \"`gtol` termination condition is satisfied.\",\n 2: \"`ftol` termination condition is satisfied.\",\n 3: \"`xtol` termination condition is satisfied.\",\n 4: \"Both `ftol` and `xtol` termination conditions are satisfied.\"\n}\n\n\nFROM_MINPACK_TO_COMMON = {\n 0: -1, # Improper input parameters from MINPACK.\n 1: 2,\n 2: 3,\n 3: 4,\n 4: 1,\n 5: 0\n # There are 6, 7, 8 for too small tolerance parameters,\n # but we guard against it by checking ftol, xtol, gtol beforehand.\n}\n\n\ndef call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):\n n = x0.size\n\n if diff_step is None:\n epsfcn = EPS\n else:\n epsfcn = diff_step**2\n\n # Compute MINPACK's `diag`, which is inverse of our `x_scale` and\n # ``x_scale='jac'`` corresponds to ``diag=None``.\n if isinstance(x_scale, string_types) and x_scale == 'jac':\n diag = None\n else:\n diag = 1 / x_scale\n\n full_output = True\n col_deriv = False\n factor = 100.0\n\n if jac is None:\n if max_nfev is None:\n # n squared to account for Jacobian evaluations.\n max_nfev = 100 * n * (n + 1)\n x, info, status = _minpack._lmdif(\n fun, x0, (), full_output, ftol, xtol, gtol,\n max_nfev, epsfcn, factor, diag)\n else:\n if max_nfev is None:\n max_nfev = 100 * n\n x, info, status = _minpack._lmder(\n fun, jac, x0, (), full_output, col_deriv,\n ftol, xtol, gtol, max_nfev, factor, diag)\n\n f = info['fvec']\n\n if callable(jac):\n J = jac(x)\n else:\n J = np.atleast_2d(approx_derivative(fun, x))\n\n cost = 0.5 * np.dot(f, f)\n g = J.T.dot(f)\n g_norm = norm(g, ord=np.inf)\n\n nfev = info['nfev']\n njev = info.get('njev', None)\n\n status = FROM_MINPACK_TO_COMMON[status]\n active_mask = np.zeros_like(x0, dtype=int)\n\n return OptimizeResult(\n x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,\n active_mask=active_mask, nfev=nfev, njev=njev, status=status)\n\n\ndef prepare_bounds(bounds, n):\n lb, ub = [np.asarray(b, dtype=float) for b in bounds]\n if lb.ndim == 0:\n lb = np.resize(lb, n)\n\n if ub.ndim == 0:\n ub = np.resize(ub, n)\n\n return lb, ub\n\n\ndef check_tolerance(ftol, xtol, gtol):\n message = \"{} is too low, setting to machine epsilon {}.\"\n if ftol < EPS:\n warn(message.format(\"`ftol`\", EPS))\n ftol = EPS\n if xtol < EPS:\n warn(message.format(\"`xtol`\", EPS))\n xtol = EPS\n if gtol < EPS:\n warn(message.format(\"`gtol`\", EPS))\n gtol = EPS\n\n return ftol, xtol, gtol\n\n\ndef check_x_scale(x_scale, x0):\n if isinstance(x_scale, string_types) and x_scale == 'jac':\n return x_scale\n\n try:\n x_scale = np.asarray(x_scale, dtype=float)\n valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)\n except (ValueError, TypeError):\n valid = False\n\n if not valid:\n raise ValueError(\"`x_scale` must be 'jac' or array_like with \"\n \"positive numbers.\")\n\n if x_scale.ndim == 0:\n 
x_scale = np.resize(x_scale, x0.shape)\n\n if x_scale.shape != x0.shape:\n raise ValueError(\"Inconsistent shapes between `x_scale` and `x0`.\")\n\n return x_scale\n\n\ndef check_jac_sparsity(jac_sparsity, m, n):\n if jac_sparsity is None:\n return None\n\n if not issparse(jac_sparsity):\n jac_sparsity = np.atleast_2d(jac_sparsity)\n\n if jac_sparsity.shape != (m, n):\n raise ValueError(\"`jac_sparsity` has wrong shape.\")\n\n return jac_sparsity, group_columns(jac_sparsity)\n\n\n# Loss functions.\n\n\ndef huber(z, rho, cost_only):\n mask = z <= 1\n rho[0, mask] = z[mask]\n rho[0, ~mask] = 2 * z[~mask]**0.5 - 1\n if cost_only:\n return\n rho[1, mask] = 1\n rho[1, ~mask] = z[~mask]**-0.5\n rho[2, mask] = 0\n rho[2, ~mask] = -0.5 * z[~mask]**-1.5\n\n\ndef soft_l1(z, rho, cost_only):\n t = 1 + z\n rho[0] = 2 * (t**0.5 - 1)\n if cost_only:\n return\n rho[1] = t**-0.5\n rho[2] = -0.5 * t**-1.5\n\n\ndef cauchy(z, rho, cost_only):\n rho[0] = np.log1p(z)\n if cost_only:\n return\n t = 1 + z\n rho[1] = 1 / t\n rho[2] = -1 / t**2\n\n\ndef arctan(z, rho, cost_only):\n rho[0] = np.arctan(z)\n if cost_only:\n return\n t = 1 + z**2\n rho[1] = 1 / t\n rho[2] = -2 * z / t**2\n\n\nIMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,\n cauchy=cauchy, arctan=arctan)\n\n\ndef construct_loss_function(m, loss, f_scale):\n if loss == 'linear':\n return None\n\n if not callable(loss):\n loss = IMPLEMENTED_LOSSES[loss]\n rho = np.empty((3, m))\n\n def loss_function(f, cost_only=False):\n z = (f / f_scale) ** 2\n loss(z, rho, cost_only=cost_only)\n if cost_only:\n return 0.5 * f_scale ** 2 * np.sum(rho[0])\n rho[0] *= f_scale ** 2\n rho[2] /= f_scale ** 2\n return rho\n else:\n def loss_function(f, cost_only=False):\n z = (f / f_scale) ** 2\n rho = loss(z)\n if cost_only:\n return 0.5 * f_scale ** 2 * np.sum(rho[0])\n rho[0] *= f_scale ** 2\n rho[2] /= f_scale ** 2\n return rho\n\n return loss_function\n\n\ndef least_squares(\n fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',\n ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',\n f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},\n jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):\n \"\"\"Solve a nonlinear least-squares problem with bounds on the variables.\n\n Given the residuals f(x) (an m-dimensional function of n real variables) and\n the loss function rho(s) (a scalar function), `least_squares` finds a\n local minimum of the cost function F(x)::\n\n minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)\n subject to lb <= x <= ub\n\n The purpose of the loss function rho(s) is to reduce the influence of\n outliers on the solution.\n\n Parameters\n ----------\n fun : callable\n Function which computes the vector of residuals, with the signature\n ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with\n respect to its first argument. The argument ``x`` passed to this\n function is an ndarray of shape (n,) (never a scalar, even for n=1).\n It must return a 1-d array_like of shape (m,) or a scalar.\n x0 : array_like with shape (n,) or float\n Initial guess on independent variables. If float, it will be treated\n as a 1-d array with one element.\n jac : {'2-point', '3-point', 'cs', callable}, optional\n Method of computing the Jacobian matrix (an m-by-n matrix, where\n element (i, j) is the partial derivative of f[i] with respect to\n x[j]). The keywords select a finite difference scheme for numerical\n estimation. 
The scheme '3-point' is more accurate, but requires\n twice as much operations compared to '2-point' (default). The\n scheme 'cs' uses complex steps, and while potentially the most\n accurate, it is applicable only when `fun` correctly handles\n complex inputs and can be analytically continued to the complex\n plane. Method 'lm' always uses the '2-point' scheme. If callable,\n it is used as ``jac(x, *args, **kwargs)`` and should return a\n good approximation (or the exact value) for the Jacobian as an\n array_like (np.atleast_2d is applied), a sparse matrix or a\n `scipy.sparse.linalg.LinearOperator`.\n bounds : 2-tuple of array_like, optional\n Lower and upper bounds on independent variables. Defaults to no bounds.\n Each array must match the size of `x0` or be a scalar, in the latter\n case a bound will be the same for all variables. Use ``np.inf`` with\n an appropriate sign to disable bounds on all or some variables.\n method : {'trf', 'dogbox', 'lm'}, optional\n Algorithm to perform minimization.\n\n * 'trf' : Trust Region Reflective algorithm, particularly suitable\n for large sparse problems with bounds. Generally robust method.\n * 'dogbox' : dogleg algorithm with rectangular trust regions,\n typical use case is small problems with bounds. Not recommended\n for problems with rank-deficient Jacobian.\n * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.\n Doesn't handle bounds and sparse Jacobians. Usually the most\n efficient method for small unconstrained problems.\n\n Default is 'trf'. See Notes for more information.\n ftol : float, optional\n Tolerance for termination by the change of the cost function. Default\n is 1e-8. The optimization process is stopped when ``dF < ftol * F``,\n and there was an adequate agreement between a local quadratic model and\n the true model in the last step.\n xtol : float, optional\n Tolerance for termination by the change of the independent variables.\n Default is 1e-8. The exact condition depends on the `method` used:\n\n * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``\n * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is\n a trust-region radius and ``xs`` is the value of ``x``\n scaled according to `x_scale` parameter (see below).\n\n gtol : float, optional\n Tolerance for termination by the norm of the gradient. Default is 1e-8.\n The exact condition depends on a `method` used:\n\n * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where\n ``g_scaled`` is the value of the gradient scaled to account for\n the presence of the bounds [STIR]_.\n * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where\n ``g_free`` is the gradient with respect to the variables which\n are not in the optimal state on the boundary.\n * For 'lm' : the maximum absolute value of the cosine of angles\n between columns of the Jacobian and the residual vector is less\n than `gtol`, or the residual vector is zero.\n\n x_scale : array_like or 'jac', optional\n Characteristic scale of each variable. Setting `x_scale` is equivalent\n to reformulating the problem in scaled variables ``xs = x / x_scale``.\n An alternative view is that the size of a trust region along j-th\n dimension is proportional to ``x_scale[j]``. Improved convergence may\n be achieved by setting `x_scale` such that a step of a given size\n along any of the scaled variables has a similar effect on the cost\n function. 
If set to 'jac', the scale is iteratively updated using the\n inverse norms of the columns of the Jacobian matrix (as described in\n [JJMore]_).\n loss : str or callable, optional\n Determines the loss function. The following keyword values are allowed:\n\n * 'linear' (default) : ``rho(z) = z``. Gives a standard\n least-squares problem.\n * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth\n approximation of l1 (absolute value) loss. Usually a good\n choice for robust least squares.\n * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works\n similarly to 'soft_l1'.\n * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers\n influence, but may cause difficulties in optimization process.\n * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on\n a single residual, has properties similar to 'cauchy'.\n\n If callable, it must take a 1-d ndarray ``z=f**2`` and return an\n array_like with shape (3, m) where row 0 contains function values,\n row 1 contains first derivatives and row 2 contains second\n derivatives. Method 'lm' supports only 'linear' loss.\n f_scale : float, optional\n Value of soft margin between inlier and outlier residuals, default\n is 1.0. The loss function is evaluated as follows\n ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,\n and ``rho`` is determined by `loss` parameter. This parameter has\n no effect with ``loss='linear'``, but for other `loss` values it is\n of crucial importance.\n max_nfev : None or int, optional\n Maximum number of function evaluations before the termination.\n If None (default), the value is chosen automatically:\n\n * For 'trf' and 'dogbox' : 100 * n.\n * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)\n otherwise (because 'lm' counts function calls in Jacobian\n estimation).\n\n diff_step : None or array_like, optional\n Determines the relative step size for the finite difference\n approximation of the Jacobian. The actual step is computed as\n ``x * diff_step``. If None (default), then `diff_step` is taken to be\n a conventional \"optimal\" power of machine epsilon for the finite\n difference scheme used [NR]_.\n tr_solver : {None, 'exact', 'lsmr'}, optional\n Method for solving trust-region subproblems, relevant only for 'trf'\n and 'dogbox' methods.\n\n * 'exact' is suitable for not very large problems with dense\n Jacobian matrices. The computational complexity per iteration is\n comparable to a singular value decomposition of the Jacobian\n matrix.\n * 'lsmr' is suitable for problems with sparse and large Jacobian\n matrices. It uses the iterative procedure\n `scipy.sparse.linalg.lsmr` for finding a solution of a linear\n least-squares problem and only requires matrix-vector product\n evaluations.\n\n If None (default) the solver is chosen based on the type of Jacobian\n returned on the first iteration.\n tr_options : dict, optional\n Keyword options passed to trust-region solver.\n\n * ``tr_solver='exact'``: `tr_options` are ignored.\n * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.\n Additionally ``method='trf'`` supports 'regularize' option\n (bool, default is True) which adds a regularization term to the\n normal equation, which improves convergence if the Jacobian is\n rank-deficient [Byrd]_ (eq. 3.4).\n\n jac_sparsity : {None, array_like, sparse matrix}, optional\n Defines the sparsity structure of the Jacobian matrix for finite\n difference estimation, its shape must be (m, n). 
If the Jacobian has\n only few non-zero elements in *each* row, providing the sparsity\n structure will greatly speed up the computations [Curtis]_. A zero\n entry means that a corresponding element in the Jacobian is identically\n zero. If provided, forces the use of 'lsmr' trust-region solver.\n If None (default) then dense differencing will be used. Has no effect\n for 'lm' method.\n verbose : {0, 1, 2}, optional\n Level of algorithm's verbosity:\n\n * 0 (default) : work silently.\n * 1 : display a termination report.\n * 2 : display progress during iterations (not supported by 'lm'\n method).\n\n args, kwargs : tuple and dict, optional\n Additional arguments passed to `fun` and `jac`. Both empty by default.\n The calling signature is ``fun(x, *args, **kwargs)`` and the same for\n `jac`.\n\n Returns\n -------\n `OptimizeResult` with the following fields defined:\n x : ndarray, shape (n,)\n Solution found.\n cost : float\n Value of the cost function at the solution.\n fun : ndarray, shape (m,)\n Vector of residuals at the solution.\n jac : ndarray, sparse matrix or LinearOperator, shape (m, n)\n Modified Jacobian matrix at the solution, in the sense that J^T J\n is a Gauss-Newton approximation of the Hessian of the cost function.\n The type is the same as the one used by the algorithm.\n grad : ndarray, shape (m,)\n Gradient of the cost function at the solution.\n optimality : float\n First-order optimality measure. In unconstrained problems, it is always\n the uniform norm of the gradient. In constrained problems, it is the\n quantity which was compared with `gtol` during iterations.\n active_mask : ndarray of int, shape (n,)\n Each component shows whether a corresponding constraint is active\n (that is, whether a variable is at the bound):\n\n * 0 : a constraint is not active.\n * -1 : a lower bound is active.\n * 1 : an upper bound is active.\n\n Might be somewhat arbitrary for 'trf' method as it generates a sequence\n of strictly feasible iterates and `active_mask` is determined within a\n tolerance threshold.\n nfev : int\n Number of function evaluations done. Methods 'trf' and 'dogbox' do not\n count function calls for numerical Jacobian approximation, as opposed\n to 'lm' method.\n njev : int or None\n Number of Jacobian evaluations done. If numerical Jacobian\n approximation is used in 'lm' method, it is set to None.\n status : int\n The reason for algorithm termination:\n\n * -1 : improper input parameters status returned from MINPACK.\n * 0 : the maximum number of function evaluations is exceeded.\n * 1 : `gtol` termination condition is satisfied.\n * 2 : `ftol` termination condition is satisfied.\n * 3 : `xtol` termination condition is satisfied.\n * 4 : Both `ftol` and `xtol` termination conditions are satisfied.\n\n message : str\n Verbal description of the termination reason.\n success : bool\n True if one of the convergence criteria is satisfied (`status` > 0).\n\n See Also\n --------\n leastsq : A legacy wrapper for the MINPACK implementation of the\n Levenberg-Marquadt algorithm.\n curve_fit : Least-squares minimization applied to a curve fitting problem.\n\n Notes\n -----\n Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares\n algorithms implemented in MINPACK (lmder, lmdif). It runs the\n Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.\n The implementation is based on paper [JJMore]_, it is very robust and\n efficient with a lot of smart tricks. It should be your first choice\n for unconstrained problems. 
Note that it doesn't support bounds. Also\n it doesn't work when m < n.\n\n Method 'trf' (Trust Region Reflective) is motivated by the process of\n solving a system of equations, which constitute the first-order optimality\n condition for a bound-constrained minimization problem as formulated in\n [STIR]_. The algorithm iteratively solves trust-region subproblems\n augmented by a special diagonal quadratic term and with trust-region shape\n determined by the distance from the bounds and the direction of the\n gradient. This enhancements help to avoid making steps directly into bounds\n and efficiently explore the whole space of variables. To further improve\n convergence, the algorithm considers search directions reflected from the\n bounds. To obey theoretical requirements, the algorithm keeps iterates\n strictly feasible. With dense Jacobians trust-region subproblems are\n solved by an exact method very similar to the one described in [JJMore]_\n (and implemented in MINPACK). The difference from the MINPACK\n implementation is that a singular value decomposition of a Jacobian\n matrix is done once per iteration, instead of a QR decomposition and series\n of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace\n approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.\n The subspace is spanned by a scaled gradient and an approximate\n Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no\n constraints are imposed the algorithm is very similar to MINPACK and has\n generally comparable performance. The algorithm works quite robust in\n unbounded and bounded problems, thus it is chosen as a default algorithm.\n\n Method 'dogbox' operates in a trust-region framework, but considers\n rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.\n The intersection of a current trust region and initial bounds is again\n rectangular, so on each iteration a quadratic minimization problem subject\n to bound constraints is solved approximately by Powell's dogleg method\n [NumOpt]_. The required Gauss-Newton step can be computed exactly for\n dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large\n sparse Jacobians. The algorithm is likely to exhibit slow convergence when\n the rank of Jacobian is less than the number of variables. The algorithm\n often outperforms 'trf' in bounded problems with a small number of\n variables.\n\n Robust loss functions are implemented as described in [BA]_. The idea\n is to modify a residual vector and a Jacobian matrix on each iteration\n such that computed gradient and Gauss-Newton Hessian approximation match\n the true gradient and Hessian approximation of the cost function. Then\n the algorithm proceeds in a normal way, i.e. robust loss functions are\n implemented as a simple wrapper over standard least-squares algorithms.\n\n .. versionadded:: 0.17.0\n\n References\n ----------\n .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, \"A Subspace, Interior,\n and Conjugate Gradient Method for Large-Scale Bound-Constrained\n Minimization Problems,\" SIAM Journal on Scientific Computing,\n Vol. 21, Number 1, pp 1-23, 1999.\n .. [NR] William H. Press et. al., \"Numerical Recipes. The Art of Scientific\n Computing. 3rd edition\", Sec. 5.7.\n .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, \"Approximate\n solution of the trust region problem by minimization over\n two-dimensional subspaces\", Math. Programming, 40, pp. 247-263,\n 1988.\n .. [Curtis] A. Curtis, M. J. D. Powell, and J. 
Reid, \"On the estimation of\n sparse Jacobian matrices\", Journal of the Institute of\n Mathematics and its Applications, 13, pp. 117-120, 1974.\n .. [JJMore] J. J. More, \"The Levenberg-Marquardt Algorithm: Implementation\n and Theory,\" Numerical Analysis, ed. G. A. Watson, Lecture\n Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.\n .. [Voglis] C. Voglis and I. E. Lagaris, \"A Rectangular Trust Region\n Dogleg Approach for Unconstrained and Bound Constrained\n Nonlinear Optimization\", WSEAS International Conference on\n Applied Mathematics, Corfu, Greece, 2004.\n .. [NumOpt] J. Nocedal and S. J. Wright, \"Numerical optimization,\n 2nd edition\", Chapter 4.\n .. [BA] B. Triggs et. al., \"Bundle Adjustment - A Modern Synthesis\",\n Proceedings of the International Workshop on Vision Algorithms:\n Theory and Practice, pp. 298-372, 1999.\n\n Examples\n --------\n In this example we find a minimum of the Rosenbrock function without bounds\n on independed variables.\n\n >>> def fun_rosenbrock(x):\n ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])\n\n Notice that we only provide the vector of the residuals. The algorithm\n constructs the cost function as a sum of squares of the residuals, which\n gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.\n\n >>> from scipy.optimize import least_squares\n >>> x0_rosenbrock = np.array([2, 2])\n >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)\n >>> res_1.x\n array([ 1., 1.])\n >>> res_1.cost\n 9.8669242910846867e-30\n >>> res_1.optimality\n 8.8928864934219529e-14\n\n We now constrain the variables, in such a way that the previous solution\n becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and\n ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter\n to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.\n\n We also provide the analytic Jacobian:\n\n >>> def jac_rosenbrock(x):\n ... return np.array([\n ... [-20 * x[0], 10],\n ... [-1, 0]])\n\n Putting this all together, we see that the new solution lies on the bound:\n\n >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,\n ... bounds=([-np.inf, 1.5], np.inf))\n >>> res_2.x\n array([ 1.22437075, 1.5 ])\n >>> res_2.cost\n 0.025213093946805685\n >>> res_2.optimality\n 1.5885401433157753e-07\n\n Now we solve a system of equations (i.e., the cost function should be zero\n at a minimum) for a Broyden tridiagonal vector-valued function of 100000\n variables:\n\n >>> def fun_broyden(x):\n ... f = (3 - x) * x + 1\n ... f[1:] -= x[:-1]\n ... f[:-1] -= 2 * x[1:]\n ... return f\n\n The corresponding Jacobian matrix is sparse. We tell the algorithm to\n estimate it by finite differences and provide the sparsity structure of\n Jacobian to significantly speed up this process.\n\n >>> from scipy.sparse import lil_matrix\n >>> def sparsity_broyden(n):\n ... sparsity = lil_matrix((n, n), dtype=int)\n ... i = np.arange(n)\n ... sparsity[i, i] = 1\n ... i = np.arange(1, n)\n ... sparsity[i, i - 1] = 1\n ... i = np.arange(n - 1)\n ... sparsity[i, i + 1] = 1\n ... return sparsity\n ...\n >>> n = 100000\n >>> x0_broyden = -np.ones(n)\n ...\n >>> res_3 = least_squares(fun_broyden, x0_broyden,\n ... jac_sparsity=sparsity_broyden(n))\n >>> res_3.cost\n 4.5687069299604613e-23\n >>> res_3.optimality\n 1.1650454296851518e-11\n\n Let's also solve a curve fitting problem using robust loss function to\n take care of outliers in the data. 
Define the model function as\n ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an\n observation and a, b, c are parameters to estimate.\n\n First, define the function which generates the data with noise and\n outliers, define the model parameters, and generate data:\n\n >>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):\n ... y = a + b * np.exp(t * c)\n ...\n ... rnd = np.random.RandomState(random_state)\n ... error = noise * rnd.randn(t.size)\n ... outliers = rnd.randint(0, t.size, n_outliers)\n ... error[outliers] *= 10\n ...\n ... return y + error\n ...\n >>> a = 0.5\n >>> b = 2.0\n >>> c = -1\n >>> t_min = 0\n >>> t_max = 10\n >>> n_points = 15\n ...\n >>> t_train = np.linspace(t_min, t_max, n_points)\n >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)\n\n Define function for computing residuals and initial estimate of\n parameters.\n\n >>> def fun(x, t, y):\n ... return x[0] + x[1] * np.exp(x[2] * t) - y\n ...\n >>> x0 = np.array([1.0, 1.0, 0.0])\n\n Compute a standard least-squares solution:\n\n >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))\n\n Now compute two solutions with two different robust loss functions. The\n parameter `f_scale` is set to 0.1, meaning that inlier residuals should\n not significantly exceed 0.1 (the noise level used).\n\n >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,\n ... args=(t_train, y_train))\n >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,\n ... args=(t_train, y_train))\n\n And finally plot all the curves. We see that by selecting an appropriate\n `loss` we can get estimates close to optimal even in the presence of\n strong outliers. But keep in mind that generally it is recommended to try\n 'soft_l1' or 'huber' losses first (if at all necessary) as the other two\n options may cause difficulties in optimization process.\n\n >>> t_test = np.linspace(t_min, t_max, n_points * 10)\n >>> y_true = gen_data(t_test, a, b, c)\n >>> y_lsq = gen_data(t_test, *res_lsq.x)\n >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)\n >>> y_log = gen_data(t_test, *res_log.x)\n ...\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(t_train, y_train, 'o')\n >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')\n >>> plt.plot(t_test, y_lsq, label='linear loss')\n >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')\n >>> plt.plot(t_test, y_log, label='cauchy loss')\n >>> plt.xlabel(\"t\")\n >>> plt.ylabel(\"y\")\n >>> plt.legend()\n >>> plt.show()\n \"\"\"\n if method not in ['trf', 'dogbox', 'lm']:\n raise ValueError(\"`method` must be 'trf', 'dogbox' or 'lm'.\")\n\n if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):\n raise ValueError(\"`jac` must be '2-point', '3-point', 'cs' or \"\n \"callable.\")\n\n if tr_solver not in [None, 'exact', 'lsmr']:\n raise ValueError(\"`tr_solver` must be None, 'exact' or 'lsmr'.\")\n\n if loss not in IMPLEMENTED_LOSSES and not callable(loss):\n raise ValueError(\"`loss` must be one of {0} or a callable.\"\n .format(IMPLEMENTED_LOSSES.keys()))\n\n if method == 'lm' and loss != 'linear':\n raise ValueError(\"method='lm' supports only 'linear' loss function.\")\n\n if verbose not in [0, 1, 2]:\n raise ValueError(\"`verbose` must be in [0, 1, 2].\")\n\n if len(bounds) != 2:\n raise ValueError(\"`bounds` must contain 2 elements.\")\n\n if max_nfev is not None and max_nfev <= 0:\n raise ValueError(\"`max_nfev` must be None or positive integer.\")\n\n if np.iscomplexobj(x0):\n raise ValueError(\"`x0` must be 
real.\")\n\n x0 = np.atleast_1d(x0).astype(float)\n\n if x0.ndim > 1:\n raise ValueError(\"`x0` must have at most 1 dimension.\")\n\n lb, ub = prepare_bounds(bounds, x0.shape[0])\n\n if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):\n raise ValueError(\"Method 'lm' doesn't support bounds.\")\n\n if lb.shape != x0.shape or ub.shape != x0.shape:\n raise ValueError(\"Inconsistent shapes between bounds and `x0`.\")\n\n if np.any(lb >= ub):\n raise ValueError(\"Each lower bound must be strictly less than each \"\n \"upper bound.\")\n\n if not in_bounds(x0, lb, ub):\n raise ValueError(\"`x0` is infeasible.\")\n\n x_scale = check_x_scale(x_scale, x0)\n\n ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)\n\n def fun_wrapped(x):\n return np.atleast_1d(fun(x, *args, **kwargs))\n\n if method == 'trf':\n x0 = make_strictly_feasible(x0, lb, ub)\n\n f0 = fun_wrapped(x0)\n\n if f0.ndim != 1:\n raise ValueError(\"`fun` must return at most 1-d array_like.\")\n\n if not np.all(np.isfinite(f0)):\n raise ValueError(\"Residuals are not finite in the initial point.\")\n\n n = x0.size\n m = f0.size\n\n if method == 'lm' and m < n:\n raise ValueError(\"Method 'lm' doesn't work when the number of \"\n \"residuals is less than the number of variables.\")\n\n loss_function = construct_loss_function(m, loss, f_scale)\n if callable(loss):\n rho = loss_function(f0)\n if rho.shape != (3, m):\n raise ValueError(\"The return value of `loss` callable has wrong \"\n \"shape.\")\n initial_cost = 0.5 * np.sum(rho[0])\n elif loss_function is not None:\n initial_cost = loss_function(f0, cost_only=True)\n else:\n initial_cost = 0.5 * np.dot(f0, f0)\n\n if callable(jac):\n J0 = jac(x0, *args, **kwargs)\n\n if issparse(J0):\n J0 = csr_matrix(J0)\n\n def jac_wrapped(x, _=None):\n return csr_matrix(jac(x, *args, **kwargs))\n\n elif isinstance(J0, LinearOperator):\n def jac_wrapped(x, _=None):\n return jac(x, *args, **kwargs)\n\n else:\n J0 = np.atleast_2d(J0)\n\n def jac_wrapped(x, _=None):\n return np.atleast_2d(jac(x, *args, **kwargs))\n\n else: # Estimate Jacobian by finite differences.\n if method == 'lm':\n if jac_sparsity is not None:\n raise ValueError(\"method='lm' does not support \"\n \"`jac_sparsity`.\")\n\n if jac != '2-point':\n warn(\"jac='{0}' works equivalently to '2-point' \"\n \"for method='lm'.\".format(jac))\n\n J0 = jac_wrapped = None\n else:\n if jac_sparsity is not None and tr_solver == 'exact':\n raise ValueError(\"tr_solver='exact' is incompatible \"\n \"with `jac_sparsity`.\")\n\n jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)\n\n def jac_wrapped(x, f):\n J = approx_derivative(fun, x, rel_step=diff_step, method=jac,\n f0=f, bounds=bounds, args=args,\n kwargs=kwargs, sparsity=jac_sparsity)\n if J.ndim != 2: # J is guaranteed not sparse.\n J = np.atleast_2d(J)\n\n return J\n\n J0 = jac_wrapped(x0, f0)\n\n if J0 is not None:\n if J0.shape != (m, n):\n raise ValueError(\n \"The return value of `jac` has wrong shape: expected {0}, \"\n \"actual {1}.\".format((m, n), J0.shape))\n\n if not isinstance(J0, np.ndarray):\n if method == 'lm':\n raise ValueError(\"method='lm' works only with dense \"\n \"Jacobian matrices.\")\n\n if tr_solver == 'exact':\n raise ValueError(\n \"tr_solver='exact' works only with dense \"\n \"Jacobian matrices.\")\n\n jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'\n if isinstance(J0, LinearOperator) and jac_scale:\n raise ValueError(\"x_scale='jac' can't be used when `jac` \"\n \"returns LinearOperator.\")\n\n if tr_solver is None:\n if 
isinstance(J0, np.ndarray):\n tr_solver = 'exact'\n else:\n tr_solver = 'lsmr'\n\n if method == 'lm':\n result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,\n max_nfev, x_scale, diff_step)\n\n elif method == 'trf':\n result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,\n gtol, max_nfev, x_scale, loss_function, tr_solver,\n tr_options.copy(), verbose)\n\n elif method == 'dogbox':\n if tr_solver == 'lsmr' and 'regularize' in tr_options:\n warn(\"The keyword 'regularize' in `tr_options` is not relevant \"\n \"for 'dogbox' method.\")\n tr_options = tr_options.copy()\n del tr_options['regularize']\n\n result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,\n xtol, gtol, max_nfev, x_scale, loss_function,\n tr_solver, tr_options, verbose)\n\n result.message = TERMINATION_MESSAGES[result.status]\n result.success = result.status > 0\n\n if verbose >= 1:\n print(result.message)\n print(\"Function evaluations {0}, initial cost {1:.4e}, final cost \"\n \"{2:.4e}, first-order optimality {3:.2e}.\"\n .format(result.nfev, initial_cost, result.cost,\n result.optimality))\n\n return result\n"
] | [
[
"numpy.dot",
"numpy.resize",
"scipy.optimize._numdiff.group_columns",
"numpy.arctan",
"numpy.asarray",
"numpy.all",
"numpy.zeros_like",
"numpy.any",
"numpy.iscomplexobj",
"scipy.optimize.OptimizeResult",
"scipy.sparse.issparse",
"numpy.atleast_1d",
"numpy.log1p",
"scipy.optimize._minpack._lmdif",
"scipy.optimize._numdiff.approx_derivative",
"scipy.sparse.csr_matrix",
"numpy.atleast_2d",
"numpy.sum",
"scipy.optimize._minpack._lmder",
"numpy.isfinite",
"numpy.linalg.norm",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
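The `least_squares` docstring in the record above already carries worked examples; the shortest of them, the unbounded Rosenbrock fit, is reproduced here as a self-contained script (the only additions are the explicit imports and prints):

```python
import numpy as np
from scipy.optimize import least_squares


def fun_rosenbrock(x):
    # Residuals whose sum of squares is the Rosenbrock function;
    # the exact minimum is at x = [1.0, 1.0].
    return np.array([10 * (x[1] - x[0] ** 2), (1 - x[0])])


res = least_squares(fun_rosenbrock, np.array([2.0, 2.0]))
print(res.x)                  # approximately [1., 1.]
print(res.cost)               # essentially zero at the solution
print(res.status, res.message)
```

Bounds, robust losses (`loss='soft_l1'`, `f_scale=...`), and sparse Jacobians are added through keyword arguments exactly as the docstring's later examples show.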
laanlabs/train_detector | [
"8ac09073bdb1e8526805672434eb1a4db2932f2f"
] | [
"modified_yamnet.py"
] | [
"\n\"\"\"\nSlightly modified yamnet keras Model that outputs \nthe dense feature vectors along with the class predictions\n\nOriginally: \nTF_MODELS_REPO/models/research/audioset/yamnet/yamnet.py\n\n\"\"\"\n\nimport sys\n\nyamnet_base = './models/research/audioset/yamnet/'\nsys.path.append(yamnet_base)\n\nimport os\nassert os.path.exists(yamnet_base)\n\n# yamnet imports \nimport params\n#import modified_yamnet as yamnet_model\nimport features as features_lib\n\n# TF / keras \nfrom tensorflow.keras import Model, layers\nimport tensorflow as tf\n\nfrom yamnet import _YAMNET_LAYER_DEFS\n\n\n\ndef yamnet(features):\n \"\"\"Define the core YAMNet mode in Keras.\"\"\"\n net = layers.Reshape(\n (params.PATCH_FRAMES, params.PATCH_BANDS, 1),\n input_shape=(params.PATCH_FRAMES, params.PATCH_BANDS))(features)\n for (i, (layer_fun, kernel, stride, filters)) in enumerate(_YAMNET_LAYER_DEFS):\n net = layer_fun('layer{}'.format(i + 1), kernel, stride, filters)(net)\n net = layers.GlobalAveragePooling2D()(net)\n logits = layers.Dense(units=params.NUM_CLASSES, use_bias=True)(net)\n predictions = layers.Activation(\n name=params.EXAMPLE_PREDICTIONS_LAYER_NAME,\n activation=params.CLASSIFIER_ACTIVATION)(logits)\n return predictions, net\n\n\ndef yamnet_frames_model(feature_params):\n \"\"\"Defines the YAMNet waveform-to-class-scores model.\n\n Args:\n feature_params: An object with parameter fields to control the feature\n calculation.\n\n Returns:\n A model accepting (1, num_samples) waveform input and emitting a\n (num_patches, num_classes) matrix of class scores per time frame as\n well as a (num_spectrogram_frames, num_mel_bins) spectrogram feature\n matrix.\n \"\"\"\n waveform = layers.Input(batch_shape=(1, None))\n # Store the intermediate spectrogram features to use in visualization.\n spectrogram = features_lib.waveform_to_log_mel_spectrogram(\n tf.squeeze(waveform, axis=0), feature_params)\n patches = features_lib.spectrogram_to_patches(spectrogram, feature_params)\n predictions, net = yamnet(patches)\n frames_model = Model(name='yamnet_frames', \n inputs=waveform, outputs=[predictions, spectrogram, net, patches])\n return frames_model, net"
] | [
[
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"tensorflow.squeeze",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
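The `modified_yamnet.py` record above returns the dense feature vector (the `GlobalAveragePooling2D` output) alongside the class scores, spectrogram, and patches. A hedged usage sketch follows; it assumes the yamnet `params` module exposes `SAMPLE_RATE` (it does in the reference audioset implementation) and that the released `yamnet.h5` weights file is available locally — both come from the upstream repo, not from the record itself.

```python
import numpy as np

import params            # yamnet hyperparameter module (on sys.path as in the script above)
import modified_yamnet

# Build the modified frames model and load the published YAMNet weights.
model, net = modified_yamnet.yamnet_frames_model(params)
model.load_weights("yamnet.h5")   # assumed local path to the released weights

# One second of silence at the expected sample rate, shaped (1, num_samples).
waveform = np.zeros((1, int(params.SAMPLE_RATE)), dtype=np.float32)
predictions, spectrogram, dense_features, patches = model.predict(waveform)
print(predictions.shape, dense_features.shape)
```

The extra `dense_features` output is the point of the modification: the train_detector project uses it as an embedding for downstream classification rather than relying on YAMNet's own class predictions.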
rivernuthead/DoD_analysis | [
"b06219d4026e89a9b9f1e8939010a63612750c80"
] | [
"DoD_analysis_v1-7.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 15 09:44:30 2021\n\n@author: erri\n\"\"\"\nimport os\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize as opt\n# from matplotlib.colors import ListedColormap, BoundaryNorm\n\nstart = time.time() # Set initial time\n######################################################################################\n# FUNCTIONS\n######################################################################################\ndef interpolate(func, xData, yData, ic=None, bounds=(-np.inf, np.inf)):\n # Interpolate data by fitting a given function, then returns the interpolated curve as a 1d array.\n par, covar = opt.curve_fit(func, xData, yData, p0=ic, maxfev=8000, bounds=bounds)\n if len(par) == 2:\n intCurve = func(xData, par[0], par[1])\n elif len(par) == 3:\n intCurve = func(xData, par[0], par[1], par[2])\n elif len(par) == 4:\n intCurve = func(xData, par[0], par[1], par[2], par[3])\n else:\n print(\"Interpolation failed. The interpolation function must have 2 or 3 parameters\")\n intCurve = -1 * np.ones(len(xData))\n return par, intCurve, covar\n\n# Scour and deposition volumes interpolation function\ndef func_exp(x,A,B):\n y = A*(1-np.exp(-x/B))\n return y\n\ndef func_exp2(x,A,B,C):\n y = C + A*(1-np.exp(-x/B))\n return y\n\n# morphW interpolation function:\ndef func_exp3(x,A,B):\n y = ((A + (1-np.exp(-x/B)))/(A+1))*0.8\n return y\n\ndef func_exp4(x,A,B,C):\n y = A*C**(x/C)\n return y\n\ndef func_ln(x,A,B):\n y=A*np.ln(x/B)\n return y\n\n\ndef GaussPoints(NG):\n '''\n Funzione per il calcolo dei punti e dei pesi di Gauss\n\n Argomenti\n ---------\n NG: int\n numero di punti di Gauss\n\n Output\n ------\n p: numpy.ndarray\n array dei punti di Gauss\n w: numpy.ndarray\n array dei pesi\n '''\n p, w = None, None\n if NG==2:\n p = np.array([ -1/np.sqrt(3),\n +1/np.sqrt(3) ])\n w = np.array([ 1, 1 ])\n elif NG==3:\n p = np.array([-(1/5)*np.sqrt(15),\n 0,\n (1/5)*np.sqrt(15)])\n w = np.array([5/9, 8/9, 5/9])\n elif NG==4:\n p = np.array([+(1/35)*np.sqrt(525-70*np.sqrt(30)),\n -(1/35)*np.sqrt(525-70*np.sqrt(30)),\n +(1/35)*np.sqrt(525+70*np.sqrt(30)),\n -(1/35)*np.sqrt(525+70*np.sqrt(30))])\n w = np.array([(1/36)*(18+np.sqrt(30)),\n (1/36)*(18+np.sqrt(30)),\n (1/36)*(18-np.sqrt(30)),\n (1/36)*(18-np.sqrt(30))])\n\n return p, w\n\n\n# Steady flow function\ndef MotoUniforme( S, y_coord, z_coord, D, NG, teta_c, ds):\n '''\n Calcola i parametri di moto uniforme per assegnato tirante\n\n Argomenti\n ---------\n\n S: float\n pendenza del canale\n y_coord: numpy.ndarray\n coordinate trasversali dei punti della sezione\n z_coord: numpy.ndarray\n coordinate verticali dei punti della sezione\n D: float\n profondità alla quale calcolare i parametri di moto uniforme\n NG: int [default=2]\n numero di punti di Gauss\n teta_c: float\n parametro di mobilità critico di Shiels\n ds: float\n diamentro medio dei sedimenti\n\n Output\n ------\n Q: float\n portata alla quale si realizza la profondità D di moto uniforme\n Omega: float\n area sezione bagnata alla profondita' D\n b: float\n larghezza superficie libera alla profondita' D\n alpha: float\n coefficiente di ragguaglio dell'energia alla profondita' D\n beta: float\n coefficiente di ragguaglio della qdm alla profondita' D\n '''\n # Punti e pesi di Gauss\n xj, wj = GaussPoints( NG ) # Calcola i putni e i pesi di Gauss\n\n #Dati\n delta = 1.65\n g = 9.806\n k = 5.3 # C = 2.5*ln(11*D/(k*ds))\n\n # Inizializzo\n Omega = 0 # Area bagnata\n array_teta = [] # Shields 
parameter array\n b = 0 # Larghezza superficie libera\n sumQs = 0 # Portata solida\n B=0\n #I coefficienti di ragguaglio sono relativi a tutta la sezione, si calcolano alla fine.\n num_alpha = 0 # Numeratore di alpha\n num_beta = 0 # Numeratore di beta\n den = 0 # Base del denominatore di alpha e beta\n Di = D - (z_coord-z_coord.min()) # Distribuzione trasversale della profondita'\n N = Di.size # Numero di punti sulla trasversale\n\n # N punti trasversali -> N-1 intervalli (trapezi)\n for i in range( N-1 ): # Per ogni trapezio\n\n # vertical stripe\n #\n # dy\n #\n # o-----o <- water level\n # | |\n # | | DR\n # | |\n # | o zR _ _\n # DL | / ^ |\n # | / dB | |\n # | / | | dz\n # | /\\\\ phi | _|_\n # zL o ------ |\n # ^ |\n # | |\n # ------------------- z_coord=0\n\n yL, yR = y_coord[i], y_coord[i+1]\n zL, zR = z_coord[i], z_coord[i+1]\n DL, DR = Di[i], Di[i+1]\n dy = yR - yL\n dz = zR - zL\n dB = np.sqrt(dy**2+dz**2)\n cosphi = dy/dB\n # Geometric parameters:\n if DL<=0 and DR<=0:\n dy, dz = 0, 0\n DL, DR = 0, 0\n elif DL<0:\n dy = -dy*DR/dz\n dz = DR\n DL = 0\n elif DR<0:\n dy = dy*DL/dz\n dz = DL\n DR = 0\n\n #Metodo di Gauss:\n SUM = np.zeros(3)\n C = 0\n Dm = 0\n teta1=0\n\n # Gauss weight loop\n for j in range(NG):\n Dm = (DR+DL)/2# + (DR-DL)/2*xj[j]\n # print(Dm)\n # print('tirante:', Dm, ' k:', k, ' ds:', ds)\n\n if Dm==0 or 2.5*np.log(11*Dm/(k*ds))<0:\n C=0\n else:\n C = 2.5*np.log(11*Dm/(k*ds))\n\n #den\n SUM[0] += wj[j]*C*Dm**(3/2)\n #num_alpha\n SUM[1] += wj[j]*C**(3)*Dm**(2.5)\n #num_beta\n SUM[2] += wj[j]*C**(2)*Dm**(2)\n\n den += dy/2*cosphi**(1/2)*SUM[0]\n num_alpha += dy/2*cosphi**(3/2)*SUM[1]\n num_beta += dy/2*cosphi*SUM[2]\n\n dOmega = (DR + DL)*dy/2\n\n #Calcolo di Omega: superficie della sezione\n Omega += dOmega\n\n #Calcolo di B: lunghezza del perimetro bagnato\n\n B += dB\n\n #Calcolo di b: larghezza della superficie libera\n b += dy\n\n #Calcolo di b: larghezza della superficie libera\n #Rh=Omega/B\n\n #Shields parameter\n teta_primo = (Dm*cosphi)*S/(delta*ds)\n array_teta = np.append(array_teta, teta_primo)\n\n\n count_active = np.count_nonzero(np.where(array_teta>=teta_c, 1, 0))\n\n\n\n #Calcolo della portata Q\n Q = np.sqrt(S*g)*den\n\n #Calcolo della capacità di trasporto\n teta1 = (Omega/B)*S/(delta*ds)\n if teta1 >= teta_c:\n Qs = 8*(teta1-teta_c)**1.5*np.sqrt(9.81*delta*ds**3)*b\n else:\n Qs = 0\n # sumQs += qs\n Qs = sumQs\n\n #Condizione per procedere al calcolo anche quando il punto i è sommerso\n # mentre i+1 no.\n if den==0:\n alpha = None\n beta = None\n else:\n alpha = Omega**2*(g*S)**(3/2)*num_alpha/den**3\n beta = Omega*g*S*num_beta/den**2\n\n return Q, Omega, b, B, alpha, beta, Qs, count_active\n\n\n###############################################################################\n# SETUP SCRIPT PARAMETERS and RUN MODE\n###############################################################################\n\n# SINGLE RUN NAME\nrun = 'q07_1'\n\n'''\nRun mode:\n 1 = one run at time\n 2 = bath process\nDEM analysis mode:\n 0 = do not perform DEM analysis\n 1 = perform DEM analysis\nMask mode:\n 1 = mask the flume edge\n 2 = mask the upstream half flume\n 3 = mask the downstream half flume\nProcess mode: (NB: set DEMs name)\n 1 = batch process\n 2 = single run process\nSave mode:\n 0 = save only reports\n 1 = save all chart and figure\n\n'''\nrun_mode = 2\nDEM_analysis_mode = 0\nmask_mode = 1\nprocess_mode = 1\nsave_plot_mode = 1\n\n\n###############################################################################\n# SETUP 
FOLDERS\n###############################################################################\n# setup working directory and DEM's name\nhome_dir = os.getcwd()\nDoDs_dir = os.path.join(home_dir, 'DoDs')\nreport_dir = os.path.join(home_dir, 'output')\nplot_dir = os.path.join(home_dir, 'plot')\nrun_dir = os.path.join(home_dir, 'surveys')\n\n\n# Check if morphWact_matrix.txt already exists. If yes, remove it.\n# The script append all the data, so if the file already exixist all the new\n# data will be appedned to the old file.\n# if os.path.exists(os.path.join(report_dir, 'morphWact_matrix.txt')):\n# os.remove(os.path.join(report_dir, 'morphWact_matrix.txt'))\n# else:\n# pass\n\n\n# Create the run name list\nRUNS=[]\nif run_mode ==2:\n for RUN in sorted(os.listdir(run_dir)):\n if RUN.startswith('q'):\n RUNS = np.append(RUNS, RUN)\nelif run_mode==1:\n RUNS=run.split()\n\n# Define volume time scale report matrix:\n# B_dep, SD(B_dep), B_sco, SD(B_sco)\nvolume_temp_scale_report=np.zeros((len(RUNS), 4))\n\n# Define morphW time scale report matrix:\n# B_morphW [min], SD(B_morphW)\nmorphW_temp_scale_report = np.zeros((len(RUNS), 2))\n\n# Define Engelund Gauss model report matrix:\n# D [m], Q [m^3/s], Wwet/W [-]\nengelund_model_report=np.zeros((len(RUNS),3))\n\n# Array that collect all the morphWact_array dimension.\n# It will be used to create the morphWact_matrix\nmorphWact_dim = [] # Array with the dimensions of morphWact_values array\n\n# Print a report with xData as real time in minutes and the value of scour and deposition volumes for each runs\n# Check if the file already exists\nif os.path.exists(os.path.join(report_dir, 'volume_over_time.txt')):\n os.remove(os.path.join(report_dir, 'volume_over_time.txt'))\nelse:\n pass\n\n###############################################################################\n# MAIN LOOP OVER RUNS\n###############################################################################\nfor run in RUNS:\n\n ###########################################################################\n # SETUP FOLDERS\n ###########################################################################\n print('######')\n print(run)\n print('######')\n print()\n # setup working directory and DEM's name\n input_dir = os.path.join(home_dir, 'surveys', run)\n\n\n # Create folders\n if not(os.path.exists(report_dir)):\n os.mkdir(report_dir)\n if not(os.path.exists(DoDs_dir)):\n os.mkdir(DoDs_dir)\n if not(os.path.exists(plot_dir)):\n os.mkdir(plot_dir)\n\n\n # Import parameters from file parameters.txt\n # variable run must be as 'q' + discharge + '_' repetition number\n # Parameters.txt structure:\n # discharge [l/s],repetition,run time [min],Texner discretization [-], Channel width [m], slome [m/m]\n # Load parameter matrix\n parameters = np.loadtxt(os.path.join(home_dir, 'parameters.txt'),\n delimiter=',',\n skiprows=1)\n # Extract run parameter\n run_param = parameters[np.intersect1d(np.argwhere(parameters[:,1]==float(run[-1:])),np.argwhere(parameters[:,0]==float(run[1:3])/10)),:]\n\n dt = run_param[0,2] # dt between runs in minutes (real time)\n dt_xnr = run_param[0,3] # temporal discretization in terms of Exner time (Texner between runs)\n\n DEM1_single_name = 'matrix_bed_norm_q07S5.txt' # DEM1 name\n DEM2_single_name = 'matrix_bed_norm_q07S6.txt' # DEM2 name\n\n # Thresholds values\n thrs_1 = 2.0 # [mm] # Lower threshold\n thrs_2 = 15.0 # [mm] # Upper threshold\n neigh_thrs = 5 # [-] # Number of neighborhood cells for validation\n\n # Flume parameters\n W = run_param[0,4] # Flume width [m]\n S = 
run_param[0,5] # Flume slope\n\n # Pixel dimension\n px_x = 50 # [mm]\n px_y = 5 # [mm]\n\n # Not a number raster value (NaN)\n NaN = -999\n\n # Engelund-Gauss model parameters\n g = 9.806 # Gravity\n ds = 0.001 # Sediment grainsize [mm]\n Q = run_param[0,0] # Run discharge [l/s]\n teta_c = 0.02 # Schield parameter [-]\n NG=4 # Number of Gauss points\n max_iter = 100000 # Maximum numer of iterations\n toll = 0.00001\n\n\n files=[] # initializing filenames list\n # Creating array with file names:\n for f in sorted(os.listdir(input_dir)):\n path = os.path.join(input_dir, f)\n if os.path.isfile(path) and f.endswith('.txt') and f.startswith('matrix_bed_norm_'+run+'s'):\n files = np.append(files, f)\n\n # Initialize arrays\n comb = np.array([]) # combination of differences\n DoD_count_array=[] # Active pixel\n volumes_array=[] # Tot volume\n dep_array=[] # Deposition volume\n sco_array=[] # Scour volume\n morph_act_area_array=[] # Total active area array\n morph_act_area_array_dep=[] # Deposition active area array\n morph_act_area_array_sco=[] # Active active area array\n act_width_mean_array=[] # Total active width mean array\n act_width_mean_array_dep=[] # Deposition active width mean array\n act_width_mean_array_sco=[] # Scour active width mean array\n morphWact_values=[] # morphWact values for each section of all the DoD\n report_matrix = [] #Report matrix\n # matrix_volumes=np.zeros((len(files)-1, len(files)+1)) # Volumes report matrix\n matrix_volumes=np.zeros((len(files)-1, len(files)+1)) # Volumes report matrix\n # matrix_dep=np.zeros((len(files)-1, len(files)+1)) # Deposition volume report matrix\n matrix_dep=np.zeros((len(files)+3, len(files)+1)) # Deposition volume report matrix\n matrix_morph_act_area=np.zeros((len(files)+3, len(files)+1)) # Active area report matrix\n # matrix_sco=np.zeros((len(files)-1, len(files)+1)) # Scour volume report matrix\n matrix_sco=np.zeros((len(files)+3, len(files)+1)) # Scour volume report matrix\n matrix_Wact=np.zeros((len(files)+3, len(files)+3)) # Active width report matrix\n matrix_Wact_max=np.zeros((len(files)+3, len(files)+1)) # Max active width report matrix\n matrix_Wact_min=np.zeros((len(files)+3, len(files)+1)) # Minimum active width report matrix\n matrix_act_thickness = np.zeros((len(files)-1, len(files)+1)) # Matrix where collect active thickness data\n matrix_act_volume = np.zeros((len(files)-1, len(files)+1)) # Matrix where collect volume data\n\n matrix_DEM_analysis = np.zeros((len(files), len(files)))\n\n ###########################################################################\n # CHECK DEMs SHAPE\n ###########################################################################\n # Due to differences between DEMs shape (not the same ScanArea.txt laser survey file)\n # a preliminary loop over the all DEMs is required in order to define the target\n # dimension of the reshaping operation\n array_dim_x = []\n array_dim_y = []\n for f in files:\n path_DEM = os.path.join(input_dir, f)\n DEM = np.loadtxt(path_DEM,\n # delimiter=',',\n skiprows=8\n )\n array_dim_x = np.append(array_dim_x, DEM.shape[0])\n array_dim_y = np.append(array_dim_y, DEM.shape[1])\n\n # Define target dimension:\n shp_target_x, shp_target_y = int(min(array_dim_x)), int(min(array_dim_y))\n\n arr_shape = np.array([shp_target_x, shp_target_y]) # Define target shape\n\n\n ###########################################################################\n # SETUP MASKS\n ###########################################################################\n # array mask for filtering data 
outside the channel domain\n # Different mask will be applied depending on the run due to different ScanArea\n # used during the laser surveys\n runs_list = ['q10_1', 'q10_2', 'q15_1', 'q20_1', 'q20_2'] # Old runs with old ScanArea\n array_mask_name, array_mask_path = 'array_mask.txt', home_dir # Mask for runs 07 onwards\n\n if run in runs_list:\n array_mask_name, array_mask_path = 'array_mask_0.txt', home_dir\n print(array_mask_name)\n\n\n # Load mask\n array_mask = np.loadtxt(os.path.join(array_mask_path, array_mask_name))\n # Reshape mask:\n array_mask_rshp = array_mask[:shp_target_x,:shp_target_y] # Array mask reshaped\n\n # Create array mask:\n # - array_mask: np.array with 0 and 1\n # - array_mask_nan: np.array with np.nan and 1\n array_mask_rshp = np.where(array_mask_rshp==NaN, 0, 1) # Convert in mask with 0 and 1\n array_mask_rshp_nan = np.where(array_mask_rshp==0, np.nan, 1) # Convert in mask with np.nan and 1\n\n # Here we can split in two parts the DEMs or keep the entire one\n if mask_mode==1:\n pass\n elif mask_mode==2: # Working downstream, masking upstream\n array_mask_rshp[:,:-int(array_mask_rshp.shape[1]/2)] = NaN\n array_mask_rshp=np.where(array_mask_rshp==NaN, np.nan, array_mask_rshp)\n\n elif mask_mode==3: # Working upstream, masking downstream\n array_mask_rshp[:,int(array_mask_rshp.shape[1]/2):] = NaN\n array_mask_rshp=np.where(array_mask_rshp==NaN, np.nan, array_mask_rshp)\n\n ###########################################################################\n # DEM ANALYSIS\n ###########################################################################\n if DEM_analysis_mode==1:\n # - Residual slope, for each DEM\n # - Bed Relief Index (BRI) averaged, for each DEM\n # - STDEV (SD) of the bed elevation, for each DEM\n # Initialize arrays\n slope_res = [] # Rsidual slope array\n BRI=[] # BRi array\n SD = [] # SD array\n engelund_model_array=[] # Engelund model array (Q, D, Wwet/w])\n water_dept_array=[] # Water dept array [m]\n discharge_array=[] # Discarge [m^3/s]\n Wwet_array = [] # Wwet array [Wwet/W]\n # morphWact_values = [] # All the morphological active width values for each runs\n\n for f in files:\n DEM_path = os.path.join(input_dir, f) # Set DEM path\n DEM = np.loadtxt(DEM_path, # Load DEM data\n #delimiter=',',\n skiprows=8)\n DEM = np.where(np.isclose(DEM, NaN), np.nan, DEM)\n\n # DEM reshaping according to arr_shape...\n DEM=DEM[0:arr_shape[0], 0:arr_shape[1]]\n\n # DEM masking...\n DEM = DEM*array_mask_rshp_nan\n\n # Residual slope\n # NB: this operation will be performed to detrended DEMs\n # Averaged crosswise bed elevation array:\n bed_profile = np.nanmean(DEM, axis=0) # Bed profile\n # Linear regression of bed profile:\n # Performing linear regression\n x_coord = np.linspace(0, px_x*len(bed_profile), len(bed_profile)) # Longitudinal coordinate\n linear_model = np.polyfit(x_coord, bed_profile,1) # linear_model[0]=m, linear_model[1]=q y=m*x+q\n slope_res = np.append(slope_res, linear_model[0]) # Append residual slope values\n\n # PLOT cross section mean values and trendline\n # fig, ax1 = plt.subplots(dpi=200)\n # ax1.plot(x_coord, bed_profile)\n # ax1.plot(x_coord, x_coord*linear_model[0]+linear_model[1], color='red')\n # ax1.set(xlabel='longitudinal coordinate (mm)', ylabel='Z (mm)',\n # title=run+'\\n'+'Residual slope:'+str(linear_model[0]))\n\n # BRI calculation\n BRI=np.append(BRI,np.mean(np.nanstd(DEM, axis=0)))\n\n # Bed elevation STDEV\n SD = np.append(SD,np.nanstd(DEM))\n\n # Create report matrix:\n # Structure: DEM name, residual slope [m/m], BRI 
[mm], SD [mm]\n matrix_DEM_analysis = np.transpose(np.stack((slope_res, BRI, SD)))\n\n # Build report\n report_DEM_header = 'DEM name, residual slope [m/m], BRI [mm], SD [mm]'\n report_DEM_name = run+'_DEM_report.txt'\n with open(os.path.join(report_dir, report_DEM_name), 'w') as fp:\n fp.write(report_DEM_header)\n fp.writelines(['\\n'])\n for i in range(0,len(matrix_DEM_analysis[:,0])):\n for j in range(0, len(matrix_DEM_analysis[0,:])+1):\n if j == 0:\n fp.writelines([files[i]+', '])\n elif j==1:\n # fp.writelines([\"%.6f, \" % float(matrix_DEM_analysis[i,j-1])])\n fp.writelines([\"{:e},\".format(matrix_DEM_analysis[i,j-1])])\n else:\n fp.writelines([\"%.3f, \" % float(matrix_DEM_analysis[i,j-1])])\n fp.writelines(['\\n'])\n fp.close()\n\n # DEM detrending (DEM detrended both with slope and residual slope)\n DEM_detrended = DEM\n for i in range(0,DEM.shape[1]):\n DEM_detrended[:,i] = DEM[:,i]-linear_model[0]*i*px_x\n\n # Create equivalent cross section as sorted DEM vaues excluding NaN\n DEM_values = sorted(DEM_detrended[np.logical_not(np.isnan(DEM_detrended))])\n # cross_section_eq = DEM_values[::100] # Resize DEM value to be lighter (100 res resampling)\n cross_section_eq = np.interp(np.arange(0,len(DEM_values),50), np.arange(0,len(DEM_values)), DEM_values)\n # Add cross section banks as the double of the maximum DEM's value:\n z_coord = np.pad(cross_section_eq, (1,1), mode='constant', constant_values=int(cross_section_eq.max()*2))\n z_coord = z_coord/1000 # Convert z_coord in meters\n\n # Create cross-wise coordination\n y_coord = np.arange(0,W*1000, W*1000/len(z_coord))\n y_coord = y_coord/1000 # Convert y_coord in meters\n\n # Engenlund-Gauss implementation\n\n Dmax = z_coord.max()-z_coord.min() # Maximum water dept\n Dmin = 0 # Minimum water level\n i=0 # Initialize iteration counter\n\n # Guess values:\n D0 = (Dmax-Dmin)/2 # Water dept\n Qn, Omega, b, B, alpha, beta, Qs, count_active = MotoUniforme(S, y_coord, z_coord, D0, NG, teta_c, ds) # Discharge\n # Discharge extreme values\n Qmax, Omega, b, B, alpha, beta, Qs, count_active = MotoUniforme(S, y_coord, z_coord, Dmax, NG, teta_c, ds)\n Qmin, Omega, b, B, alpha, beta, Qs, count_active = MotoUniforme(S, y_coord, z_coord, Dmin, NG, teta_c, ds)\n Q_target = Q/1000 # Target discharge [m^3/s]\n if np.sign(Qmax-Q_target)==np.sign(Qmin-Q_target):\n print(' Soluntion out of boundaries')\n else:\n # Check if h<h_min:\n while abs(Qn - Q_target)>toll:\n if i>max_iter:\n print('ERROR: max iterations reached!')\n break\n i+=1\n D0 = (Dmax+Dmin)/2\n Q0, Omega, b, B, alpha, beta, Qs, count_active = MotoUniforme(S, y_coord, z_coord, D0, NG, teta_c, ds)\n # print(i)\n # print(D0)\n # print(Q0)\n if Q0>Q_target:\n Dmax=D0 # Update Dmax\n elif Q0<Q_target:\n Dmin=D0 # Update Dmin\n Qn=Q0\n\n water_dept_array=np.append(water_dept_array, D0) # Water dept array\n discharge_array=np.append(discharge_array, Q0) # Discarge\n Wwet_array = np.append(Wwet_array, b/W)\n\n water_dept=np.mean(water_dept_array) # Average water dept\n discharge=np.mean(discharge_array) # Average discarge\n Wwet = np.mean(Wwet_array)\n print('Engelund-Gauss model results:')\n print('Reached discharge: ', discharge, ' m^3/s')\n print('Water dept: ', water_dept, ' m')\n print('Wwet/W: ', Wwet)\n\n # Append values as: run name, D [m], Q [m^3/s], Wwet/W [-]\n engelund_model_array = np.append(engelund_model_array,(water_dept, discharge, Wwet))\n if run_mode ==2:\n engelund_model_report[int(np.where(RUNS==run)[0]),:]=engelund_model_array\n\n # Print averaged residual slope:\n print()\n 
print('Averaged DEMs residual slope: ', np.average(slope_res))\n\n\n ###########################################################################\n # LOOP OVER ALL DEMs COMBINATIONS\n ###########################################################################\n # Perform difference over all combination of DEMs in the working directory\n for h in range (0, len(files)-1):\n for k in range (0, len(files)-1-h):\n DEM1_name=files[h]\n DEM2_name=files[h+1+k]\n comb = np.append(comb, DEM2_name + '-' + DEM1_name)\n\n # write DEM1 and DEM2 names below to avoid batch differences processing\n if process_mode==1:\n pass\n elif process_mode==2:\n DEM1_name = DEM1_single_name\n DEM2_name = DEM2_single_name\n\n # Specify DEMs path...\n path_DEM1 = os.path.join(input_dir, DEM1_name)\n path_DEM2 = os.path.join(input_dir, DEM2_name)\n # ...and DOD name.\n DoD_name = 'DoD_' + DEM2_name[-6:-4] + '-' + DEM1_name[-6:-4] + '_'\n\n # Setup output folder\n output_name = 'script_outputs_' + DEM2_name[20:21] + '-' + DEM1_name[20:21] # Set outputs name\n\n\n path_out = os.path.join(home_dir, 'DoDs', 'DoD_'+run) # Set DoD outputs directory\n if not(os.path.exists(path_out)):\n os.mkdir(path_out)\n\n\n ###################################################################\n # DATA READING...\n ###################################################################\n # Header initialization and extraction\n lines = []\n header = []\n\n with open(path_DEM1, 'r') as file:\n for line in file:\n lines.append(line) # lines is a list. Each item is a row of the input file\n # Header extraction...\n for i in range(0, 7):\n header.append(lines[i])\n # Header printing in a file txt called header.txt\n with open(path_out + '/' + DoD_name + 'header.txt', 'w') as head:\n head.writelines(header)\n\n ###################################################################\n # DATA LOADING...\n ###################################################################\n # Load DEMs\n DEM1 = np.loadtxt(path_DEM1,\n # delimiter=',',\n skiprows=8\n )\n DEM2 = np.loadtxt(path_DEM2,\n # delimiter=',',\n skiprows=8)\n\n\n # DEMs reshaping according to arr_shape...\n DEM1=DEM1[0:arr_shape[0], 0:arr_shape[1]]\n DEM2=DEM2[0:arr_shape[0], 0:arr_shape[1]]\n\n \n ###################################################################\n # PERFORM DEM OF DIFFERENCE - DEM2-DEM1\n ###################################################################\n # Print DoD name\n print(DEM2_name, '-', DEM1_name)\n # Raster dimension\n dim_x, dim_y = DEM1.shape\n # dim_y, dim_x = DEM1.shape\n \n DoD_length = DEM1.shape[1]*px_x/1000 # DoD length in meters\n\n # Creating DoD array with np.nan\n DoD_raw = np.zeros(DEM1.shape)\n DoD_raw = np.where(np.logical_or(DEM1 == NaN, DEM2 == NaN), np.nan, DEM2 - DEM1)\n # Masking with array mask\n DoD_raw = DoD_raw*array_mask_rshp_nan\n # Creating GIS readable DoD array (np.nan as -999)\n DoD_raw_rst = np.zeros(DoD_raw.shape)\n DoD_raw_rst = np.where(np.isnan(DoD_raw), NaN, DoD_raw)\n\n\n # Count the number of pixels in the channel area\n DoD_count = np.count_nonzero(np.where(np.isnan(DoD_raw), 0, 1))\n print('Active pixels:', DoD_count)\n DoD_count_array = np.append(DoD_count_array, DoD_count)\n\n # DoD statistics\n # print('The minimum DoD value is:\\n', np.nanmin(DoD_raw))\n # print('The maximum DoD value is:\\n', np.nanmax(DoD_raw))\n # print('The DoD shape is:\\n', DoD_raw.shape)\n\n ###################################################################\n # DATA FILTERING...\n ###################################################################\n\n # 
Perform domain-wide average\n domain_avg = np.pad(DoD_raw, 1, mode='edge') # i size pad with edge values domain\n DoD_mean = np.zeros(DEM1.shape)\n for i in range (0, dim_x):\n for j in range (0, dim_y):\n if np.isnan(DoD_raw[i, j]):\n DoD_mean[i, j] = np.nan\n else:\n ker1 = np.array([[domain_avg[i, j], domain_avg[i, j + 1], domain_avg[i, j + 2]],\n [domain_avg[i + 1, j], domain_avg[i + 1, j + 1], domain_avg[i + 1, j + 2]],\n [domain_avg[i + 2, j], domain_avg[i + 2, j + 1], domain_avg[i + 2, j + 2]]])\n w = np.array([[0, 1, 0],\n [0, 2, 0],\n [0, 1, 0]])\n w_norm = w / (sum(sum(w))) # Normalizing weight matrix\n DoD_mean[i, j] = np.nansum(ker1 * w_norm)\n #TODO convert Array in a %.1f format\n # # Filtered array weighted average by nan.array mask\n # DoD_mean = DoD_mean * array_msk_nan\n # Create a GIS readable DoD mean (np.nan as -999)\n DoD_mean = np.round(DoD_mean, 1) # Round data to 1 decimal precision\n DoD_mean_rst = np.where(np.isnan(DoD_mean), NaN, DoD_mean)\n\n\n # Threshold and Neighbourhood analysis process\n DoD_filt = np.copy(DoD_mean) # Initialize filtered DoD array as a copy of the averaged one\n DoD_filt_domain = np.pad(DoD_filt, 1, mode='edge') # Create neighbourhood analysis domain\n\n for i in range(0,dim_x):\n for j in range(0,dim_y):\n if abs(DoD_filt[i,j]) < thrs_1: # Set as \"no variation detected\" all variations lower than thrs_1\n DoD_filt[i,j] = 0\n if abs(DoD_filt[i,j]) >= thrs_1 and abs(DoD_filt[i,j]) <= thrs_2: # Perform neighbourhood analysis for variations between thrs_1 and thrs_2\n # Create kernel\n ker2 = np.array([[DoD_filt_domain[i, j], DoD_filt_domain[i, j + 1], DoD_filt_domain[i, j + 2]],\n [DoD_filt_domain[i + 1, j], DoD_filt_domain[i + 1, j + 1], DoD_filt_domain[i + 1, j + 2]],\n [DoD_filt_domain[i + 2, j], DoD_filt_domain[i + 2, j + 1], DoD_filt_domain[i + 2, j + 2]]])\n if not((DoD_filt[i,j] > 0 and np.count_nonzero(ker2 > 0) >= neigh_thrs) or (DoD_filt[i,j] < 0 and np.count_nonzero(ker2 < 0) >= neigh_thrs)):\n # So if the nature of the selected cell is not confirmed...\n DoD_filt[i,j] = 0\n\n DoD_filt = np.round(DoD_filt, 1) # Round data to 1 decimal precision\n # Create a GIS readable filtered DoD (np.nann as -999)\n DoD_filt_rst = np.where(np.isnan(DoD_filt), NaN, DoD_filt)\n\n # Avoiding zero-surrounded pixel\n DoD_filt_nozero=np.copy(DoD_filt) # Initialize filtered DoD array as a copy of the filtered one\n zerosur_domain = np.pad(DoD_filt_nozero, 1, mode='edge') # Create analysis domain\n for i in range(0,dim_x):\n for j in range(0,dim_y):\n if DoD_filt_nozero[i,j] != 0 and not(np.isnan(DoD_filt_nozero[i,j])): # Limiting the analysis to non-zero numbers\n # Create kernel\n ker3 = np.array([[zerosur_domain[i, j], zerosur_domain[i, j + 1], zerosur_domain[i, j + 2]],\n [zerosur_domain[i + 1, j], zerosur_domain[i + 1, j + 1], zerosur_domain[i + 1, j + 2]],\n [zerosur_domain[i + 2, j], zerosur_domain[i + 2, j + 1], zerosur_domain[i + 2, j + 2]]])\n zero_count = np.count_nonzero(ker3 == 0) + np.count_nonzero(np.isnan(ker3))\n if zero_count == 8:\n DoD_filt_nozero[i,j] = 0\n else:\n pass\n\n # Create GIS-readable DoD filtered and zero-surrounded avoided\n DoD_filt_nozero_rst = np.where(np.isnan(DoD_filt_nozero), NaN, DoD_filt_nozero)\n\n '''\n Output files:\n DoD_raw: it's just the dem of difference, so DEM2-DEM1\n DoD_raw_rst: the same for DoD_raw, but np.nan=Nan\n DoD_mean: DoD_raw with a smoothing along the Y axes, see the weight in the averaging process\n DoD_mean_rst: the same for DoD_mean but np.nan=Nan\n DoD_filt: DoD_mean with a 
neighbourhood analysis applie\n DoD_filt_rst: the same for DoD_filt but np.nan=Nan\n DoD_filt_nozero: DoD_filt with an avoiding zero-surrounded process applied\n DoD_filt_nozero_rst: the same for DoD_filt_nozero but with np.nan=NaN\n '''\n\n ###################################################################\n # PLOT RAW DOD, MEAN DOD AND FILTERED DOD\n ###################################################################\n # # Plot data using nicer colors\n # colors = ['linen', 'lightgreen', 'darkgreen', 'maroon']\n # class_bins = [-10.5, -1.5, 0, 1.5, 10.5]\n # cmap = ListedColormap(colors)\n # norm = BoundaryNorm(class_bins,\n # len(colors))\n\n # fig, (ax1, ax2, ax3) = plt.subplots(3,1)\n\n # raw= ax1.imshow(DoD_raw, cmap=cmap, norm=norm)\n # ax1.set_title('raw DoD')\n\n # mean = ax2.imshow(DoD_mean_th1, cmap=cmap, norm=norm)\n # ax2.set_title('mean DoD')\n\n # filt = ax3.imshow(DoD_out, cmap=cmap, norm=norm)\n # ax3.set_title('Filtered DoD')\n\n # #fig.colorbar()\n # fig.tight_layout()\n # plt.show()\n # plt.savefig(path_out + '/raster.pdf') # raster (png, jpg, rgb, tif), vector (pdf, eps), latex (pgf)\n # #plt.imshow(DoD_out, cmap='RdYlGn')\n\n ##############################################################################\n # VOLUMES\n ##############################################################################\n # DoD filtered name: DoD_filt\n # Create new raster where apply volume calculation\n # DoD>0 --> Deposition, DoD<0 --> Scour\n # =+SUMIFS(A1:JS144, A1:JS144,\">0\")*5*50(LibreCalc function)\n\n # Define total volume matrix, Deposition matrix and Scour matrix\n DoD_vol = np.where(np.isnan(DoD_filt_nozero), 0, DoD_filt_nozero) # Total volume matrix\n dep_DoD = (DoD_vol>0)*DoD_vol # DoD of only deposition data\n sco_DoD = (DoD_vol<0)*DoD_vol # DoD of only scour data\n \n \n tot_vol = np.sum(DoD_vol)*px_x*px_y/(W*DoD_length*1000) # Total volume as V/(L*W) [mm]\n dep_vol = np.sum(dep_DoD)*px_x*px_y/(W*DoD_length*1000) # Deposition volume as V/(L*W) [mm]\n sco_vol = np.sum(sco_DoD)*px_x*px_y/(W*DoD_length*1000) # Scour volume as V/(L*W) [mm]\n \n \n #Print results:\n print('Total volume V/(L*W) [mm]:', \"{:.1f}\".format(tot_vol))\n print('Deposition volume V/(L*W) [mm]:', \"{:.1f}\".format(dep_vol))\n print('Scour volume V/(L*W) [mm]:', \"{:.1f}\".format(sco_vol))\n\n # Append values to output data array\n volumes_array = np.append(volumes_array, tot_vol)\n dep_array = np.append(dep_array, dep_vol)\n sco_array = np.append(sco_array, sco_vol)\n\n \n ###################################################################\n # Active_pixel analysis\n ###################################################################\n \n act_px_matrix = np.where(DoD_vol!=0, 1, 0) # Active pixel matrix, both scour and deposition\n act_px_matrix_dep = np.where(dep_DoD != 0, 1, 0) # Active deposition matrix \n act_px_matrix_sco = np.where(sco_DoD != 0, 1, 0) # Active scour matrix\n \n morph_act_area = np.count_nonzero(act_px_matrix)*px_x*px_y # Active area both in terms of scour and deposition [mm²]\n morph_act_area_dep = np.count_nonzero(act_px_matrix_dep)*px_x*px_y # Active deposition area [mm²]\n morph_act_area_sco = np.count_nonzero(act_px_matrix_sco)*px_x*px_y # Active scour area [mm²]\n \n morph_act_area_array = np.append(morph_act_area_array, morph_act_area) # For each DoD, append total active area data\n morph_act_area_array_dep = np.append(morph_act_area_array_dep, morph_act_area_dep) # For each DoD, append deposition active area data\n morph_act_area_array_sco = np.append(morph_act_area_array_sco, 
morph_act_area_sco) # For each DoD, append scour active area data\n \n act_width_mean = (morph_act_area/(DoD_length*1000))/(W*1000) # Total mean active width [%] - Wact/W\n act_width_mean_dep = (morph_act_area_dep/(DoD_length*1000))/(W*1000) # Deposition mean active width [%] - Wact/W\n act_width_mean_sco = (morph_act_area_sco/(DoD_length*1000))/(W*1000) # Scour mean active width [%] - Wact/W\n \n act_width_mean_array = np.append(act_width_mean_array, act_width_mean) # For each DoD append total active width values\n act_width_mean_array_dep = np.append(act_width_mean_array_dep, act_width_mean_dep) # For each DoD append deposition active width values\n act_width_mean_array_sco = np.append(act_width_mean_array_sco, act_width_mean_sco) # For each DoD append scour active width values\n \n act_width_array = np.array([np.nansum(act_px_matrix, axis=0)])*px_y/1000/W # Array of the crosswise morphological total active width [Wact/W]\n act_width_array_dep = np.array([np.nansum(act_px_matrix_dep, axis=0)])*px_y/1000/W # Array of the crosswise morphological deposition active width [Wact/W]\n act_width_array_sco = np.array([np.nansum(act_px_matrix_sco, axis=0)])*px_y/1000/W # Array of the crosswise morphological scour active width [Wact/W]\n \n # Calculate active thickness for total volumes. deposition volumes and scour volumes\n act_thickness = (np.sum(np.abs(DoD_vol))*px_x*px_y)/morph_act_area # Total active thickness (abs(V_sco) + V_dep)/act_area [mm]\n act_thickness_dep = (np.sum(np.abs(dep_DoD))*px_x*px_y)/morph_act_area_dep # Deposition active thickness (abs(V_sco) + V_dep)/act_area [mm]\n act_thickness_sco = (np.sum(np.abs(sco_DoD))*px_x*px_y)/morph_act_area_sco # Scour active thickness (abs(V_sco) + V_dep)/act_area [mm]\n \n print('Active thickness [mm]:')\n print(act_thickness)\n\n print('Morphological active area: ', \"{:.1f}\".format(morph_act_area), '[mm²]')\n print('Morphological active width (mean):', \"{:.3f}\".format(act_width_mean), '%')\n print()\n print()\n\n # Create output matrix as below:\n # DoD step0 1-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 9-8 average STDEV\n # DoD step1 2-0 3-1 4-2 5-3 6-4 7-5 8-6 9-7 average STDEV\n # DoD step2 3-0 4-1 5-2 6-3 7-4 8-5 9-6 average STDEV\n # DoD step3 4-0 5-1 6-2 7-3 8-4 9-5 average STDEV\n # DoD step4 5-0 6-1 7-2 8-3 9-4 average STDEV\n # DoD step5 6-0 7-1 8-2 9-3 average STDEV\n # DoD step6 7-0 8-1 9-2 average STDEV\n # DoD step7 8-0 9-1 average STDEV\n # DoD step8 9-0 average STDEV\n # A A A A A A A A A\n # SD(A) SD(A) SD(A) SD(A) SD(A) SD(A) SD(A) SD(A) SD(A)\n # B B B B B B B B B\n # SD(B) SD(B) SD(B) SD(B) SD(B) SD(B) SD(B) SD(B) SD(B)\n\n DEM1_num=DEM1_name[-5:-4] # DEM1 number\n DEM2_num=DEM2_name[-5:-40] # DEM2 number\n delta=int(DEM2_name[-5:-4])-int(DEM1_name[-5:-4]) # Calculate delta between DEM\n\n # Build up morphWact/W array for the current run boxplot\n # This array contain all the morphWact/W values for all the run repetition in the same line\n # This array contain only adjacent DEMs DoD\n if delta==1:\n morphWact_values = np.append(morphWact_values, act_width_array)\n\n # Fill Scour, Deposition and morphWact/w matrix:\n if delta != 0:\n # Fill matrix with values\n matrix_volumes[delta-1,h]=np.sum(DoD_vol)*px_x*px_y/(W*DoD_length*1000) # Total volumes as the sum of scour and deposition volumes\n matrix_dep[delta-1,h]=np.sum(dep_DoD)*px_x*px_y/(W*DoD_length*1000) # Deposition volumes as V/(W*L) [mm]\n matrix_sco[delta-1,h]=np.sum(sco_DoD)*px_x*px_y/(W*DoD_length*1000) # Scour volumes\n matrix_morph_act_area[delta-1,h]=morph_act_area # 
Total morphological active area\n matrix_morph_act_area_sco\n matrix_morph_act_area_dep\n\n # Fill last two columns with AVERAGE and STDEV\n matrix_volumes[delta-1,-2]=np.average(matrix_volumes[delta-1,:len(files)-delta])\n matrix_dep[delta-1,-2]=np.average(matrix_dep[delta-1,:len(files)-delta])\n matrix_sco[delta-1,-2]=np.average(matrix_sco[delta-1,:len(files)-delta])\n matrix_morph_act_area[delta-1,-2]=np.average(matrix_morph_act_area[delta-1,:len(files)-delta])\n\n matrix_volumes[delta-1,-1]=np.std(matrix_volumes[delta-1,:len(files)-delta])\n matrix_dep[delta-1,-1]=np.std(matrix_dep[delta-1,:len(files)-delta])\n matrix_sco[delta-1,-1]=np.std(matrix_sco[delta-1,:len(files)-delta])\n matrix_morph_act_area[delta-1,-1]=np.std(matrix_morph_act_area[delta-1,:len(files)-delta])\n\n # Fill active thickness matrix:\n matrix_act_thickness[delta-1,h]=act_thickness\n matrix_act_thickness[delta-1,-2]=np.average(matrix_act_thickness[delta-1,:len(files)-delta])\n matrix_act_thickness[delta-1,-1]=np.std(matrix_act_thickness[delta-1,:len(files)-delta])\n\n\n # Fill Wact/W MEAN matrix as below:\n # DoD step0 1-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 9-8 MIN MAX average STDEV\n # DoD step1 2-0 3-1 4-2 5-3 6-4 7-5 8-6 9-7 MIN MAX average STDEV\n # DoD step2 3-0 4-1 5-2 6-3 7-4 8-5 9-6 MIN MAX average STDEV\n # DoD step3 4-0 5-1 6-2 7-3 8-4 9-5 MIN MAX average STDEV\n # DoD step4 5-0 6-1 7-2 8-3 9-4 MIN MAX average STDEV\n # DoD step5 6-0 7-1 8-2 9-3 MIN MAX average STDEV\n # DoD step6 7-0 8-1 9-2 MIN MAX average STDEV\n # DoD step7 8-0 9-1 MIN MAX average STDEV\n # DoD step8 9-0 MIN MAX average STDEV\n\n matrix_Wact[delta-1,h]=act_width_mean\n matrix_Wact[delta-1,-2]=np.average(matrix_Wact[delta-1,:len(files)-delta])\n matrix_Wact[delta-1,-1]=np.std(matrix_Wact[delta-1,:len(files)-delta])\n\n\n # Fill Wact/W MAX (MIN) matrix as below:\n # NB: MIN and MAX columns are to be intended as the maximum and the minimum value\n # of the maximum (or minimum) values of DoDs row. 
So the MIN value of the\n # matrix_Wact_max is the minimum value between the maximum value.\n # DoD step0 1-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 9-8 MIN MAX\n # DoD step1 2-0 3-1 4-2 5-3 6-4 7-5 8-6 9-7 MIN MAX\n # DoD step2 3-0 4-1 5-2 6-3 7-4 8-5 9-6 MIN MAX\n # DoD step3 4-0 5-1 6-2 7-3 8-4 9-5 MIN MAX\n # DoD step4 5-0 6-1 7-2 8-3 9-4 MIN MAX\n # DoD step5 6-0 7-1 8-2 9-3 MIN MAX\n # DoD step6 7-0 8-1 9-2 MIN MAX\n # DoD step7 8-0 9-1 MIN MAX\n # DoD step8 9-0 MIN MAX\n\n # Fill MAX Wact/W matrix:\n matrix_Wact_max[delta-1,h]=np.max(act_width_array)\n matrix_Wact_max[delta-1,-2]=np.min(matrix_Wact_max[delta-1,:len(files)-delta])\n matrix_Wact_max[delta-1,-1]=np.max(matrix_Wact_max[delta-1,:len(files)-delta])\n\n # Fill MIN Wact/W matrix:\n matrix_Wact_min[delta-1,h]=np.min(act_width_array)\n matrix_Wact_min[delta-1,-2]=np.min(matrix_Wact_min[delta-1,:len(files)-delta])\n matrix_Wact_min[delta-1,-1]=np.max(matrix_Wact_min[delta-1,:len(files)-delta]) \n \n \n else:\n pass\n\n # Stack consecutive DoDs in a 3D array\n if h==0 and k==0: # initialize the first array with the DEM shape\n DoD_stack = np.zeros([len(files)-1, dim_x, dim_y])\n else:\n pass\n\n if delta==1:\n DoD_stack[h,:,:] = DoD_filt_nozero_rst[:,:]\n\n ###################################################################\n # SAVE DATA\n ###################################################################\n\n # RAW DoD\n # Print raw DoD in txt file (NaN as np.nan)\n np.savetxt(path_out + '/' + DoD_name + 'raw.txt', DoD_raw, fmt='%0.1f', delimiter='\\t')\n # Printing raw DoD in txt file (NaN as -999)\n np.savetxt(path_out + '/' + DoD_name + 'raw_rst.txt', DoD_raw_rst, fmt='%0.1f', delimiter='\\t')\n\n # MEAN DoD\n # Print DoD mean in txt file (NaN as np.nan)\n np.savetxt(path_out + '/' + DoD_name + 'mean.txt', DoD_mean , fmt='%0.1f', delimiter='\\t')\n # Print filtered DoD (with NaN as -999)\n np.savetxt(path_out + '/' + DoD_name + 'mean_rst.txt', DoD_mean_rst , fmt='%0.1f', delimiter='\\t')\n\n # FILTERED DoD\n # Print filtered DoD (with np.nan)...\n np.savetxt(path_out + '/' + DoD_name + 'filt_.txt', DoD_filt, fmt='%0.1f', delimiter='\\t')\n # Print filtered DoD (with NaN as -999)\n np.savetxt(path_out + '/' + DoD_name + 'filt_rst.txt', DoD_filt_rst, fmt='%0.1f', delimiter='\\t')\n\n # AVOIDED ZERO SURROUNDED DoD\n # Print filtered DoD (with np.nan)...\n np.savetxt(path_out + '/' + DoD_name + 'nozero.txt', DoD_filt_nozero, fmt='%0.1f', delimiter='\\t')\n # Print filtered DoD (with NaN as -999)\n np.savetxt(path_out + '/' + DoD_name + 'filt_nozero_rst.txt', DoD_filt_nozero_rst, fmt='%0.1f', delimiter='\\t')\n\n # ACTIVE PIXEL DoD\n # Print boolean map of active pixel: 1=active, 0=not active\n np.savetxt(path_out + '/' + DoD_name + 'active.txt', act_px_matrix, fmt='%0.1f', delimiter='\\t')\n\n # Print DoD and filtered DoD (with NaN as -999) in a GIS readable format (ASCII grid):\n with open(path_out + '/' + DoD_name + 'header.txt') as f_head:\n w_header = f_head.read() # Header\n with open(path_out + '/' + DoD_name + 'raw_rst.txt') as f_DoD:\n w_DoD_raw= f_DoD.read() # Raw DoD\n with open(path_out + '/' + DoD_name + 'mean_rst.txt') as f_DoD_mean:\n w_DoD_mean = f_DoD_mean.read() # Mean DoD\n with open(path_out + '/' + DoD_name + 'filt_rst.txt') as f_DoD_filt:\n w_DoD_filt = f_DoD_filt.read() # Filtered DoD\n with open(path_out + '/' + DoD_name + 'filt_nozero_rst.txt') as f_DoD_filt_nozero:\n w_DoD_filt_nozero = f_DoD_filt_nozero.read() # Avoided zero surrounded pixel DoD\n\n # Print GIS readable raster [raw DoD, mean DoD, filtered DoD]\n 
DoD_raw_gis = w_header + w_DoD_raw\n DoD_mean_gis = w_header + w_DoD_mean\n DoD_filt_gis = w_header + w_DoD_filt\n DoD_filt_nozero_gis = w_header + w_DoD_filt_nozero\n\n with open(path_out + '/' +'gis-'+ DoD_name + 'raw.txt', 'w') as fp:\n fp.write(DoD_raw_gis)\n with open(path_out + '/' +'gis-'+ DoD_name + 'mean.txt', 'w') as fp:\n fp.write(DoD_mean_gis)\n with open(path_out + '/' + 'gis-' + DoD_name + 'filt.txt', 'w') as fp:\n fp.write(DoD_filt_gis)\n with open(path_out + '/' + 'gis-' + DoD_name + 'filt_nozero_rst.txt', 'w') as fp:\n fp.write(DoD_filt_nozero_gis)\n\n # Print the last DoD outcome\n if save_plot_mode == 1:\n fig, ax = plt.subplots(dpi=200, tight_layout=True)\n im = ax.imshow(np.where(DoD_filt_nozero_rst==NaN, np.nan, DoD_filt_nozero_rst), cmap='RdBu', vmin=-25, vmax=25, aspect='0.1')\n plt.colorbar(im)\n plt.title(DoD_name[:-1], fontweight='bold')\n # plt.savefig(os.path.join(plot_dir, run +'_DoD.png'), dpi=200)\n plt.show()\n else:\n pass\n\n ###########################################################################\n # VOLUME AND MORPHOLOGICA ACTIVE WIDTH INTERPOLATION\n ###########################################################################\n '''\n Interpolation performed all over the volume data.\n Standard deviation is then applied to function parameters\n '''\n # Initialize arrays\n xData=[] # xData as time array\n yData_dep=[] # yData_dep deposition volume array\n yData_sco=[] # yData_sco scour volume array\n yData_morphW=[] # yData_morphW morphological active width array\n\n for i in range(0,len(files)-1):\n xData=np.append(xData, np.ones(len(files)-i-1)*(i+1)*dt) # Create xData array for all the volume points\n yData_dep=np.append(yData_dep, matrix_dep[i,:len(files)-i-1]) # deposition volumes (unroll yData)\n yData_sco=np.append(yData_sco, abs(matrix_sco[i,:len(files)-i-1])) # scour volumes (unroll yData)\n yData_morphW=np.append(yData_morphW, abs(matrix_Wact[i,:len(files)-i-1])) # scour volumes (unroll yData)\n\n\n\n # Define interpolation array and initial guess:\n volume_temp_scale_array = [] # Define volume temporal scale array\n morphW_temp_scale_array = [] # Define morphW temporal scale array\n ic_dep=np.array([np.mean(yData_dep),np.min(xData)]) # Initial deposition parameter guess\n ic_sco=np.array([np.mean(yData_sco),np.min(xData)]) # Initial scour parameter guess\n ic_morphW=np.array([np.mean(yData_morphW),np.min(xData)]) # Initial morphW parameter guess\n\n # Perform interpolation for deposition and scour volumes, and for morphological active width\n par_dep, intCurve_dep, covar_dep = interpolate(func_exp, xData, yData_dep, ic_dep) # Deposition interpolation\n par_sco, intCurve_sco, covar_sco = interpolate(func_exp, xData, yData_sco, ic_sco) # Scour interpolation\n par_morphW, intCurve_morphW, covar_morphW = interpolate(func_exp3, xData, yData_morphW, ic_morphW) # morphW interpolation\n\n\n # Build up volume temporal scale array for each runs\n if run_mode==2:\n volume_temp_scale_array = np.append(volume_temp_scale_array, (par_dep[1], covar_dep[1,1], par_sco[1], covar_sco[1,1])) # Append values\n volume_temp_scale_report[int(np.where(RUNS==run)[0]),:]=volume_temp_scale_array # Populate temporal scale report\n\n # Build up morphW temporal scale array for each runs\n if run_mode==2:\n morphW_temp_scale_array = np.append(morphW_temp_scale_array, (par_morphW[1], covar_morphW[1,1])) # Append values\n morphW_temp_scale_report[int(np.where(RUNS==run)[0]),:]=morphW_temp_scale_array # Populate temporal scale report\n\n print()\n print('All volume points 
interpolation parameters:')\n print('Deposition interpolation parameters')\n print('A=', par_dep[0], 'Variance=', covar_dep[0,0])\n print('B=', par_dep[1], 'Variance=', covar_dep[1,1])\n print('Scour interpolation parameters')\n print('A=', par_sco[0], 'Variance=', covar_sco[0,0])\n print('B=', par_sco[1], 'Variance=', covar_sco[1,1])\n print()\n print('All morphW points interpolation parameters:')\n print('A=', par_morphW[0], 'Variance=', covar_morphW[0,0])\n print('B=', par_morphW[1], 'Variance=', covar_morphW[1,1])\n\n\n if save_plot_mode == 1:\n fig1, axs = plt.subplots(2,1,dpi=200, sharex=True, tight_layout=True)\n axs[0].plot(xData, yData_dep, 'o')\n axs[0].plot(xData, intCurve_dep, c='red')\n axs[0].set_title('Deposition volumes interpolation '+run)\n axs[0].set_xlabel('Time [min]')\n axs[0].set_ylabel('Volume V/(L*W) [mm]')\n axs[1].plot(xData, yData_sco, 'o')\n axs[1].plot(xData, intCurve_sco, c='red')\n axs[1].set_title('Scour volumes interpolation '+run)\n axs[1].set_xlabel('Time [min]')\n axs[1].set_ylabel('Volume V/(L*W) [mm]')\n plt.savefig(os.path.join(plot_dir, run +'_volume_interp.png'), dpi=200)\n plt.show()\n\n fig2, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)\n axs.plot(xData, yData_morphW, 'o', c='brown')\n axs.plot(xData, intCurve_morphW, c='green')\n axs.set_title('Morphological active width (morphW/W) '+run)\n axs.set_xlabel('Time [min]')\n axs.set_ylabel('morphW/W [-]')\n plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)\n plt.show()\n\n else:\n pass\n\n\n\n\n\n # # Fill scour and deposition report matrix with interpolation parameters\n # for i in range(0, len(files)-3): # Last three columns have 1 or 2 or 3 values: not enought -> interpolation skipped\n # xData = np.ones(len(files)-i-1)*(i+1)*dt # Create xData array for all the volume points\n # # xData = np.arange(0, len(files)-i-1, 1)\n\n # #Fill deposition matrix\n # yData_dep=matrix_dep[:len(files)-i-1,i] # yData as value of deposition volume\n # ic_dep=np.array([np.mean(yData_dep),np.min(xData)]) # Initial deposition parameter guess\n # par_dep, intCurve, covar_dep = interpolate(func, xData, yData_dep, ic_dep)\n # matrix_dep[-4,i], matrix_dep[-2,i]= par_dep[0], par_dep[1] # Parameter A and B\n # matrix_dep[-3,i], matrix_dep[-1,i]= covar_dep[0,0], covar_dep[1,1] # STD(A) and STD(B)\n\n # # Fill scour matrix\n # yData_sco=np.absolute(matrix_sco[:len(files)-i-1,i])\n # ic_sco=np.array([np.mean(yData_sco),np.min(xData)]) # Initial scour parameter guess\n # par_sco, intCurve, covar_sco = interpolate(func, xData, yData_sco, ic_sco)\n # matrix_sco[-4,i], matrix_sco[-2,i]= par_sco[0], par_sco[1] # Parameter A and B\n # matrix_sco[-3,i], matrix_sco[-1,i]= covar_sco[0,0], covar_sco[1,1] # STD(A) and STD(B)\n\n # print(xData)\n # print(yData_dep)\n # print(yData_sco)\n\n ###############################################################################\n # SAVE DATA MATRIX\n ###############################################################################\n # Create report matrix\n report_matrix = np.array(np.transpose(np.stack((comb, DoD_count_array, volumes_array, dep_array, sco_array, morph_act_area_array, act_width_mean_array))))\n report_header = 'DoD_combination, Active pixels, Total volume [mm^3], Deposition volume [mm^3], Scour volume [mm^3], Active area [mm^2], Active width mean [%]'\n\n report_name = run + '_report.txt'\n with open(os.path.join(report_dir , report_name), 'w') as fp:\n fp.write(report_header)\n fp.writelines(['\\n'])\n for i in 
range(0,len(report_matrix[:,0])):\n for j in range(0, len(report_matrix[0,:])):\n if j == 0:\n fp.writelines([report_matrix[i,j]+', '])\n else:\n fp.writelines([\"%.3f, \" % float(report_matrix[i,j])])\n fp.writelines(['\\n'])\n fp.close()\n\n\n # Create deposition matrix report\n report_dep_name = os.path.join(report_dir, run +'_dep_report.txt')\n np.savetxt(report_dep_name, matrix_dep, fmt='%.1f', delimiter=',', newline='\\n')\n\n # Create scour matrix report\n report_sco_name = os.path.join(report_dir, run +'_sco_report.txt')\n np.savetxt(report_sco_name, matrix_sco, fmt='%.1f', delimiter=',', newline='\\n')\n \n # Create active thickness matrix report\n report_act_thickness_name = os.path.join(report_dir, run +'_act_thickness_report.txt')\n np.savetxt(report_act_thickness_name, matrix_act_thickness , fmt='%.3f', delimiter=',', newline='\\n')\n \n # Create active area matrix report\n report_act_area_name = os.path.join(report_dir, run + '_act_area_report.txt')\n np.savetxt(report_act_area_name, matrix_morph_act_area, fmt='%.3f', delimiter=',', newline='\\n')\n\n # Create Wact report matrix\n matrix_Wact[:,len(files)-1]=matrix_Wact_min[:,len(files)-1] # Fill matrix_Wact report with minimum values\n matrix_Wact[:,len(files)]=matrix_Wact_max[:,len(files)-1] # Fill matrix_Wact report with maximum values\n matrix_Wact=matrix_Wact[:len(files)-1,:]\n report_Wact_name = os.path.join(report_dir, run +'_morphWact_report.txt')\n np.savetxt(report_Wact_name, matrix_Wact, fmt='%.3f', delimiter=',', newline='\\n')\n\n # For each runs collect the dimension of the morphWact_array:\n if delta==1:\n morphWact_dim = np.append(morphWact_dim, len(morphWact_values))\n\n\n # Create morphWact/W matrix as following:\n # all morphWact/W values are appended in the same line for each line in the morphWact_values array\n # Now a matrix in which all row are all morphWact/W values for each runs is built\n # morphWact_matrix_header = 'run name, morphWact/W [-]'\n # run name, morphWact/w [-]\n with open(os.path.join(report_dir, run + '_morphWact_array.txt'), 'w') as fp:\n # fp.write(morphWact_matrix_header)\n # fp.writelines(['\\n'])\n for i in range(0, len(morphWact_values)):\n if i == len(morphWact_values)-1:\n fp.writelines([\"%.3f\" % float(morphWact_values[i])])\n else:\n fp.writelines([\"%.3f,\" % float(morphWact_values[i])])\n fp.writelines(['\\n'])\n fp.close()\n\n\n\n ###########################################################################\n # PLOTS\n ###########################################################################\n # Define arrays for scour and volume data over time\n xData=np.arange(1, len(files), 1)*dt_xnr # Time in Txnr\n yData_sco=np.absolute(matrix_sco[:len(files)-1,0])\n yError_sco=matrix_sco[:len(files)-1,-1]\n yData_dep=np.absolute(matrix_dep[:len(files)-1,0])\n yError_dep=matrix_dep[:len(files)-1,-1]\n yData_act_thickness=matrix_act_thickness[:len(files)-1,0]\n yError_act_thickness=matrix_act_thickness[:len(files)-1,-1]\n\n if save_plot_mode==1:\n fig3, axs = plt.subplots(2,1,dpi=80, figsize=(10,6), sharex=True, tight_layout=True)\n fig3.suptitle(run + ' - Volume')\n axs[0].errorbar(xData,yData_sco, yError_sco, linestyle='--', marker='^', color='red')\n axs[0].set_ylim(bottom=0)\n axs[0].set_title('Scour')\n # axs[0].set_xlabel()\n axs[0].set_ylabel('Scour volume V/(L*W) [mm]')\n axs[1].errorbar(xData,yData_dep, yError_dep, linestyle='--', marker='^', color='blue')\n axs[1].set_ylim(bottom=0)\n axs[1].set_title('Deposition')\n axs[1].set_xlabel('Exner time')\n axs[1].set_ylabel('Scour 
olume V/(L*W) [mm]')\n plt.savefig(os.path.join(plot_dir, run +'dep_scour.png'), dpi=200)\n plt.show()\n \n \n fig4, axs = plt.subplots(1,1,dpi=80, figsize=(10,6), sharex=True, tight_layout=True)\n axs.errorbar(xData,yData_act_thickness, yError_act_thickness, linestyle='--', marker='^', color='purple')\n axs.set_ylim(bottom=0)\n axs.set_title(run + '- Active thickness')\n axs.set_xlabel('Exner time')\n axs.set_ylabel('Active thickness [mm]')\n plt.savefig(os.path.join(plot_dir, run +'active_thickness_.png'), dpi=200)\n plt.show()\n else:\n pass\n\n # # Print a report with xData as real time in minutes and the value of scour and deposition volumes for each runs\n # Create report matrix as:\n # run\n # time\n # V_dep\n # V_sco\n\n xData=np.arange(1, len(files), 1)*dt\n volume_over_time_matrix = []\n volume_over_time_matrix = np.stack((xData, yData_dep, -yData_sco))\n\n # Append rows to the current file\n with open(os.path.join(report_dir, 'volume_over_time.txt'), 'a') as fp:\n fp.writelines([run+', '])\n fp.writelines(['\\n'])\n for i in range(0,volume_over_time_matrix.shape[0]):\n for j in range(0,volume_over_time_matrix.shape[1]):\n fp.writelines([\"%.3f, \" % float(volume_over_time_matrix[i,j])])\n fp.writelines(['\\n'])\n fp.writelines(['\\n'])\n fp.close()\n\n\n\n\n\n # if save_plot_mode == 1:\n # # Print scour volumes over increasing timestep:\n # fig1, ax1 = plt.subplots(dpi=100)\n # # ax1.bar(np.arange(0, len(matrix_sco[:,0]), 1),abs(matrix_sco[:,0]))\n # # ax1.plot(t[int(len(t)/10):-int(len(t)/10)], m*t[int(len(t)/10):-int(len(t)/10)]+q)\n # # xData=np.arange(1, len(files), 1)*dt # Time in minutes\n # xData=np.arange(1, len(files), 1)*dt_xnr # Time in Txnr\n # yData=np.absolute(matrix_sco[:len(files)-1,0])\n # yError=matrix_sco[:len(files)-1,-1]\n # ax1.errorbar(xData,yData, yError, linestyle='--', marker='^')\n # ax1.set_ylim(bottom=0)\n # ax1.set_title(run)\n # ax1.set_xlabel('Exner time')\n # ax1.set_ylabel('Scour volume [mm³]')\n # plt.savefig(os.path.join(plot_dir, run +'_scour.png'), dpi=200)\n # plt.show()\n\n # # Print deposition volumes over increasing timestep:\n # fig1, ax1 = plt.subplots(dpi=100)\n # # ax1.bar(np.arange(0, len(matrix_sco[:,0]), 1),abs(matrix_sco[:,0]))\n # # ax1.plot(t[int(len(t)/10):-int(len(t)/10)], m*t[int(len(t)/10):-int(len(t)/10)]+q)\n # # xData=np.arange(1, len(files), 1)*dt # Time in minutes\n # xData=np.arange(1, len(files), 1)*dt_xnr # Time in Txnr\n # yData=np.absolute(matrix_dep[:len(files)-1,0])\n # yError=matrix_sco[:len(files)-1,-1]\n # ax1.errorbar(xData,yData, yError, linestyle='--', marker='^')\n # ax1.set_ylim(bottom=0)\n # ax1.set_title(run)\n # ax1.set_xlabel('Exner time')\n # ax1.set_ylabel('Deposition volume [mm³]')\n # plt.savefig(os.path.join(plot_dir, run +'_dep.png'), dpi=200)\n # plt.show()\n # else:\n # pass\n\n\nif run_mode==2:\n # Print vulume teporal scale report\n volume_temp_scale_report_header = 'run name, B_dep [min], SD(B_dep) [min], B_sco [min], SD(B_sco) [min]'\n # Write temporl scale report as:\n # run name, B_dep, SD(B_dep), B_sco, SD(B_sco)\n with open(os.path.join(report_dir, 'volume_temp_scale_report.txt'), 'w') as fp:\n fp.write(volume_temp_scale_report_header)\n fp.writelines(['\\n'])\n for i in range(0,len(RUNS)):\n for j in range(0, volume_temp_scale_report.shape[1]+1):\n if j == 0:\n fp.writelines([RUNS[i]+', '])\n else:\n fp.writelines([\"%.3f, \" % float(volume_temp_scale_report[i,j-1])])\n fp.writelines(['\\n'])\n fp.close()\n\n # Print morphW teporal scale report\n morphW_temp_scale_report_header = 
'run name, B_morphW [min], SD(B_morphW) [min]'\n # Write morphW temporl scale report as:\n # run name, B_morphW, SD(B_morphW)\n with open(os.path.join(report_dir, 'morphW_temp_scale_report.txt'), 'w') as fp:\n fp.write(morphW_temp_scale_report_header)\n fp.writelines(['\\n'])\n for i in range(0,len(RUNS)):\n for j in range(0, morphW_temp_scale_report.shape[1]+1):\n if j == 0:\n fp.writelines([RUNS[i]+', '])\n else:\n fp.writelines([\"%.3f, \" % float(morphW_temp_scale_report[i,j-1])])\n fp.writelines(['\\n'])\n fp.close()\n\n if DEM_analysis_mode==1:\n engelund_model_report_header = 'run name, D [m], Q [m^3/s], Wwet/W [-]'\n # Write temporl scale report as:\n # run name, B_dep, SD(B_dep), B_sco, SD(B_sco)\n with open(os.path.join(report_dir, 'engelund_model_report.txt'), 'w') as fp:\n fp.write(engelund_model_report_header)\n fp.writelines(['\\n'])\n for i in range(0,len(RUNS)):\n for j in range(0, engelund_model_report.shape[1]+1):\n if j == 0:\n fp.writelines([RUNS[i]+', '])\n elif j==2:\n fp.writelines([\"%.5f, \" % float(engelund_model_report[i,j-1])])\n else:\n fp.writelines([\"%.3f, \" % float(engelund_model_report[i,j-1])])\n fp.writelines(['\\n'])\n fp.close()\n\n\n\n# Create morphWact/W runs boxplot\n# Define active width matrix\nmorphWact_matrix=np.zeros((len(RUNS), int(np.max(morphWact_dim))))\nfor i in range(0,len(RUNS)):\n data=np.loadtxt(os.path.join(report_dir, RUNS[i] + '_morphWact_array.txt'), delimiter=',')\n morphWact_matrix[i,:len(data)]=data\n\n# Set zero as np.nan\nmorphWact_matrix = np.where(morphWact_matrix==0, np.nan, morphWact_matrix)\n\n# Filter np.nan\nfig, ax = plt.subplots(dpi=80, figsize=(10,6))\nfig.suptitle('Dimensionless morphological active width', fontsize = 18)\nfor i in range(0, len(RUNS)):\n bplot=ax.boxplot(morphWact_matrix[i,:][~np.isnan(morphWact_matrix[i,:])], positions=[i], widths=0.5) # Data were filtered by np.nan values\nax.yaxis.grid(True)\nax.set_xlabel('Runs', fontsize=12)\nax.set_ylabel('morphWact/W [-]', fontsize=12)\nplt.xticks(np.arange(0,len(RUNS), 1), RUNS)\nplt.savefig(os.path.join(plot_dir, 'morphWact_boxplot.png'), dpi=200)\nplt.show()\n\n\n\nend = time.time()\nprint()\nprint('Execution time: ', (end-start), 's')\n"
] | [
[
"numpy.polyfit",
"numpy.sqrt",
"numpy.round",
"numpy.max",
"numpy.mean",
"numpy.nanmean",
"numpy.nanstd",
"numpy.exp",
"scipy.optimize.curve_fit",
"numpy.where",
"numpy.pad",
"numpy.stack",
"numpy.ln",
"numpy.copy",
"numpy.nansum",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.isclose",
"numpy.log",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.isnan",
"numpy.logical_or",
"numpy.append",
"numpy.savetxt",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.abs",
"matplotlib.pyplot.subplots",
"numpy.sign",
"matplotlib.pyplot.colorbar",
"numpy.average",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
nickduran/pliers | [
"9b10b27e70c3fbb7647eb1c70e031f55d824f3f6"
] | [
"pliers/tests/extractors/api/test_clarifai_extractors.py"
] | [
"from os.path import join\n\nimport numpy as np\nimport pytest\n\nfrom ...utils import get_test_data_path\nfrom pliers import config\nfrom pliers.extractors import (ClarifaiAPIImageExtractor,\n ClarifaiAPIVideoExtractor)\nfrom pliers.extractors.base import merge_results\nfrom pliers.stimuli import ImageStim, VideoStim\n\nIMAGE_DIR = join(get_test_data_path(), 'image')\nVIDEO_DIR = join(get_test_data_path(), 'video')\n\n\[email protected]_payment\[email protected](\"'CLARIFAI_API_KEY' not in os.environ\")\ndef test_clarifai_api_extractor():\n stim = ImageStim(join(IMAGE_DIR, 'apple.jpg'))\n ext = ClarifaiAPIImageExtractor()\n assert ext.validate_keys()\n result = ext.transform(stim).to_df()\n assert result['apple'][0] > 0.5\n assert result.ix[:, 5][0] > 0.0\n\n result = ClarifaiAPIImageExtractor(max_concepts=5).transform(stim).to_df()\n assert result.shape == (1, 9)\n\n result = ClarifaiAPIImageExtractor(\n min_value=0.9).transform(stim).to_df(object_id=False)\n assert all(np.isnan(d) or d > 0.9 for d in result.values[0, 3:])\n\n concepts = ['cat', 'dog']\n result = ClarifaiAPIImageExtractor(select_concepts=concepts).transform(stim)\n result = result.to_df()\n assert result.shape == (1, 6)\n assert 'cat' in result.columns and 'dog' in result.columns\n\n url = 'https://via.placeholder.com/350x150'\n stim = ImageStim(url=url)\n result = ClarifaiAPIImageExtractor(max_concepts=5).transform(stim).to_df()\n assert result.shape == (1, 9)\n assert result['graphic'][0] > 0.8\n\n ext = ClarifaiAPIImageExtractor(api_key='nogood')\n assert not ext.validate_keys()\n\n stim = ImageStim(join(IMAGE_DIR, 'obama.jpg'))\n result = ClarifaiAPIImageExtractor(model='face').transform(stim).to_df()\n keys_to_check = ['top_row', 'left_col', 'bottom_row', 'right_col']\n assert [k not in result.keys() for k in keys_to_check]\n assert all([result[k][0] != np.nan for k in result if k in keys_to_check])\n\n stim = ImageStim(join(IMAGE_DIR, 'aspect_ratio_fail.jpg'))\n result = ClarifaiAPIImageExtractor(model='face').transform(stim).to_df()\n assert [k in result.keys() for k in keys_to_check]\n\n # check whether a multi-face image has the appropriate amount of rows\n stim = ImageStim(join(IMAGE_DIR, 'thai_people.jpg'))\n result = ClarifaiAPIImageExtractor(model='face').transform(stim).to_df()\n assert len(result) == 4\n\[email protected]_payment\[email protected](\"'CLARIFAI_API_KEY' not in os.environ\")\ndef test_clarifai_api_extractor_batch():\n stim = ImageStim(join(IMAGE_DIR, 'apple.jpg'))\n stim2 = ImageStim(join(IMAGE_DIR, 'obama.jpg'))\n ext = ClarifaiAPIImageExtractor()\n results = ext.transform([stim, stim2])\n results = merge_results(results)\n assert results['ClarifaiAPIImageExtractor#apple'][0] > 0.5 or \\\n results['ClarifaiAPIImageExtractor#apple'][1] > 0.5\n\n\[email protected]_payment\[email protected](\"'CLARIFAI_API_KEY' not in os.environ\")\ndef test_clarifai_api_extractor_large():\n default = config.get_option('allow_large_jobs')\n default_large = config.get_option('large_job')\n config.set_option('allow_large_jobs', False)\n config.set_option('large_job', 1)\n\n ext = ClarifaiAPIImageExtractor()\n images = [ImageStim(join(IMAGE_DIR, 'apple.jpg')),\n ImageStim(join(IMAGE_DIR, 'obama.jpg'))]\n with pytest.raises(ValueError):\n merge_results(ext.transform(images))\n\n config.set_option('allow_large_jobs', True)\n results = merge_results(ext.transform(images))\n assert 'ClarifaiAPIImageExtractor#apple' in results.columns\n assert results.shape == (2, 49)\n\n config.set_option('allow_large_jobs', 
default)\n config.set_option('large_job', default_large)\n\n\[email protected]_payment\[email protected](\"'CLARIFAI_API_KEY' not in os.environ\")\ndef test_clarifai_api_video_extractor():\n stim = VideoStim(join(VIDEO_DIR, 'small.mp4'))\n ext = ClarifaiAPIVideoExtractor()\n assert ext.validate_keys()\n result = ext.transform(stim).to_df()\n # This should actually be 6, in principle, because the clip is < 6 seconds,\n # but the Clarifai API is doing weird things. See comment in\n # ClarifaiAPIVideoExtractor._to_df() for further explanation.\n assert result.shape[0] in (6, 7)\n # Changes sometimes, so use a range\n assert result.shape[1] > 25 and result.shape[1] < 30\n assert result['toy'][0] > 0.5\n assert result['onset'][1] == 1.0\n assert result['duration'][0] == 1.0\n # because of the behavior described above—handle both cases\n assert np.isclose(result['duration'][5], 0.57) or result['duration'][6] == 0\n\n ext = ClarifaiAPIVideoExtractor(model='face')\n result = ext.transform(stim).to_df()\n keys_to_check = ['top_row', 'left_col', 'bottom_row', 'right_col']\n assert [k not in result.keys() for k in keys_to_check]\n assert all([result[k][0] == np.nan for k in result if k in keys_to_check])\n\n stim = VideoStim(join(VIDEO_DIR, 'obama_speech.mp4'))\n result = ext.transform(stim).to_df()\n keys_to_check = ['top_row', 'left_col', 'bottom_row', 'right_col']\n assert [k in result.keys() for k in keys_to_check]\n # check if we return more than 1 row per second of face bounding boxes\n assert len(result) > 6"
] | [
[
"numpy.isnan",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bansan85/ocr-book-tests | [
"00594aa5e637e5a96d91d52449e669214985c708"
] | [
"test_images.py"
] | [
"import unittest\n\nimport numpy as np\n\nfrom diptych.angle import Angle\nfrom diptych.cv2ext import charge_image\nfrom diptych.fsext import get_absolute_from_current_path\nfrom diptych.print_interface import ConstString\nfrom tests.mock_separate_page import MockDisableSeparatePage\n\nnp.seterr(all=\"raise\")\ntc = unittest.TestCase()\n\n\nMAX_VAL = 6\nFUZZING = False\n\n\ndef test_0001_png() -> None:\n \"\"\"first good page\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"0001.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.42),\n Angle.deg(90.68),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2480, 2489),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(0.49),\n Angle.deg(0.81),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.09),\n Angle.deg(0.21),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 325, 332),\n ConstString.image_crop(1, \"y1\"): (\"range\", 334, 337),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2342, 2347),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3221, 3223),\n ConstString.image_crop(2, \"x1\"): (\"range\", 165, 175),\n ConstString.image_crop(2, \"y1\"): (\"range\", 648, 649),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2180, 2190),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3360, 3362),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 317, 340),\n ConstString.image_border(1, 2): (\"range\", 260, 282),\n ConstString.image_border(1, 3): (\"range\", 219, 225),\n ConstString.image_border(1, 4): (\"range\", 219, 225),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 638, 674),\n ConstString.image_border(2, 2): (\"range\", 101, 136),\n ConstString.image_border(2, 3): (\"range\", 221, 224),\n ConstString.image_border(2, 4): (\"range\", 221, 224),\n },\n )\n\n\ndef test_2_pages_2_contours_png() -> None:\n \"\"\"There is not one contour for the two pages,\n but one contour for each page.\n \"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"2-pages-2-contours.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.91),\n Angle.deg(90.32),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2486, 2492),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.11),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.04),\n Angle.deg(0.41),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 1181, 1199),\n ConstString.image_crop(1, \"y1\"): (\"range\", 1719, 1751),\n ConstString.image_crop(1, \"x2\"): (\"range\", 1182, 1200),\n ConstString.image_crop(1, \"y2\"): (\"range\", 1720, 1752),\n ConstString.image_crop(2, \"x1\"): (\"range\", 89, 114),\n ConstString.image_crop(2, \"y1\"): (\"range\", 240, 241),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2136, 2159),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3239, 3242),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 1752, 1753),\n ConstString.image_border(1, 2): (\"range\", 1753, 1753),\n ConstString.image_border(1, 3): (\"range\", 1239, 1239),\n ConstString.image_border(1, 4): (\"range\", 1239, 1239),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 226, 240),\n ConstString.image_border(2, 2): 
(\"range\", 248, 262),\n ConstString.image_border(2, 3): (\"range\", 203, 207),\n ConstString.image_border(2, 4): (\"range\", 203, 207),\n },\n )\n\n\ndef test_black_border_not_removed_png() -> None:\n \"\"\"The border on the right is still there.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"black-border-not-removed.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.95),\n Angle.deg(90.1),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2451, 2458),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.11),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.21),\n Angle.deg(0.16),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 294, 299),\n ConstString.image_crop(1, \"y1\"): (\"range\", 139, 144),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2305, 2312),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3345, 3349),\n ConstString.image_crop(2, \"x1\"): (\"range\", 153, 159),\n ConstString.image_crop(2, \"y1\"): (\"range\", 143, 146),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2168, 2173),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3350, 3353),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 127, 137),\n ConstString.image_border(1, 2): (\"range\", 145, 157),\n ConstString.image_border(1, 3): (\"range\", 221, 226),\n ConstString.image_border(1, 4): (\"range\", 221, 226),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 130, 139),\n ConstString.image_border(2, 2): (\"range\", 141, 151),\n ConstString.image_border(2, 3): (\"range\", 222, 224),\n ConstString.image_border(2, 4): (\"range\", 222, 224),\n },\n )\n\n\ndef test_image_failed_to_rotate_png() -> None:\n \"\"\"Failed to compute angle to rotate. 
The image takes the whole page.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"image_failed_to_rotate.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.07),\n Angle.deg(90.50),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2476, 2488),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.01),\n Angle.deg(0.66),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.19),\n Angle.deg(0.51),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 19, 91),\n ConstString.image_crop(1, \"y1\"): (\"range\", 1, 23),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2456, 2486),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3483, 3505),\n ConstString.image_crop(2, \"x1\"): (\"range\", 159, 183),\n ConstString.image_crop(2, \"y1\"): (\"range\", 231, 236),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2242, 2261),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3354, 3359),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 2, 19),\n ConstString.image_border(1, 2): (\"range\", 2, 19),\n ConstString.image_border(1, 3): (\"range\", 10, 55),\n ConstString.image_border(1, 4): (\"range\", 10, 55),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 206, 228),\n ConstString.image_border(2, 2): (\"range\", 140, 160),\n ConstString.image_border(2, 3): (\"range\", 186, 192),\n ConstString.image_border(2, 4): (\"range\", 186, 192),\n },\n )\n\n\ndef test_image_failed_to_crop_data_png() -> None:\n \"\"\"Failed to detect edges. The image takes the whole page and is too closed\n to the border of the image.\n \"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"image_failed_to_crop_data.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.86),\n Angle.deg(90.20),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2477, 2486),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.01),\n Angle.deg(0.21),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 40, 116),\n ConstString.image_crop(1, \"y1\"): (\"range\", 1, 13),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2469, 2483),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3499, 3505),\n ConstString.image_crop(2, \"x1\"): (\"range\", 155, 168),\n ConstString.image_crop(2, \"y1\"): (\"range\", 217, 220),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2235, 2248),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3348, 3350),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 2, 11),\n ConstString.image_border(1, 2): (\"range\", 2, 11),\n ConstString.image_border(1, 3): (\"range\", 19, 58),\n ConstString.image_border(1, 4): (\"range\", 19, 58),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 205, 225),\n ConstString.image_border(2, 2): (\"range\", 129, 151),\n ConstString.image_border(2, 3): (\"range\", 189, 192),\n ConstString.image_border(2, 4): (\"range\", 189, 192),\n },\n )\n\n\ndef test_wrong_angle_split_line_png() -> None:\n \"\"\"Failed to detect edges. 
The image takes the whole page and is too closed\n to the border of the image.\n \"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"wrong_angle_split_line.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.00),\n Angle.deg(90.22),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2476, 2487),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.01),\n Angle.deg(0.21),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 28, 61),\n ConstString.image_crop(1, \"y1\"): (\"range\", 1, 10),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2470, 2485),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3500, 3505),\n ConstString.image_crop(2, \"x1\"): (\"range\", 154, 171),\n ConstString.image_crop(2, \"y1\"): (\"range\", 217, 219),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2237, 2249),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3348, 3350),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 2, 8),\n ConstString.image_border(1, 2): (\"range\", 2, 8),\n ConstString.image_border(1, 3): (\"range\", 15, 34),\n ConstString.image_border(1, 4): (\"range\", 15, 34),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 195, 219),\n ConstString.image_border(2, 2): (\"range\", 136, 159),\n ConstString.image_border(2, 3): (\"range\", 188, 192),\n ConstString.image_border(2, 4): (\"range\", 188, 192),\n },\n )\n tc.assertEqual(\n charge_image(\n get_absolute_from_current_path(\n __file__, \"wrong_angle_split_line.png_page_1.png\"\n )\n ).shape[2],\n 3,\n )\n\n\ndef test_angle_page_lower_split_line_png() -> None:\n \"\"\"Failed when angle of a page in lower than\n the angle of the split line.\n \"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"angle_page_lower_split_line.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.71),\n Angle.deg(89.81),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2470, 2475),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.46),\n Angle.deg(-0.14),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.04),\n Angle.deg(0.21),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 241, 245),\n ConstString.image_crop(1, \"y1\"): (\"range\", 156, 161),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2350, 2357),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3364, 3368),\n ConstString.image_crop(2, \"x1\"): (\"range\", 136, 154),\n ConstString.image_crop(2, \"y1\"): (\"range\", 145, 147),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2243, 2264),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3350, 3352),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 130, 147),\n ConstString.image_border(1, 2): (\"range\", 135, 152),\n ConstString.image_border(1, 3): (\"range\", 172, 177),\n ConstString.image_border(1, 4): (\"range\", 172, 177),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 116, 132),\n ConstString.image_border(2, 2): (\"range\", 151, 168),\n ConstString.image_border(2, 3): (\"range\", 174, 176),\n ConstString.image_border(2, 4): (\"range\", 174, 176),\n },\n )\n\n\ndef test_wrong_split_line_png() -> 
None:\n \"\"\"Improve choice of the split line between different algorithm.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"wrong_split_line.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.80),\n Angle.deg(89.99),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2461, 2471),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.01),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.19),\n Angle.deg(0.71),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 211, 213),\n ConstString.image_crop(1, \"y1\"): (\"range\", 155, 157),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2322, 2324),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3362, 3363),\n ConstString.image_crop(2, \"x1\"): (\"range\", 115, 129),\n ConstString.image_crop(2, \"y1\"): (\"range\", 160, 167),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2230, 2243),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3371, 3378),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 115, 180),\n ConstString.image_border(1, 2): (\"range\", 100, 167),\n ConstString.image_border(1, 3): (\"range\", 173, 175),\n ConstString.image_border(1, 4): (\"range\", 173, 175),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 129, 148),\n ConstString.image_border(2, 2): (\"range\", 129, 152),\n ConstString.image_border(2, 3): (\"range\", 168, 174),\n ConstString.image_border(2, 4): (\"range\", 168, 174),\n },\n )\n\n\ndef test_crop_too_much_png() -> None:\n \"\"\"Reduce distance to ignore black area closed to the edge.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"crop_too_much.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.03),\n Angle.deg(90.47),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2452, 2463),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.16),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.19),\n Angle.deg(0.51),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 300, 303),\n ConstString.image_crop(1, \"y1\"): (\"range\", 145, 148),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2313, 2317),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3350, 3353),\n ConstString.image_crop(2, \"x1\"): (\"range\", 158, 180),\n ConstString.image_crop(2, \"y1\"): (\"range\", 151, 156),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2176, 2192),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3359, 3363),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 116, 135),\n ConstString.image_border(1, 2): (\"range\", 147, 168),\n ConstString.image_border(1, 3): (\"range\", 221, 225),\n ConstString.image_border(1, 4): (\"range\", 221, 225),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 95, 142),\n ConstString.image_border(2, 2): (\"range\", 140, 186),\n ConstString.image_border(2, 3): (\"range\", 218, 224),\n ConstString.image_border(2, 4): (\"range\", 218, 224),\n },\n )\n\n\ndef test_crop_too_few_png() -> None:\n \"\"\"Improve detection of black area to ignored\n and that are closed to the edge.\n \"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, 
\"crop_too_few.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.22),\n Angle.deg(89.62),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2508, 2515),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.81),\n Angle.deg(-0.49),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.16),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 261, 265),\n ConstString.image_crop(1, \"y1\"): (\"range\", 148, 161),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2375, 2381),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3356, 3359),\n ConstString.image_crop(2, \"x1\"): (\"range\", 130, 141),\n ConstString.image_crop(2, \"y1\"): (\"range\", 138, 141),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2256, 2263),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3343, 3347),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 84, 137),\n ConstString.image_border(1, 2): (\"range\", 141, 208),\n ConstString.image_border(1, 3): (\"range\", 170, 175),\n ConstString.image_border(1, 4): (\"range\", 170, 175),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 126, 143),\n ConstString.image_border(2, 2): (\"range\", 140, 158),\n ConstString.image_border(2, 3): (\"range\", 166, 169),\n ConstString.image_border(2, 4): (\"range\", 166, 169),\n },\n )\n\n\ndef test_crop_too_much_2_png() -> None:\n \"\"\"Reduce distance to ignore black area closed to the edge.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"crop_too_much_2.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.08),\n Angle.deg(90.29),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2475, 2482),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(0.04),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.04),\n Angle.deg(0.21),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 328, 332),\n ConstString.image_crop(1, \"y1\"): (\"range\", 131, 134),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2342, 2345),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3334, 3336),\n ConstString.image_crop(2, \"x1\"): (\"range\", 159, 170),\n ConstString.image_crop(2, \"y1\"): (\"range\", 137, 139),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2175, 2183),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3345, 3347),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 112, 127),\n ConstString.image_border(1, 2): (\"range\", 157, 170),\n ConstString.image_border(1, 3): (\"range\", 221, 225),\n ConstString.image_border(1, 4): (\"range\", 221, 225),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 123, 133),\n ConstString.image_border(2, 2): (\"range\", 148, 157),\n ConstString.image_border(2, 3): (\"range\", 219, 223),\n ConstString.image_border(2, 4): (\"range\", 219, 223),\n },\n )\n\n\ndef test_wrong_split_line_2_png() -> None:\n \"\"\"Improve choice of the split line between different algorithm.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"wrong_split_line_2.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.15),\n Angle.deg(90.40),\n ),\n ConstString.separation_double_page_y(): (\"range\", 
2435, 2442),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.01),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.19),\n Angle.deg(0.41),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 294, 294),\n ConstString.image_crop(1, \"y1\"): (\"range\", 136, 138),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2324, 2327),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3374, 3376),\n ConstString.image_crop(2, \"x1\"): (\"range\", 135, 146),\n ConstString.image_crop(2, \"y1\"): (\"range\", 190, 193),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2165, 2175),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3386, 3389),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 121, 131),\n ConstString.image_border(1, 2): (\"range\", 116, 126),\n ConstString.image_border(1, 3): (\"range\", 213, 215),\n ConstString.image_border(1, 4): (\"range\", 213, 215),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 180, 185),\n ConstString.image_border(2, 2): (\"range\", 105, 111),\n ConstString.image_border(2, 3): (\"range\", 212, 216),\n ConstString.image_border(2, 4): (\"range\", 212, 216),\n },\n )\n\n\ndef test_small_wave_png() -> None:\n \"\"\"The wave at the bottom of the image is very small.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"small_wave.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.51),\n Angle.deg(90.70),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2491, 2500),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(0.49),\n Angle.deg(0.81),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.54),\n Angle.deg(0.86),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 316, 324),\n ConstString.image_crop(1, \"y1\"): (\"range\", 197, 199),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2351, 2356),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3392, 3395),\n ConstString.image_crop(2, \"x1\"): (\"range\", 127, 139),\n ConstString.image_crop(2, \"y1\"): (\"range\", 220, 224),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2158, 2168),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3417, 3421),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 174, 182),\n ConstString.image_border(1, 2): (\"range\", 110, 117),\n ConstString.image_border(1, 3): (\"range\", 210, 215),\n ConstString.image_border(1, 4): (\"range\", 210, 215),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 191, 203),\n ConstString.image_border(2, 2): (\"range\", 83, 102),\n ConstString.image_border(2, 3): (\"range\", 211, 216),\n ConstString.image_border(2, 4): (\"range\", 211, 216),\n },\n )\n\n\ndef test_wrong_split_line_3_png() -> None:\n \"\"\"The split line was not the right one.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"wrong_split_line_3.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.36),\n Angle.deg(90.69),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2494, 2504),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(0.14),\n Angle.deg(0.41),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.64),\n Angle.deg(1.01),\n ),\n ConstString.image_crop(1, \"x1\"): 
(\"range\", 331, 331),\n ConstString.image_crop(1, \"y1\"): (\"range\", 163, 166),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2375, 2379),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3375, 3377),\n ConstString.image_crop(2, \"x1\"): (\"range\", 96, 113),\n ConstString.image_crop(2, \"y1\"): (\"range\", 203, 210),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2130, 2144),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3412, 3416),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 148, 161),\n ConstString.image_border(1, 2): (\"range\", 114, 128),\n ConstString.image_border(1, 3): (\"range\", 206, 208),\n ConstString.image_border(1, 4): (\"range\", 206, 208),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 183, 201),\n ConstString.image_border(2, 2): (\"range\", 80, 95),\n ConstString.image_border(2, 3): (\"range\", 208, 215),\n ConstString.image_border(2, 4): (\"range\", 208, 215),\n },\n )\n\n\ndef test_wrong_wave_split_line_png() -> None:\n \"\"\"The split line by wave method was wrong.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"wrong_wave_split_line.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.36),\n Angle.deg(90.54),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2499, 2505),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.01),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.74),\n Angle.deg(0.91),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 383, 385),\n ConstString.image_crop(1, \"y1\"): (\"range\", 178, 179),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2411, 2412),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3374, 3375),\n ConstString.image_crop(2, \"x1\"): (\"range\", 0, 124),\n ConstString.image_crop(2, \"y1\"): (\"range\", 0, 58),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2398, 2489),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3391, 3505),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 167, 174),\n ConstString.image_border(1, 2): (\"range\", 118, 124),\n ConstString.image_border(1, 3): (\"range\", 215, 217),\n ConstString.image_border(1, 4): (\"range\", 215, 217),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 0, 11),\n ConstString.image_border(2, 2): (\"range\", 1, 163),\n ConstString.image_border(2, 3): (\"range\", 9, 95),\n ConstString.image_border(2, 4): (\"range\", 9, 95),\n },\n )\n\n\ndef test_no_split_line_line_algo_png() -> None:\n \"\"\"No line for split line with line detection algo.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"no_split_line_line_algo.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.46),\n Angle.deg(89.48),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2476, 2476),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.01),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.46),\n Angle.deg(-0.14),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 203, 205),\n ConstString.image_crop(1, \"y1\"): (\"range\", 156, 158),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2320, 2323),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3366, 3367),\n 
ConstString.image_crop(2, \"x1\"): (\"range\", 185, 187),\n ConstString.image_crop(2, \"y1\"): (\"range\", 221, 225),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2200, 2204),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3362, 3366),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 137, 145),\n ConstString.image_border(1, 2): (\"range\", 132, 142),\n ConstString.image_border(1, 3): (\"range\", 170, 172),\n ConstString.image_border(1, 4): (\"range\", 170, 172),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 215, 225),\n ConstString.image_border(2, 2): (\"range\", 117, 132),\n ConstString.image_border(2, 3): (\"range\", 220, 223),\n ConstString.image_border(2, 4): (\"range\", 220, 223),\n },\n )\n\n\ndef test_failed_split_line_line_algo_png() -> None:\n \"\"\"Failed to compute line for split line with line detection alog.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"failed_split_line_line_algo.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.96),\n Angle.deg(90.08),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2573, 2576),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.16),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.21),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 407, 414),\n ConstString.image_crop(1, \"y1\"): (\"range\", 2925, 2928),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2419, 2426),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3171, 3174),\n ConstString.image_crop(2, \"x1\"): (\"range\", 1180, 1192),\n ConstString.image_crop(2, \"y1\"): (\"range\", 1724, 1750),\n ConstString.image_crop(2, \"x2\"): (\"range\", 1181, 1193),\n ConstString.image_crop(2, \"y2\"): (\"range\", 1725, 1751),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 2917, 2930),\n ConstString.image_border(1, 2): (\"range\", 308, 323),\n ConstString.image_border(1, 3): (\"range\", 224, 224),\n ConstString.image_border(1, 4): (\"range\", 224, 224),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 1752, 1753),\n ConstString.image_border(2, 2): (\"range\", 1753, 1753),\n ConstString.image_border(2, 3): (\"range\", 1239, 1239),\n ConstString.image_border(2, 4): (\"range\", 1239, 1239),\n },\n )\n\n\ndef test_failed_split_line_line_algo_2_png() -> None:\n \"\"\"Failed to compute line for split line with line detection alog.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"failed_split_line_line_algo_2.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.97),\n Angle.deg(90.06),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2497, 2500),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.44),\n Angle.deg(0.66),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 269, 274),\n ConstString.image_crop(1, \"y1\"): (\"range\", 146, 149),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2381, 2387),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3352, 3355),\n ConstString.image_crop(2, \"x1\"): (\"range\", 107, 124),\n ConstString.image_crop(2, \"y1\"): (\"range\", 
153, 155),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2230, 2248),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3349, 3352),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 135, 142),\n ConstString.image_border(1, 2): (\"range\", 138, 146),\n ConstString.image_border(1, 3): (\"range\", 173, 174),\n ConstString.image_border(1, 4): (\"range\", 173, 174),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 133, 146),\n ConstString.image_border(2, 2): (\"range\", 145, 159),\n ConstString.image_border(2, 3): (\"range\", 166, 169),\n ConstString.image_border(2, 4): (\"range\", 166, 169),\n },\n )\n\n\ndef test_crop_too_much_3_png() -> None:\n \"\"\"Failed to compute line for split line with line detection alog.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"crop_too_much_3.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.36),\n Angle.deg(89.75),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2494, 2504),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-1.01),\n Angle.deg(-0.54),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.01),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 368, 375),\n ConstString.image_crop(1, \"y1\"): (\"range\", 252, 258),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2385, 2394),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3458, 3463),\n ConstString.image_crop(2, \"x1\"): (\"range\", 162, 172),\n ConstString.image_crop(2, \"y1\"): (\"range\", 228, 230),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2175, 2186),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3415, 3417),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 237, 241),\n ConstString.image_border(1, 2): (\"range\", 35, 50),\n ConstString.image_border(1, 3): (\"range\", 217, 224),\n ConstString.image_border(1, 4): (\"range\", 217, 224),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 192, 215),\n ConstString.image_border(2, 2): (\"range\", 84, 106),\n ConstString.image_border(2, 3): (\"range\", 219, 223),\n ConstString.image_border(2, 4): (\"range\", 219, 223),\n },\n )\n\n\ndef test_wrong_wave_split_line_2_png() -> None:\n \"\"\"Need to relax tolerance to have the split line by wave algo.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"wrong_wave_split_line_2.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.64),\n Angle.deg(91.02),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2483, 2494),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(0.19),\n Angle.deg(0.51),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(1.59),\n Angle.deg(1.86),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 223, 227),\n ConstString.image_crop(1, \"y1\"): (\"range\", 146, 148),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2335, 2342),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3353, 3356),\n ConstString.image_crop(2, \"x1\"): (\"range\", 72, 118),\n ConstString.image_crop(2, \"y1\"): (\"range\", 194, 198),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2194, 2240),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3396, 3399),\n ConstString.image_dpi(1): 
(\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 126, 141),\n ConstString.image_border(1, 2): (\"range\", 138, 153),\n ConstString.image_border(1, 3): (\"range\", 172, 174),\n ConstString.image_border(1, 4): (\"range\", 172, 174),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 164, 179),\n ConstString.image_border(2, 2): (\"range\", 108, 123),\n ConstString.image_border(2, 3): (\"range\", 167, 169),\n ConstString.image_border(2, 4): (\"range\", 167, 169),\n },\n )\n\n\ndef test_wrong_wave_split_line_3_png() -> None:\n \"\"\"Need to relax tolerance to have the split line by wave algo.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"wrong_wave_split_line_3.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.00),\n Angle.deg(90.16),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2510, 2516),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.01),\n Angle.deg(0.16),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.14),\n Angle.deg(0.36),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 272, 274),\n ConstString.image_crop(1, \"y1\"): (\"range\", 218, 221),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2383, 2384),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3422, 3424),\n ConstString.image_crop(2, \"x1\"): (\"range\", 110, 119),\n ConstString.image_crop(2, \"y1\"): (\"range\", 224, 226),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2235, 2244),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3413, 3415),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 190, 203),\n ConstString.image_border(1, 2): (\"range\", 79, 93),\n ConstString.image_border(1, 3): (\"range\", 174, 175),\n ConstString.image_border(1, 4): (\"range\", 174, 175),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 190, 208),\n ConstString.image_border(2, 2): (\"range\", 90, 110),\n ConstString.image_border(2, 3): (\"range\", 165, 169),\n ConstString.image_border(2, 4): (\"range\", 165, 169),\n },\n )\n\n\ndef test_no_split_line_wave_algo_png() -> None:\n \"\"\"Failed to detect wave due to image.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"no_split_line_wave_algo.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.45),\n Angle.deg(90.30),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2471, 2493),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.56),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.56),\n Angle.deg(-0.39),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 81, 102),\n ConstString.image_crop(1, \"y1\"): (\"range\", 25, 81),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2477, 2502),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3505, 3505),\n ConstString.image_crop(2, \"x1\"): (\"range\", 157, 180),\n ConstString.image_crop(2, \"y1\"): (\"range\", 266, 268),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2233, 2251),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3380, 3382),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 14, 42),\n ConstString.image_border(1, 2): (\"range\", 14, 42),\n ConstString.image_border(1, 3): 
(\"range\", 30, 52),\n ConstString.image_border(1, 4): (\"range\", 30, 52),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 252, 265),\n ConstString.image_border(2, 2): (\"range\", 106, 122),\n ConstString.image_border(2, 3): (\"range\", 191, 198),\n ConstString.image_border(2, 4): (\"range\", 191, 198),\n },\n )\n\n\ndef test_no_split_line_wave_algo_2_png() -> None:\n \"\"\"Failed to detect wave due to missing wave at the bottom.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"no_split_line_wave_algo_2.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.29),\n Angle.deg(91.22),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2466, 2488),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(0.94),\n Angle.deg(1.16),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.04),\n Angle.deg(0.21),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 266, 268),\n ConstString.image_crop(1, \"y1\"): (\"range\", 209, 210),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2342, 2346),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3411, 3413),\n ConstString.image_crop(2, \"x1\"): (\"range\", 161, 199),\n ConstString.image_crop(2, \"y1\"): (\"range\", 239, 241),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2245, 2282),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3433, 3434),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 180, 202),\n ConstString.image_border(1, 2): (\"range\", 82, 103),\n ConstString.image_border(1, 3): (\"range\", 191, 192),\n ConstString.image_border(1, 4): (\"range\", 191, 192),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 209, 226),\n ConstString.image_border(2, 2): (\"range\", 69, 86),\n ConstString.image_border(2, 3): (\"range\", 185, 188),\n ConstString.image_border(2, 4): (\"range\", 185, 188),\n },\n )\n\n\ndef test_crop_too_few_2_png() -> None:\n \"\"\"Use different area on the left and the right to detect\n black noise.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"crop_too_few_2.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.20),\n Angle.deg(90.61),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2530, 2540),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(0.99),\n Angle.deg(1.31),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.16),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 286, 290),\n ConstString.image_crop(1, \"y1\"): (\"range\", 163, 165),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2396, 2403),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3366, 3370),\n ConstString.image_crop(2, \"x1\"): (\"range\", 145, 163),\n ConstString.image_crop(2, \"y1\"): (\"range\", 175, 191),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2266, 2287),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3373, 3387),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 139, 157),\n ConstString.image_border(1, 2): (\"range\", 125, 145),\n ConstString.image_border(1, 3): (\"range\", 173, 175),\n ConstString.image_border(1, 4): (\"range\", 173, 175),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 
1): (\"range\", 173, 182),\n ConstString.image_border(2, 2): (\"range\", 107, 116),\n ConstString.image_border(2, 3): (\"range\", 166, 171),\n ConstString.image_border(2, 4): (\"range\", 166, 171),\n },\n )\n\n\ndef test_failed_detect_rectangle_png() -> None:\n \"\"\"Before, approxPolyDP algo was used to detect shape.\n\n But, due to noise on the edge with background other pages,\n the contour have lost of noise.\n So use HoughLinesP to detect shape.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"failed_detect_rectangle.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.12),\n Angle.deg(90.31),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2471, 2477),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(0.14),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.09),\n Angle.deg(0.21),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 257, 258),\n ConstString.image_crop(1, \"y1\"): (\"range\", 201, 202),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2333, 2334),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3401, 3402),\n ConstString.image_crop(2, \"x1\"): (\"range\", 131, 139),\n ConstString.image_crop(2, \"y1\"): (\"range\", 213, 215),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2216, 2222),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3406, 3406),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 186, 188),\n ConstString.image_border(1, 2): (\"range\", 97, 101),\n ConstString.image_border(1, 3): (\"range\", 192, 192),\n ConstString.image_border(1, 4): (\"range\", 192, 192),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 197, 204),\n ConstString.image_border(2, 2): (\"range\", 91, 97),\n ConstString.image_border(2, 3): (\"range\", 187, 188),\n ConstString.image_border(2, 4): (\"range\", 187, 188),\n },\n )\n\n\ndef test_single_page_png() -> None:\n \"\"\"Detect that the scan is single page.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"single_page.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.71),\n Angle.deg(90.06),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2464, 2476),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.16),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.26),\n Angle.deg(0.01),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 1, 9),\n ConstString.image_crop(1, \"y1\"): (\"range\", 7, 9),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2464, 2478),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3500, 3505),\n ConstString.image_crop(2, \"x1\"): (\"range\", 0, 0),\n ConstString.image_crop(2, \"y1\"): (\"range\", 0, 0),\n ConstString.image_crop(2, \"x2\"): (\"range\", 0, 0),\n ConstString.image_crop(2, \"y2\"): (\"range\", 0, 0),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 5, 8),\n ConstString.image_border(1, 2): (\"range\", 5, 8),\n ConstString.image_border(1, 3): (\"range\", 2, 11),\n ConstString.image_border(1, 4): (\"range\", 2, 11),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 0, 0),\n ConstString.image_border(2, 2): (\"range\", 0, 0),\n ConstString.image_border(2, 3): (\"range\", 
0, 0),\n ConstString.image_border(2, 4): (\"range\", 0, 0),\n },\n )\n\n\ndef test_no_split_line_wave_algo_3_png() -> None:\n \"\"\"Failed to detect wave due to missing wave at the bottom.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"no_split_line_wave_algo_3.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.04),\n Angle.deg(90.31),\n ),\n ConstString.separation_double_page_y(): (\"range\", 2399, 2411),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.21),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.11),\n Angle.deg(0.46),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 181, 189),\n ConstString.image_crop(1, \"y1\"): (\"range\", 2955, 2960),\n ConstString.image_crop(1, \"x2\"): (\"range\", 2198, 2206),\n ConstString.image_crop(1, \"y2\"): (\"range\", 3201, 3203),\n ConstString.image_crop(2, \"x1\"): (\"range\", 0, 0),\n ConstString.image_crop(2, \"y1\"): (\"range\", 0, 0),\n ConstString.image_crop(2, \"x2\"): (\"range\", 0, 0),\n ConstString.image_crop(2, \"y2\"): (\"range\", 0, 0),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 2950, 2958),\n ConstString.image_border(1, 2): (\"range\", 287, 293),\n ConstString.image_border(1, 3): (\"range\", 221, 221),\n ConstString.image_border(1, 4): (\"range\", 221, 221),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 0, 0),\n ConstString.image_border(2, 2): (\"range\", 0, 0),\n ConstString.image_border(2, 3): (\"range\", 0, 0),\n ConstString.image_border(2, 4): (\"range\", 0, 0),\n },\n )\n\n\ndef test_wrong_split_line_4_png() -> None:\n \"\"\"Detect wrong split line with line algo.\"\"\"\n MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(__file__, \"wrong_split_line_4.png\"),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(90.55),\n Angle.deg(91.47),\n ),\n ConstString.separation_double_page_y(): (\"range\", 49, 69),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.01),\n Angle.deg(1.41),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(0.99),\n Angle.deg(1.21),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 0, 0),\n ConstString.image_crop(1, \"y1\"): (\"range\", 0, 0),\n ConstString.image_crop(1, \"x2\"): (\"range\", 0, 0),\n ConstString.image_crop(1, \"y2\"): (\"range\", 0, 0),\n ConstString.image_crop(2, \"x1\"): (\"range\", 276, 292),\n ConstString.image_crop(2, \"y1\"): (\"range\", 329, 331),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2300, 2318),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3216, 3219),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 0, 0),\n ConstString.image_border(1, 2): (\"range\", 0, 0),\n ConstString.image_border(1, 3): (\"range\", 0, 0),\n ConstString.image_border(1, 4): (\"range\", 0, 0),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 321, 323),\n ConstString.image_border(2, 2): (\"range\", 276, 279),\n ConstString.image_border(2, 3): (\"range\", 216, 219),\n ConstString.image_border(2, 4): (\"range\", 216, 219),\n },\n )\n\n\ndef test_failed_split_line_line_algo_3_png() -> None:\n \"\"\"get_rectangle_from_contour_hough_lines fails\n because fewer than 4 lines are detected.\"\"\"\n 
MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(\n get_absolute_from_current_path(\n __file__, \"failed_split_line_line_algo_3.png\"\n ),\n {\n ConstString.separation_double_page_angle(): (\n \"range\",\n Angle.deg(89.95),\n Angle.deg(90.10),\n ),\n ConstString.separation_double_page_y(): (\"range\", 37, 41),\n ConstString.page_rotation(1): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.01),\n ),\n ConstString.page_rotation(2): (\n \"range\",\n Angle.deg(-0.16),\n Angle.deg(0.06),\n ),\n ConstString.image_crop(1, \"x1\"): (\"range\", 0, 0),\n ConstString.image_crop(1, \"y1\"): (\"range\", 0, 0),\n ConstString.image_crop(1, \"x2\"): (\"range\", 0, 0),\n ConstString.image_crop(1, \"y2\"): (\"range\", 0, 0),\n ConstString.image_crop(2, \"x1\"): (\"range\", 244, 256),\n ConstString.image_crop(2, \"y1\"): (\"range\", 152, 155),\n ConstString.image_crop(2, \"x2\"): (\"range\", 2259, 2270),\n ConstString.image_crop(2, \"y2\"): (\"range\", 3359, 3361),\n ConstString.image_dpi(1): (\"difference\", 300, 0.0000001),\n ConstString.image_border(1, 1): (\"range\", 0, 0),\n ConstString.image_border(1, 2): (\"range\", 0, 0),\n ConstString.image_border(1, 3): (\"range\", 0, 0),\n ConstString.image_border(1, 4): (\"range\", 0, 0),\n ConstString.image_dpi(2): (\"difference\", 300, 0.0000001),\n ConstString.image_border(2, 1): (\"range\", 143, 146),\n ConstString.image_border(2, 2): (\"range\", 134, 137),\n ConstString.image_border(2, 3): (\"range\", 221, 223),\n ConstString.image_border(2, 4): (\"range\", 221, 223),\n },\n )\n"
] | [
[
"numpy.seterr"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
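Note: the apis field of the preceding row tags numpy.seterr, which controls how NumPy reports floating-point errors (the call itself is not visible in the quoted test excerpt). A minimal usage sketch, illustrative only and not taken from the row above:

    import numpy as np

    # Illustrative sketch of numpy.seterr: configure, trigger, then restore the error state.
    old_settings = np.seterr(divide="raise", invalid="warn")   # returns the previous settings
    try:
        np.array([1.0]) / np.array([0.0])                      # FloatingPointError under divide="raise"
    except FloatingPointError:
        pass
    finally:
        np.seterr(**old_settings)                              # restore whatever was configured before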
frank-wei/torchdynamo | [
"26c4c1b593bebf4246e566749dade38254b59ffb"
] | [
"torchdynamo/allowed_functions.py"
] | [
"import builtins\nimport collections\nimport copy\nimport functools\nimport itertools\nimport math\nimport operator\nimport types\nimport warnings\nfrom functools import lru_cache\n\nimport numpy\nimport torch\n\nfrom . import config\n\n\n@lru_cache(None)\ndef _disallowed_function_ids():\n remove = [\n True,\n False,\n None,\n collections.OrderedDict,\n copy.copy,\n copy.deepcopy,\n torch.autocast_decrement_nesting,\n torch.autocast_increment_nesting,\n torch.autograd.grad,\n torch.clear_autocast_cache,\n torch.cuda.current_device,\n torch.distributions.constraints.is_dependent,\n torch.distributions.normal.Normal,\n torch.inference_mode,\n torch.set_anomaly_enabled,\n torch.set_autocast_cache_enabled,\n torch.set_autocast_cpu_dtype,\n torch.set_autocast_cpu_enabled,\n torch.set_autocast_enabled,\n torch.set_autocast_gpu_dtype,\n torch.autograd.profiler.profile,\n warnings.warn,\n ]\n return {id(x) for x in remove}\n\n\n@lru_cache(None)\ndef _allowed_function_ids():\n \"\"\"\n Walk torch.* and get the ids of all the stuff in it\n \"\"\"\n warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"torch.distributed\")\n torch.distributions.Distribution.set_default_validate_args(False)\n torch_object_ids = dict()\n\n def _find_torch_objects(module):\n if any(\n module.__name__.startswith(mod_name)\n for mod_name in config.allowed_functions_module_string_ignorelist\n ):\n return\n torch_object_ids[id(module)] = module.__name__\n for name, obj in list(module.__dict__.items()):\n if id(obj) not in torch_object_ids:\n if isinstance(obj, types.ModuleType):\n if obj.__name__.startswith(\"torch.\"):\n torch_object_ids[id(obj)] = f\"{module.__name__}.{name}\"\n _find_torch_objects(obj)\n else:\n torch_object_ids[id(obj)] = f\"{module.__name__}.{name}\"\n\n _find_torch_objects(torch)\n _find_torch_objects(math)\n\n for idx in _disallowed_function_ids():\n if idx in torch_object_ids:\n del torch_object_ids[idx]\n\n return torch_object_ids\n\n\ndef is_allowed(obj):\n \"\"\"Is this safe to trace like torch.add ?\"\"\"\n return id(obj) in _allowed_function_ids()\n\n\ndef is_disallowed(obj):\n \"\"\"Is this safe to trace like torch.add ?\"\"\"\n return id(obj) in _disallowed_function_ids()\n\n\n@lru_cache(None)\ndef _builtin_function_ids():\n rv = {\n id(v): f\"builtins.{k}\"\n for k, v in builtins.__dict__.items()\n if not k.startswith(\"_\") and callable(v)\n }\n rv.update(\n {\n id(v): f\"operator.{k}\"\n for k, v in operator.__dict__.items()\n if not k.startswith(\"_\") and callable(v)\n }\n )\n rv.update(\n {id(v): f\"functools.{v.__name__}\" for v in (itertools.chain, itertools.islice)}\n )\n rv[id(functools.reduce)] = \"functools.reduce\"\n return rv\n\n\ndef is_builtin(obj):\n return id(obj) in _builtin_function_ids()\n\n\n@lru_cache(None)\ndef _numpy_function_ids():\n rv = dict()\n for mod in (numpy, numpy.random):\n rv.update(\n {\n id(v): f\"{mod.__name__}.{k}\"\n for k, v in mod.__dict__.items()\n if callable(v)\n and (getattr(v, \"__module__\", None) or mod.__name__) == mod.__name__\n }\n )\n return rv\n\n\ndef is_numpy(obj):\n return isinstance(obj, numpy.ndarray) or id(obj) in _numpy_function_ids()\n"
] | [
[
"torch.distributions.Distribution.set_default_validate_args"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
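Note: the allowed_functions.py row above builds allow/deny sets keyed by id() and memoised with functools.lru_cache. A minimal, framework-agnostic sketch of that pattern; the names _allowed_ids and is_traceable are illustrative and not part of torchdynamo:

    import math
    import types
    from functools import lru_cache

    @lru_cache(None)
    def _allowed_ids():
        # Collect id()s of everything reachable in a module, mirroring the allow-list pattern above.
        allowed = {}
        def walk(module, prefix):
            allowed[id(module)] = prefix
            for name, obj in vars(module).items():
                if id(obj) in allowed or isinstance(obj, types.ModuleType):
                    continue   # a fuller walker (like the quoted one) would recurse into submodules
                allowed[id(obj)] = prefix + "." + name
        walk(math, "math")
        return allowed

    def is_traceable(obj):
        # Membership is by object identity, so the lookup is O(1) and never calls __eq__.
        return id(obj) in _allowed_ids()

    print(is_traceable(math.sqrt))   # True: collected from the math module
    print(is_traceable(print))       # False: builtins were never walked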
ApproxEng/approxeng.picamera | [
"775a30b4700858695a19b9179a9f730c220f4d06"
] | [
"src/python/approxeng/picamera/__init__.py"
] | [
"import numpy as np\nimport cv2\n\n\ndef find_lines(image, threshold=100, scan_region_height=50, scan_region_position=0, scan_region_width_pad=0,\n min_detection_area=100, invert=False, blur_kernel_size=21):\n \"\"\"\n Scan a numpy image to find things that look like dark lines on a pale background. Return a sorted sequence of\n locations of line centroids, where (when not inverted) -1.0 corresponds to the left edge of the image, 0 to the \n centre and 1.0 to the right edge. Centroids are sorted in ascending order.\n \n :param image: \n A numpy image to process. Grab this with the read method of VideoStream from imutils or similar\n :param threshold: \n The threshold used to convert to black and white after a gaussian blur is applied, defaults to 100\n :param scan_region_height: \n The height in pixels of the region to use, defaults to 50\n :param scan_region_position: \n The position of the region relative to the entire frame. 0 is at the top, 1.0 is as far towards the bottom as it\n will go. Defaults to 0, scanning the top 'scan_region_height' pixels of the image\n :param scan_region_width_pad:\n The number of pixels to discard at either edge of the region, defaults t0 0\n :param min_detection_area:\n The minimum area of detected moments, any feature below this size will be ignored. Defaults to 100 pixels\n :param invert: \n Boolean - set this to true if your pi camera is upside-down and you therefore want to have -1.0 at the right \n hand edge of the image rather than the left\n :param blur_kernel_size:\n Size of the kernel used when applying the gaussian blur. Defaults to 21, 9 is less computationally intensive.\n :return: \n A sequence of float values ranging from -1.0 to 1.0, in ascending order, corresponding to the x coordinate of\n the centroids of any line regions detected\n \"\"\"\n height = np.size(image, 0)\n width = np.size(image, 1)\n min_row = (height - scan_region_height) * scan_region_position\n\n # Select a sub-region and convert it to grayscale, we're not interested in colour here\n region = cv2.cvtColor(\n image[min_row:(min_row + scan_region_height), scan_region_width_pad:(width - scan_region_width_pad)],\n cv2.COLOR_BGR2GRAY)\n # Apply a gaussian blur to deal with any noise, this cleans up grain from the camera and removes any tiny\n # features we don't care about\n region = cv2.GaussianBlur(region, (blur_kernel_size, blur_kernel_size), 0)\n # Threshold the image, converting it into black and white. Because we previously blurred it this should result\n # in a reasonably clean set of features\n th, region = cv2.threshold(region, threshold, 255, cv2.THRESH_BINARY_INV)\n # Find contours - these are boundaries of regions in the image.\n contoured, contours, heirarchy = cv2.findContours(region, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n # We'll populate this with line locations\n lines = []\n # Iterate over the contours. Each contour is a single feature, hopefully a bit of a line; the contour contains the\n # boundary formed from the edge of the region we're sampling as well as the actual line edges.\n for contour in contours:\n # Compute moments of the region.\n m = cv2.moments(contour)\n # Reject any regions which have a smaller area than the minimum. This cleans up larger false hits than the\n # blur operation we did earlier. 
The 'min_detection_area' needs to be tuned to your particular application, in\n # the case of line followers you'll worry about the height of the sampling region, resolution of the camera and\n # width of the line itself.\n if m['m00'] > min_detection_area:\n # Get the x coordinate of the centroid of the region\n cx = m['m10'] / m['m00']\n # ...and its value on a scale of -1.0 to 1.0\n proportional_cx = 2 * cx / width - 1.0\n # ...and flip it if we said that's what we wanted (i.e. the camera is mounted upside-down)\n if invert:\n proportional_cx = -proportional_cx\n lines.append(proportional_cx)\n # Return a sorted set of x coordinates of detected lines\n return sorted(lines)"
] | [
[
"numpy.size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
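Note: the find_lines code above normalises each detected centroid to the range -1.0..1.0 via 2 * cx / width - 1.0. A small pure-NumPy sketch of that step under the same convention; centroid_x_normalised is an illustrative helper, not part of approxeng.picamera:

    import numpy as np

    def centroid_x_normalised(mask):
        # Same convention as find_lines: -1.0 = left edge, 0.0 = centre, 1.0 = right edge.
        height, width = mask.shape          # np.size(mask, 0) and np.size(mask, 1) are equivalent
        ys, xs = np.nonzero(mask)
        if xs.size == 0:
            return None                     # nothing detected in this mask
        cx = xs.mean()                      # x centroid; equals moments m10/m00 for a binary mask
        return 2.0 * cx / width - 1.0

    mask = np.zeros((10, 10), dtype=bool)
    mask[:, 5] = True                       # a vertical "line" at column 5
    print(centroid_x_normalised(mask))      # 0.0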
marcin-sel/PADPy_PD1 | [
"adb8d9e17594c72ec7d65900627d5d3c9fe4ba4a"
] | [
"PD1_Plumber.py"
] | [
"import pygame, copy, sys\r\nimport numpy as np\r\n\r\n######################################################################################################\r\n###################################### Ustawienia początkowe zmiennych################################\r\n######################################################################################################\r\n\r\nfont_size = 20\r\nfont_size_2 = 1.5 * font_size\r\nlw = 2\r\n\r\npygame.init()\r\n\r\npygame.font.init()\r\nmyfont = pygame.font.SysFont('Comic Sans MS', font_size)\r\n\r\nclock = pygame.time.Clock()\r\n\r\nunit = 48\r\nfont_size = unit/3\r\n\r\nwidth = unit*15\r\nheight = unit*14\r\n\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\n\r\n\r\n######################################################################################################\r\n####################### Wczytanie grafiki i dźwieku ##################################################\r\n######################################################################################################\r\n\r\npipes = (pygame.transform.scale(pygame.image.load(\"pipe1.png\"), (unit, unit)),\r\n pygame.transform.scale(pygame.image.load(\"pipe2.png\"), (unit, unit)))\r\n\r\npipes2 = (pygame.transform.scale(pygame.image.load(\"pipe1_2.png\"), (unit, unit)),\r\n pygame.transform.scale(pygame.image.load(\"pipe2_2.png\"), (unit, unit)))\r\n\r\nvalve = pygame.transform.scale(pygame.image.load(\"valve.png\"), (unit, unit))\r\nvalve2 = pygame.transform.scale(pygame.image.load(\"valve_2.png\"), (unit, unit))\r\n\r\noutlet = pygame.transform.scale(pygame.image.load(\"outlet.png\"), (unit, unit))\r\noutlet2 = pygame.transform.scale(pygame.image.load(\"outlet_2.png\"), (unit, unit))\r\noutlet3 = pygame.transform.scale(pygame.image.load(\"outlet_3.png\"), (unit, unit))\r\n\r\nclangs = (pygame.mixer.Sound('clang1.wav'),\r\n pygame.mixer.Sound('plate1.wav')) \r\n\r\nVictorySmall = pygame.mixer.Sound('VictorySmall.wav')\r\nVictoryBig = pygame.mixer.Sound('VictoryBig.wav')\r\nTouch = pygame.mixer.Sound('Touch.wav')\r\n\r\nwater = 'water.ogg'\r\n\r\n\r\n######################################################################################################\r\n####################### Deklaracja poziomów ##########################################################\r\n######################################################################################################\r\n\r\nAA = [\r\n [[1,\t0,\t0,\t0,\t0],\r\n [0,\t0,\t0,\t0,\t1]],\r\n\r\n [[0,\t1,\t1,\t0],\t\t\t\t\r\n [0,\t1,\t0,\t1]],\t\t\t\t\r\n\t\t\t\t\t\t\t\r\n [[1,\t0,\t1,\t0],\t\t\t\t\r\n [1,\t0,\t0,\t1],\t\t\t\t\r\n [0,\t1,\t0,\t1]],\t\t\t\t\r\n\t\t\t\t\t\t\t\r\n [[0,\t0,\t0,\t0,\t1,\t0],\t\t\r\n [0,\t1,\t1,\t1,\t0,\t0],\t\t\r\n [1,\t0,\t0,\t1,\t1,\t1],\t\t\r\n [0,\t0,\t0,\t1,\t1,\t1],\t\t\r\n [1,\t0,\t1,\t0,\t1,\t1],\t\t\r\n [0,\t0,\t0,\t0,\t0,\t1]],\t\t\r\n\t\t\t\t\t\t\t\r\n [[1,\t0,\t0,\t0,\t1,\t0],\t\t\r\n [1,\t1,\t1,\t1,\t0,\t0],\t\t\r\n [1,\t1,\t1,\t1,\t1,\t1],\t\t\r\n [1,\t1,\t1,\t1,\t1,\t1],\t\t\r\n [1,\t1,\t1,\t0,\t1,\t1],\t\t\r\n [0,\t0,\t0,\t1,\t1,\t0]],\t\t\r\n \t\t\t\t\t\t\t\r\n [[1,\t0,\t0,\t1,\t1,\t0,\t1],\t\r\n [0,\t0,\t1,\t1,\t1,\t1,\t0],\t\r\n [0,\t1,\t0,\t0,\t1,\t0,\t1],\t\r\n [0,\t0,\t1,\t0,\t0,\t0,\t1],\t\r\n [1,\t0,\t0,\t1,\t1,\t1,\t1],\t\r\n [1,\t0,\t1,\t0,\t0,\t1,\t0],\t\r\n [0,\t1,\t0,\t0,\t1,\t0,\t0],\t\r\n [1,\t0,\t0,\t0,\t0,\t0,\t0]],\t\r\n \t\t\r\n [[1,\t0,\t1,\t1,\t1,\t0,\t1,\t1],\r\n [0,\t0,\t0,\t0,\t1,\t1,\t0,\t0],\r\n [1,\t1,\t0,\t1,\t0,\t1,\t1,\t0],\r\n 
[0,\t0,\t0,\t0,\t1,\t0,\t0,\t0],\r\n [1,\t0,\t0,\t1,\t0,\t1,\t1,\t1],\r\n [0,\t1,\t0,\t1,\t1,\t1,\t1,\t1],\r\n [0,\t0,\t1,\t1,\t0,\t0,\t0,\t0],\r\n [1,\t1,\t0,\t0,\t0,\t1,\t0,\t0],\r\n [1,\t0,\t1,\t0,\t0,\t0,\t0,\t0]],\r\n \r\n [[0,\t0,\t1,\t0,\t0,\t1,\t1,\t0,\t1],\r\n [0,\t0,\t0,\t0,\t1,\t0,\t0,\t1,\t0],\r\n [0,\t0,\t1,\t0,\t1,\t1,\t1,\t0,\t0],\r\n [0,\t0,\t1,\t0,\t1,\t1,\t1,\t0,\t1],\r\n [1,\t0,\t1,\t0,\t0,\t0,\t1,\t0,\t0],\r\n [0,\t0,\t1,\t1,\t1,\t0,\t1,\t1,\t0],\r\n [0,\t0,\t0,\t0,\t1,\t1,\t0,\t0,\t0],\r\n [0,\t1,\t0,\t0,\t0,\t0,\t0,\t0,\t0],\r\n [0,\t1,\t1,\t0,\t1,\t1,\t0,\t0,\t0],\r\n [1,\t0,\t1,\t1,\t0,\t1,\t0,\t0,\t0]],\r\n \r\n [[1,\t0,\t0,\t0,\t0],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [0,\t0,\t0,\t0,\t1]]\r\n \r\n]\r\n\r\nBB = [\r\n [[0,\t1,\t2,\t1,\t2],\r\n [0,\t3,\t0,\t3,\t1]],\r\n\r\n [[1,\t0,\t0,\t3],\t\t\t\t\r\n [2,\t1,\t0,\t2]],\t\t\t\t\r\n \t\t\t\t\t\t\t\r\n [[2,\t2,\t2,\t1],\t\t\t\t\r\n [2,\t0,\t0,\t1],\t\t\t\t\r\n [1,\t2,\t0,\t3]],\t\t\t\t\r\n \t\t\t\t\t\t\t\r\n [[0,\t3,\t3,\t2,\t3,\t0],\t\t\r\n [2,\t1,\t2,\t1,\t2,\t0],\t\t\r\n [0,\t2,\t1,\t2,\t1,\t0],\t\t\r\n [0,\t0,\t0,\t3,\t2,\t0],\t\t\r\n [2,\t0,\t2,\t1,\t3,\t3],\t\t\r\n [3,\t3,\t3,\t2,\t1,\t0]],\t\t\r\n \t\t\t\t\t\t\t\r\n [[1,\t2,\t0,\t0,\t0,\t2],\t\t\r\n [2,\t1,\t2,\t1,\t1,\t1],\t\t\r\n [2,\t0,\t1,\t3,\t0,\t2],\t\t\r\n [0,\t2,\t1,\t3,\t0,\t0],\t\t\r\n [1,\t3,\t3,\t3,\t2,\t1],\t\t\r\n [1,\t3,\t2,\t0,\t1,\t1]],\t\t\r\n \t\t\t\t\t\t\t\r\n [[3,\t3,\t3,\t0,\t3,\t3,\t1],\t\r\n [2,\t2,\t0,\t2,\t1,\t3,\t0],\t\r\n [3,\t0,\t2,\t2,\t3,\t0,\t3],\t\r\n [3,\t0,\t0,\t3,\t2,\t2,\t1],\t\r\n [2,\t3,\t2,\t3,\t3,\t3,\t0],\t\r\n [1,\t3,\t1,\t0,\t3,\t3,\t1],\t\r\n [1,\t1,\t2,\t1,\t3,\t3,\t0],\t\r\n [1,\t0,\t3,\t1,\t1,\t0,\t0]],\t\r\n \t\t\t\t\t\t\t\r\n [[1,\t2,\t2,\t3,\t1,\t0,\t2,\t2],\r\n [2,\t3,\t2,\t1,\t3,\t0,\t0,\t0],\r\n [1,\t2,\t0,\t0,\t3,\t0,\t2,\t0],\r\n [3,\t3,\t1,\t1,\t1,\t1,\t2,\t1],\r\n [2,\t3,\t1,\t1,\t1,\t0,\t3,\t1],\r\n [3,\t1,\t3,\t3,\t1,\t1,\t3,\t0],\r\n [3,\t0,\t1,\t1,\t2,\t2,\t3,\t2],\r\n [3,\t3,\t1,\t0,\t2,\t3,\t1,\t3],\r\n [3,\t3,\t3,\t1,\t2,\t3,\t0,\t2]],\r\n \r\n [[2, 0,\t1,\t3,\t3,\t1,\t0,\t3,\t1],\r\n [2,\t3,\t3,\t3,\t2,\t0,\t1,\t0,\t2],\r\n [2,\t1,\t1,\t3,\t0,\t3,\t1,\t3,\t2],\r\n [1,\t3,\t0,\t1,\t1,\t3,\t0,\t2,\t0],\r\n [2,\t3,\t3,\t1,\t2,\t2,\t0,\t2,\t0],\r\n [1,\t0,\t0,\t2,\t3,\t3,\t1,\t1,\t1],\r\n [2,\t1,\t3,\t2,\t2,\t2,\t1,\t0,\t3],\r\n [1,\t1,\t0,\t3,\t1,\t2,\t3,\t1,\t2],\r\n [2,\t0,\t3,\t3,\t3,\t3,\t2,\t3,\t0],\r\n [0,\t1,\t2,\t2,\t0,\t3,\t2,\t3,\t1]],\r\n \r\n [[0,\t1,\t2,\t1,\t2],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [1,\t1,\t1,\t1,\t1],\r\n [0,\t3,\t0,\t3,\t1]]\r\n\r\n]\r\n\r\n\r\n\r\n\r\n######################################################################################################\r\n################################################ GRA #################################################\r\n######################################################################################################\r\n\r\n\r\ndef main_game(nr_gry):\r\n\r\n # Układ rur będę reprezentował jako dwie macierze:\r\n # Macierz kształtów rur A\r\n # Macierz orienracji rur B - elementy macierzy b reprezentyją krotność obrotu o 90 stopni względem pozycji wyjściowej\r\n \r\n\r\n A = AA[nr_gry]\r\n B = copy.deepcopy(BB[nr_gry])\r\n\r\n m = len(A)\r\n n = len(A[0])\r\n \r\n 
predkosc = 3*(m*n)**(1/2) # Prędkosc napelniania się rur \r\n # - proporcjonalna do redniej geometrycznej wymiarów planszy\r\n predkosc2 = 3\r\n\r\n x_start = width/2 - n * unit/2\r\n y_start = 2*unit\r\n\r\n\r\n \r\n # Połączenia poszczególnych rur przechowuję w postaci wektorów logiczny.\r\n # Wartość True oznacza, że rura łączy się w danym kierunku, kolejno w górę, w prawo, w dół, w lewo.\r\n # Tablica P reprezentuje połączenia w wyjściowej pozycji (obrót 0 stopni)\r\n\r\n P = [[True, True, False, False],\r\n [False, True, False, True]]\r\n\r\n\r\n # Inicjacja planszy\r\n\r\n screen = pygame.display.set_mode((width, height))\r\n \r\n screen.blit(myfont.render('Poziom ' + str(nr_gry + 1) + \".\", False, GREEN), (width / 2 - font_size_2/4, 0))\r\n\r\n screen.blit(valve, (x_start, y_start - unit))\r\n screen.blit(outlet, (x_start + (n - 1) * unit, y_start + m * unit))\r\n\r\n for k in range(m):\r\n for l in range(n):\r\n pipe = pygame.transform.rotate(pipes[A[k][l]], -90 * B[k][l])\r\n screen.blit(pipe, (x_start + l * unit, y_start + k * unit))\r\n\r\n\r\n done = False\r\n quit_game = False\r\n\r\n x = x_start\r\n y = y_start\r\n\r\n i = 0\r\n j = 0\r\n \r\n change = True # Nie wprowadzać zmiany na ekranie tylko w przypadku faktycznych zmian \r\n # (nie wypisywać wielokrotnie tego samego). Upłynniło to dziłania pętlo.\r\n # Pewnie można lepiej/prosciej/inaczej.\r\n\r\n while not (done or quit_game):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit_game = True\r\n pygame.quit()\r\n sys.exit() \r\n\r\n pressed = pygame.key.get_pressed()\r\n\r\n if pressed[pygame.K_RETURN]: done = True\r\n\r\n if pressed[pygame.K_SPACE]: \r\n B[i][j] = (B[i][j] + 1) % 4\r\n clangs[0].play()\r\n change = True\r\n \r\n if pressed[pygame.K_LCTRL]: \r\n B[i][j] = (B[i][j] - 1) % 4\r\n clangs[1].play()\r\n change = True\r\n\r\n if pressed[pygame.K_RIGHT]: \r\n j = min(j + 1, n - 1)\r\n Touch.play()\r\n change = True\r\n if pressed[pygame.K_LEFT]: \r\n j = max(j - 1, 0)\r\n Touch.play()\r\n change = True\r\n if pressed[pygame.K_DOWN]: \r\n i = min(i + 1, m - 1)\r\n Touch.play()\r\n change = True\r\n if pressed[pygame.K_UP]: \r\n i = max(i - 1, 0)\r\n Touch.play()\r\n change = True\r\n\r\n if pressed[pygame.K_ESCAPE]:\r\n break\r\n \r\n if change: # Skoro niczego nie zmienilimy to po co nanosić modyfikacje?\r\n \r\n screen.blit(pipe, (x, y))\r\n \r\n x = x_start + j * unit\r\n y = y_start + i * unit\r\n \r\n pipe = pygame.transform.rotate(pipes[A[i][j]], -90*B[i][j])\r\n screen.blit(pipe, (x, y))\r\n pygame.draw.lines(screen, GREEN, True, [[x, y], [x+unit-lw, y], [x+unit-lw, y+unit-lw], [x, y+unit-lw]], lw)\r\n pygame.display.update()\r\n\r\n change = False\r\n \r\n clock.tick(7)\r\n\r\n\r\n#####################################################################################################\r\n################################ Sprawdzenie poprawnosci rozwiązania ################################\r\n##################################################################################################### \r\n\r\n\r\n if done and not quit_game:\r\n \r\n screen.blit(pipe, (x, y))\r\n pygame.display.update()\r\n \r\n pygame.mixer.music.load(water)\r\n pygame.mixer.music.play()\r\n \r\n clock.tick(2)\r\n\r\n # Zdefiniujemy funkcje, które pozwolą nam ustalać połączenia rur po ich obrocie,\r\n def rotate_pipe(p_i):\r\n return [p_i[3], p_i[0], p_i[1], p_i[2]]\r\n\r\n\r\n def rotate_pipe_k(p_i, k):\r\n for r in range(k):\r\n p_i = rotate_pipe(p_i)\r\n return p_i\r\n\r\n\r\n D = np.array([ # Możliwe 
kierunki podróży\r\n [-1, 0], # Góra\r\n [0, 1], # Prawo\r\n [1, 0], # Dół\r\n [0, -1]]) # Lewo\r\n\r\n\r\n direction = np.array([True, False, False, False]) # direction z ktorego dolatuje woda (na poczatku od gory)\r\n\r\n # Test dla pierwszej rury\r\n\r\n a = A[0][0]\r\n b = B[0][0]\r\n\r\n p = P[a]\r\n p = rotate_pipe_k(p, b)\r\n pipe = pygame.transform.rotate(pipes[a], -90 * b)\r\n \r\n \r\n # Zmienne mówiąca o zwycięstwie bądź porażce w grze, chciałbym, aby gra również miała do nich dostęp\r\n \r\n global victory\r\n victory = False\r\n loss = False\r\n\r\n if not p[0]:\r\n loss = True\r\n\r\n k = 0\r\n l = 0\r\n \r\n x = x_start + l * unit\r\n y = y_start + k * unit\r\n\r\n\r\n screen.blit(valve2, (x_start, y_start - unit))\r\n pygame.display.update()\r\n\r\n\r\n while not (loss or victory or quit_game):\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit_game = True\r\n pygame.quit()\r\n sys.exit()\r\n \r\n x = x_start + l * unit\r\n y = y_start + k * unit\r\n\r\n a = A[k][l] # kształt k-l-tej rury\r\n b = B[k][l] # orientacja k-l-tej rury\r\n\r\n p = P[a]\r\n p = rotate_pipe_k(p, b)\r\n\r\n pipe = pygame.transform.rotate(pipes2[a], -90 * b)\r\n\r\n screen.blit(pipe, (x, y))\r\n pygame.display.update()\r\n \r\n\r\n direction = np.logical_xor(p, direction) # direction, w którym da się płynąć, inny niż powrótny\r\n nastepnik = [k, l] + D[direction]\r\n [k_n, l_n] = [nastepnik[0][0], nastepnik[0][1]]\r\n\r\n direction = np.array(\r\n rotate_pipe_k(direction, 2)) # direction wyplywania staje sie kierunkiem wplywania dla nastepnika\r\n\r\n\r\n if np.any(nastepnik < [0, 0]) | np.any(nastepnik > [m - 1, n - 1]): # Nie chcemy wyjsc poza planszę\r\n if k == m - 1 and l == n - 1 and p[2]: # Obsługa ostatniego elementu\r\n victory = True\r\n else:\r\n loss = True\r\n\r\n else:\r\n a_n = A[k_n][l_n] # kształt nastepnika\r\n b_n = B[k_n][l_n] # orientacja nastepnika\r\n p_n = rotate_pipe_k(P[a_n], b_n)\r\n\r\n if ~np.any(direction & p_n):\r\n loss = True\r\n\r\n [k, l] = [k_n, l_n] # Przechodzimy do nastepnika\r\n\r\n pygame.display.update()\r\n clock.tick(predkosc)\r\n \r\n pygame.mixer.music.stop()\r\n \r\n if loss and not quit_game:\r\n for it in range(3):\r\n screen.blit(pipe, (x, y))\r\n pygame.display.update()\r\n\r\n clock.tick(2)\r\n pygame.draw.lines(screen, RED, True,\r\n [[x + lw + 0, y + lw - 0],\r\n [x + unit - lw - 2, y + lw - 0],\r\n [x + unit - lw - 2, y + unit - lw - 2],\r\n [x + lw + 0, y + unit - lw - 2]], 3 * lw)\r\n pygame.display.update()\r\n\r\n clock.tick(predkosc2)\r\n elif not quit_game:\r\n VictorySmall.play()\r\n for it in range(3):\r\n screen.blit(outlet2, (x_start + (n - 1) * unit, y_start + m * unit))\r\n pygame.display.update()\r\n clock.tick(predkosc2)\r\n screen.blit(outlet3, (x_start + (n - 1) * unit, y_start + m * unit))\r\n pygame.display.update()\r\n\r\n clock.tick(predkosc2)\r\n\r\n\r\n\r\n\r\n######################################################################################################\r\n################################################ MENU ################################################\r\n######################################################################################################\r\n\r\nquit_game = False\r\nlvl = open(\"lvl.txt\", \"r\")\r\nlvl_numb = int(lvl.read())\r\nlvl.close()\r\n\r\nall_lvls = len(AA)\r\n\r\nindent = width/5\r\nind2 = width / 10\r\n\r\ndef messege(text, y, x = width / 2 - indent):\r\n screen.blit(myfont.render(text, False, GREEN), (x, y * font_size_2))\r\n\r\ndef messege1(text, y 
= 1/3):\r\n screen = pygame.display.set_mode((width, height))\r\n screen.blit(myfont.render(text, False, GREEN), \r\n (width / 2 - len(text)*font_size/4, height*(y)))\r\n pygame.display.update()\r\n clock.tick(2)\r\n \r\ndef messege2(text, y = 1/3):\r\n screen.blit(myfont.render(text, False, GREEN), \r\n (width / 2 - len(text)*font_size/4, height*(y)))\r\n pygame.display.update()\r\n clock.tick(2)\r\n \r\n\r\nfirst_time = True # To będzie znacznik czy jestem w pętli while po raz pierwszy. \r\n # Nie będę wielokrotnie wypisywał tych samych stanów na ekranie. Znacznie upłynniło to prace MUNU.\r\n\r\nglobal victory\r\nvictory = False\r\n \r\nwhile not quit_game:\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit_game = True\r\n pygame.quit()\r\n quit()\r\n \r\n if first_time:\r\n screen = pygame.display.set_mode((width, height))\r\n messege(\"Menu:\", 1)\r\n \r\n second_line = 3\r\n messege('[1] - Zagraj w grę', second_line)\r\n messege('[2] - Instrukcja', second_line + 1)\r\n messege('[Esc] - Wyjście', second_line + 3)\r\n messege('Naciśnij podany przycisk, aby wybrać opcję', second_line + 6)\r\n pygame.display.update()\r\n\r\n first_time = False\r\n\r\n\r\n pressed = pygame.key.get_pressed()\r\n\r\n if pressed[pygame.K_ESCAPE]:\r\n quit_game = True\r\n quit()\r\n\r\n if pressed[pygame.K_2]:\r\n Touch.play()\r\n first_time = True\r\n \r\n while not quit_game:\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit_game = True\r\n pygame.quit()\r\n quit()\r\n \r\n if first_time:\r\n screen = pygame.display.set_mode((width, height))\r\n messege('Instrukcja', 1, width/2 - 10*font_size/4)\r\n messege('Zadaniem gracza jest stworzenie z kawałków rur połączenia', 3, ind2)\r\n messege('pomiędzy zaworem a ujściem.', 4, ind2)\r\n messege('W tym celu należy obracać poszczególne fragmenty tak długo, ', 5, ind2)\r\n messege('aż uzyska się nieprzerwany ciąg pomiędzy początkiem a końcem.', 6, ind2)\r\n messege('Kontrola odbywa się wyłącznie przy pomocy klawiatury.', 7, ind2)\r\n \r\n messege('Do zabawy służą następujące klawisze:', 9, ind2)\r\n messege('Strzałki - przemieszczanie się pomiędzy fragmentami rur', 10, ind2)\r\n messege('[Spacja] - obrót elementu zgodnie z ruchem wskazówek zegara', 11, ind2)\r\n messege('[Lewy CTRL] - obrót elementu przeciwnie do ruchu wskazówek zegara', 12, ind2)\r\n messege('[ENTER] - akceptacja układu i odkręcenie wody', 13, ind2)\r\n \r\n messege('Aby odblokować kolejny poziom gracz musi pomyślnie ukończyć bieżący.', 14, ind2)\r\n messege('Gra automatycznie zapamiętuje postep gry.', 15, ind2)\r\n \r\n messege('[Esc] - Powrót', 17, width/2 - 10*font_size/4)\r\n pygame.display.update()\r\n first_time = False\r\n \r\n pressed = pygame.key.get_pressed()\r\n if pressed[pygame.K_ESCAPE]:\r\n Touch.play()\r\n first_time = True\r\n break\r\n \r\n\r\n if pressed[pygame.K_1]:\r\n Touch.play()\r\n first_time = True\r\n while not quit_game:\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit_game = True\r\n pygame.quit()\r\n \r\n if first_time:\r\n screen = pygame.display.set_mode((width, height))\r\n screen.blit(myfont.render('Zagraj w grę:', False, GREEN), (width / 2 - indent, font_size_2))\r\n screen.blit(myfont.render('[1] - Wybierz poziom', False, GREEN), (width / 2 - indent, 3 * font_size_2))\r\n screen.blit(myfont.render('[2] - Zrestartuj postęp', False, GREEN), (width / 2 - indent, 4 * font_size_2))\r\n screen.blit(myfont.render('[3] - Odblokuj wszystkie poziomy', False, GREEN), 
(width / 2 - indent, 5 * font_size_2))\r\n screen.blit(myfont.render(' (opcja dla leniuchów)', False, GREEN), (width / 2 - indent, 6 * font_size_2))\r\n\r\n screen.blit(myfont.render('[Esc] - Powrót', False, GREEN), (width / 2 - indent, 8 * font_size_2))\r\n pygame.display.update()\r\n\r\n first_time = False\r\n\r\n pressed = pygame.key.get_pressed()\r\n\r\n if pressed[pygame.K_ESCAPE]:\r\n Touch.play()\r\n first_time = True\r\n break\r\n \r\n if pressed[pygame.K_2]:\r\n Touch.play()\r\n first_time = True\r\n lvl_numb = 0\r\n lvl = open(\"lvl.txt\", \"w\")\r\n lvl.write(\"0\")\r\n lvl.close()\r\n \r\n messege1(\"Zrestartowano postęp gry\")\r\n clock.tick(1)\r\n \r\n \r\n if pressed[pygame.K_3]:\r\n Touch.play()\r\n first_time = True\r\n lvl_numb = all_lvls - 1\r\n lvl = open(\"lvl.txt\", \"w\")\r\n lvl.write(str(all_lvls - 1))\r\n lvl.close()\r\n \r\n messege1(\"Odblokowano wszystkie poziomy\")\r\n clock.tick(1)\r\n \r\n \r\n if pressed[pygame.K_1]:\r\n Touch.play()\r\n first_time = True\r\n while not quit_game:\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit_game = True\r\n pygame.quit()\r\n quit()\r\n \r\n if first_time: \r\n screen = pygame.display.set_mode((width, height))\r\n screen.blit(myfont.render('Wybierz poziom:', False, GREEN), (width / 2 - indent, font_size_2))\r\n for lvls in range(lvl_numb + 1):\r\n screen.blit(myfont.render(\"[\" + str(lvls+1) + \"] - Poziom \" + str(lvls+1) + \".\",\r\n False, GREEN), (width / 2 - indent, (lvls + 2) * font_size_2))\r\n screen.blit(myfont.render('[Esc] - Powrót', False, GREEN), (width / 2 - indent, (lvl_numb + 4) * font_size_2))\r\n pygame.display.update()\r\n\r\n first_time = False\r\n \r\n pressed = pygame.key.get_pressed()\r\n\r\n if pressed[pygame.K_ESCAPE]:\r\n Touch.play()\r\n first_time = True\r\n break\r\n \r\n if pressed[pygame.K_1] or pressed[pygame.K_2] or pressed[pygame.K_3] or \\\r\n pressed[pygame.K_4] or pressed[pygame.K_5] or pressed[pygame.K_6] or \\\r\n pressed[pygame.K_7] or pressed[pygame.K_8] or pressed[pygame.K_9]:\r\n \r\n Touch.play()\r\n first_time = True\r\n\r\n if pressed[pygame.K_1]: nr_gry = 0\r\n if pressed[pygame.K_2]: nr_gry = 1\r\n if pressed[pygame.K_3]: nr_gry = 2\r\n if pressed[pygame.K_4]: nr_gry = 3\r\n if pressed[pygame.K_5]: nr_gry = 4\r\n if pressed[pygame.K_6]: nr_gry = 5\r\n if pressed[pygame.K_7]: nr_gry = 6\r\n if pressed[pygame.K_8]: nr_gry = 7\r\n if pressed[pygame.K_9]: nr_gry = 8\r\n \r\n if nr_gry <= lvl_numb:\r\n main_game(nr_gry)\r\n \r\n if victory:\r\n lvl_numb = min(lvl_numb + 1, all_lvls - 1)\r\n \r\n lvl = open(\"lvl.txt\", \"w\")\r\n lvl.write(str(lvl_numb))\r\n lvl.close()\r\n \r\n if nr_gry == all_lvls - 1:\r\n \r\n quit_loop = False\r\n \r\n VictoryBig.play()\r\n while not quit_loop:\r\n \r\n screen = pygame.display.set_mode((width, height))\r\n messege2('Jesteś zwycięzcą!')\r\n clock.tick(1.3)\r\n messege2('Nacinij dowolny przycisk, aby kontynuować!', 2/3)\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit_loop = True\r\n quit()\r\n if event.type == pygame.KEYDOWN:\r\n quit_loop = True\r\n \r\n \r\n clock.tick(1.3)\r\n \r\n \r\n \r\n\r\n"
] | [
[
"numpy.logical_xor",
"numpy.array",
"numpy.any"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
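The `apis` entry for the row above records numpy.logical_xor, numpy.array and numpy.any, which the game uses to propagate the water's direction from pipe to pipe. A minimal standalone sketch of how those three calls interact; the boolean layout (up, right, down, left) mirrors the game code, but the example values here are invented:

    import numpy as np

    # Pipe openings and inflow side, ordered (up, right, down, left).
    pipe_openings = np.array([True, False, True, False])   # a straight vertical pipe
    inflow = np.array([True, False, False, False])          # water arrives from the top

    # XOR clears the inflow side, leaving only the directions water can leave through.
    outflow = np.logical_xor(pipe_openings, inflow)

    # any() checks whether the pipe is open towards the inflow at all.
    connected = np.any(pipe_openings & inflow)

    print(outflow)    # [False False  True False] -> water continues downwards
    print(connected)  # True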
edelaye/tvm | [
"3d6c1df8c4e50f7e0268b936495439b085c0a631"
] | [
"benchmark/tensorrt/run_tvm.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport time\nimport numpy as np\nimport argparse\nimport nnvm\nimport tvm\nfrom tvm.contrib import graph_runtime\nfrom tvm.autotvm.measure.measure_methods import set_cuda_target_arch\n\nbatch_size = 1\n\nmodels = ['resnet18_v1',\n 'resnet34_v1',\n 'resnet50_v1',\n 'resnet101_v1',\n 'resnet152_v1',\n 'resnet18_v2',\n 'resnet34_v2',\n 'resnet50_v2',\n 'resnet101_v2',\n 'resnet152_v2',\n 'vgg11',\n 'vgg13',\n 'vgg16',\n 'vgg19',\n 'vgg11_bn',\n 'vgg13_bn',\n 'vgg16_bn',\n 'vgg19_bn',\n 'alexnet',\n 'densenet121',\n 'densenet161',\n 'densenet169',\n 'densenet201',\n 'squeezenet1.0',\n 'squeezenet1.1',\n 'inceptionv3',\n 'mobilenet1.0',\n 'mobilenet0.75',\n 'mobilenet0.5',\n 'mobilenet0.25',\n 'mobilenetv2_1.0',\n 'mobilenetv2_0.75',\n 'mobilenetv2_0.5',\n 'mobilenetv2_0.25']\n\n\ndef get_data_shape(model_name):\n if model_name.startswith('inception'):\n return (batch_size, 3, 299, 299)\n else:\n return (batch_size, 3, 224, 224)\n\n\ndef get_tvm_workload(network, **kwargs):\n from nnvm.frontend import from_mxnet\n from mxnet.gluon.model_zoo.vision import get_model\n block = get_model(network, **kwargs)\n if network.startswith('resnet152'):\n import sys\n sys.setrecursionlimit(10000)\n sym, params = from_mxnet(block)\n sym = nnvm.sym.softmax(sym)\n return sym, params\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Benchmark TVM')\n parser.add_argument('--ext-accel', type=str, default='none', choices=['none', 'tensorrt'])\n parser.add_argument('--network', type=str, required=True, choices=models)\n parser.add_argument('--cuda-arch', type=str, required=True, choices=['sm_37', 'sm_70', 'sm_53', 'sm_62'])\n parser.add_argument('--target-host', type=str, required=True, choices=['x86_64-linux-gnu', 'aarch64-linux-gnu'])\n parser.add_argument('--compile', dest='compile', action='store_true')\n parser.add_argument('--run', dest='run', action='store_true')\n parser.set_defaults(compile=False)\n parser.set_defaults(run=False)\n args = parser.parse_args()\n\n network = args.network\n num_classes = 1000\n data_shape = get_data_shape(network)\n ext_accel = None if args.ext_accel == 'none' else args.ext_accel\n cuda_arch = args.cuda_arch\n set_cuda_target_arch(cuda_arch)\n target_host = 'llvm -target=%s' % args.target_host\n\n if args.compile:\n net, params = get_tvm_workload(network, pretrained=True)\n net = nnvm.graph.create(net)\n print(\"===========Saving graph for model %s\" % network)\n with open('%s.json' % network, \"w\") as fo:\n fo.write(net.json())\n opt_level = 3\n target = tvm.target.cuda()\n print(\"===========Start to compile %s graph with params, external accelerator: %s\" % (network, ext_accel))\n start = time.time()\n with nnvm.compiler.build_config(opt_level=opt_level, 
ext_accel=ext_accel):\n graph, lib, params = nnvm.compiler.build(\n net, target, shape={\"data\": data_shape}, params=params, target_host=target_host)\n print(\"===========Compiling model %s took %.3fs\" % (network, time.time() - start))\n\n print(\"===========Saving lowered graph for model %s\" % network)\n with open('%s_ext_accel_%s_%s.json' % (network, ext_accel, cuda_arch), \"w\") as fo:\n fo.write(graph.json())\n print(\"===========Saving module for model %s\" % network)\n if lib.is_empty():\n print(\"lib is empty\")\n else:\n print(\"lib is not empty\")\n lib.export_library('%s_ext_accel_%s_%s.tar' % (network, ext_accel, cuda_arch))\n print(\"===========Saving params for model %s\" % network)\n with open('%s_ext_accel_%s_%s.params' % (network, ext_accel, cuda_arch), 'wb') as fo:\n fo.write(nnvm.compiler.save_param_dict(params))\n\n if args.run:\n print(\"===========Starting to load model %s\" % network)\n loaded_json = open('%s_ext_accel_%s_%s.json' % (network, ext_accel, cuda_arch)).read()\n loaded_lib = tvm.module.load('%s_ext_accel_%s_%s.tar' % (network, ext_accel, cuda_arch))\n loaded_params = bytearray(open('%s_ext_accel_%s_%s.params' % (network, ext_accel, cuda_arch), 'rb').read())\n ctx = tvm.gpu()\n np.random.seed(3342902)\n data = np.random.uniform(-1, 1, size=data_shape).astype(\"float32\")\n data = tvm.nd.array(data)\n # create module\n module = graph_runtime.create(loaded_json, loaded_lib, ctx)\n module.load_params(loaded_params)\n # set input and parameters\n module.set_input(\"data\", data)\n repeat = 100\n print(\"===========Building TensorRT inference engine...\")\n s = time.time()\n module.run()\n e = time.time() - s\n print(\"===========Building TensorRT inference engine took %.3f seconds\" % e)\n print(\"===========Warming up inference engine...\")\n for i in range(repeat):\n module.run(data=data)\n\n print(\"===========Starting to time inference...\")\n repeat = 1000\n start = time.time()\n for i in range(repeat):\n module.run(data=data)\n total_elapse = time.time() - start\n avg_time = total_elapse / repeat * 1000.0\n import resource\n print(\"peak memory usage (bytes on OS X, kilobytes on Linux) {}\"\n .format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))\n print(\"%s, ext_accel=%s, average time cost/forward: %.3fms\" % (network, ext_accel, avg_time))\n"
] | [
[
"numpy.random.uniform",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
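NumPy appears in the TVM benchmark above only through numpy.random.seed and numpy.random.uniform, which build a reproducible dummy input tensor. A short sketch of that pattern; the NCHW shape follows the script's 224x224 default, everything else is illustrative:

    import numpy as np

    np.random.seed(3342902)  # fix the RNG so repeated benchmark runs feed identical data
    # Fake image batch in [-1, 1), cast to the float32 dtype the graph runtime expects.
    data = np.random.uniform(-1, 1, size=(1, 3, 224, 224)).astype("float32")
    print(data.shape, data.dtype)  # (1, 3, 224, 224) float32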
zhaocq-nlp/NJUNMT-tf | [
"f1440726b3c007bcf19126fc4dee43a91dccc718"
] | [
"njunmt/data/text_inputter.py"
] | [
"# Copyright 2017 Natural Language Processing Group, Nanjing University, [email protected].\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Classes for reading in data. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy\nimport six\nimport tensorflow as tf\n\nfrom njunmt.data.data_reader import LineReader\nfrom njunmt.utils.constants import Constants\nfrom njunmt.utils.constants import concat_name\nfrom njunmt.utils.misc import padding_batch_data\nfrom njunmt.utils.expert_utils import repeat_n_times\n\n\ndef do_bucketing(pivot, *args):\n \"\"\" Sorts the `pivot` and args by length of `pivot`.\n\n Args:\n pivot: The pivot.\n args: A list of others.\n\n Returns: The same as inputs.\n \"\"\"\n tlen = numpy.array([len(t) for t in pivot])\n tidx = tlen.argsort()\n _pivot = [pivot[i] for i in tidx]\n _args = []\n for ele in args:\n _args.append([ele[i] for i in tidx])\n return _pivot, _args\n\n\ndef pack_feed_dict(name_prefixs, origin_datas, paddings, input_fields):\n \"\"\"\n\n Args:\n name_prefixs: A prefix string of a list of strings.\n origin_datas: Data list or a list of data lists.\n paddings: A padding id or a list of padding ids.\n input_fields: A list of input fields dict.\n\n Returns: A dict for while loop.\n \"\"\"\n data = dict()\n data[\"feed_dict\"] = dict()\n\n def map_fn(n, d, p):\n # n: name prefix\n # d: data list\n # p: padding symbol\n data[concat_name(n, Constants.IDS_NAME)] = d\n n_samples = len(d)\n n_devices = len(input_fields)\n n_samples_per_gpu = n_samples // n_devices\n if n_samples % n_devices > 0:\n n_samples_per_gpu += 1\n\n def _feed_batchs(_start_idx, _inpf):\n if _start_idx * n_samples_per_gpu >= n_samples:\n return 0\n x, x_len = padding_batch_data(\n d[_start_idx * n_samples_per_gpu:(_start_idx + 1) * n_samples_per_gpu], p)\n data[\"feed_dict\"][_inpf[concat_name(n, Constants.IDS_NAME)]] = x\n data[\"feed_dict\"][_inpf[concat_name(n, Constants.LENGTH_NAME)]] = x_len\n return len(x_len)\n\n parallels = repeat_n_times(\n n_devices, _feed_batchs,\n list(range(n_devices)), input_fields)\n data[\"feed_dict\"][\"parallels\"] = parallels\n\n if isinstance(name_prefixs, six.string_types):\n map_fn(name_prefixs, origin_datas, paddings)\n else:\n [map_fn(n, d, p) for n, d, p in zip(name_prefixs, origin_datas, paddings)]\n return data\n\n\[email protected]_metaclass(ABCMeta)\nclass TextInputter(object):\n \"\"\"Base class for inputters. \"\"\"\n\n def __init__(self):\n pass\n\n @abstractmethod\n def make_feeding_data(self, *args, **kwargs):\n \"\"\" Processes the data file and return an iterable instance for loop. \"\"\"\n raise NotImplementedError\n\n\nclass TextLineInputter(TextInputter):\n \"\"\" Class for reading in lines. 
\"\"\"\n\n def __init__(self,\n line_readers,\n padding_id,\n batch_size):\n \"\"\" Initializes the parameters for this inputter.\n\n Args:\n line_readers: A LineReader instance or a list of LineReader instances.\n padding_id: An integer for padding.\n batch_size: An integer value indicating the number of\n sentences passed into one step. Sentences will be padded by EOS.\n\n Raises:\n ValueError: if `batch_size` is None.\n \"\"\"\n super(TextLineInputter, self).__init__()\n self._readers = line_readers\n self._batch_size = batch_size\n if self._batch_size is None:\n raise ValueError(\"batch_size should be provided.\")\n self._padding_id = padding_id\n\n def _make_feeding_data_from(self,\n reader,\n input_fields,\n name_prefix):\n \"\"\" Processes the data file and return an iterable instance for loop.\n\n Args:\n reader: A LineReader instance.\n input_fields: A dict of placeholders.\n name_prefix: A string, the key name prefix for feed_dict.\n\n Returns: An iterable instance that packs feeding dictionary\n for `tf.Session().run` according to the `filename`.\n \"\"\"\n assert isinstance(reader, LineReader)\n ss_buf = []\n while True:\n encoded_ss = reader.next()\n if encoded_ss == \"\":\n break\n if encoded_ss is None:\n continue\n ss_buf.append(encoded_ss)\n reader.close()\n data = []\n batch_data_idx = 0\n\n while batch_data_idx < len(ss_buf):\n data.append(pack_feed_dict(\n name_prefixs=name_prefix,\n origin_datas=ss_buf[batch_data_idx: batch_data_idx + self._batch_size],\n paddings=self._padding_id,\n input_fields=input_fields))\n batch_data_idx += self._batch_size\n return data\n\n def make_feeding_data(self, input_fields,\n name_prefix=Constants.FEATURE_NAME_PREFIX):\n \"\"\" Processes the data file(s) and return an iterable\n instance for loop.\n\n Args:\n input_fields: A dict of placeholders.\n name_prefix: A string, the key name prefix for feed_dict.\n\n Returns: An iterable instance or a list of iterable\n instances according to the `data_field_name`\n in the constructor.\n \"\"\"\n if isinstance(self._readers, list):\n return [self._make_feeding_data_from(\n reader, input_fields, name_prefix)\n for reader in self._readers]\n return self._make_feeding_data_from(\n self._readers, input_fields, name_prefix)\n\n\nclass ParallelTextInputter(TextInputter):\n \"\"\" Class for reading in parallel texts. \"\"\"\n\n def __init__(self,\n features_reader,\n labels_reader,\n features_padding_id,\n labels_padding_id,\n batch_size=None,\n batch_tokens_size=None,\n shuffle_every_epoch=None,\n fill_full_batch=False,\n bucketing=True):\n \"\"\" Initializes the parameters for this inputter.\n\n Args:\n features_reader: A LineReader instance for features.\n labels_reader: A LineReader instance for labels.\n features_padding_id: An integer for features padding.\n labels_padding_id: An integer for labels padding.\n batch_size: An integer value indicating the number of\n sentences passed into one step. Sentences will be padded by EOS.\n batch_tokens_size: An integer value indicating the number of\n words of each batch. If provided, sentence pairs will be batched\n together by approximate sequence length.\n shuffle_every_epoch: A string type. 
If provided, use it as postfix\n of shuffled data file name.\n fill_full_batch: Whether to ensure each batch of data has `batch_size`\n of datas.\n bucketing: Whether to sort the sentences by length of labels.\n\n Raises:\n ValueError: if both `batch_size` and `batch_tokens_size` are\n not provided.\n\n \"\"\"\n super(ParallelTextInputter, self).__init__()\n self._features_reader = features_reader\n self._labels_reader = labels_reader\n self._features_padding_id = features_padding_id\n self._labels_padding_id = labels_padding_id\n self._batch_size = batch_size\n self._batch_tokens_size = batch_tokens_size\n self._shuffle_every_epoch = shuffle_every_epoch\n self._fill_full_batch = fill_full_batch\n self._bucketing = bucketing\n if self._batch_size is None and self._batch_tokens_size is None:\n raise ValueError(\"Either batch_size or batch_tokens_size should be provided.\")\n if (self._batch_size is not None) and (self._batch_tokens_size is not None):\n tf.logging.info(\"batching data according to batch_tokens_size={}, \"\n \"and use batch_size={} as an auxiliary variable.\".format(batch_tokens_size, batch_size))\n if batch_tokens_size is None:\n self._cache_size = self._batch_size * 128 # 80 * 128 = 10240\n else:\n self._cache_size = self._batch_tokens_size * 6 # 4096 * 6 := 25000\n if batch_size is None:\n self._batch_size = 32\n\n def _small_parallel_data(self, input_fields):\n \"\"\" Function for reading small scale parallel data for evaluation.\n\n Args:\n input_fields: A dict of placeholders or a list of dicts.\n\n Returns: A list of feeding data.\n \"\"\"\n ss_buf = []\n tt_buf = []\n while True:\n ss = self._features_reader.next()\n tt = self._labels_reader.next()\n if ss == \"\" or tt == \"\":\n break\n if ss is None or tt is None:\n continue\n ss_buf.append(ss)\n tt_buf.append(tt)\n self._features_reader.close()\n self._labels_reader.close()\n if self._bucketing:\n tt_buf, ss_buf = do_bucketing(tt_buf, ss_buf)\n ss_buf = ss_buf[0]\n data = []\n batch_data_idx = 0\n while batch_data_idx < len(ss_buf):\n data.append(\n pack_feed_dict(\n name_prefixs=[Constants.FEATURE_NAME_PREFIX, Constants.LABEL_NAME_PREFIX],\n origin_datas=[ss_buf[batch_data_idx: batch_data_idx + self._batch_size],\n tt_buf[batch_data_idx: batch_data_idx + self._batch_size]],\n paddings=[self._features_padding_id, self._labels_padding_id],\n input_fields=input_fields))\n batch_data_idx += self._batch_size\n return data\n\n def make_feeding_data(self,\n input_fields,\n in_memory=False):\n \"\"\" Processes the data files and return an iterable\n instance for loop.\n Args:\n input_fields: A dict of placeholders or a list of dicts.\n in_memory: Whether to load all data into memory.\n\n Returns: An iterable instance.\n \"\"\"\n if in_memory and self._fill_full_batch:\n raise ValueError(\n \"`in_memory` option with _SmallParallelData fn now only deal with small evaluation data. \"\n \"`fill_full_batch` for ParallelTextInputter is available for training data only.\")\n if in_memory and self._shuffle_every_epoch:\n raise ValueError(\n \"`in_memory` option with _SmallParallelData fn now only deal with small evaluation data. \"\n \"`shuffle_every_epoch` for ParallelTextInputter is available for training data only.\")\n if in_memory:\n return self._small_parallel_data(input_fields)\n return self._BigParallelDataIterator(\n input_fields=input_fields,\n **self.__dict__)\n\n class _BigParallelDataIterator(object):\n \"\"\" An iterator class for reading parallel data. 
\"\"\"\n\n def __init__(self,\n input_fields,\n **kwargs):\n \"\"\" Initializes.\n\n Args:\n input_fields: A dict of placeholders or a list of dicts.\n **kwargs: The attributes of the parent ParallelTextInputter.\n \"\"\"\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n self._reset()\n\n self._features_buffer = []\n self._labels_buffer = []\n self._features_len_buffer = []\n self._labels_len_buffer = []\n self._end_of_data = False\n self._input_fields = input_fields\n\n def __iter__(self):\n return self\n\n def _reset(self):\n \"\"\" shuffle features & labels file. \"\"\"\n if not hasattr(self, \"_shuffled_features_file\"):\n self._shuffled_features_file = \"features_file.\" + str(self._shuffle_every_epoch)\n self._shuffled_labels_file = \"labels_file.\" + str(self._shuffle_every_epoch)\n argsort_index = self._features_reader.reset(\n do_shuffle=self._shuffle_every_epoch,\n shuffle_to_file=self._shuffled_features_file,\n argsort_index=None)\n _ = self._labels_reader.reset(\n do_shuffle=self._shuffle_every_epoch,\n shuffle_to_file=self._shuffled_labels_file,\n argsort_index=argsort_index)\n\n def __next__(self):\n \"\"\" capable for python3 \"\"\"\n return self.next()\n\n def next(self):\n if self._end_of_data:\n self._end_of_data = False\n self._reset()\n raise StopIteration\n\n assert len(self._features_buffer) == len(self._labels_buffer), \"Buffer size mismatch\"\n if len(self._features_buffer) < self._batch_size:\n cnt = len(self._features_buffer)\n while cnt < self._cache_size:\n ss = self._features_reader.next()\n tt = self._labels_reader.next()\n if ss == \"\" or tt == \"\":\n break\n if ss is None or tt is None:\n continue\n cnt += 1\n self._features_buffer.append(ss)\n self._labels_buffer.append(tt)\n if len(self._features_buffer) == 0 or len(self._labels_buffer) == 0:\n self._end_of_data = False\n self._reset()\n raise StopIteration\n if self._bucketing:\n # sort by len\n self._labels_buffer, self._features_buffer \\\n = do_bucketing(self._labels_buffer, self._features_buffer)\n self._features_buffer = self._features_buffer[0]\n self._features_len_buffer = [len(s) for s in self._features_buffer]\n self._labels_len_buffer = [len(t) for t in self._labels_buffer]\n if self._fill_full_batch and len(self._features_buffer) < self._batch_size:\n self._end_of_data = False\n self._reset()\n raise StopIteration\n local_batch_size = self._batch_size\n if self._batch_tokens_size is not None: # batching data by num of tokens\n sum_s = numpy.sum(self._features_len_buffer[: local_batch_size])\n sum_t = numpy.sum(self._labels_len_buffer[: local_batch_size])\n while True:\n if sum_s >= self._batch_tokens_size or sum_t >= self._batch_tokens_size:\n break\n if self._batch_tokens_size - sum_s < 20 or self._batch_tokens_size - sum_t < 20:\n break\n if local_batch_size >= len(self._features_len_buffer):\n break\n sum_s += self._features_len_buffer[local_batch_size]\n sum_t += self._labels_len_buffer[local_batch_size]\n local_batch_size += 1\n features = self._features_buffer[:local_batch_size]\n labels = self._labels_buffer[:local_batch_size]\n if len(features) < local_batch_size:\n del self._features_buffer[:]\n del self._labels_buffer[:]\n del self._features_len_buffer[:]\n del self._labels_len_buffer[:]\n else:\n del self._features_buffer[:local_batch_size]\n del self._labels_buffer[:local_batch_size]\n del self._features_len_buffer[:local_batch_size]\n del self._labels_len_buffer[:local_batch_size]\n if len(features) <= 0 or len(labels) <= 0:\n self._end_of_data = False\n self._reset()\n 
raise StopIteration\n ret_data = pack_feed_dict(\n name_prefixs=[Constants.FEATURE_NAME_PREFIX, Constants.LABEL_NAME_PREFIX],\n origin_datas=[features, labels],\n paddings=[self._features_padding_id, self._labels_padding_id],\n input_fields=self._input_fields)\n if self._fill_full_batch:\n ret_data[\"feed_dict\"].pop(\"parallels\")\n return ret_data\n"
] | [
[
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
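The only NumPy call recorded for the NJUNMT-tf inputter is numpy.sum, used when batching sentence pairs by an approximate token budget. A self-contained toy version of that idea (the lengths and budget are made up; the real iterator additionally caps the batch size and allows a small slack):

    import numpy as np

    lengths = [12, 7, 30, 5, 9, 41]   # toy sentence lengths
    token_budget = 50                 # rough number of tokens per batch

    batch = 1
    while batch < len(lengths) and np.sum(lengths[:batch]) < token_budget:
        batch += 1

    print(batch, np.sum(lengths[:batch]))  # 4 54, the last sentence may overshoot the budget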
bsun0802/Zero-Learning-Fast-Medical-Image-Fusion | [
"b3325f09b3d81c3262bb7fc546739e07f3a459aa"
] | [
"code/main.py"
] | [
"import sys\nimport pkg_resources\nimport argparse\nfrom pathlib import Path\nfrom subprocess import check_call\n\nimport torch\nfrom torchvision.models.vgg import vgg19\n\nfrom skimage.measure import shannon_entropy\nfrom pytorch_msssim import SSIM, MS_SSIM\n\nfrom utils import *\nfrom metrics import *\n\n\ndef parse_args():\n '''Usage:\n python main.py --imagePath=../images/IV_images --imageSource \"VIS*.png\" \"IR*.png\"\n python main.py --imagePath=../images/MRI-PET --imageSource \"MRI*.png\" \"PET*.png\"\n # python main.py --imagePath=../images/MRI-SPECT --imageSource \"MRI*.png\" \"SPECT*.png\"\n '''\n parser = argparse.ArgumentParser(description='Image Fusion with guided filter and vgg19')\n parser.add_argument('--imagePath', required=True)\n parser.add_argument('--imageSources', required=True, nargs='+')\n args = parser.parse_args()\n return args\n\n\nclass Args:\n device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')\n\n k = (30, 30) # box blur kernel size\n r, eps = 45, 0.01 # parameters for guided filter\n\n model = vgg19(pretrained=True)\n relus = [1, 3, 8] # relus for vgg19\n\n args = parse_args()\n\n imagePath = Path(args.imagePath)\n # put images needed to be fused together in a bundle\n imageSources = []\n for pattern in args.imageSources:\n imageSources.append(sorted(imagePath.glob(pattern)))\n bundles = zip(*imageSources)\n\n # grid_cell_size = (360, 270) # for IV_images\n grid_cell_size = (320, 320) # for brain MRI/CT images\n resultPath = Path('../results').joinpath(imagePath.stem)\n\n # install package\n _installed = {pkg.key for pkg in pkg_resources.working_set}\n if 'pytorch-msssim' not in _installed:\n _python = sys.executable\n check_call(['echo', '[INFO] Install pytorch-msssim'])\n _pipinstall = ['sudo', _python, '-m', 'pip', 'install', 'pytorch-msssim']\n check_call(_pipinstall)\n\n\nif __name__ == '__main__':\n container = dict(SCD=[], SSIM_f=[], MSSSIM_f=[], Qmi=[], EN=[], MI=[])\n ssim_measure = SSIM(data_range=1.0, size_average=True, channel=1)\n msssim_measure = MS_SSIM(data_range=1.0, size_average=True, channel=1)\n\n nested_list = []\n Args.resultPath.mkdir(parents=True, exist_ok=True)\n\n for bundle in Args.bundles:\n print(f'Fusing => f{[fp.name for fp in bundle]}')\n imgs = [read_image(fp) for fp in bundle] # np.uint8\n\n Ys, Ys_f, CbCrs_f = split_YCbCr(imgs)\n\n bases, details = decompose(Ys_f)\n\n Wb_0 = sal_weights(Ys)\n Wb_0 = np.moveaxis(Wb_0, -1, 0) # easier indexed in for-loop\n Wb = guided_optimize(Ys_f, Wb_0, Args.r, Args.eps)\n\n fused_base = weighted_sum(bases, Wb)\n\n tensor_details = stack_to_tensor(details)\n fused_detial = cnn_detail_fusion(\n tensor_details, Args.model, Args.device, relus=Args.relus)\n\n fusedY_f = np.clip(fused_base + fused_detial, 0, 1)\n\n fused_f = YCbCr_to_RGB(CbCrs_f, fusedY_f)\n\n fused_u8 = np.rint(fused_f * 255).astype(np.uint8)\n name = ''.join(x for x in bundle[0].name if x.isdigit())\n save_image(fused_u8, Args.resultPath.joinpath(f'FUSED-{name}.png'))\n\n nested_list.append(grid_row(*imgs, fused_u8, resized=Args.grid_cell_size))\n\n if len([x for x in CbCrs_f if x is not None]) == 0:\n print('Evaluation..')\n container['SCD'].append(SCD(Ys_f[0], Ys_f[1], fused_f))\n container['SSIM_f'].append(\n ssim_f(to_tensor(Ys_f[0]),\n to_tensor(Ys_f[1]), to_tensor(fused_f), measure=ssim_measure)\n )\n container['MSSSIM_f'].append(\n ssim_f(to_tensor(Ys_f[0]),\n to_tensor(Ys_f[1]), to_tensor(fused_f), measure=msssim_measure)\n )\n container['Qmi'].append(Qmi(Ys_f[0], Ys_f[1], fused_f))\n 
container['EN'].append(shannon_entropy(fused_u8))\n container['MI'].append(mi_f(Ys_f[0], Ys_f[1], fused_f))\n\n for k, v in container.items():\n print(f'{k} : {np.mean(v):.4f}')\n\n print('Done!\\n')\n\n grid = make_grid(nested_list, Args.grid_cell_size, addText=True)\n save_image(grid, Args.resultPath.joinpath('combined.pdf'))\n"
] | [
[
"torch.device",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
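The torch calls recorded for the fusion script above amount to the usual device-selection idiom; for completeness, the pattern in isolation:

    import torch

    # Prefer the first CUDA GPU when one is available, otherwise fall back to the CPU.
    device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    print(device)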
GDSC-UIT/RealTime-Emotion-Recognizer | [
"2d12f651dbd02b9bccc9990a43189e8274fbea46"
] | [
"build_model/data_preprocess.py"
] | [
"from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom tensorflow.keras import utils\nimport pandas as pd\nimport numpy as np\nfrom constants import img_depth, img_height, img_width\n\ndf = pd.read_csv('dataset/fer2013.csv')\n\nemotion_label = {0:'anger', 1:'disgust', 2:'fear', 3:'happiness', 4: 'sadness', 5: 'surprise', 6: 'neutral'}\n\nINTERESTED_LABELS = [3, 4, 6]\n\ndf = df[df.emotion.isin(INTERESTED_LABELS)]\n\nimg_array = df.pixels.apply(lambda x: np.array(x.split(' ')).reshape(img_width, img_height, img_depth).astype('float32'))\nimg_array = np.stack(img_array, axis=0)\n\nle = LabelEncoder()\nimg_labels = le.fit_transform(df.emotion)\nimg_labels = utils.to_categorical(img_labels)\nle_name_mapping = dict(zip(le.classes_, le.transform(le.classes_)))\n\nX_train, X_valid, y_train, y_valid = train_test_split(img_array, img_labels,\n shuffle=True, stratify=img_labels,\n test_size=0.1, random_state=42)\n\nimg_width = X_train.shape[1]\nimg_height = X_train.shape[2]\nimg_depth = X_train.shape[3]\nnum_classes = y_train.shape[1]\n\n''' Normalize data '''\nX_train = X_train / 255.\nX_valid = X_valid / 255.\n"
] | [
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.stack",
"sklearn.preprocessing.LabelEncoder",
"tensorflow.keras.utils.to_categorical"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
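A compact sketch of the preprocessing pattern the emotion-recognizer row records (parse pixel strings, stack, label-encode, one-hot, stratified split). It substitutes a tiny synthetic DataFrame and 2x2 "images" for fer2013.csv, so only the API usage is representative:

    import numpy as np
    import pandas as pd
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    from tensorflow.keras import utils

    # Tiny stand-in for the FER-2013 CSV: space-separated pixels plus an emotion id.
    df = pd.DataFrame({
        "emotion": [3, 4, 6, 3, 4, 6, 3, 4, 6],
        "pixels": ["0 1 2 3"] * 9,
    })

    # Parse each pixel string into a 2x2x1 float image and stack into one array.
    img_array = df.pixels.apply(
        lambda x: np.array(x.split(" ")).reshape(2, 2, 1).astype("float32"))
    img_array = np.stack(img_array, axis=0)

    # Map the sparse emotion ids {3, 4, 6} onto 0..2 and one-hot encode them.
    le = LabelEncoder()
    img_labels = utils.to_categorical(le.fit_transform(df.emotion))

    X_train, X_valid, y_train, y_valid = train_test_split(
        img_array, img_labels, shuffle=True, stratify=img_labels,
        test_size=1 / 3, random_state=42)
    print(X_train.shape, y_train.shape)  # (6, 2, 2, 1) (6, 3)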
percycat/Tutorial | [
"d01fbb974c3a85436ea68ec277ca55cd553dd61f"
] | [
"MachineLearning/Regression/demo_LMS.py"
] | [
"\"\"\"\n===========\nOrdinary Least Square Demo\n===========\n\nUsing the slider widget to control visual properties of your plot.\n\nIn this example, a slider is used to choose the variance of the distribution that\nthe noise belongs to.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\nfig, ax = plt.subplots()\nplt.subplots_adjust(left=0.25, bottom=0.25)\na0 = 1.5\nf0 = -2.5\nx_range = 5\nplt.axis([0, x_range, -10, 10])\nt = np.arange(0.0, x_range, 0.01)\ns = a0*t + f0\nl, = plt.plot(t, s, lw=2, color='red')\nsample = np.random.normal(0, 1, len(t[0::10]))\nN=plt.scatter(t[0::10], s[0::10]+sample, color='green')\n\naxcolor = 'lightgoldenrodyellow'\nax_var = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\n\ndelta = 0.1\nsVar = Slider(ax_var, 'variance', 0.1, 5.0, valinit=1.0, valstep=delta)\n\ndef draw(var, color='red'):\n ax.cla()\n ax.axis([0, x_range, -10, 10])\n ax.scatter(t[0::10], s[0::10]+sample, color='green')\n ax.plot(t, s, lw=2, color=color)\n fig.canvas.draw_idle()\n \ndef update(val):\n global sample\n var = sVar.val\n sample = np.random.normal(0, var, len(t[0::10]))\n draw(var)\n \nsVar.on_changed(update)\n\nresetax = plt.axes([0.8, 0.025, 0.1, 0.04])\nbutton = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')\n\n\ndef reset(event):\n sVar.reset()\nbutton.on_clicked(reset)\n\nrax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)\nradio = RadioButtons(rax, ('red', 'blue', 'orange'), active=0)\n\n\ndef colorfunc(label):\n draw(sVar.val, label)\n fig.canvas.draw_idle()\nradio.on_clicked(colorfunc)\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.scatter",
"matplotlib.widgets.Button",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axes",
"matplotlib.widgets.Slider",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.widgets.RadioButtons"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
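The slider demo above leans on Matplotlib's widget API; a stripped-down sketch of the Slider-plus-callback pattern (it rescales a line's slope instead of resampling noise, so it is a simplification rather than the demo itself):

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.widgets import Slider

    fig, ax = plt.subplots()
    plt.subplots_adjust(bottom=0.25)              # leave room for the slider
    t = np.arange(0.0, 5.0, 0.01)
    line, = ax.plot(t, 1.5 * t - 2.5, lw=2, color='red')

    ax_slope = plt.axes([0.25, 0.1, 0.65, 0.03])  # rect for the slider axes
    s_slope = Slider(ax_slope, 'slope', -5.0, 5.0, valinit=1.5)

    def update(val):
        line.set_ydata(s_slope.val * t - 2.5)     # redraw the line with the new slope
        fig.canvas.draw_idle()

    s_slope.on_changed(update)
    plt.show()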
enjoy82/pytorch-ssd | [
"bc5449244a2ad98c81f49e0c187f7f8597185cba"
] | [
"run_ssd_demo.py"
] | [
"import cv2\nimport numpy as np\nimport time\nfrom utils_for_openvino.mb3lite_predictor import create_mobilenetv3_small_ssd_lite_predictor\n \n# モジュール読み込み \nimport sys\nsys.path.append('/opt/intel/openvino/python/python3.5/armv7l')\nfrom openvino.inference_engine import IENetwork, IEPlugin\n\nwindowwidth = 320\nwindowheight = 240\nimage_sige = 300\nnms_method = \"hard\"\nlabel_path = \"./models/gakuv2/open-images-model-labels.txt\"\nclass_names = [name.strip() for name in open(label_path).readlines()]\n\n\n# ターゲットデバイスの指定 \nplugin = IEPlugin(device=\"MYRIAD\")\n \n# モデルの読み込み \nnet = IENetwork(model='./models/forasp/mbv3-ssd-cornv1.xml', weights='./models/forasp/mbv3-ssd-cornv1.bin')\nexec_net = plugin.load(network=net)\ninput_blob_name = list(net.inputs.keys())[0]\noutput_blob_name = sorted(list(net.outputs.keys()))\n\n#predictor\npredictor = create_mobilenetv3_small_ssd_lite_predictor(exec_net, image_size = image_sige, nms_method=nms_method, input = input_blob_name, output = output_blob_name)\n\nprint(\"stand\", input_blob_name, output_blob_name)\n# カメラ準備 \n\n\nframe = cv2.imread(\"/home/pi/samples/build/hikage_010_can.JPG\")\n# Reload on error \n\n#frame = cv2.imread(\"./gun.jpg\")\nboxes, labels, probs = predictor.predict(frame,10, 0.4) #TODO 閾値\n# 出力から必要なデータのみ取り出し \n#TODO label怪しい\nprint(boxes, labels, probs)\n\nframe = cv2.resize(frame, (300, 300))\n\nboxed = [] #重複box\nfor i in range(len(boxes)):\n box = boxes[i, :]\n box = list(map(int, box))\n flag = 1\n for b in boxed:#重複チェック\n if np.all(box == b):\n flag = 0\n if flag == 0:\n continue\n boxed.append(box)\n\n label = class_names[labels[i]] + str(probs[i])\n cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (255, 255, 0), 4)\n cv2.putText(frame, label,\n (int(box[0]) + 20, int(box[1]) + 40),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1, # font scale\n (255, 0, 255),\n 2) # line type\n\n# 画像表示 \ncv2.imshow('frame', frame)\ncv2.imwrite(\"/home/pi/samples/build/result.jpg\", frame)\n \n# 終了処理 \n#cap.release()\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.all"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
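numpy.all is the single recorded call here, used to drop duplicate detection boxes before drawing them; the check in isolation, with invented coordinates:

    import numpy as np

    boxes = [[10, 10, 50, 50], [10, 10, 50, 50], [20, 5, 40, 60]]

    kept = []
    for box in boxes:
        # np.all compares the candidate element-wise against every box already kept.
        if not any(np.all(np.array(box) == np.array(b)) for b in kept):
            kept.append(box)

    print(kept)  # [[10, 10, 50, 50], [20, 5, 40, 60]]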
martinfleis/contextily | [
"fc00861c9756821f62de27bb06ef6771abc517d6"
] | [
"tests/test_ctx.py"
] | [
"import matplotlib\n\nmatplotlib.use(\"agg\") # To prevent plots from using display\nimport contextily as ctx\nimport os\nimport numpy as np\nimport mercantile as mt\nimport rasterio as rio\nfrom contextily.tile import _calculate_zoom\nfrom numpy.testing import assert_array_almost_equal\nimport pytest\n\nTOL = 7\nSEARCH = \"boulder\"\nADJUST = -3 # To save download size / time\n\n# Tile\n\n\ndef test_bounds2raster():\n w, s, e, n = (\n -106.6495132446289,\n 25.845197677612305,\n -93.50721740722656,\n 36.49387741088867,\n )\n _ = ctx.bounds2raster(w, s, e, n, \"test.tif\", zoom=4, ll=True)\n rtr = rio.open(\"test.tif\")\n img = np.array([band for band in rtr.read()]).transpose(1, 2, 0)\n solu = (\n -12528334.684053527,\n 2509580.5126589066,\n -10023646.141204873,\n 5014269.05550756,\n )\n for i, j in zip(rtr.bounds, solu):\n assert round(i - j, TOL) == 0\n assert img[100, 100, :].tolist() == [230, 229, 188, 255]\n assert img[100, 200, :].tolist() == [156, 180, 131, 255]\n assert img[200, 100, :].tolist() == [230, 225, 189, 255]\n assert img[:,:,:3].sum() == 36926856\n assert img.sum() == 53638536\n assert_array_almost_equal(img[:,:,:3].mean(), 187.8197021484375)\n assert_array_almost_equal(img.mean(), 204.614777)\n\n # multiple tiles for which result is not square\n w, s, e, n = (\n 2.5135730322461427,\n 49.529483547557504,\n 6.15665815595878,\n 51.47502370869813,\n )\n img, ext = ctx.bounds2raster(w, s, e, n, \"test2.tif\", zoom=7, ll=True)\n rtr = rio.open(\"test2.tif\")\n rimg = np.array([band for band in rtr.read()]).transpose(1, 2, 0)\n assert rimg.shape == img.shape\n assert rimg.sum() == img.sum()\n assert_array_almost_equal(rimg.mean(), img.mean())\n assert_array_almost_equal(\n ext, (0.0, 939258.2035682457, 6261721.35712164, 6887893.492833804)\n )\n rtr_bounds = [\n -611.49622628141,\n 6262332.853347922,\n 938646.7073419644,\n 6888504.989060086,\n ]\n assert_array_almost_equal(list(rtr.bounds), rtr_bounds)\n\n\ndef test_bounds2img():\n w, s, e, n = (\n -106.6495132446289,\n 25.845197677612305,\n -93.50721740722656,\n 36.49387741088867,\n )\n img, ext = ctx.bounds2img(w, s, e, n, zoom=4, ll=True)\n solu = (\n -12523442.714243276,\n -10018754.171394622,\n 2504688.5428486555,\n 5009377.085697309,\n )\n for i, j in zip(ext, solu):\n assert round(i - j, TOL) == 0\n assert img[100, 100, :].tolist() == [230, 229, 188, 255]\n assert img[100, 200, :].tolist() == [156, 180, 131, 255]\n assert img[200, 100, :].tolist() == [230, 225, 189, 255]\n\n\ndef test_warp_tiles():\n w, s, e, n = (\n -106.6495132446289,\n 25.845197677612305,\n -93.50721740722656,\n 36.49387741088867,\n )\n img, ext = ctx.bounds2img(w, s, e, n, zoom=4, ll=True)\n wimg, wext = ctx.warp_tiles(img, ext)\n assert_array_almost_equal(\n np.array(wext),\n np.array(\n [\n -112.54394531249996,\n -90.07903186397023,\n 21.966726124122374,\n 41.013065787006276,\n ]\n ),\n )\n assert wimg[100, 100, :].tolist() == [228, 221, 184, 255]\n assert wimg[100, 200, :].tolist() == [213, 219, 177, 255]\n assert wimg[200, 100, :].tolist() == [133, 130, 109, 255]\n\n\ndef test_warp_img_transform():\n w, s, e, n = ext = (\n -106.6495132446289,\n 25.845197677612305,\n -93.50721740722656,\n 36.49387741088867,\n )\n _ = ctx.bounds2raster(w, s, e, n, \"test.tif\", zoom=4, ll=True)\n rtr = rio.open(\"test.tif\")\n img = np.array([band for band in rtr.read()])\n wimg, wext = ctx.warp_img_transform(\n img, rtr.transform, rtr.crs, {\"init\": \"epsg:4326\"}\n )\n assert wimg[:, 100, 100].tolist() == [228, 221, 184, 255]\n assert wimg[:, 100, 200].tolist() 
== [213, 219, 177, 255]\n assert wimg[:, 200, 100].tolist() == [133, 130, 109, 255]\n\n\ndef test_howmany():\n w, s, e, n = (\n -106.6495132446289,\n 25.845197677612305,\n -93.50721740722656,\n 36.49387741088867,\n )\n zoom = 7\n expected = 25\n got = ctx.howmany(w, s, e, n, zoom=zoom, verbose=False, ll=True)\n assert got == expected\n\n\ndef test_ll2wdw():\n w, s, e, n = (\n -106.6495132446289,\n 25.845197677612305,\n -93.50721740722656,\n 36.49387741088867,\n )\n hou = (-10676650.69219051, 3441477.046670125, -10576977.7804825, 3523606.146650609)\n _ = ctx.bounds2raster(w, s, e, n, \"test.tif\", zoom=4, ll=True)\n rtr = rio.open(\"test.tif\")\n wdw = ctx.tile.bb2wdw(hou, rtr)\n assert wdw == ((152, 161), (189, 199))\n\n\ndef test__sm2ll():\n w, s, e, n = (\n -106.6495132446289,\n 25.845197677612305,\n -93.50721740722656,\n 36.49387741088867,\n )\n minX, minY = ctx.tile._sm2ll(w, s)\n maxX, maxY = ctx.tile._sm2ll(e, n)\n nw, ns = mt.xy(minX, minY)\n ne, nn = mt.xy(maxX, maxY)\n assert round(nw - w, TOL) == 0\n assert round(ns - s, TOL) == 0\n assert round(ne - e, TOL) == 0\n assert round(nn - n, TOL) == 0\n\n\ndef test_autozoom():\n w, s, e, n = (-105.3014509, 39.9643513, -105.1780988, 40.094409)\n expected_zoom = 13\n zoom = _calculate_zoom(w, s, e, n)\n assert zoom == expected_zoom\n\n\ndef test_validate_zoom():\n # tiny extent to trigger large calculated zoom\n w, s, e, n = (0, 0, 0.001, 0.001)\n\n # automatically inferred -> set to known max but warn\n with pytest.warns(UserWarning, match=\"inferred zoom level\"):\n ctx.bounds2img(w, s, e, n)\n\n # specify manually -> raise an error\n with pytest.raises(ValueError):\n ctx.bounds2img(w, s, e, n, zoom=23)\n\n # with specific string url (not dict) -> error when specified\n url = \"https://a.tile.openstreetmap.org/{z}/{x}/{y}.png\"\n with pytest.raises(ValueError):\n ctx.bounds2img(w, s, e, n, zoom=33, source=url)\n\n # but also when inferred (no max zoom know to set to)\n with pytest.raises(ValueError):\n ctx.bounds2img(w, s, e, n, source=url)\n\n\n# Place\n\n\ndef test_place():\n expected_bbox = [-105.430545, 39.8549856, -105.110545, 40.1749856]\n expected_bbox_map = [\n -11740727.544603072,\n -11662456.027639052,\n 4774562.53480525,\n 4931105.568733288,\n ]\n expected_zoom = 9\n loc = ctx.Place(SEARCH, zoom_adjust=ADJUST)\n assert loc.im.shape == (512, 256, 4)\n loc # Make sure repr works\n\n # Check auto picks are correct\n assert loc.search == SEARCH\n assert_array_almost_equal([loc.w, loc.s, loc.e, loc.n], expected_bbox)\n assert_array_almost_equal(loc.bbox_map, expected_bbox_map)\n assert loc.zoom == expected_zoom\n\n loc = ctx.Place(SEARCH, path=\"./test2.tif\", zoom_adjust=ADJUST)\n assert os.path.exists(\"./test2.tif\")\n\n # .plot() method\n ax = loc.plot()\n assert_array_almost_equal(loc.bbox_map, ax.images[0].get_extent())\n\n f, ax = matplotlib.pyplot.subplots(1)\n ax = loc.plot(ax=ax)\n assert_array_almost_equal(loc.bbox_map, ax.images[0].get_extent())\n\n\ndef test_plot_map():\n # Place as a search\n loc = ctx.Place(SEARCH, zoom_adjust=ADJUST)\n w, e, s, n = loc.bbox_map\n ax = ctx.plot_map(loc)\n\n assert ax.get_title() == loc.place\n ax = ctx.plot_map(loc.im, loc.bbox)\n assert_array_almost_equal(loc.bbox, ax.images[0].get_extent())\n\n # Place as an image\n img, ext = ctx.bounds2img(w, s, e, n, zoom=10)\n ax = ctx.plot_map(img, ext)\n assert_array_almost_equal(ext, ax.images[0].get_extent())\n\n\n# Plotting\n\n\ndef test_add_basemap():\n # Plot boulder bbox as in test_place\n x1, x2, y1, y2 = [\n -11740727.544603072,\n 
-11701591.786121061,\n 4852834.0517692715,\n 4891969.810251278,\n ]\n\n # Test web basemap\n fig, ax = matplotlib.pyplot.subplots(1)\n ax.set_xlim(x1, x2)\n ax.set_ylim(y1, y2)\n ctx.add_basemap(ax, zoom=10)\n\n # ensure add_basemap did not change the axis limits of ax\n ax_extent = (x1, x2, y1, y2)\n assert ax.axis() == ax_extent\n\n assert ax.images[0].get_array().sum() == 51551927\n assert ax.images[0].get_array().shape == (256, 256, 4)\n assert_array_almost_equal(ax.images[0].get_array()[:,:,:3].mean(), 177.20665995279947)\n assert_array_almost_equal(ax.images[0].get_array().mean(), 196.654995)\n\n # Test local source\n ## Windowed read\n subset = (\n -11730803.981631357,\n -11711668.223149346,\n 4862910.488797557,\n 4882046.247279563,\n )\n\n f, ax = matplotlib.pyplot.subplots(1)\n ax.set_xlim(subset[0], subset[1])\n ax.set_ylim(subset[2], subset[3])\n loc = ctx.Place(SEARCH, path=\"./test2.tif\", zoom_adjust=ADJUST)\n ctx.add_basemap(ax, source=\"./test2.tif\", reset_extent=True)\n\n assert_array_almost_equal(subset, ax.images[0].get_extent())\n assert ax.images[0].get_array().sum() == 3187219\n assert ax.images[0].get_array()[:,:,:3].sum() == 2175124\n assert ax.images[0].get_array().shape == (64, 64, 4)\n assert_array_almost_equal(ax.images[0].get_array()[:,:,:3].mean(), 177.01204427083334)\n assert_array_almost_equal(ax.images[0].get_array().mean(), 194.53240966796875)\n ## Full read\n f, ax = matplotlib.pyplot.subplots(1)\n ax.set_xlim(x1, x2)\n ax.set_ylim(y1, y2)\n loc = ctx.Place(SEARCH, path=\"./test2.tif\", zoom_adjust=ADJUST)\n ctx.add_basemap(ax, source=\"./test2.tif\", reset_extent=False)\n\n raster_extent = (\n -11740880.418659642,\n -11662608.901695622,\n 4774715.408861821,\n 4931258.442789858,\n )\n assert_array_almost_equal(raster_extent, ax.images[0].get_extent())\n assert ax.images[0].get_array()[:,:,:3].sum() == 76248416\n assert ax.images[0].get_array().sum() == 109671776\n assert ax.images[0].get_array().shape == (512, 256, 4)\n assert_array_almost_equal(ax.images[0].get_array()[:,:,:3].mean(), 193.90974934895834)\n assert_array_almost_equal(ax.images[0].get_array().mean(), 209.18231201171875)\n\n # Test with auto-zoom\n f, ax = matplotlib.pyplot.subplots(1)\n ax.set_xlim(x1, x2)\n ax.set_ylim(y1, y2)\n ctx.add_basemap(ax, zoom=\"auto\")\n\n ax_extent = (\n -11740727.544603072,\n -11701591.786121061,\n 4852834.051769271,\n 4891969.810251278,\n )\n assert_array_almost_equal(ax_extent, ax.images[0].get_extent())\n assert ax.images[0].get_array()[:,:,:3].sum() == 563185119\n assert ax.images[0].get_array().sum() == 830571999\n assert ax.images[0].get_array().shape == (1024, 1024, 4)\n assert_array_almost_equal(ax.images[0].get_array()[:,:,:3].mean(), 179.03172779083252)\n assert_array_almost_equal(ax.images[0].get_array().mean(), 198.023796)\n\n # Test on-th-fly warping\n x1, x2 = -105.5, -105.00\n y1, y2 = 39.56, 40.13\n f, ax = matplotlib.pyplot.subplots(1)\n ax.set_xlim(x1, x2)\n ax.set_ylim(y1, y2)\n ctx.add_basemap(ax, crs={\"init\": \"epsg:4326\"}, attribution=None)\n assert ax.get_xlim() == (x1, x2)\n assert ax.get_ylim() == (y1, y2)\n assert ax.images[0].get_array()[:,:,:3].sum() == 724238693\n assert ax.images[0].get_array().shape == (1135, 1183, 4)\n assert_array_almost_equal(ax.images[0].get_array()[:,:,:3].mean(), 179.79593258881636)\n assert_array_almost_equal(ax.images[0].get_array().mean(), 198.596949)\n # Test local source warping\n _ = ctx.bounds2raster(x1, y1, x2, y2, \"./test2.tif\", ll=True)\n f, ax = matplotlib.pyplot.subplots(1)\n 
ax.set_xlim(x1, x2)\n ax.set_ylim(y1, y2)\n ctx.add_basemap(\n ax, source=\"./test2.tif\", crs={\"init\": \"epsg:4326\"}, attribution=None\n )\n assert ax.get_xlim() == (x1, x2)\n assert ax.get_ylim() == (y1, y2)\n\n assert ax.images[0].get_array()[:,:,:3].sum() == 464536503\n assert ax.images[0].get_array().shape == (980, 862, 4)\n assert_array_almost_equal(ax.images[0].get_array()[:,:,:3].mean(), 183.301175)\n\n assert ax.images[0].get_array().sum() == 678981558\n assert_array_almost_equal(ax.images[0].get_array().mean(), 200.939189)\n\n x1, x2, y1, y2 = [\n -11740727.544603072,\n -11701591.786121061,\n 4852834.0517692715,\n 4891969.810251278,\n ]\n\ndef test_add_basemap_overlay():\n x1, x2, y1, y2 = [\n -11740727.544603072,\n -11701591.786121061,\n 4852834.0517692715,\n 4891969.810251278,\n ]\n fig, ax = matplotlib.pyplot.subplots(1)\n ax.set_xlim(x1, x2)\n ax.set_ylim(y1, y2)\n\n # Draw two layers, the 2nd of which is an overlay.\n ctx.add_basemap(ax, zoom=10)\n ctx.add_basemap(ax, zoom=10, source=ctx.providers.Stamen.TonerLabels)\n\n # ensure add_basemap did not change the axis limits of ax\n ax_extent = (x1, x2, y1, y2)\n assert ax.axis() == ax_extent\n\n # check totals on lowest (opaque terrain) base layer\n assert_array_almost_equal(ax_extent, ax.images[0].get_extent())\n assert ax.images[0].get_array()[:, :, :3].sum() == 34840247\n assert ax.images[0].get_array().sum() == 51551927\n assert ax.images[0].get_array().shape == (256, 256, 4)\n assert_array_almost_equal(ax.images[0].get_array()[:, :, :3].mean(), 177.20665995279947)\n assert_array_almost_equal(ax.images[0].get_array().mean(), 196.654995)\n\n # check totals on overaly (mostly transparent labels) layer\n assert ax.images[1].get_array().sum() == 1653387\n assert ax.images[1].get_array().shape == (256, 256, 4)\n assert_array_almost_equal(ax.images[1].get_array().mean(), 6.3071708679)\n\n # create a new map\n fig, ax = matplotlib.pyplot.subplots(1)\n ax.set_xlim(x1, x2)\n ax.set_ylim(y1, y2)\n\n # Draw two layers, the 1st of which is an overlay.\n ctx.add_basemap(ax, zoom=10, source=ctx.providers.Stamen.TonerLabels)\n ctx.add_basemap(ax, zoom=10)\n\n # check that z-order of overlay is higher than that of base layer\n assert ax.images[0].zorder > ax.images[1].zorder\n assert ax.images[0].get_array().sum() == 1653387\n assert ax.images[1].get_array().sum() == 51551927\n\n\ndef test_basemap_attribution():\n extent = (-11945319, -10336026, 2910477, 4438236)\n\n def get_attr(ax):\n return [\n c\n for c in ax.get_children()\n if isinstance(c, matplotlib.text.Text) and c.get_text()\n ]\n\n # default provider and attribution\n fig, ax = matplotlib.pyplot.subplots()\n ax.axis(extent)\n ctx.add_basemap(ax)\n (txt,) = get_attr(ax)\n assert txt.get_text() == ctx.providers.Stamen.Terrain[\"attribution\"]\n\n # override attribution\n fig, ax = matplotlib.pyplot.subplots()\n ax.axis(extent)\n ctx.add_basemap(ax, attribution=\"custom text\")\n (txt,) = get_attr(ax)\n assert txt.get_text() == \"custom text\"\n\n # disable attribution\n fig, ax = matplotlib.pyplot.subplots()\n ax.axis(extent)\n ctx.add_basemap(ax, attribution=False)\n assert len(get_attr(ax)) == 0\n\n # specified provider\n fig, ax = matplotlib.pyplot.subplots()\n ax.axis(extent)\n ctx.add_basemap(ax, source=ctx.providers.OpenStreetMap.Mapnik)\n (txt,) = get_attr(ax)\n assert txt.get_text() == ctx.providers.OpenStreetMap.Mapnik[\"attribution\"]\n\n\ndef test_attribution():\n fig, ax = matplotlib.pyplot.subplots(1)\n txt = ctx.add_attribution(ax, \"Test\")\n assert 
isinstance(txt, matplotlib.text.Text)\n assert txt.get_text() == \"Test\"\n\n # test passthrough font size and kwargs\n fig, ax = matplotlib.pyplot.subplots(1)\n txt = ctx.add_attribution(ax, \"Test\", font_size=15, fontfamily=\"monospace\")\n assert txt.get_size() == 15\n assert txt.get_fontfamily() == [\"monospace\"]\n\n\ndef test_set_cache_dir(tmpdir):\n # set cache directory manually\n path = str(tmpdir.mkdir(\"cache\"))\n ctx.set_cache_dir(path)\n\n # then check that plotting still works\n extent = (-11945319, -10336026, 2910477, 4438236)\n fig, ax = matplotlib.pyplot.subplots()\n ax.axis(extent)\n ctx.add_basemap(ax)\n"
] | [
[
"matplotlib.use",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
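The contextily test module above relies on two incidental patterns worth isolating: selecting the non-interactive agg backend before pyplot is imported (the file does this to avoid needing a display), and comparing float arrays with numpy.testing. A hedged sketch with invented values:

    import matplotlib
    matplotlib.use("agg")          # pick a non-interactive backend before pyplot is imported
    import matplotlib.pyplot as plt
    import numpy as np
    from numpy.testing import assert_array_almost_equal

    fig, ax = plt.subplots(1)
    ax.imshow(np.zeros((4, 4)), extent=(0.0, 1.0, 0.0, 1.0))

    # get_extent() returns (left, right, bottom, top); values must agree to ~6 decimals.
    assert_array_almost_equal(ax.images[0].get_extent(), np.array([0.0, 1.0, 0.0, 1.0]))
    print("extent check passed")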
PeterouZh/Omni-GAN-PyTorch | [
"564a586fed6ce51ef73933d8815d94ce077c4e5c"
] | [
"BigGAN_PyTorch_1_lib/utils.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n''' Utilities file\nThis file contains utility functions for bookkeeping, logging, and data loading.\nMethods which directly affect training should either go in layers, the model,\nor train_fns.py.\n'''\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport numpy as np\nimport time\nimport datetime\nimport json\nimport pickle\nfrom argparse import ArgumentParser\nimport animal_hash\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nimport datasets as dset\n\nfrom detectron2.data.samplers import TrainingSampler\n\n\ndef prepare_parser():\n usage = 'Parser for all scripts.'\n parser = ArgumentParser(description=usage)\n \n ### Dataset/Dataloader stuff ###\n parser.add_argument(\n '--dataset', type=str, default='I128_hdf5',\n help='Which Dataset to train on, out of I128, I256, C10, C100;'\n 'Append \"_hdf5\" to use the hdf5 version for ISLVRC '\n '(default: %(default)s)')\n parser.add_argument(\n '--augment', action='store_true', default=False,\n help='Augment with random crops and flips (default: %(default)s)')\n parser.add_argument(\n '--num_workers', type=int, default=8,\n help='Number of dataloader workers; consider using less for HDF5 '\n '(default: %(default)s)')\n parser.add_argument(\n '--no_pin_memory', action='store_false', dest='pin_memory', default=True,\n help='Pin data into memory through dataloader? (default: %(default)s)') \n parser.add_argument(\n '--shuffle', action='store_true', default=False,\n help='Shuffle the data (strongly recommended)? (default: %(default)s)')\n parser.add_argument(\n '--load_in_mem', action='store_true', default=False,\n help='Load all data into memory? (default: %(default)s)')\n parser.add_argument(\n '--use_multiepoch_sampler', action='store_true', default=False,\n help='Use the multi-epoch sampler for dataloader? (default: %(default)s)')\n \n \n ### Model stuff ###\n parser.add_argument(\n '--model', type=str, default='BigGAN',\n help='Name of the model module (default: %(default)s)')\n parser.add_argument(\n '--G_param', type=str, default='SN',\n help='Parameterization style to use for G, spectral norm (SN) or SVD (SVD)'\n ' or None (default: %(default)s)')\n parser.add_argument(\n '--D_param', type=str, default='SN',\n help='Parameterization style to use for D, spectral norm (SN) or SVD (SVD)'\n ' or None (default: %(default)s)') \n parser.add_argument(\n '--G_ch', type=int, default=64,\n help='Channel multiplier for G (default: %(default)s)')\n parser.add_argument(\n '--D_ch', type=int, default=64,\n help='Channel multiplier for D (default: %(default)s)')\n parser.add_argument(\n '--G_depth', type=int, default=1,\n help='Number of resblocks per stage in G? (default: %(default)s)')\n parser.add_argument(\n '--D_depth', type=int, default=1,\n help='Number of resblocks per stage in D? (default: %(default)s)')\n parser.add_argument(\n '--D_thin', action='store_false', dest='D_wide', default=True,\n help='Use the SN-GAN channel pattern for D? (default: %(default)s)')\n parser.add_argument(\n '--G_shared', action='store_true', default=False,\n help='Use shared embeddings in G? (default: %(default)s)')\n parser.add_argument(\n '--shared_dim', type=int, default=0,\n help='G''s shared embedding dimensionality; if 0, will be equal to dim_z. 
'\n '(default: %(default)s)')\n parser.add_argument(\n '--dim_z', type=int, default=128,\n help='Noise dimensionality: %(default)s)')\n parser.add_argument(\n '--z_var', type=float, default=1.0,\n help='Noise variance: %(default)s)') \n parser.add_argument(\n '--hier', action='store_true', default=False,\n help='Use hierarchical z in G? (default: %(default)s)')\n parser.add_argument(\n '--cross_replica', action='store_true', default=False,\n help='Cross_replica batchnorm in G?(default: %(default)s)')\n parser.add_argument(\n '--mybn', action='store_true', default=False,\n help='Use my batchnorm (which supports standing stats?) %(default)s)')\n parser.add_argument(\n '--G_nl', type=str, default='relu',\n help='Activation function for G (default: %(default)s)')\n parser.add_argument(\n '--D_nl', type=str, default='relu',\n help='Activation function for D (default: %(default)s)')\n parser.add_argument(\n '--G_attn', type=str, default='64',\n help='What resolutions to use attention on for G (underscore separated) '\n '(default: %(default)s)')\n parser.add_argument(\n '--D_attn', type=str, default='64',\n help='What resolutions to use attention on for D (underscore separated) '\n '(default: %(default)s)')\n parser.add_argument(\n '--norm_style', type=str, default='bn',\n help='Normalizer style for G, one of bn [batchnorm], in [instancenorm], '\n 'ln [layernorm], gn [groupnorm] (default: %(default)s)')\n \n ### Model init stuff ###\n parser.add_argument(\n '--seed', type=int, default=0,\n help='Random seed to use; affects both initialization and '\n ' dataloading. (default: %(default)s)')\n parser.add_argument(\n '--G_init', type=str, default='ortho',\n help='Init style to use for G (default: %(default)s)')\n parser.add_argument(\n '--D_init', type=str, default='ortho',\n help='Init style to use for D(default: %(default)s)')\n parser.add_argument(\n '--skip_init', action='store_true', default=False,\n help='Skip initialization, ideal for testing when ortho init was used '\n '(default: %(default)s)')\n \n ### Optimizer stuff ###\n parser.add_argument(\n '--G_lr', type=float, default=5e-5,\n help='Learning rate to use for Generator (default: %(default)s)')\n parser.add_argument(\n '--D_lr', type=float, default=2e-4,\n help='Learning rate to use for Discriminator (default: %(default)s)')\n parser.add_argument(\n '--G_B1', type=float, default=0.0,\n help='Beta1 to use for Generator (default: %(default)s)')\n parser.add_argument(\n '--D_B1', type=float, default=0.0,\n help='Beta1 to use for Discriminator (default: %(default)s)')\n parser.add_argument(\n '--G_B2', type=float, default=0.999,\n help='Beta2 to use for Generator (default: %(default)s)')\n parser.add_argument(\n '--D_B2', type=float, default=0.999,\n help='Beta2 to use for Discriminator (default: %(default)s)')\n \n ### Batch size, parallel, and precision stuff ###\n parser.add_argument(\n '--batch_size', type=int, default=64,\n help='Default overall batchsize (default: %(default)s)')\n parser.add_argument(\n '--G_batch_size', type=int, default=0,\n help='Batch size to use for G; if 0, same as D (default: %(default)s)')\n parser.add_argument(\n '--num_G_accumulations', type=int, default=1,\n help='Number of passes to accumulate G''s gradients over '\n '(default: %(default)s)') \n parser.add_argument(\n '--num_D_steps', type=int, default=2,\n help='Number of D steps per G step (default: %(default)s)')\n parser.add_argument(\n '--num_D_accumulations', type=int, default=1,\n help='Number of passes to accumulate D''s gradients over '\n 
'(default: %(default)s)')\n parser.add_argument(\n '--split_D', action='store_true', default=False,\n help='Run D twice rather than concatenating inputs? (default: %(default)s)')\n parser.add_argument(\n '--num_epochs', type=int, default=100,\n help='Number of epochs to train for (default: %(default)s)')\n parser.add_argument(\n '--parallel', action='store_true', default=False,\n help='Train with multiple GPUs (default: %(default)s)')\n parser.add_argument(\n '--G_fp16', action='store_true', default=False,\n help='Train with half-precision in G? (default: %(default)s)')\n parser.add_argument(\n '--D_fp16', action='store_true', default=False,\n help='Train with half-precision in D? (default: %(default)s)')\n parser.add_argument(\n '--D_mixed_precision', action='store_true', default=False,\n help='Train with half-precision activations but fp32 params in D? '\n '(default: %(default)s)')\n parser.add_argument(\n '--G_mixed_precision', action='store_true', default=False,\n help='Train with half-precision activations but fp32 params in G? '\n '(default: %(default)s)')\n parser.add_argument(\n '--accumulate_stats', action='store_true', default=False,\n help='Accumulate \"standing\" batchnorm stats? (default: %(default)s)')\n parser.add_argument(\n '--num_standing_accumulations', type=int, default=16,\n help='Number of forward passes to use in accumulating standing stats? '\n '(default: %(default)s)') \n \n ### Bookkeping stuff ### \n parser.add_argument(\n '--G_eval_mode', action='store_true', default=False,\n help='Run G in eval mode (running/standing stats?) at sample/test time? '\n '(default: %(default)s)')\n parser.add_argument(\n '--save_every', type=int, default=2000,\n help='Save every X iterations (default: %(default)s)')\n parser.add_argument(\n '--num_save_copies', type=int, default=2,\n help='How many copies to save (default: %(default)s)')\n parser.add_argument(\n '--num_best_copies', type=int, default=2,\n help='How many previous best checkpoints to save (default: %(default)s)')\n parser.add_argument(\n '--which_best', type=str, default='IS',\n help='Which metric to use to determine when to save new \"best\"'\n 'checkpoints, one of IS or FID (default: %(default)s)')\n parser.add_argument(\n '--no_fid', action='store_true', default=False,\n help='Calculate IS only, not FID? 
(default: %(default)s)')\n parser.add_argument(\n '--test_every', type=int, default=5000,\n help='Test every X iterations (default: %(default)s)')\n parser.add_argument(\n '--num_inception_images', type=int, default=50000,\n help='Number of samples to compute inception metrics with '\n '(default: %(default)s)')\n parser.add_argument(\n '--hashname', action='store_true', default=False,\n help='Use a hash of the experiment name instead of the full config '\n '(default: %(default)s)') \n parser.add_argument(\n '--base_root', type=str, default='',\n help='Default location to store all weights, samples, data, and logs '\n ' (default: %(default)s)')\n parser.add_argument(\n '--data_root', type=str, default='data',\n help='Default location where data is stored (default: %(default)s)')\n parser.add_argument(\n '--weights_root', type=str, default='weights',\n help='Default location to store weights (default: %(default)s)')\n parser.add_argument(\n '--logs_root', type=str, default='logs',\n help='Default location to store logs (default: %(default)s)')\n parser.add_argument(\n '--samples_root', type=str, default='samples',\n help='Default location to store samples (default: %(default)s)') \n parser.add_argument(\n '--pbar', type=str, default='mine',\n help='Type of progressbar to use; one of \"mine\" or \"tqdm\" '\n '(default: %(default)s)')\n parser.add_argument(\n '--name_suffix', type=str, default='',\n help='Suffix for experiment name for loading weights for sampling '\n '(consider \"best0\") (default: %(default)s)')\n parser.add_argument(\n '--experiment_name', type=str, default='',\n help='Optionally override the automatic experiment naming with this arg. '\n '(default: %(default)s)')\n parser.add_argument(\n '--config_from_name', action='store_true', default=False,\n help='Use a hash of the experiment name instead of the full config '\n '(default: %(default)s)')\n \n ### EMA Stuff ###\n parser.add_argument(\n '--ema', action='store_true', default=False,\n help='Keep an ema of G''s weights? (default: %(default)s)')\n parser.add_argument(\n '--ema_decay', type=float, default=0.9999,\n help='EMA decay rate (default: %(default)s)')\n parser.add_argument(\n '--use_ema', action='store_true', default=False,\n help='Use the EMA parameters of G for evaluation? 
(default: %(default)s)')\n parser.add_argument(\n '--ema_start', type=int, default=0,\n help='When to start updating the EMA weights (default: %(default)s)')\n \n ### Numerical precision and SV stuff ### \n parser.add_argument(\n '--adam_eps', type=float, default=1e-8,\n help='epsilon value to use for Adam (default: %(default)s)')\n parser.add_argument(\n '--BN_eps', type=float, default=1e-5,\n help='epsilon value to use for BatchNorm (default: %(default)s)')\n parser.add_argument(\n '--SN_eps', type=float, default=1e-8,\n help='epsilon value to use for Spectral Norm(default: %(default)s)')\n parser.add_argument(\n '--num_G_SVs', type=int, default=1,\n help='Number of SVs to track in G (default: %(default)s)')\n parser.add_argument(\n '--num_D_SVs', type=int, default=1,\n help='Number of SVs to track in D (default: %(default)s)')\n parser.add_argument(\n '--num_G_SV_itrs', type=int, default=1,\n help='Number of SV itrs in G (default: %(default)s)')\n parser.add_argument(\n '--num_D_SV_itrs', type=int, default=1,\n help='Number of SV itrs in D (default: %(default)s)')\n \n ### Ortho reg stuff ### \n parser.add_argument(\n '--G_ortho', type=float, default=0.0, # 1e-4 is default for BigGAN\n help='Modified ortho reg coefficient in G(default: %(default)s)')\n parser.add_argument(\n '--D_ortho', type=float, default=0.0,\n help='Modified ortho reg coefficient in D (default: %(default)s)')\n parser.add_argument(\n '--toggle_grads', action='store_true', default=True,\n help='Toggle D and G''s \"requires_grad\" settings when not training them? '\n ' (default: %(default)s)')\n \n ### Which train function ###\n parser.add_argument(\n '--which_train_fn', type=str, default='GAN',\n help='How2trainyourbois (default: %(default)s)') \n \n ### Resume training stuff\n parser.add_argument(\n '--load_weights', type=str, default='',\n help='Suffix for which weights to load (e.g. best0, copy0) '\n '(default: %(default)s)')\n parser.add_argument(\n '--resume', action='store_true', default=False,\n help='Resume training? (default: %(default)s)')\n \n ### Log stuff ###\n parser.add_argument(\n '--logstyle', type=str, default='%3.3e',\n help='What style to use when logging training metrics?'\n 'One of: %#.#f/ %#.#e (float/exp, text),'\n 'pickle (python pickle),'\n 'npz (numpy zip),'\n 'mat (MATLAB .mat file) (default: %(default)s)')\n parser.add_argument(\n '--log_G_spectra', action='store_true', default=False,\n help='Log the top 3 singular values in each SN layer in G? '\n '(default: %(default)s)')\n parser.add_argument(\n '--log_D_spectra', action='store_true', default=False,\n help='Log the top 3 singular values in each SN layer in D? '\n '(default: %(default)s)')\n parser.add_argument(\n '--sv_log_interval', type=int, default=10,\n help='Iteration interval for logging singular values '\n ' (default: %(default)s)') \n \n return parser\n\n# Arguments for sample.py; not presently used in train.py\ndef add_sample_parser(parser):\n parser.add_argument(\n '--sample_npz', action='store_true', default=False,\n help='Sample \"sample_num_npz\" images and save to npz? '\n '(default: %(default)s)')\n parser.add_argument(\n '--sample_num_npz', type=int, default=50000,\n help='Number of images to sample when sampling NPZs '\n '(default: %(default)s)')\n parser.add_argument(\n '--sample_sheets', action='store_true', default=False,\n help='Produce class-conditional sample sheets and stick them in '\n 'the samples root? 
(default: %(default)s)')\n parser.add_argument(\n '--sample_interps', action='store_true', default=False,\n help='Produce interpolation sheets and stick them in '\n 'the samples root? (default: %(default)s)') \n parser.add_argument(\n '--sample_sheet_folder_num', type=int, default=-1,\n help='Number to use for the folder for these sample sheets '\n '(default: %(default)s)')\n parser.add_argument(\n '--sample_random', action='store_true', default=False,\n help='Produce a single random sheet? (default: %(default)s)')\n parser.add_argument(\n '--sample_trunc_curves', type=str, default='',\n help='Get inception metrics with a range of variances?'\n 'To use this, specify a startpoint, step, and endpoint, e.g. '\n '--sample_trunc_curves 0.2_0.1_1.0 for a startpoint of 0.2, '\n 'endpoint of 1.0, and stepsize of 1.0. Note that this is '\n 'not exactly identical to using tf.truncated_normal, but should '\n 'have approximately the same effect. (default: %(default)s)')\n parser.add_argument(\n '--sample_inception_metrics', action='store_true', default=False,\n help='Calculate Inception metrics with sample.py? (default: %(default)s)') \n return parser\n\n# Convenience dicts\ndset_dict = {'I32': dset.ImageFolder, 'I64': dset.ImageFolder, \n 'I128': dset.ImageFolder, 'I256': dset.ImageFolder,\n 'I32_hdf5': dset.ILSVRC_HDF5, 'I64_hdf5': dset.ILSVRC_HDF5, \n 'I128_hdf5': dset.ILSVRC_HDF5, 'I256_hdf5': dset.ILSVRC_HDF5,\n 'C10': dset.CIFAR10, 'C100': dset.CIFAR100}\nimsize_dict = {'I32': 32, 'I32_hdf5': 32,\n 'I64': 64, 'I64_hdf5': 64,\n 'I128': 128, 'I128_hdf5': 128,\n 'I256': 256, 'I256_hdf5': 256,\n 'C10': 32, 'C100': 32}\nroot_dict = {'I32': 'ImageNet', 'I32_hdf5': 'ILSVRC32.hdf5',\n 'I64': 'ImageNet', 'I64_hdf5': 'ILSVRC64.hdf5',\n 'I128': 'ImageNet', 'I128_hdf5': 'ILSVRC128.hdf5',\n 'I256': 'ImageNet', 'I256_hdf5': 'ILSVRC256.hdf5',\n 'C10': 'cifar', 'C100': 'cifar'}\nnclass_dict = {'I32': 1000, 'I32_hdf5': 1000,\n 'I64': 1000, 'I64_hdf5': 1000,\n 'I128': 1000, 'I128_hdf5': 1000,\n 'I256': 1000, 'I256_hdf5': 1000,\n 'C10': 10, 'C100': 100}\n# Number of classes to put per sample sheet \nclasses_per_sheet_dict = {'I32': 50, 'I32_hdf5': 50,\n 'I64': 50, 'I64_hdf5': 50,\n 'I128': 20, 'I128_hdf5': 20,\n 'I256': 20, 'I256_hdf5': 20,\n 'C10': 10, 'C100': 100,\n 'mnist_svhn': 20}\nactivation_dict = {'inplace_relu': nn.ReLU(inplace=True),\n 'relu': nn.ReLU(inplace=False),\n 'ir': nn.ReLU(inplace=True),}\n\nclass CenterCropLongEdge(object):\n \"\"\"Crops the given PIL Image on the long edge.\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n \"\"\"\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped.\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n return transforms.functional.center_crop(img, min(img.size))\n\n def __repr__(self):\n return self.__class__.__name__\n\nclass RandomCropLongEdge(object):\n \"\"\"Crops the given PIL Image on the long edge with a random start point.\n Args:\n size (sequence or int): Desired output size of the crop. 
If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n \"\"\"\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped.\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n size = (min(img.size), min(img.size))\n # Only step forward along this edge if it's the long edge\n i = (0 if size[0] == img.size[0] \n else np.random.randint(low=0,high=img.size[0] - size[0]))\n j = (0 if size[1] == img.size[1]\n else np.random.randint(low=0,high=img.size[1] - size[1]))\n return transforms.functional.crop(img, i, j, size[0], size[1])\n\n def __repr__(self):\n return self.__class__.__name__\n\n \n# multi-epoch Dataset sampler to avoid memory leakage and enable resumption of\n# training from the same sample regardless of if we stop mid-epoch\nclass MultiEpochSampler(torch.utils.data.Sampler):\n r\"\"\"Samples elements randomly over multiple epochs\n\n Arguments:\n data_source (Dataset): dataset to sample from\n num_epochs (int) : Number of times to loop over the dataset\n start_itr (int) : which iteration to begin from\n \"\"\"\n\n def __init__(self, data_source, num_epochs, start_itr=0, batch_size=128):\n self.data_source = data_source\n self.num_samples = len(self.data_source)\n self.num_epochs = num_epochs\n self.start_itr = start_itr\n self.batch_size = batch_size\n\n if not isinstance(self.num_samples, int) or self.num_samples <= 0:\n raise ValueError(\"num_samples should be a positive integeral \"\n \"value, but got num_samples={}\".format(self.num_samples))\n\n def __iter__(self):\n n = len(self.data_source)\n # Determine number of epochs\n # num_epochs = int(np.ceil((n * self.num_epochs - (self.start_itr * self.batch_size)) / float(n)))\n num_epochs = int(np.ceil(len(self) / float(n)))\n # Sample all the indices, and then grab the last num_epochs index sets;\n # This ensures if we're starting at epoch 4, we're still grabbing epoch 4's\n # indices\n out = [torch.randperm(n) for epoch in range(self.num_epochs)][-num_epochs:]\n # Ignore the first start_itr % n indices of the first epoch\n out[0] = out[0][(self.start_itr * self.batch_size % n):]\n # if self.replacement:\n # return iter(torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).tolist())\n # return iter(.tolist())\n output = torch.cat(out).tolist()\n print('Length dataset output is %d' % len(output))\n return iter(output)\n\n def __len__(self):\n left_imgs = (self.start_itr * self.batch_size) % (len(self.data_source) * self.num_epochs)\n return len(self.data_source) * self.num_epochs - left_imgs\n\n\n# Convenience function to centralize all data loaders\ndef get_data_loaders(dataset, data_root=None, augment=False, batch_size=64, \n num_workers=8, shuffle=True, load_in_mem=False, hdf5=False,\n pin_memory=True, drop_last=True, start_itr=0,\n num_epochs=500, use_multiepoch_sampler=False,\n index_filename=None,\n use_data_root=False,\n dataset_kwargs={},\n use_training_sampler=False,\n **kwargs):\n\n # Append /FILENAME.hdf5 to root if using hdf5\n if not use_data_root:\n data_root += '/%s' % root_dict[dataset]\n else:\n data_root = os.path.expanduser(data_root)\n print('Using dataset root location %s' % data_root)\n\n which_dataset = dset_dict[dataset]\n norm_mean = [0.5,0.5,0.5]\n norm_std = [0.5,0.5,0.5]\n image_size = imsize_dict[dataset]\n # For image folder datasets, name of the file where we store the precomputed\n # image locations to avoid having to walk the dirs every time we load.\n if index_filename is None:\n dataset_kwargs = {'index_filename': '%s_imgs.npz' 
% dataset, **dataset_kwargs}\n else:\n dataset_kwargs = {'index_filename': '%s' % index_filename,\n **dataset_kwargs}\n \n # HDF5 datasets have their own inbuilt transform, no need to train_transform \n if 'hdf5' in dataset:\n train_transform = None\n else:\n if augment:\n print('Data will be augmented...')\n if dataset in ['C10', 'C100']:\n train_transform = [transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip()]\n else:\n train_transform = [RandomCropLongEdge(),\n transforms.Resize(image_size),\n transforms.RandomHorizontalFlip()]\n else:\n print('Data will not be augmented...')\n if dataset in ['C10', 'C100']:\n train_transform = []\n else:\n train_transform = [CenterCropLongEdge(), transforms.Resize(image_size)]\n # train_transform = [transforms.Resize(image_size), transforms.CenterCrop]\n train_transform = transforms.Compose(train_transform + [\n transforms.ToTensor(),\n transforms.Normalize(norm_mean, norm_std)])\n train_set = which_dataset(root=data_root, transform=train_transform,\n load_in_mem=load_in_mem, **dataset_kwargs)\n\n # Prepare loader; the loaders list is for forward compatibility with\n # using validation / test splits.\n loaders = [] \n if use_multiepoch_sampler:\n print('Using multiepoch sampler from start_itr %d...' % start_itr)\n loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory}\n sampler = MultiEpochSampler(train_set, num_epochs, start_itr, batch_size)\n train_loader = DataLoader(train_set, batch_size=batch_size,\n sampler=sampler, **loader_kwargs)\n elif use_training_sampler:\n loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory,\n 'drop_last': drop_last} # Default, drop last incomplete batch\n sampler = TrainingSampler(len(train_set))\n train_loader = DataLoader(train_set, batch_size=batch_size,\n sampler=sampler, **loader_kwargs)\n else:\n loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory,\n 'drop_last': drop_last} # Default, drop last incomplete batch\n train_loader = DataLoader(train_set, batch_size=batch_size,\n shuffle=shuffle, **loader_kwargs)\n loaders.append(train_loader)\n return loaders\n\n\n# Utility file to seed rngs\ndef seed_rng(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n np.random.seed(seed)\n\n\n# Utility to peg all roots to a base root\n# If a base root folder is provided, peg all other root folders to it.\ndef update_config_roots(config):\n if config['base_root']:\n print('Pegging all root folders to base root %s' % config['base_root'])\n for key in ['weights', 'logs', 'samples']:\n config['%s_root' % key] = '%s/%s' % (config['base_root'], key)\n return config\n\n\n# Utility to prepare root folders if they don't exist; parent folder must exist\ndef prepare_root(config):\n for key in ['weights_root', 'logs_root', 'samples_root']:\n if not os.path.exists(config[key]):\n print('Making directory %s for %s...' % (config[key], key))\n os.makedirs(config[key], exist_ok=True)\n\n\n# Simple wrapper that applies EMA to a model. 
COuld be better done in 1.0 using\n# the parameters() and buffers() module functions, but for now this works\n# with state_dicts using .copy_\nclass ema(object):\n def __init__(self, source, target, decay=0.9999, start_itr=0):\n self.source = source\n self.target = target\n self.decay = decay\n # Optional parameter indicating what iteration to start the decay at\n self.start_itr = start_itr\n # Initialize target's params to be source's\n self.source_dict = self.source.state_dict()\n self.target_dict = self.target.state_dict()\n print('Initializing EMA parameters to be source parameters...')\n with torch.no_grad():\n for key in self.source_dict:\n self.target_dict[key].data.copy_(self.source_dict[key].data)\n # target_dict[key].data = source_dict[key].data # Doesn't work!\n\n def update(self, itr=None):\n # If an iteration counter is provided and itr is less than the start itr,\n # peg the ema weights to the underlying weights.\n if itr and itr < self.start_itr:\n decay = 0.0\n else:\n decay = self.decay\n with torch.no_grad():\n for key in self.source_dict:\n self.target_dict[key].data.copy_(self.target_dict[key].data * decay \n + self.source_dict[key].data * (1 - decay))\n\n\n# Apply modified ortho reg to a model\n# This function is an optimized version that directly computes the gradient,\n# instead of computing and then differentiating the loss.\ndef ortho(model, strength=1e-4, blacklist=[]):\n with torch.no_grad():\n for param in model.parameters():\n # Only apply this to parameters with at least 2 axes, and not in the blacklist\n if len(param.shape) < 2 or any([param is item for item in blacklist]):\n continue\n w = param.view(param.shape[0], -1)\n grad = (2 * torch.mm(torch.mm(w, w.t()) \n * (1. - torch.eye(w.shape[0], device=w.device)), w))\n param.grad.data += strength * grad.view(param.shape)\n\n\n# Default ortho reg\n# This function is an optimized version that directly computes the gradient,\n# instead of computing and then differentiating the loss.\ndef default_ortho(model, strength=1e-4, blacklist=[]):\n with torch.no_grad():\n for param in model.parameters():\n # Only apply this to parameters with at least 2 axes & not in blacklist\n if len(param.shape) < 2 or param in blacklist:\n continue\n w = param.view(param.shape[0], -1)\n grad = (2 * torch.mm(torch.mm(w, w.t()) \n - torch.eye(w.shape[0], device=w.device), w))\n param.grad.data += strength * grad.view(param.shape)\n\n\n# Convenience utility to switch off requires_grad\ndef toggle_grad(model, on_or_off):\n for param in model.parameters():\n param.requires_grad = on_or_off\n\n\n# Function to join strings or ignore them\n# Base string is the string to link \"strings,\" while strings\n# is a list of strings or Nones.\ndef join_strings(base_string, strings):\n return base_string.join([item for item in strings if item])\n\n\n# Save a model's weights, optimizer, and the state_dict\ndef save_weights(G, D, state_dict, weights_root, experiment_name, \n name_suffix=None, G_ema=None):\n root = '/'.join([weights_root, experiment_name])\n if not os.path.exists(root):\n os.mkdir(root)\n if name_suffix:\n print('Saving weights to %s/%s...' % (root, name_suffix))\n else:\n print('Saving weights to %s...' 
% root)\n torch.save(G.state_dict(), \n '%s/%s.pth' % (root, join_strings('_', ['G', name_suffix])))\n torch.save(G.optim.state_dict(), \n '%s/%s.pth' % (root, join_strings('_', ['G_optim', name_suffix])))\n torch.save(D.state_dict(), \n '%s/%s.pth' % (root, join_strings('_', ['D', name_suffix])))\n torch.save(D.optim.state_dict(),\n '%s/%s.pth' % (root, join_strings('_', ['D_optim', name_suffix])))\n torch.save(state_dict,\n '%s/%s.pth' % (root, join_strings('_', ['state_dict', name_suffix])))\n if G_ema is not None:\n torch.save(G_ema.state_dict(), \n '%s/%s.pth' % (root, join_strings('_', ['G_ema', name_suffix])))\n\n\n# Load a model's weights, optimizer, and the state_dict\ndef load_weights(G, D, state_dict, weights_root, experiment_name, \n name_suffix=None, G_ema=None, strict=True, load_optim=True):\n root = '/'.join([weights_root, experiment_name])\n if name_suffix:\n print('Loading %s weights from %s...' % (name_suffix, root))\n else:\n print('Loading weights from %s...' % root)\n if G is not None:\n G.load_state_dict(\n torch.load('%s/%s.pth' % (root, join_strings('_', ['G', name_suffix]))),\n strict=strict)\n if load_optim:\n G.optim.load_state_dict(\n torch.load('%s/%s.pth' % (root, join_strings('_', ['G_optim', name_suffix]))))\n if D is not None:\n D.load_state_dict(\n torch.load('%s/%s.pth' % (root, join_strings('_', ['D', name_suffix]))),\n strict=strict)\n if load_optim:\n D.optim.load_state_dict(\n torch.load('%s/%s.pth' % (root, join_strings('_', ['D_optim', name_suffix]))))\n # Load state dict\n for item in state_dict:\n state_dict[item] = torch.load('%s/%s.pth' % (root, join_strings('_', ['state_dict', name_suffix])))[item]\n if G_ema is not None:\n G_ema.load_state_dict(\n torch.load('%s/%s.pth' % (root, join_strings('_', ['G_ema', name_suffix]))),\n strict=strict)\n\n\n''' MetricsLogger originally stolen from VoxNet source code.\n Used for logging inception metrics'''\nclass MetricsLogger(object):\n def __init__(self, fname, reinitialize=False):\n self.fname = fname\n self.reinitialize = reinitialize\n if os.path.exists(self.fname):\n if self.reinitialize:\n print('{} exists, deleting...'.format(self.fname))\n os.remove(self.fname)\n\n def log(self, record=None, **kwargs):\n \"\"\"\n Assumption: no newlines in the input.\n \"\"\"\n if record is None:\n record = {}\n record.update(kwargs)\n record['_stamp'] = time.time()\n with open(self.fname, 'a') as f:\n f.write(json.dumps(record, ensure_ascii=True) + '\\n')\n\n\n# Logstyle is either:\n# '%#.#f' for floating point representation in text\n# '%#.#e' for exponent representation in text\n# 'npz' for output to npz # NOT YET SUPPORTED\n# 'pickle' for output to a python pickle # NOT YET SUPPORTED\n# 'mat' for output to a MATLAB .mat file # NOT YET SUPPORTED\nclass MyLogger(object):\n def __init__(self, fname, reinitialize=False, logstyle='%3.3f'):\n self.root = fname\n if not os.path.exists(self.root):\n os.mkdir(self.root)\n self.reinitialize = reinitialize\n self.metrics = []\n self.logstyle = logstyle # One of '%3.3f' or like '%3.3e'\n\n # Delete log if re-starting and log already exists\n def reinit(self, item):\n if os.path.exists('%s/%s.log' % (self.root, item)):\n if self.reinitialize:\n # Only print the removal mess\n if 'sv' in item :\n if not any('sv' in item for item in self.metrics):\n print('Deleting singular value logs...')\n else:\n print('{} exists, deleting...'.format('%s_%s.log' % (self.root, item)))\n os.remove('%s/%s.log' % (self.root, item))\n \n # Log in plaintext; this is designed for being read in 
MATLAB(sorry not sorry)\n def log(self, itr, **kwargs):\n for arg in kwargs:\n if arg not in self.metrics:\n if self.reinitialize:\n self.reinit(arg)\n self.metrics += [arg]\n if self.logstyle == 'pickle':\n print('Pickle not currently supported...')\n # with open('%s/%s.log' % (self.root, arg), 'a') as f:\n # pickle.dump(kwargs[arg], f)\n elif self.logstyle == 'mat':\n print('.mat logstyle not currently supported...')\n else:\n with open('%s/%s.log' % (self.root, arg), 'a') as f:\n f.write('%d: %s\\n' % (itr, self.logstyle % kwargs[arg]))\n\n\n# Write some metadata to the logs directory\ndef write_metadata(logs_root, experiment_name, config, state_dict):\n with open(('%s/%s/metalog.txt' % \n (logs_root, experiment_name)), 'w') as writefile:\n writefile.write('datetime: %s\\n' % str(datetime.datetime.now()))\n writefile.write('config: %s\\n' % str(config))\n writefile.write('state: %s\\n' %str(state_dict))\n\n\n\"\"\"\nVery basic progress indicator to wrap an iterable in.\n\nAuthor: Jan Schlüter\nAndy's adds: time elapsed in addition to ETA, makes it possible to add\nestimated time to 1k iters instead of estimated time to completion.\n\"\"\"\ndef progress(items, desc='', total=None, min_delay=0.1, displaytype='s1k', stdout=sys.stdout):\n \"\"\"\n Returns a generator over `items`, printing the number and percentage of\n items processed and the estimated remaining processing time before yielding\n the next item. `total` gives the total number of items (required if `items`\n has no length), and `min_delay` gives the minimum time in seconds between\n subsequent prints. `desc` gives an optional prefix text (end with a space).\n \"\"\"\n total = total or len(items)\n t_start = time.time()\n t_last = 0\n for n, item in enumerate(items):\n t_now = time.time()\n if t_now - t_last > min_delay:\n print(\"\\r%s%d/%d (%6.2f%%)\" % (\n desc, n+1, total, n / float(total) * 100), end=\" \", file=stdout)\n if n > 0:\n \n if displaytype == 's1k': # minutes/seconds for 1000 iters\n next_1000 = n + (1000 - n%1000)\n t_done = t_now - t_start\n t_1k = t_done / n * next_1000\n outlist = list(divmod(t_done, 60)) + list(divmod(t_1k - t_done, 60))\n print(\"(TE/ET1k: %d:%02d / %d:%02d)\" % tuple(outlist), end=\" \", file=stdout)\n else:# displaytype == 'eta':\n t_done = t_now - t_start\n t_total = t_done / n * total\n outlist = list(divmod(t_done, 60)) + list(divmod(t_total - t_done, 60))\n print(\"(TE/ETA: %d:%02d / %d:%02d)\" % tuple(outlist), end=\" \", file=stdout)\n \n stdout.flush()\n t_last = t_now\n yield item\n t_total = time.time() - t_start\n print(\"\\r%s%d/%d (100.00%%) (took %d:%02d)\" % ((desc, total, total) +\n divmod(t_total, 60)), file=stdout)\n\n\n# Sample function for use with inception metrics\ndef sample(G, z_, y_, config):\n with torch.no_grad():\n z_.sample_()\n y_.sample_()\n if config['parallel']:\n G_z = nn.parallel.data_parallel(G, (z_, G.shared(y_)))\n else:\n G_z = G(z_, G.shared(y_))\n return G_z, y_\n\n\ndef sample_imgs(G, z_, y_, config):\n with torch.no_grad():\n z_.sample_()\n y_.sample_()\n if config['parallel']:\n G_z = nn.parallel.data_parallel(G, (z_, G.shared(y_)))\n else:\n G_z = G(z_, G.shared(y_))\n return G_z\n\n\n# Sample function for sample sheets\ndef sample_sheet(G, classes_per_sheet, num_classes, samples_per_class, parallel,\n samples_root, experiment_name, folder_number, z_=None):\n # Prepare sample directory\n if not os.path.isdir('%s/%s' % (samples_root, experiment_name)):\n os.mkdir('%s/%s' % (samples_root, experiment_name))\n if not os.path.isdir('%s/%s/%d' % 
(samples_root, experiment_name, folder_number)):\n os.mkdir('%s/%s/%d' % (samples_root, experiment_name, folder_number))\n # loop over total number of sheets\n for i in range(num_classes // classes_per_sheet):\n ims = []\n y = torch.arange(i * classes_per_sheet, (i + 1) * classes_per_sheet, device='cuda')\n for j in range(samples_per_class):\n if (z_ is not None) and hasattr(z_, 'sample_') and classes_per_sheet <= z_.size(0):\n z_.sample_()\n else:\n z_ = torch.randn(classes_per_sheet, G.dim_z, device='cuda') \n with torch.no_grad():\n if parallel:\n o = nn.parallel.data_parallel(G, (z_[:classes_per_sheet], G.shared(y)))\n else:\n o = G(z_[:classes_per_sheet], G.shared(y))\n\n ims += [o.data.cpu()]\n # This line should properly unroll the images\n out_ims = torch.stack(ims, 1).view(-1, ims[0].shape[1], ims[0].shape[2], \n ims[0].shape[3]).data.float().cpu()\n # The path for the samples\n image_filename = '%s/%s/%d/samples%d.jpg' % (samples_root, experiment_name, \n folder_number, i)\n torchvision.utils.save_image(out_ims, image_filename,\n nrow=samples_per_class, normalize=True)\n\n\n# Interp function; expects x0 and x1 to be of shape (shape0, 1, rest_of_shape..)\ndef interp(x0, x1, num_midpoints):\n lerp = torch.linspace(0, 1.0, num_midpoints + 2, device='cuda').to(x0.dtype)\n return ((x0 * (1 - lerp.view(1, -1, 1))) + (x1 * lerp.view(1, -1, 1)))\n\n\n# interp sheet function\n# Supports full, class-wise and intra-class interpolation\ndef interp_sheet(G, num_per_sheet, num_midpoints, num_classes, parallel,\n samples_root, experiment_name, folder_number, sheet_number=0,\n fix_z=False, fix_y=False, device='cuda'):\n # Prepare zs and ys\n if fix_z: # If fix Z, only sample 1 z per row\n zs = torch.randn(num_per_sheet, 1, G.dim_z, device=device)\n zs = zs.repeat(1, num_midpoints + 2, 1).view(-1, G.dim_z)\n else:\n zs = interp(torch.randn(num_per_sheet, 1, G.dim_z, device=device),\n torch.randn(num_per_sheet, 1, G.dim_z, device=device),\n num_midpoints).view(-1, G.dim_z)\n if fix_y: # If fix y, only sample 1 z per row\n ys = sample_1hot(num_per_sheet, num_classes)\n ys = G.shared(ys).view(num_per_sheet, 1, -1)\n ys = ys.repeat(1, num_midpoints + 2, 1).view(num_per_sheet * (num_midpoints + 2), -1)\n else:\n ys = interp(G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),\n G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),\n num_midpoints).view(num_per_sheet * (num_midpoints + 2), -1)\n # Run the net--note that we've already passed y through G.shared.\n if G.fp16:\n zs = zs.half()\n with torch.no_grad():\n if parallel:\n out_ims = nn.parallel.data_parallel(G, (zs, ys)).data.cpu()\n else:\n out_ims = G(zs, ys).data.cpu()\n interp_style = '' + ('Z' if not fix_z else '') + ('Y' if not fix_y else '')\n image_filename = '%s/%s/%d/interp%s%d.jpg' % (samples_root, experiment_name,\n folder_number, interp_style,\n sheet_number)\n torchvision.utils.save_image(out_ims, image_filename,\n nrow=num_midpoints + 2, normalize=True)\n\n\n# Convenience debugging function to print out gradnorms and shape from each layer\n# May need to rewrite this so we can actually see which parameter is which\ndef print_grad_norms(net):\n gradsums = [[float(torch.norm(param.grad).item()),\n float(torch.norm(param).item()), param.shape]\n for param in net.parameters()]\n order = np.argsort([item[0] for item in gradsums])\n print(['%3.3e,%3.3e, %s' % (gradsums[item_index][0],\n gradsums[item_index][1],\n str(gradsums[item_index][2])) \n for item_index in order])\n\n\n# Get 
singular values to log. This will use the state dict to find them\n# and substitute underscores for dots.\ndef get_SVs(net, prefix):\n d = net.state_dict()\n return {('%s_%s' % (prefix, key)).replace('.', '_') :\n float(d[key].item())\n for key in d if 'sv' in key}\n\n\n# Name an experiment based on its config\ndef name_from_config(config):\n name = '_'.join([\n item for item in [\n 'Big%s' % config['which_train_fn'],\n config['dataset'],\n config['model'] if config['model'] != 'BigGAN' else None,\n 'seed%d' % config['seed'],\n 'Gch%d' % config['G_ch'],\n 'Dch%d' % config['D_ch'],\n 'Gd%d' % config['G_depth'] if config['G_depth'] > 1 else None,\n 'Dd%d' % config['D_depth'] if config['D_depth'] > 1 else None,\n 'bs%d' % config['batch_size'],\n 'Gfp16' if config['G_fp16'] else None,\n 'Dfp16' if config['D_fp16'] else None,\n 'nDs%d' % config['num_D_steps'] if config['num_D_steps'] > 1 else None,\n 'nDa%d' % config['num_D_accumulations'] if config['num_D_accumulations'] > 1 else None,\n 'nGa%d' % config['num_G_accumulations'] if config['num_G_accumulations'] > 1 else None,\n 'Glr%2.1e' % config['G_lr'],\n 'Dlr%2.1e' % config['D_lr'],\n 'GB%3.3f' % config['G_B1'] if config['G_B1'] !=0.0 else None,\n 'GBB%3.3f' % config['G_B2'] if config['G_B2'] !=0.999 else None,\n 'DB%3.3f' % config['D_B1'] if config['D_B1'] !=0.0 else None,\n 'DBB%3.3f' % config['D_B2'] if config['D_B2'] !=0.999 else None,\n 'Gnl%s' % config['G_nl'],\n 'Dnl%s' % config['D_nl'],\n 'Ginit%s' % config['G_init'],\n 'Dinit%s' % config['D_init'],\n 'G%s' % config['G_param'] if config['G_param'] != 'SN' else None,\n 'D%s' % config['D_param'] if config['D_param'] != 'SN' else None,\n 'Gattn%s' % config['G_attn'] if config['G_attn'] != '0' else None,\n 'Dattn%s' % config['D_attn'] if config['D_attn'] != '0' else None,\n 'Gortho%2.1e' % config['G_ortho'] if config['G_ortho'] > 0.0 else None,\n 'Dortho%2.1e' % config['D_ortho'] if config['D_ortho'] > 0.0 else None,\n config['norm_style'] if config['norm_style'] != 'bn' else None,\n 'cr' if config['cross_replica'] else None,\n 'Gshared' if config['G_shared'] else None,\n 'hier' if config['hier'] else None,\n 'ema' if config['ema'] else None,\n config['name_suffix'] if config['name_suffix'] else None,\n ]\n if item is not None])\n # dogball\n if config['hashname']:\n return hashname(name)\n else:\n return name\n\n\n# A simple function to produce a unique experiment name from the animal hashes.\ndef hashname(name):\n h = hash(name)\n a = h % len(animal_hash.a)\n h = h // len(animal_hash.a)\n b = h % len(animal_hash.b)\n h = h // len(animal_hash.c)\n c = h % len(animal_hash.c)\n return animal_hash.a[a] + animal_hash.b[b] + animal_hash.c[c]\n\n\n# Get GPU memory, -i is the index\ndef query_gpu(indices):\n os.system('nvidia-smi -i 0 --query-gpu=memory.free --format=csv')\n\n\n# Convenience function to count the number of parameters in a module\ndef count_parameters(module):\n print('Number of parameters: {}'.format(\n sum([p.data.nelement() for p in module.parameters()])))\n\n \n# Convenience function to sample an index, not actually a 1-hot\ndef sample_1hot(batch_size, num_classes, device='cuda'):\n return torch.randint(low=0, high=num_classes, size=(batch_size,),\n device=device, dtype=torch.int64, requires_grad=False)\n\n\n# A highly simplified convenience class for sampling from distributions\n# One could also use PyTorch's inbuilt distributions package.\n# Note that this class requires initialization to proceed as\n# x = Distribution(torch.randn(size))\n# x.init_distribution(dist_type, 
**dist_kwargs)\n# x = x.to(device,dtype)\n# This is partially based on https://discuss.pytorch.org/t/subclassing-torch-tensor/23754/2\nclass Distribution(torch.Tensor):\n # Init the params of the distribution\n def init_distribution(self, dist_type, **kwargs): \n self.dist_type = dist_type\n self.dist_kwargs = kwargs\n if self.dist_type == 'normal':\n self.mean, self.var = kwargs['mean'], kwargs['var']\n elif self.dist_type == 'categorical':\n self.num_categories = kwargs['num_categories']\n\n def sample_(self):\n if self.dist_type == 'normal':\n self.normal_(self.mean, self.var)\n elif self.dist_type == 'categorical':\n self.random_(0, self.num_categories) \n # return self.variable\n \n # Silly hack: overwrite the to() method to wrap the new object\n # in a distribution as well\n def to(self, *args, **kwargs):\n new_obj = Distribution(self)\n new_obj.init_distribution(self.dist_type, **self.dist_kwargs)\n new_obj.data = super().to(*args, **kwargs) \n return new_obj\n\n\n# Convenience function to prepare a z and y vector\ndef prepare_z_y(G_batch_size, dim_z, nclasses, device='cuda', \n fp16=False,z_var=1.0):\n z_ = Distribution(torch.randn(G_batch_size, dim_z, requires_grad=False))\n z_.init_distribution('normal', mean=0, var=z_var)\n z_ = z_.to(device,torch.float16 if fp16 else torch.float32) \n \n if fp16:\n z_ = z_.half()\n\n y_ = Distribution(torch.zeros(G_batch_size, requires_grad=False))\n y_.init_distribution('categorical',num_categories=nclasses)\n y_ = y_.to(device, torch.int64)\n return z_, y_\n\n\ndef initiate_standing_stats(net):\n for module in net.modules():\n if hasattr(module, 'accumulate_standing'):\n module.reset_stats()\n module.accumulate_standing = True\n\n\ndef accumulate_standing_stats(net, z, y, nclasses, num_accumulations=16):\n initiate_standing_stats(net)\n net.train()\n for i in range(num_accumulations):\n with torch.no_grad():\n z.normal_()\n y.random_(0, nclasses)\n x = net(z, net.shared(y)) # No need to parallelize here unless using syncbn\n # Set to eval mode\n net.eval() \n\n\n# This version of Adam keeps an fp32 copy of the parameters and\n# does all of the parameter updates in fp32, while still doing the\n# forwards and backwards passes using fp16 (i.e. 
fp16 copies of the\n# parameters and fp16 activations).\n#\n# Note that this calls .float().cuda() on the params.\nimport math\nfrom torch.optim.optimizer import Optimizer\nclass Adam16(Optimizer):\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay)\n params = list(params)\n super(Adam16, self).__init__(params, defaults)\n \n # Safety modification to make sure we floatify our state\n def load_state_dict(self, state_dict):\n super(Adam16, self).load_state_dict(state_dict)\n for group in self.param_groups:\n for p in group['params']:\n self.state[p]['exp_avg'] = self.state[p]['exp_avg'].float()\n self.state[p]['exp_avg_sq'] = self.state[p]['exp_avg_sq'].float()\n self.state[p]['fp32_p'] = self.state[p]['fp32_p'].float()\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n \n grad = p.grad.data.float()\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = grad.new().resize_as_(grad).zero_()\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()\n # Fp32 copy of the weights\n state['fp32_p'] = p.data.float()\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], state['fp32_p'])\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n \n state['fp32_p'].addcdiv_(-step_size, exp_avg, denom)\n p.data = state['fp32_p'].half()\n\n return loss\n"
] | [
[
"torch.randint",
"torch.zeros",
"torch.randperm",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.no_grad",
"numpy.random.randint",
"torch.norm",
"torch.randn",
"torch.eye",
"torch.arange",
"torch.linspace",
"torch.stack",
"numpy.argsort",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.nn.parallel.data_parallel",
"torch.manual_seed",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
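The utilities code in the row above defines, among other things, an `ema` class that keeps an exponential moving average of a generator's weights. A minimal sketch of that update rule, assuming `source` and `target` are two architecturally identical torch.nn modules (the function name `ema_step` is illustrative and not taken from the row):

import torch

@torch.no_grad()
def ema_step(source, target, decay=0.9999):
    # target <- decay * target + (1 - decay) * source, key by key over the state dicts,
    # mirroring what the `ema.update()` method in the row above does with .copy_()
    src, tgt = source.state_dict(), target.state_dict()
    for key in src:
        tgt[key].copy_(tgt[key] * decay + src[key] * (1.0 - decay))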
olmosUC3M/Introduction-to-Data-Science-and-Machine-Learning | [
"33a908011a5673dcbc6136dfc1eae868ef32e6b4"
] | [
"Notebooks/Session 5 Gradient Descent/Ridge_functions.py"
] | [
"import numpy as np\n\n\n# A function that normalizes data with pre-scecified mean and std. \ndef normalize(X,mu=0,std=1,flag_train=True):\n if(flag_train):\n mu = np.mean(X,0)\n std = np.std(X,0)\n \n X -= mu\n X /= std\n \n return X,mu,std\n\n# A function to add the all-ones column\ndef add_interfit(X):\n col_ones = np.ones([X.shape[0],1])\n return np.concatenate([col_ones,X],1)\n\n# A function to create the normalized function with polynomial features up to degree deg_max\ndef create_feature_matrix(X_0,deg_max,mu=0,std=1,flag_train=True):\n X = np.zeros([X_0.shape[0],deg_max])\n \n deg=1\n while deg<=deg_max:\n X[:,deg-1] = X_0**deg \n deg += 1\n \n X,train_mean,train_std = normalize(X,mu,std,flag_train)\n \n X = add_interfit(X)\n \n return X,train_mean,train_std\n\n# A function to evaluate the LS solution\ndef LS_evaluate(X,T):\n return (X @ T.transpose())\n\n# A function that calculates the error\ndef J_error(Y,Y_est):\n return np.mean((Y-Y_est)**2)\n\n# A function that calculates the error + L2 penalization\ndef J_error_L2(Y,Y_est,T,l):\n return J_error(Y,Y_est) + (l/Y.shape[0]) * np.sum(T**2)\n\n# A function to compute the LS solution\ndef Ridge_solution(X,Y,l):\n A = l*np.eye(X.shape[1])\n A[0,0] = 0\n A += X.transpose() @ X \n\n return (np.linalg.inv(A) @ X.transpose() @ Y) \n\n# A function to randomly split a data set\ndef split_set(X_0,Y_0,fraction):\n \n N = X_0.shape[0]\n N_split = np.round(fraction * X_0.shape[0]).astype(np.int32)\n mask = np.random.permutation(N)\n \n \n X_1 = X_0[mask[N_split:-1]]\n Y_1 = Y_0[mask[N_split:-1]]\n \n X_0 = X_0[mask[:N_split]]\n Y_0 = Y_0[mask[:N_split]]\n \n return X_0,X_1,Y_0,Y_1\n\n# A function to compute the Ridge cost function for both train and validation/test sets using \n# a provided value of the parameter vector T, polynomial degree and lambda_value\n\ndef eval_J_Ridge_given_T(X_train,Xvt,degree,Y_train,Yvt,l,T):\n \n # Xvt,Yvt --> We use this function to evaluate either validation error or test error\n \n # Lets compute the normalized feature matrices F_train, F_test\n F_train,train_mean,train_std = create_feature_matrix(X_train,degree,0,1,flag_train=True)\n \n F_vt,_,_ = create_feature_matrix(Xvt,degree,train_mean,train_std,flag_train=False)\n \n \n # We evaluate the Penalized MSE (MSE + L2 penalization)\n J_train = J_error_L2(Y_train,LS_evaluate(F_train,T),T,l) \n J_vt = J_error_L2(Yvt,LS_evaluate(F_vt,T),T,l) \n\n\n \n return J_train,J_vt,F_train,F_vt"
] | [
[
"numpy.linalg.inv",
"numpy.eye",
"numpy.ones",
"numpy.concatenate",
"numpy.round",
"numpy.std",
"numpy.random.permutation",
"numpy.mean",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
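The Ridge_functions.py row above exposes small helpers for polynomial ridge regression. A hypothetical usage sketch, assuming those helpers (create_feature_matrix, Ridge_solution, LS_evaluate, J_error) are importable from that module; the data here is synthetic:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-1.0, 1.0, 200)                      # 1-D inputs, as the helpers expect
y = 1.5 * x - 2.0 * x**3 + 0.1 * rng.standard_normal(200)

F, mu, std = create_feature_matrix(x, deg_max=3)     # normalized polynomial features plus the all-ones column
T = Ridge_solution(F, y, l=0.1)                      # closed-form ridge weights; the intercept is not penalized
print("train MSE:", J_error(y, LS_evaluate(F, T)))   # mean squared error on the training set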
intisarnaheen/Face-Recognition-API | [
"9ae0deba69eef50cd552f8700b9ee75021b65db8"
] | [
"src/automation.py"
] | [
"\n\nimport cv2\nimport sys\nimport os\nimport traceback\nfrom PIL import Image\nfrom glob import glob\nfrom scipy.ndimage import rotate\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport cv2\n\nimport numpy as np\n#%matplotlib inline\nimport matplotlib.image as mpimg\n\nCASCADE=\"/home/cspd/Documents/IBUS/Face_cascade.xml\"\nFACE_CASCADE=cv2.CascadeClassifier(CASCADE)\n\n\n\n\ndef augment_brightness_camera_images(image):\n image1 = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n random_bright = .25+np.random.uniform()\n image1[:,:,2] = image1[:,:,2]*random_bright\n image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)\n return image1\n\n# def transform_image(img,ang_range,shear_range,trans_range,path,image_name):\n\n# # Rotation\n\n# # ang_rot = np.random.uniform(ang_range)-ang_range/2\n# rows,cols,ch = img.shape \n# # Rot_M = cv2.getRotationMatrix2D((cols/2,rows/2),ang_rot,1)\n\n# # Translation\n# # tr_x = trans_range*np.random.uniform()-trans_range/2\n# #tr_y = trans_range*np.random.uniform()-trans_range/2\n# #Trans_M = np.float32([[1,0,tr_x],[0,1,tr_y]])\n\n# # Shear\n# pts1 = np.float32([[5,5],[20,5],[5,20]])\n\n# pt1 = 5+shear_range*np.random.uniform()-shear_range/2\n# pt2 = 20+shear_range*np.random.uniform()-shear_range/2\n \n# # Brightness \n \n\n# pts2 = np.float32([[pt1,5],[pt2,pt1],[5,pt2]])\n\n# shear_M = cv2.getAffineTransform(pts1,pts2)\n \n# # img = cv2.warpAffine(img,Rot_M,(cols,rows))\n# #img = cv2.warpAffine(img,Tran8s_M,(cols,rows))\n# img = cv2.warpAffine(img,shear_M,(cols,rows))\n \n# img = augment_brightness_camera_images(img)\n \n# cv2.imwrite(os.path.join(path,image_name+\"br\"+\".jpg\"), img)\n \n# # return img\n\n\ndef transform_image(img,shear_range,path,image_name):\n\n rows,cols,ch = img.shape \n\n # Shear\n pts1 = np.float32([[5,5],[20,5],[5,20]])\n\n pt1 = 5+shear_range*np.random.uniform()-shear_range/2\n pt2 = 20+shear_range*np.random.uniform()-shear_range/2\n \n # Brightness \n \n pts2 = np.float32([[pt1,5],[pt2,pt1],[5,pt2]])\n\n shear_M = cv2.getAffineTransform(pts1,pts2) \n # img = cv2.warpAffine(img,Rot_M,(cols,rows))\n #img = cv2.warpAffine(img,Trans_M,(cols,rows))\n img = cv2.warpAffine(img,shear_M,(cols,rows))\n \n #img = augment_brightness_camera_images(img)\n \n cv2.imwrite(os.path.join(path,image_name+\"sher\"+\".png\"), img)\n \n # return img\n\n\n\ndef rotate_blar(img,path,image_name):\n \n for i in range(-45,45,10):\n image =rotate(img,i)\n image=cv2.blur(image,(5,5))\n image=cv2.resize(image,(112,112))\n cv2.imwrite(os.path.join(path,image_name+\"blur_\"+str(i)+\".png\"), image)\n\n\n\n\ndef rotate_image(img,path,image_name):\n \n for i in range(-45,45,10):\n image =rotate(img,i)\n image=cv2.resize(image,(112,112))\n cv2.imwrite(os.path.join(path,image_name+str(i)+\".png\"), image)\n\n\n\n\ndef increase_bright(img,path,image_name):\n \n for i in range(-45,45,10):\n image =rotate(img,i)\n image=cv2.resize(image,(112,112))\n image= cv2.detailEnhance(image, sigma_s=2, sigma_r=0.7)\n \n cv2.imwrite(os.path.join(path,image_name+\"bright_\"+str(i)+\".png\"), image)\n\n\ndef increase_contrast(img,path,image_name):\n \n for i in range(-45,45,10):\n image =rotate(img,i)\n lab= cv2.cvtColor(image, cv2.COLOR_BGR2LAB)\n l, a, b = cv2.split(lab)\n clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(6,6))\n cl = clahe.apply(l)\n image = cv2.merge((cl,a,b))\n image=cv2.resize(image,(112,112))\n image= cv2.detailEnhance(image, sigma_s=2, sigma_r=0.7)\n cv2.imwrite(os.path.join(path,image_name+\"con_\"+str(i)+\".png\"), image)\n\n\n\ndef 
augment(img, img_name):\n \n img =cv2.imread(img)\n x=create_directory(img_name)\n rotate_image(img,x,img_name)\n #increase_contrast(img,x,img_name)\n rotate_blar(img,x,img_name)\n increase_bright(img,x,img_name)\n \n# Create directory\ndef create_directory(folder_name):\n dirName = \"/home/cspd/Documents/IBUS/process_data/\"+folder_name\n try:\n # Create target Directory\n os.mkdir(dirName)\n print(\"Directory \" , dirName , \" Created \") \n except FileExistsError:\n print(\"Directory \" , dirName , \" already exists\")\n\n return dirName+\"/\"\n\n\ndef detect_faces(image_path):\n \n image=cv2.imread(image_path)\n image_grey=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n faces = FACE_CASCADE.detectMultiScale(image_grey,scaleFactor=1.16,minNeighbors=5,minSize=(25,25),flags=0)\n image_name = image_path.split('/')[-1].split('.')[-2]\n\n for x,y,w,h in faces:\n sub_img = image[y-10:y+h+10,x-10:x+w+10]\n sub_img = cv2.resize(sub_img, (112,112))\n cv2.imwrite(\"/home/cspd/Documents/IBUS/tmp/\" + image_name+\".png\",sub_img)\n\n\n\n\ndef file_read(input_path):\n return glob(input_path)\n\n\n\n\ndef main(image_path, tmp_path):\n l = file_read(image_path)\n for file in l:\n detect_faces(file)\n m = file_read(tmp_path)\n for file in m:\n image_name = file.split('/')[-1].split('.')[-2]\n augment(file, image_name)\n \n \n \n \n\nif __name__ == '__main__':\n main(\"/home/cspd/Documents/IBUS/new/*.png\", \"/home/cspd/Documents/IBUS/tmp/*.png\")\n\n\n\n\n\n"
] | [
[
"numpy.random.uniform",
"scipy.ndimage.rotate",
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
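The automation.py row above augments each detected face crop by sweeping rotations from -45 to 45 degrees in steps of 10 and resizing to 112x112. A minimal, self-contained sketch of that sweep; the input and output file names are illustrative assumptions:

import cv2
from scipy.ndimage import rotate

def rotation_sweep(img, out_size=(112, 112), angles=range(-45, 45, 10)):
    # Yield one resized copy of the crop per rotation angle, as rotate_image() does above.
    for angle in angles:
        rotated = rotate(img, angle)              # scipy.ndimage.rotate; reshape=True by default
        yield angle, cv2.resize(rotated, out_size)

img = cv2.imread("face.png")                      # hypothetical input crop
if img is not None:
    for angle, aug in rotation_sweep(img):
        cv2.imwrite("face_rot_%d.png" % angle, aug)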
lobpcg/scipy | [
"8627df31ab07d66b438c7e5d5880e3f4d435f248"
] | [
"scipy/stats/_multivariate.py"
] | [
"#\n# Author: Joris Vankerschaver 2013\n#\nimport math\nimport numpy as np\nfrom numpy import asarray_chkfinite, asarray\nimport scipy.linalg\nfrom scipy._lib import doccer\nfrom scipy.special import gammaln, psi, multigammaln, xlogy, entr, betaln\nfrom scipy._lib._util import check_random_state\nfrom scipy.linalg.blas import drot\nfrom scipy.linalg._misc import LinAlgError\nfrom scipy.linalg.lapack import get_lapack_funcs\n\nfrom ._discrete_distns import binom\nfrom . import _mvn\n\n__all__ = ['multivariate_normal',\n 'matrix_normal',\n 'dirichlet',\n 'wishart',\n 'invwishart',\n 'multinomial',\n 'special_ortho_group',\n 'ortho_group',\n 'random_correlation',\n 'unitary_group',\n 'multivariate_t',\n 'multivariate_hypergeom']\n\n_LOG_2PI = np.log(2 * np.pi)\n_LOG_2 = np.log(2)\n_LOG_PI = np.log(np.pi)\n\n\n_doc_random_state = \"\"\"\\\nseed : {None, int, np.random.RandomState, np.random.Generator}, optional\n Used for drawing random variates.\n If `seed` is `None`, the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is `None`.\n\"\"\"\n\n\ndef _squeeze_output(out):\n \"\"\"\n Remove single-dimensional entries from array and convert to scalar,\n if necessary.\n \"\"\"\n out = out.squeeze()\n if out.ndim == 0:\n out = out[()]\n return out\n\n\ndef _eigvalsh_to_eps(spectrum, cond=None, rcond=None):\n \"\"\"Determine which eigenvalues are \"small\" given the spectrum.\n\n This is for compatibility across various linear algebra functions\n that should agree about whether or not a Hermitian matrix is numerically\n singular and what is its numerical matrix rank.\n This is designed to be compatible with scipy.linalg.pinvh.\n\n Parameters\n ----------\n spectrum : 1d ndarray\n Array of eigenvalues of a Hermitian matrix.\n cond, rcond : float, optional\n Cutoff for small eigenvalues.\n Singular values smaller than rcond * largest_eigenvalue are\n considered zero.\n If None or -1, suitable machine precision is used.\n\n Returns\n -------\n eps : float\n Magnitude cutoff for numerical negligibility.\n\n \"\"\"\n if rcond is not None:\n cond = rcond\n if cond in [None, -1]:\n t = spectrum.dtype.char.lower()\n factor = {'f': 1E3, 'd': 1E6}\n cond = factor[t] * np.finfo(t).eps\n eps = cond * np.max(abs(spectrum))\n return eps\n\n\ndef _pinv_1d(v, eps=1e-5):\n \"\"\"A helper function for computing the pseudoinverse.\n\n Parameters\n ----------\n v : iterable of numbers\n This may be thought of as a vector of eigenvalues or singular values.\n eps : float\n Values with magnitude no greater than eps are considered negligible.\n\n Returns\n -------\n v_pinv : 1d float ndarray\n A vector of pseudo-inverted numbers.\n\n \"\"\"\n return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)\n\n\nclass _PSD:\n \"\"\"\n Compute coordinated functions of a symmetric positive semidefinite matrix.\n\n This class addresses two issues. 
Firstly it allows the pseudoinverse,\n the logarithm of the pseudo-determinant, and the rank of the matrix\n to be computed using one call to eigh instead of three.\n Secondly it allows these functions to be computed in a way\n that gives mutually compatible results.\n All of the functions are computed with a common understanding as to\n which of the eigenvalues are to be considered negligibly small.\n The functions are designed to coordinate with scipy.linalg.pinvh()\n but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().\n\n Parameters\n ----------\n M : array_like\n Symmetric positive semidefinite matrix (2-D).\n cond, rcond : float, optional\n Cutoff for small eigenvalues.\n Singular values smaller than rcond * largest_eigenvalue are\n considered zero.\n If None or -1, suitable machine precision is used.\n lower : bool, optional\n Whether the pertinent array data is taken from the lower\n or upper triangle of M. (Default: lower)\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite\n numbers. Disabling may give a performance gain, but may result\n in problems (crashes, non-termination) if the inputs do contain\n infinities or NaNs.\n allow_singular : bool, optional\n Whether to allow a singular matrix. (Default: True)\n\n Notes\n -----\n The arguments are similar to those of scipy.linalg.pinvh().\n\n \"\"\"\n\n def __init__(self, M, cond=None, rcond=None, lower=True,\n check_finite=True, allow_singular=True):\n # Compute the symmetric eigendecomposition.\n # Note that eigh takes care of array conversion, chkfinite,\n # and assertion that the matrix is square.\n s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)\n\n eps = _eigvalsh_to_eps(s, cond, rcond)\n if np.min(s) < -eps:\n msg = \"The input matrix must be symmetric positive semidefinite.\"\n raise ValueError(msg)\n d = s[s > eps]\n if len(d) < len(s) and not allow_singular:\n msg = (\"When `allow_singular is False`, the input matrix must be \"\n \"symmetric positive definite.\")\n raise np.linalg.LinAlgError(msg)\n s_pinv = _pinv_1d(s, eps)\n U = np.multiply(u, np.sqrt(s_pinv))\n\n # Initialize the eagerly precomputed attributes.\n self.rank = len(d)\n self.U = U\n self.log_pdet = np.sum(np.log(d))\n\n # Initialize an attribute to be lazily computed.\n self._pinv = None\n\n @property\n def pinv(self):\n if self._pinv is None:\n self._pinv = np.dot(self.U, self.U.T)\n return self._pinv\n\n\nclass multi_rv_generic:\n \"\"\"\n Class which encapsulates common functionality between all multivariate\n distributions.\n \"\"\"\n def __init__(self, seed=None):\n super().__init__()\n self._random_state = check_random_state(seed)\n\n @property\n def random_state(self):\n \"\"\" Get or set the Generator object for generating random variates.\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance then\n that instance is used.\n\n \"\"\"\n return self._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self._random_state = check_random_state(seed)\n\n def _get_random_state(self, random_state):\n if random_state is not None:\n return check_random_state(random_state)\n else:\n return self._random_state\n\n\nclass multi_rv_frozen:\n \"\"\"\n Class which encapsulates common functionality between all frozen\n multivariate distributions.\n \"\"\"\n @property\n def 
random_state(self):\n return self._dist._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self._dist._random_state = check_random_state(seed)\n\n\n_mvn_doc_default_callparams = \"\"\"\\\nmean : array_like, default: ``[0]``\n Mean of the distribution.\ncov : array_like, default: ``[1]``\n Symmetric positive (semi)definite covariance matrix of the distribution.\nallow_singular : bool, default: ``False``\n Whether to allow a singular covariance matrix.\n\"\"\"\n\n_mvn_doc_callparams_note = \"\"\"\\\nSetting the parameter `mean` to `None` is equivalent to having `mean`\nbe the zero-vector. The parameter `cov` can be a scalar, in which case\nthe covariance matrix is the identity times that value, a vector of\ndiagonal entries for the covariance matrix, or a two-dimensional\narray_like.\n\"\"\"\n\n_mvn_doc_frozen_callparams = \"\"\n\n_mvn_doc_frozen_callparams_note = \"\"\"\\\nSee class definition for a detailed description of parameters.\"\"\"\n\nmvn_docdict_params = {\n '_mvn_doc_default_callparams': _mvn_doc_default_callparams,\n '_mvn_doc_callparams_note': _mvn_doc_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\nmvn_docdict_noparams = {\n '_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,\n '_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\n\nclass multivariate_normal_gen(multi_rv_generic):\n r\"\"\"A multivariate normal random variable.\n\n The `mean` keyword specifies the mean. The `cov` keyword specifies the\n covariance matrix.\n\n Methods\n -------\n pdf(x, mean=None, cov=1, allow_singular=False)\n Probability density function.\n logpdf(x, mean=None, cov=1, allow_singular=False)\n Log of the probability density function.\n cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)\n Cumulative distribution function.\n logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)\n Log of the cumulative distribution function.\n rvs(mean=None, cov=1, size=1, random_state=None)\n Draw random samples from a multivariate normal distribution.\n entropy()\n Compute the differential entropy of the multivariate normal.\n\n Parameters\n ----------\n %(_mvn_doc_default_callparams)s\n %(_doc_random_state)s\n\n Notes\n -----\n %(_mvn_doc_callparams_note)s\n\n The covariance matrix `cov` must be a symmetric positive semidefinite\n matrix when `allow_singular` is True; it must be (strictly) positive\n definite when `allow_singular` is False.\n Symmetry is not checked; only the lower triangular portion is used.\n The determinant and inverse of `cov` are computed\n as the pseudo-determinant and pseudo-inverse, respectively, so\n that `cov` does not need to have full rank.\n\n The probability density function for `multivariate_normal` is\n\n .. math::\n\n f(x) = \\frac{1}{\\sqrt{(2 \\pi)^k \\det \\Sigma}}\n \\exp\\left( -\\frac{1}{2} (x - \\mu)^T \\Sigma^{-1} (x - \\mu) \\right),\n\n where :math:`\\mu` is the mean, :math:`\\Sigma` the covariance matrix,\n and :math:`k` is the dimension of the space where :math:`x` takes values.\n\n .. 
versionadded:: 0.14.0\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from scipy.stats import multivariate_normal\n\n >>> x = np.linspace(0, 5, 10, endpoint=False)\n >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y\n array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,\n 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])\n >>> fig1 = plt.figure()\n >>> ax = fig1.add_subplot(111)\n >>> ax.plot(x, y)\n >>> plt.show()\n\n Alternatively, the object may be called (as a function) to fix the mean\n and covariance parameters, returning a \"frozen\" multivariate normal\n random variable:\n\n >>> rv = multivariate_normal(mean=None, cov=1, allow_singular=False)\n >>> # Frozen object with the same methods but holding the given\n >>> # mean and covariance fixed.\n\n The input quantiles can be any shape of array, as long as the last\n axis labels the components. This allows us for instance to\n display the frozen pdf for a non-isotropic random variable in 2D as\n follows:\n\n >>> x, y = np.mgrid[-1:1:.01, -1:1:.01]\n >>> pos = np.dstack((x, y))\n >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])\n >>> fig2 = plt.figure()\n >>> ax2 = fig2.add_subplot(111)\n >>> ax2.contourf(x, y, rv.pdf(pos))\n\n \"\"\"\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)\n\n def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):\n \"\"\"Create a frozen multivariate normal distribution.\n\n See `multivariate_normal_frozen` for more information.\n \"\"\"\n return multivariate_normal_frozen(mean, cov,\n allow_singular=allow_singular,\n seed=seed)\n\n def _process_parameters(self, dim, mean, cov):\n \"\"\"\n Infer dimensionality from mean or covariance matrix, ensure that\n mean and covariance are full vector resp. 
matrix.\n \"\"\"\n # Try to infer dimensionality\n if dim is None:\n if mean is None:\n if cov is None:\n dim = 1\n else:\n cov = np.asarray(cov, dtype=float)\n if cov.ndim < 2:\n dim = 1\n else:\n dim = cov.shape[0]\n else:\n mean = np.asarray(mean, dtype=float)\n dim = mean.size\n else:\n if not np.isscalar(dim):\n raise ValueError(\"Dimension of random variable must be \"\n \"a scalar.\")\n\n # Check input sizes and return full arrays for mean and cov if\n # necessary\n if mean is None:\n mean = np.zeros(dim)\n mean = np.asarray(mean, dtype=float)\n\n if cov is None:\n cov = 1.0\n cov = np.asarray(cov, dtype=float)\n\n if dim == 1:\n mean = mean.reshape(1)\n cov = cov.reshape(1, 1)\n\n if mean.ndim != 1 or mean.shape[0] != dim:\n raise ValueError(\"Array 'mean' must be a vector of length %d.\" %\n dim)\n if cov.ndim == 0:\n cov = cov * np.eye(dim)\n elif cov.ndim == 1:\n cov = np.diag(cov)\n elif cov.ndim == 2 and cov.shape != (dim, dim):\n rows, cols = cov.shape\n if rows != cols:\n msg = (\"Array 'cov' must be square if it is two dimensional,\"\n \" but cov.shape = %s.\" % str(cov.shape))\n else:\n msg = (\"Dimension mismatch: array 'cov' is of shape %s,\"\n \" but 'mean' is a vector of length %d.\")\n msg = msg % (str(cov.shape), len(mean))\n raise ValueError(msg)\n elif cov.ndim > 2:\n raise ValueError(\"Array 'cov' must be at most two-dimensional,\"\n \" but cov.ndim = %d\" % cov.ndim)\n\n return dim, mean, cov\n\n def _process_quantiles(self, x, dim):\n \"\"\"\n Adjust quantiles array so that last axis labels the components of\n each data point.\n \"\"\"\n x = np.asarray(x, dtype=float)\n\n if x.ndim == 0:\n x = x[np.newaxis]\n elif x.ndim == 1:\n if dim == 1:\n x = x[:, np.newaxis]\n else:\n x = x[np.newaxis, :]\n\n return x\n\n def _logpdf(self, x, mean, prec_U, log_det_cov, rank):\n \"\"\"Log of the multivariate normal probability density function.\n\n Parameters\n ----------\n x : ndarray\n Points at which to evaluate the log of the probability\n density function\n mean : ndarray\n Mean of the distribution\n prec_U : ndarray\n A decomposition such that np.dot(prec_U, prec_U.T)\n is the precision matrix, i.e. 
inverse of the covariance matrix.\n log_det_cov : float\n Logarithm of the determinant of the covariance matrix\n rank : int\n Rank of the covariance matrix.\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'logpdf' instead.\n\n \"\"\"\n dev = x - mean\n maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)\n return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)\n\n def logpdf(self, x, mean=None, cov=1, allow_singular=False):\n \"\"\"Log of the multivariate normal probability density function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n %(_mvn_doc_default_callparams)s\n\n Returns\n -------\n pdf : ndarray or scalar\n Log of the probability density function evaluated at `x`\n\n Notes\n -----\n %(_mvn_doc_callparams_note)s\n\n \"\"\"\n dim, mean, cov = self._process_parameters(None, mean, cov)\n x = self._process_quantiles(x, dim)\n psd = _PSD(cov, allow_singular=allow_singular)\n out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)\n return _squeeze_output(out)\n\n def pdf(self, x, mean=None, cov=1, allow_singular=False):\n \"\"\"Multivariate normal probability density function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n %(_mvn_doc_default_callparams)s\n\n Returns\n -------\n pdf : ndarray or scalar\n Probability density function evaluated at `x`\n\n Notes\n -----\n %(_mvn_doc_callparams_note)s\n\n \"\"\"\n dim, mean, cov = self._process_parameters(None, mean, cov)\n x = self._process_quantiles(x, dim)\n psd = _PSD(cov, allow_singular=allow_singular)\n out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))\n return _squeeze_output(out)\n\n def _cdf(self, x, mean, cov, maxpts, abseps, releps):\n \"\"\"Multivariate normal cumulative distribution function.\n\n Parameters\n ----------\n x : ndarray\n Points at which to evaluate the cumulative distribution function.\n mean : ndarray\n Mean of the distribution\n cov : array_like\n Covariance matrix of the distribution\n maxpts : integer\n The maximum number of points to use for integration\n abseps : float\n Absolute error tolerance\n releps : float\n Relative error tolerance\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'cdf' instead.\n\n .. versionadded:: 1.0.0\n\n \"\"\"\n lower = np.full(mean.shape, -np.inf)\n # mvnun expects 1-d arguments, so process points sequentially\n func1d = lambda x_slice: _mvn.mvnun(lower, x_slice, mean, cov,\n maxpts, abseps, releps)[0]\n out = np.apply_along_axis(func1d, -1, x)\n return _squeeze_output(out)\n\n def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,\n abseps=1e-5, releps=1e-5):\n \"\"\"Log of the multivariate normal cumulative distribution function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n %(_mvn_doc_default_callparams)s\n maxpts : integer, optional\n The maximum number of points to use for integration\n (default `1000000*dim`)\n abseps : float, optional\n Absolute error tolerance (default 1e-5)\n releps : float, optional\n Relative error tolerance (default 1e-5)\n\n Returns\n -------\n cdf : ndarray or scalar\n Log of the cumulative distribution function evaluated at `x`\n\n Notes\n -----\n %(_mvn_doc_callparams_note)s\n\n .. 
versionadded:: 1.0.0\n\n \"\"\"\n dim, mean, cov = self._process_parameters(None, mean, cov)\n x = self._process_quantiles(x, dim)\n # Use _PSD to check covariance matrix\n _PSD(cov, allow_singular=allow_singular)\n if not maxpts:\n maxpts = 1000000 * dim\n out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps))\n return out\n\n def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,\n abseps=1e-5, releps=1e-5):\n \"\"\"Multivariate normal cumulative distribution function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n %(_mvn_doc_default_callparams)s\n maxpts : integer, optional\n The maximum number of points to use for integration\n (default `1000000*dim`)\n abseps : float, optional\n Absolute error tolerance (default 1e-5)\n releps : float, optional\n Relative error tolerance (default 1e-5)\n\n Returns\n -------\n cdf : ndarray or scalar\n Cumulative distribution function evaluated at `x`\n\n Notes\n -----\n %(_mvn_doc_callparams_note)s\n\n .. versionadded:: 1.0.0\n\n \"\"\"\n dim, mean, cov = self._process_parameters(None, mean, cov)\n x = self._process_quantiles(x, dim)\n # Use _PSD to check covariance matrix\n _PSD(cov, allow_singular=allow_singular)\n if not maxpts:\n maxpts = 1000000 * dim\n out = self._cdf(x, mean, cov, maxpts, abseps, releps)\n return out\n\n def rvs(self, mean=None, cov=1, size=1, random_state=None):\n \"\"\"Draw random samples from a multivariate normal distribution.\n\n Parameters\n ----------\n %(_mvn_doc_default_callparams)s\n size : integer, optional\n Number of samples to draw (default 1).\n %(_doc_random_state)s\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of size (`size`, `N`), where `N` is the\n dimension of the random variable.\n\n Notes\n -----\n %(_mvn_doc_callparams_note)s\n\n \"\"\"\n dim, mean, cov = self._process_parameters(None, mean, cov)\n\n random_state = self._get_random_state(random_state)\n out = random_state.multivariate_normal(mean, cov, size)\n return _squeeze_output(out)\n\n def entropy(self, mean=None, cov=1):\n \"\"\"Compute the differential entropy of the multivariate normal.\n\n Parameters\n ----------\n %(_mvn_doc_default_callparams)s\n\n Returns\n -------\n h : scalar\n Entropy of the multivariate normal distribution\n\n Notes\n -----\n %(_mvn_doc_callparams_note)s\n\n \"\"\"\n dim, mean, cov = self._process_parameters(None, mean, cov)\n _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)\n return 0.5 * logdet\n\n\nmultivariate_normal = multivariate_normal_gen()\n\n\nclass multivariate_normal_frozen(multi_rv_frozen):\n def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,\n maxpts=None, abseps=1e-5, releps=1e-5):\n \"\"\"Create a frozen multivariate normal distribution.\n\n Parameters\n ----------\n mean : array_like, default: ``[0]``\n Mean of the distribution.\n cov : array_like, default: ``[1]``\n Symmetric positive (semi)definite covariance matrix of the\n distribution.\n allow_singular : bool, default: ``False``\n Whether to allow a singular covariance matrix.\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance\n then that instance is used.\n maxpts : integer, optional\n The maximum number of points to use for integration of the\n 
cumulative distribution function (default `1000000*dim`)\n abseps : float, optional\n Absolute error tolerance for the cumulative distribution function\n (default 1e-5)\n releps : float, optional\n Relative error tolerance for the cumulative distribution function\n (default 1e-5)\n\n Examples\n --------\n When called with the default parameters, this will create a 1D random\n variable with mean 0 and covariance 1:\n\n >>> from scipy.stats import multivariate_normal\n >>> r = multivariate_normal()\n >>> r.mean\n array([ 0.])\n >>> r.cov\n array([[1.]])\n\n \"\"\"\n self._dist = multivariate_normal_gen(seed)\n self.dim, self.mean, self.cov = self._dist._process_parameters(\n None, mean, cov)\n self.cov_info = _PSD(self.cov, allow_singular=allow_singular)\n if not maxpts:\n maxpts = 1000000 * self.dim\n self.maxpts = maxpts\n self.abseps = abseps\n self.releps = releps\n\n def logpdf(self, x):\n x = self._dist._process_quantiles(x, self.dim)\n out = self._dist._logpdf(x, self.mean, self.cov_info.U,\n self.cov_info.log_pdet, self.cov_info.rank)\n return _squeeze_output(out)\n\n def pdf(self, x):\n return np.exp(self.logpdf(x))\n\n def logcdf(self, x):\n return np.log(self.cdf(x))\n\n def cdf(self, x):\n x = self._dist._process_quantiles(x, self.dim)\n out = self._dist._cdf(x, self.mean, self.cov, self.maxpts, self.abseps,\n self.releps)\n return _squeeze_output(out)\n\n def rvs(self, size=1, random_state=None):\n return self._dist.rvs(self.mean, self.cov, size, random_state)\n\n def entropy(self):\n \"\"\"Computes the differential entropy of the multivariate normal.\n\n Returns\n -------\n h : scalar\n Entropy of the multivariate normal distribution\n\n \"\"\"\n log_pdet = self.cov_info.log_pdet\n rank = self.cov_info.rank\n return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)\n\n\n# Set frozen generator docstrings from corresponding docstrings in\n# multivariate_normal_gen and fill in default strings in class docstrings\nfor name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:\n method = multivariate_normal_gen.__dict__[name]\n method_frozen = multivariate_normal_frozen.__dict__[name]\n method_frozen.__doc__ = doccer.docformat(method.__doc__,\n mvn_docdict_noparams)\n method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)\n\n_matnorm_doc_default_callparams = \"\"\"\\\nmean : array_like, optional\n Mean of the distribution (default: `None`)\nrowcov : array_like, optional\n Among-row covariance matrix of the distribution (default: `1`)\ncolcov : array_like, optional\n Among-column covariance matrix of the distribution (default: `1`)\n\"\"\"\n\n_matnorm_doc_callparams_note = \"\"\"\\\nIf `mean` is set to `None` then a matrix of zeros is used for the mean.\nThe dimensions of this matrix are inferred from the shape of `rowcov` and\n`colcov`, if these are provided, or set to `1` if ambiguous.\n\n`rowcov` and `colcov` can be two-dimensional array_likes specifying the\ncovariance matrices directly. 
Alternatively, a one-dimensional array will\nbe be interpreted as the entries of a diagonal matrix, and a scalar or\nzero-dimensional array will be interpreted as this value times the\nidentity matrix.\n\"\"\"\n\n_matnorm_doc_frozen_callparams = \"\"\n\n_matnorm_doc_frozen_callparams_note = \"\"\"\\\nSee class definition for a detailed description of parameters.\"\"\"\n\nmatnorm_docdict_params = {\n '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,\n '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\nmatnorm_docdict_noparams = {\n '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,\n '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\n\nclass matrix_normal_gen(multi_rv_generic):\n r\"\"\"A matrix normal random variable.\n\n The `mean` keyword specifies the mean. The `rowcov` keyword specifies the\n among-row covariance matrix. The 'colcov' keyword specifies the\n among-column covariance matrix.\n\n Methods\n -------\n pdf(X, mean=None, rowcov=1, colcov=1)\n Probability density function.\n logpdf(X, mean=None, rowcov=1, colcov=1)\n Log of the probability density function.\n rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)\n Draw random samples.\n\n Parameters\n ----------\n %(_matnorm_doc_default_callparams)s\n %(_doc_random_state)s\n\n Notes\n -----\n %(_matnorm_doc_callparams_note)s\n\n The covariance matrices specified by `rowcov` and `colcov` must be\n (symmetric) positive definite. If the samples in `X` are\n :math:`m \\times n`, then `rowcov` must be :math:`m \\times m` and\n `colcov` must be :math:`n \\times n`. `mean` must be the same shape as `X`.\n\n The probability density function for `matrix_normal` is\n\n .. math::\n\n f(X) = (2 \\pi)^{-\\frac{mn}{2}}|U|^{-\\frac{n}{2}} |V|^{-\\frac{m}{2}}\n \\exp\\left( -\\frac{1}{2} \\mathrm{Tr}\\left[ U^{-1} (X-M) V^{-1}\n (X-M)^T \\right] \\right),\n\n where :math:`M` is the mean, :math:`U` the among-row covariance matrix,\n :math:`V` the among-column covariance matrix.\n\n The `allow_singular` behaviour of the `multivariate_normal`\n distribution is not currently supported. Covariance matrices must be\n full rank.\n\n The `matrix_normal` distribution is closely related to the\n `multivariate_normal` distribution. Specifically, :math:`\\mathrm{Vec}(X)`\n (the vector formed by concatenating the columns of :math:`X`) has a\n multivariate normal distribution with mean :math:`\\mathrm{Vec}(M)`\n and covariance :math:`V \\otimes U` (where :math:`\\otimes` is the Kronecker\n product). Sampling and pdf evaluation are\n :math:`\\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but\n :math:`\\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,\n making this equivalent form algorithmically inefficient.\n\n .. versionadded:: 0.17.0\n\n Examples\n --------\n\n >>> from scipy.stats import matrix_normal\n\n >>> M = np.arange(6).reshape(3,2); M\n array([[0, 1],\n [2, 3],\n [4, 5]])\n >>> U = np.diag([1,2,3]); U\n array([[1, 0, 0],\n [0, 2, 0],\n [0, 0, 3]])\n >>> V = 0.3*np.identity(2); V\n array([[ 0.3, 0. ],\n [ 0. 
, 0.3]])\n >>> X = M + 0.1; X\n array([[ 0.1, 1.1],\n [ 2.1, 3.1],\n [ 4.1, 5.1]])\n >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)\n 0.023410202050005054\n\n >>> # Equivalent multivariate normal\n >>> from scipy.stats import multivariate_normal\n >>> vectorised_X = X.T.flatten()\n >>> equiv_mean = M.T.flatten()\n >>> equiv_cov = np.kron(V,U)\n >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)\n 0.023410202050005054\n\n Alternatively, the object may be called (as a function) to fix the mean\n and covariance parameters, returning a \"frozen\" matrix normal\n random variable:\n\n >>> rv = matrix_normal(mean=None, rowcov=1, colcov=1)\n >>> # Frozen object with the same methods but holding the given\n >>> # mean and covariance fixed.\n\n \"\"\"\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)\n\n def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):\n \"\"\"Create a frozen matrix normal distribution.\n\n See `matrix_normal_frozen` for more information.\n\n \"\"\"\n return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)\n\n def _process_parameters(self, mean, rowcov, colcov):\n \"\"\"\n Infer dimensionality from mean or covariance matrices. Handle\n defaults. Ensure compatible dimensions.\n \"\"\"\n\n # Process mean\n if mean is not None:\n mean = np.asarray(mean, dtype=float)\n meanshape = mean.shape\n if len(meanshape) != 2:\n raise ValueError(\"Array `mean` must be two dimensional.\")\n if np.any(meanshape == 0):\n raise ValueError(\"Array `mean` has invalid shape.\")\n\n # Process among-row covariance\n rowcov = np.asarray(rowcov, dtype=float)\n if rowcov.ndim == 0:\n if mean is not None:\n rowcov = rowcov * np.identity(meanshape[0])\n else:\n rowcov = rowcov * np.identity(1)\n elif rowcov.ndim == 1:\n rowcov = np.diag(rowcov)\n rowshape = rowcov.shape\n if len(rowshape) != 2:\n raise ValueError(\"`rowcov` must be a scalar or a 2D array.\")\n if rowshape[0] != rowshape[1]:\n raise ValueError(\"Array `rowcov` must be square.\")\n if rowshape[0] == 0:\n raise ValueError(\"Array `rowcov` has invalid shape.\")\n numrows = rowshape[0]\n\n # Process among-column covariance\n colcov = np.asarray(colcov, dtype=float)\n if colcov.ndim == 0:\n if mean is not None:\n colcov = colcov * np.identity(meanshape[1])\n else:\n colcov = colcov * np.identity(1)\n elif colcov.ndim == 1:\n colcov = np.diag(colcov)\n colshape = colcov.shape\n if len(colshape) != 2:\n raise ValueError(\"`colcov` must be a scalar or a 2D array.\")\n if colshape[0] != colshape[1]:\n raise ValueError(\"Array `colcov` must be square.\")\n if colshape[0] == 0:\n raise ValueError(\"Array `colcov` has invalid shape.\")\n numcols = colshape[0]\n\n # Ensure mean and covariances compatible\n if mean is not None:\n if meanshape[0] != numrows:\n raise ValueError(\"Arrays `mean` and `rowcov` must have the \"\n \"same number of rows.\")\n if meanshape[1] != numcols:\n raise ValueError(\"Arrays `mean` and `colcov` must have the \"\n \"same number of columns.\")\n else:\n mean = np.zeros((numrows, numcols))\n\n dims = (numrows, numcols)\n\n return dims, mean, rowcov, colcov\n\n def _process_quantiles(self, X, dims):\n \"\"\"\n Adjust quantiles array so that last two axes labels the components of\n each data point.\n \"\"\"\n X = np.asarray(X, dtype=float)\n if X.ndim == 2:\n X = X[np.newaxis, :]\n if X.shape[-2:] != dims:\n raise ValueError(\"The shape of array `X` is not compatible \"\n \"with the distribution 
parameters.\")\n return X\n\n def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,\n col_prec_rt, log_det_colcov):\n \"\"\"Log of the matrix normal probability density function.\n\n Parameters\n ----------\n dims : tuple\n Dimensions of the matrix variates\n X : ndarray\n Points at which to evaluate the log of the probability\n density function\n mean : ndarray\n Mean of the distribution\n row_prec_rt : ndarray\n A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)\n is the inverse of the among-row covariance matrix\n log_det_rowcov : float\n Logarithm of the determinant of the among-row covariance matrix\n col_prec_rt : ndarray\n A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)\n is the inverse of the among-column covariance matrix\n log_det_colcov : float\n Logarithm of the determinant of the among-column covariance matrix\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'logpdf' instead.\n\n \"\"\"\n numrows, numcols = dims\n roll_dev = np.moveaxis(X-mean, -1, 0)\n scale_dev = np.tensordot(col_prec_rt.T,\n np.dot(roll_dev, row_prec_rt), 1)\n maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)\n return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov\n + numrows*log_det_colcov + maha)\n\n def logpdf(self, X, mean=None, rowcov=1, colcov=1):\n \"\"\"Log of the matrix normal probability density function.\n\n Parameters\n ----------\n X : array_like\n Quantiles, with the last two axes of `X` denoting the components.\n %(_matnorm_doc_default_callparams)s\n\n Returns\n -------\n logpdf : ndarray\n Log of the probability density function evaluated at `X`\n\n Notes\n -----\n %(_matnorm_doc_callparams_note)s\n\n \"\"\"\n dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,\n colcov)\n X = self._process_quantiles(X, dims)\n rowpsd = _PSD(rowcov, allow_singular=False)\n colpsd = _PSD(colcov, allow_singular=False)\n out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,\n colpsd.log_pdet)\n return _squeeze_output(out)\n\n def pdf(self, X, mean=None, rowcov=1, colcov=1):\n \"\"\"Matrix normal probability density function.\n\n Parameters\n ----------\n X : array_like\n Quantiles, with the last two axes of `X` denoting the components.\n %(_matnorm_doc_default_callparams)s\n\n Returns\n -------\n pdf : ndarray\n Probability density function evaluated at `X`\n\n Notes\n -----\n %(_matnorm_doc_callparams_note)s\n\n \"\"\"\n return np.exp(self.logpdf(X, mean, rowcov, colcov))\n\n def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):\n \"\"\"Draw random samples from a matrix normal distribution.\n\n Parameters\n ----------\n %(_matnorm_doc_default_callparams)s\n size : integer, optional\n Number of samples to draw (default 1).\n %(_doc_random_state)s\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of size (`size`, `dims`), where `dims` is the\n dimension of the random matrices.\n\n Notes\n -----\n %(_matnorm_doc_callparams_note)s\n\n \"\"\"\n size = int(size)\n dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,\n colcov)\n rowchol = scipy.linalg.cholesky(rowcov, lower=True)\n colchol = scipy.linalg.cholesky(colcov, lower=True)\n random_state = self._get_random_state(random_state)\n std_norm = random_state.standard_normal(size=(dims[1], size, dims[0]))\n roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)\n out = np.moveaxis(roll_rvs.T, 1, 0) + mean[np.newaxis, :, :]\n if size == 1:\n out = 
out.reshape(mean.shape)\n return out\n\n\nmatrix_normal = matrix_normal_gen()\n\n\nclass matrix_normal_frozen(multi_rv_frozen):\n \"\"\"\n Create a frozen matrix normal distribution.\n\n Parameters\n ----------\n %(_matnorm_doc_default_callparams)s\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is `None` the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is `None`.\n\n Examples\n --------\n >>> from scipy.stats import matrix_normal\n\n >>> distn = matrix_normal(mean=np.zeros((3,3)))\n >>> X = distn.rvs(); X\n array([[-0.02976962, 0.93339138, -0.09663178],\n [ 0.67405524, 0.28250467, -0.93308929],\n [-0.31144782, 0.74535536, 1.30412916]])\n >>> distn.pdf(X)\n 2.5160642368346784e-05\n >>> distn.logpdf(X)\n -10.590229595124615\n \"\"\"\n\n def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):\n self._dist = matrix_normal_gen(seed)\n self.dims, self.mean, self.rowcov, self.colcov = \\\n self._dist._process_parameters(mean, rowcov, colcov)\n self.rowpsd = _PSD(self.rowcov, allow_singular=False)\n self.colpsd = _PSD(self.colcov, allow_singular=False)\n\n def logpdf(self, X):\n X = self._dist._process_quantiles(X, self.dims)\n out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,\n self.rowpsd.log_pdet, self.colpsd.U,\n self.colpsd.log_pdet)\n return _squeeze_output(out)\n\n def pdf(self, X):\n return np.exp(self.logpdf(X))\n\n def rvs(self, size=1, random_state=None):\n return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,\n random_state)\n\n\n# Set frozen generator docstrings from corresponding docstrings in\n# matrix_normal_gen and fill in default strings in class docstrings\nfor name in ['logpdf', 'pdf', 'rvs']:\n method = matrix_normal_gen.__dict__[name]\n method_frozen = matrix_normal_frozen.__dict__[name]\n method_frozen.__doc__ = doccer.docformat(method.__doc__,\n matnorm_docdict_noparams)\n method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)\n\n_dirichlet_doc_default_callparams = \"\"\"\\\nalpha : array_like\n The concentration parameters. 
The number of entries determines the\n dimensionality of the distribution.\n\"\"\"\n_dirichlet_doc_frozen_callparams = \"\"\n\n_dirichlet_doc_frozen_callparams_note = \"\"\"\\\nSee class definition for a detailed description of parameters.\"\"\"\n\ndirichlet_docdict_params = {\n '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,\n '_doc_random_state': _doc_random_state\n}\n\ndirichlet_docdict_noparams = {\n '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,\n '_doc_random_state': _doc_random_state\n}\n\n\ndef _dirichlet_check_parameters(alpha):\n alpha = np.asarray(alpha)\n if np.min(alpha) <= 0:\n raise ValueError(\"All parameters must be greater than 0\")\n elif alpha.ndim != 1:\n raise ValueError(\"Parameter vector 'a' must be one dimensional, \"\n \"but a.shape = %s.\" % (alpha.shape, ))\n return alpha\n\n\ndef _dirichlet_check_input(alpha, x):\n x = np.asarray(x)\n\n if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:\n raise ValueError(\"Vector 'x' must have either the same number \"\n \"of entries as, or one entry fewer than, \"\n \"parameter vector 'a', but alpha.shape = %s \"\n \"and x.shape = %s.\" % (alpha.shape, x.shape))\n\n if x.shape[0] != alpha.shape[0]:\n xk = np.array([1 - np.sum(x, 0)])\n if xk.ndim == 1:\n x = np.append(x, xk)\n elif xk.ndim == 2:\n x = np.vstack((x, xk))\n else:\n raise ValueError(\"The input must be one dimensional or a two \"\n \"dimensional matrix containing the entries.\")\n\n if np.min(x) < 0:\n raise ValueError(\"Each entry in 'x' must be greater than or equal \"\n \"to zero.\")\n\n if np.max(x) > 1:\n raise ValueError(\"Each entry in 'x' must be smaller or equal one.\")\n\n # Check x_i > 0 or alpha_i > 1\n xeq0 = (x == 0)\n alphalt1 = (alpha < 1)\n if x.shape != alpha.shape:\n alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape)\n chk = np.logical_and(xeq0, alphalt1)\n\n if np.sum(chk):\n raise ValueError(\"Each entry in 'x' must be greater than zero if its \"\n \"alpha is less than one.\")\n\n if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():\n raise ValueError(\"The input vector 'x' must lie within the normal \"\n \"simplex. but np.sum(x, 0) = %s.\" % np.sum(x, 0))\n\n return x\n\n\ndef _lnB(alpha):\n r\"\"\"Internal helper function to compute the log of the useful quotient.\n\n .. math::\n\n B(\\alpha) = \\frac{\\prod_{i=1}{K}\\Gamma(\\alpha_i)}\n {\\Gamma\\left(\\sum_{i=1}^{K} \\alpha_i \\right)}\n\n Parameters\n ----------\n %(_dirichlet_doc_default_callparams)s\n\n Returns\n -------\n B : scalar\n Helper quotient, internal use only\n\n \"\"\"\n return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))\n\n\nclass dirichlet_gen(multi_rv_generic):\n r\"\"\"A Dirichlet random variable.\n\n The ``alpha`` keyword specifies the concentration parameters of the\n distribution.\n\n .. versionadded:: 0.15.0\n\n Methods\n -------\n pdf(x, alpha)\n Probability density function.\n logpdf(x, alpha)\n Log of the probability density function.\n rvs(alpha, size=1, random_state=None)\n Draw random samples from a Dirichlet distribution.\n mean(alpha)\n The mean of the Dirichlet distribution\n var(alpha)\n The variance of the Dirichlet distribution\n entropy(alpha)\n Compute the differential entropy of the Dirichlet distribution.\n\n Parameters\n ----------\n %(_dirichlet_doc_default_callparams)s\n %(_doc_random_state)s\n\n Notes\n -----\n Each :math:`\\alpha` entry must be positive. The distribution has only\n support on the simplex defined by\n\n .. 
math::\n \\sum_{i=1}^{K} x_i = 1\n\n where :math:`0 < x_i < 1`.\n\n If the quantiles don't lie within the simplex, a ValueError is raised.\n\n The probability density function for `dirichlet` is\n\n .. math::\n\n f(x) = \\frac{1}{\\mathrm{B}(\\boldsymbol\\alpha)} \\prod_{i=1}^K x_i^{\\alpha_i - 1}\n\n where\n\n .. math::\n\n \\mathrm{B}(\\boldsymbol\\alpha) = \\frac{\\prod_{i=1}^K \\Gamma(\\alpha_i)}\n {\\Gamma\\bigl(\\sum_{i=1}^K \\alpha_i\\bigr)}\n\n and :math:`\\boldsymbol\\alpha=(\\alpha_1,\\ldots,\\alpha_K)`, the\n concentration parameters and :math:`K` is the dimension of the space\n where :math:`x` takes values.\n\n Note that the dirichlet interface is somewhat inconsistent.\n The array returned by the rvs function is transposed\n with respect to the format expected by the pdf and logpdf.\n\n Examples\n --------\n >>> from scipy.stats import dirichlet\n\n Generate a dirichlet random variable\n\n >>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles\n >>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters\n >>> dirichlet.pdf(quantiles, alpha)\n 0.2843831684937255\n\n The same PDF but following a log scale\n\n >>> dirichlet.logpdf(quantiles, alpha)\n -1.2574327653159187\n\n Once we specify the dirichlet distribution\n we can then calculate quantities of interest\n\n >>> dirichlet.mean(alpha) # get the mean of the distribution\n array([0.01960784, 0.24509804, 0.73529412])\n >>> dirichlet.var(alpha) # get variance\n array([0.00089829, 0.00864603, 0.00909517])\n >>> dirichlet.entropy(alpha) # calculate the differential entropy\n -4.3280162474082715\n\n We can also return random samples from the distribution\n\n >>> dirichlet.rvs(alpha, size=1, random_state=1)\n array([[0.00766178, 0.24670518, 0.74563305]])\n >>> dirichlet.rvs(alpha, size=2, random_state=2)\n array([[0.01639427, 0.1292273 , 0.85437844],\n [0.00156917, 0.19033695, 0.80809388]])\n\n Alternatively, the object may be called (as a function) to fix\n concentration parameters, returning a \"frozen\" Dirichlet\n random variable:\n\n >>> rv = dirichlet(alpha)\n >>> # Frozen object with the same methods but holding the given\n >>> # concentration parameters fixed.\n\n \"\"\"\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)\n\n def __call__(self, alpha, seed=None):\n return dirichlet_frozen(alpha, seed=seed)\n\n def _logpdf(self, x, alpha):\n \"\"\"Log of the Dirichlet probability density function.\n\n Parameters\n ----------\n x : ndarray\n Points at which to evaluate the log of the probability\n density function\n %(_dirichlet_doc_default_callparams)s\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'logpdf' instead.\n\n \"\"\"\n lnB = _lnB(alpha)\n return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0)\n\n def logpdf(self, x, alpha):\n \"\"\"Log of the Dirichlet probability density function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n %(_dirichlet_doc_default_callparams)s\n\n Returns\n -------\n pdf : ndarray or scalar\n Log of the probability density function evaluated at `x`.\n\n \"\"\"\n alpha = _dirichlet_check_parameters(alpha)\n x = _dirichlet_check_input(alpha, x)\n\n out = self._logpdf(x, alpha)\n return _squeeze_output(out)\n\n def pdf(self, x, alpha):\n \"\"\"The Dirichlet probability density function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the 
components.\n %(_dirichlet_doc_default_callparams)s\n\n Returns\n -------\n pdf : ndarray or scalar\n The probability density function evaluated at `x`.\n\n \"\"\"\n alpha = _dirichlet_check_parameters(alpha)\n x = _dirichlet_check_input(alpha, x)\n\n out = np.exp(self._logpdf(x, alpha))\n return _squeeze_output(out)\n\n def mean(self, alpha):\n \"\"\"Compute the mean of the dirichlet distribution.\n\n Parameters\n ----------\n %(_dirichlet_doc_default_callparams)s\n\n Returns\n -------\n mu : ndarray or scalar\n Mean of the Dirichlet distribution.\n\n \"\"\"\n alpha = _dirichlet_check_parameters(alpha)\n\n out = alpha / (np.sum(alpha))\n return _squeeze_output(out)\n\n def var(self, alpha):\n \"\"\"Compute the variance of the dirichlet distribution.\n\n Parameters\n ----------\n %(_dirichlet_doc_default_callparams)s\n\n Returns\n -------\n v : ndarray or scalar\n Variance of the Dirichlet distribution.\n\n \"\"\"\n\n alpha = _dirichlet_check_parameters(alpha)\n\n alpha0 = np.sum(alpha)\n out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))\n return _squeeze_output(out)\n\n def entropy(self, alpha):\n \"\"\"Compute the differential entropy of the dirichlet distribution.\n\n Parameters\n ----------\n %(_dirichlet_doc_default_callparams)s\n\n Returns\n -------\n h : scalar\n Entropy of the Dirichlet distribution\n\n \"\"\"\n\n alpha = _dirichlet_check_parameters(alpha)\n\n alpha0 = np.sum(alpha)\n lnB = _lnB(alpha)\n K = alpha.shape[0]\n\n out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(\n (alpha - 1) * scipy.special.psi(alpha))\n return _squeeze_output(out)\n\n def rvs(self, alpha, size=1, random_state=None):\n \"\"\"Draw random samples from a Dirichlet distribution.\n\n Parameters\n ----------\n %(_dirichlet_doc_default_callparams)s\n size : int, optional\n Number of samples to draw (default 1).\n %(_doc_random_state)s\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of size (`size`, `N`), where `N` is the\n dimension of the random variable.\n\n \"\"\"\n alpha = _dirichlet_check_parameters(alpha)\n random_state = self._get_random_state(random_state)\n return random_state.dirichlet(alpha, size=size)\n\n\ndirichlet = dirichlet_gen()\n\n\nclass dirichlet_frozen(multi_rv_frozen):\n def __init__(self, alpha, seed=None):\n self.alpha = _dirichlet_check_parameters(alpha)\n self._dist = dirichlet_gen(seed)\n\n def logpdf(self, x):\n return self._dist.logpdf(x, self.alpha)\n\n def pdf(self, x):\n return self._dist.pdf(x, self.alpha)\n\n def mean(self):\n return self._dist.mean(self.alpha)\n\n def var(self):\n return self._dist.var(self.alpha)\n\n def entropy(self):\n return self._dist.entropy(self.alpha)\n\n def rvs(self, size=1, random_state=None):\n return self._dist.rvs(self.alpha, size, random_state)\n\n\n# Set frozen generator docstrings from corresponding docstrings in\n# multivariate_normal_gen and fill in default strings in class docstrings\nfor name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:\n method = dirichlet_gen.__dict__[name]\n method_frozen = dirichlet_frozen.__dict__[name]\n method_frozen.__doc__ = doccer.docformat(\n method.__doc__, dirichlet_docdict_noparams)\n method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)\n\n\n_wishart_doc_default_callparams = \"\"\"\\\ndf : int\n Degrees of freedom, must be greater than or equal to dimension of the\n scale matrix\nscale : array_like\n Symmetric positive definite scale matrix of the distribution\n\"\"\"\n\n_wishart_doc_callparams_note = 
\"\"\n\n_wishart_doc_frozen_callparams = \"\"\n\n_wishart_doc_frozen_callparams_note = \"\"\"\\\nSee class definition for a detailed description of parameters.\"\"\"\n\nwishart_docdict_params = {\n '_doc_default_callparams': _wishart_doc_default_callparams,\n '_doc_callparams_note': _wishart_doc_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\nwishart_docdict_noparams = {\n '_doc_default_callparams': _wishart_doc_frozen_callparams,\n '_doc_callparams_note': _wishart_doc_frozen_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\n\nclass wishart_gen(multi_rv_generic):\n r\"\"\"A Wishart random variable.\n\n The `df` keyword specifies the degrees of freedom. The `scale` keyword\n specifies the scale matrix, which must be symmetric and positive definite.\n In this context, the scale matrix is often interpreted in terms of a\n multivariate normal precision matrix (the inverse of the covariance\n matrix). These arguments must satisfy the relationship\n ``df > scale.ndim - 1``, but see notes on using the `rvs` method with\n ``df < scale.ndim``.\n\n Methods\n -------\n pdf(x, df, scale)\n Probability density function.\n logpdf(x, df, scale)\n Log of the probability density function.\n rvs(df, scale, size=1, random_state=None)\n Draw random samples from a Wishart distribution.\n entropy()\n Compute the differential entropy of the Wishart distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n %(_doc_random_state)s\n\n Raises\n ------\n scipy.linalg.LinAlgError\n If the scale matrix `scale` is not positive definite.\n\n See Also\n --------\n invwishart, chi2\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n The scale matrix `scale` must be a symmetric positive definite\n matrix. Singular matrices, including the symmetric positive semi-definite\n case, are not supported. Symmetry is not checked; only the lower triangular\n portion is used.\n\n The Wishart distribution is often denoted\n\n .. math::\n\n W_p(\\nu, \\Sigma)\n\n where :math:`\\nu` is the degrees of freedom and :math:`\\Sigma` is the\n :math:`p \\times p` scale matrix.\n\n The probability density function for `wishart` has support over positive\n definite matrices :math:`S`; if :math:`S \\sim W_p(\\nu, \\Sigma)`, then\n its PDF is given by:\n\n .. math::\n\n f(S) = \\frac{|S|^{\\frac{\\nu - p - 1}{2}}}{2^{ \\frac{\\nu p}{2} }\n |\\Sigma|^\\frac{\\nu}{2} \\Gamma_p \\left ( \\frac{\\nu}{2} \\right )}\n \\exp\\left( -tr(\\Sigma^{-1} S) / 2 \\right)\n\n If :math:`S \\sim W_p(\\nu, \\Sigma)` (Wishart) then\n :math:`S^{-1} \\sim W_p^{-1}(\\nu, \\Sigma^{-1})` (inverse Wishart).\n\n If the scale matrix is 1-dimensional and equal to one, then the Wishart\n distribution :math:`W_1(\\nu, 1)` collapses to the :math:`\\chi^2(\\nu)`\n distribution.\n\n The algorithm [2]_ implemented by the `rvs` method may\n produce numerically singular matrices with :math:`p - 1 < \\nu < p`; the\n user may wish to check for this condition and generate replacement samples\n as necessary.\n\n\n .. versionadded:: 0.16.0\n\n References\n ----------\n .. [1] M.L. Eaton, \"Multivariate Statistics: A Vector Space Approach\",\n Wiley, 1983.\n .. [2] W.B. Smith and R.R. Hocking, \"Algorithm AS 53: Wishart Variate\n Generator\", Applied Statistics, vol. 21, pp. 
341-345, 1972.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from scipy.stats import wishart, chi2\n >>> x = np.linspace(1e-5, 8, 100)\n >>> w = wishart.pdf(x, df=3, scale=1); w[:5]\n array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])\n >>> c = chi2.pdf(x, 3); c[:5]\n array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])\n >>> plt.plot(x, w)\n >>> plt.show()\n\n The input quantiles can be any shape of array, as long as the last\n axis labels the components.\n\n Alternatively, the object may be called (as a function) to fix the degrees\n of freedom and scale parameters, returning a \"frozen\" Wishart random\n variable:\n\n >>> rv = wishart(df=1, scale=1)\n >>> # Frozen object with the same methods but holding the given\n >>> # degrees of freedom and scale fixed.\n\n \"\"\"\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)\n\n def __call__(self, df=None, scale=None, seed=None):\n \"\"\"Create a frozen Wishart distribution.\n\n See `wishart_frozen` for more information.\n \"\"\"\n return wishart_frozen(df, scale, seed)\n\n def _process_parameters(self, df, scale):\n if scale is None:\n scale = 1.0\n scale = np.asarray(scale, dtype=float)\n\n if scale.ndim == 0:\n scale = scale[np.newaxis, np.newaxis]\n elif scale.ndim == 1:\n scale = np.diag(scale)\n elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:\n raise ValueError(\"Array 'scale' must be square if it is two\"\n \" dimensional, but scale.scale = %s.\"\n % str(scale.shape))\n elif scale.ndim > 2:\n raise ValueError(\"Array 'scale' must be at most two-dimensional,\"\n \" but scale.ndim = %d\" % scale.ndim)\n\n dim = scale.shape[0]\n\n if df is None:\n df = dim\n elif not np.isscalar(df):\n raise ValueError(\"Degrees of freedom must be a scalar.\")\n elif df <= dim - 1:\n raise ValueError(\"Degrees of freedom must be greater than the \"\n \"dimension of scale matrix minus 1.\")\n\n return dim, df, scale\n\n def _process_quantiles(self, x, dim):\n \"\"\"\n Adjust quantiles array so that last axis labels the components of\n each data point.\n \"\"\"\n x = np.asarray(x, dtype=float)\n\n if x.ndim == 0:\n x = x * np.eye(dim)[:, :, np.newaxis]\n if x.ndim == 1:\n if dim == 1:\n x = x[np.newaxis, np.newaxis, :]\n else:\n x = np.diag(x)[:, :, np.newaxis]\n elif x.ndim == 2:\n if not x.shape[0] == x.shape[1]:\n raise ValueError(\"Quantiles must be square if they are two\"\n \" dimensional, but x.shape = %s.\"\n % str(x.shape))\n x = x[:, :, np.newaxis]\n elif x.ndim == 3:\n if not x.shape[0] == x.shape[1]:\n raise ValueError(\"Quantiles must be square in the first two\"\n \" dimensions if they are three dimensional\"\n \", but x.shape = %s.\" % str(x.shape))\n elif x.ndim > 3:\n raise ValueError(\"Quantiles must be at most two-dimensional with\"\n \" an additional dimension for multiple\"\n \"components, but x.ndim = %d\" % x.ndim)\n\n # Now we have 3-dim array; should have shape [dim, dim, *]\n if not x.shape[0:2] == (dim, dim):\n raise ValueError('Quantiles have incompatible dimensions: should'\n ' be %s, got %s.' 
% ((dim, dim), x.shape[0:2]))\n\n return x\n\n def _process_size(self, size):\n size = np.asarray(size)\n\n if size.ndim == 0:\n size = size[np.newaxis]\n elif size.ndim > 1:\n raise ValueError('Size must be an integer or tuple of integers;'\n ' thus must have dimension <= 1.'\n ' Got size.ndim = %s' % str(tuple(size)))\n n = size.prod()\n shape = tuple(size)\n\n return n, shape\n\n def _logpdf(self, x, dim, df, scale, log_det_scale, C):\n \"\"\"Log of the Wishart probability density function.\n\n Parameters\n ----------\n x : ndarray\n Points at which to evaluate the log of the probability\n density function\n dim : int\n Dimension of the scale matrix\n df : int\n Degrees of freedom\n scale : ndarray\n Scale matrix\n log_det_scale : float\n Logarithm of the determinant of the scale matrix\n C : ndarray\n Cholesky factorization of the scale matrix, lower triagular.\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'logpdf' instead.\n\n \"\"\"\n # log determinant of x\n # Note: x has components along the last axis, so that x.T has\n # components alone the 0-th axis. Then since det(A) = det(A'), this\n # gives us a 1-dim vector of determinants\n\n # Retrieve tr(scale^{-1} x)\n log_det_x = np.empty(x.shape[-1])\n scale_inv_x = np.empty(x.shape)\n tr_scale_inv_x = np.empty(x.shape[-1])\n for i in range(x.shape[-1]):\n _, log_det_x[i] = self._cholesky_logdet(x[:, :, i])\n scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])\n tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()\n\n # Log PDF\n out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -\n (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +\n multigammaln(0.5*df, dim)))\n\n return out\n\n def logpdf(self, x, df, scale):\n \"\"\"Log of the Wishart probability density function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n Each quantile must be a symmetric positive definite matrix.\n %(_doc_default_callparams)s\n\n Returns\n -------\n pdf : ndarray\n Log of the probability density function evaluated at `x`\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n \"\"\"\n dim, df, scale = self._process_parameters(df, scale)\n x = self._process_quantiles(x, dim)\n\n # Cholesky decomposition of scale, get log(det(scale))\n C, log_det_scale = self._cholesky_logdet(scale)\n\n out = self._logpdf(x, dim, df, scale, log_det_scale, C)\n return _squeeze_output(out)\n\n def pdf(self, x, df, scale):\n \"\"\"Wishart probability density function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n Each quantile must be a symmetric positive definite matrix.\n %(_doc_default_callparams)s\n\n Returns\n -------\n pdf : ndarray\n Probability density function evaluated at `x`\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n \"\"\"\n return np.exp(self.logpdf(x, df, scale))\n\n def _mean(self, dim, df, scale):\n \"\"\"Mean of the Wishart distribution.\n\n Parameters\n ----------\n dim : int\n Dimension of the scale matrix\n %(_doc_default_callparams)s\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'mean' instead.\n\n \"\"\"\n return df * scale\n\n def mean(self, df, scale):\n \"\"\"Mean of the Wishart distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n mean : float\n The mean of the distribution\n \"\"\"\n dim, df, scale = self._process_parameters(df, scale)\n out = 
self._mean(dim, df, scale)\n return _squeeze_output(out)\n\n def _mode(self, dim, df, scale):\n \"\"\"Mode of the Wishart distribution.\n\n Parameters\n ----------\n dim : int\n Dimension of the scale matrix\n %(_doc_default_callparams)s\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'mode' instead.\n\n \"\"\"\n if df >= dim + 1:\n out = (df-dim-1) * scale\n else:\n out = None\n return out\n\n def mode(self, df, scale):\n \"\"\"Mode of the Wishart distribution\n\n Only valid if the degrees of freedom are greater than the dimension of\n the scale matrix.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n mode : float or None\n The Mode of the distribution\n \"\"\"\n dim, df, scale = self._process_parameters(df, scale)\n out = self._mode(dim, df, scale)\n return _squeeze_output(out) if out is not None else out\n\n def _var(self, dim, df, scale):\n \"\"\"Variance of the Wishart distribution.\n\n Parameters\n ----------\n dim : int\n Dimension of the scale matrix\n %(_doc_default_callparams)s\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'var' instead.\n\n \"\"\"\n var = scale**2\n diag = scale.diagonal() # 1 x dim array\n var += np.outer(diag, diag)\n var *= df\n return var\n\n def var(self, df, scale):\n \"\"\"Variance of the Wishart distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n var : float\n The variance of the distribution\n \"\"\"\n dim, df, scale = self._process_parameters(df, scale)\n out = self._var(dim, df, scale)\n return _squeeze_output(out)\n\n def _standard_rvs(self, n, shape, dim, df, random_state):\n \"\"\"\n Parameters\n ----------\n n : integer\n Number of variates to generate\n shape : iterable\n Shape of the variates to generate\n dim : int\n Dimension of the scale matrix\n df : int\n Degrees of freedom\n random_state : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance\n then that instance is used.\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'rvs' instead.\n\n \"\"\"\n # Random normal variates for off-diagonal elements\n n_tril = dim * (dim-1) // 2\n covariances = random_state.normal(\n size=n*n_tril).reshape(shape+(n_tril,))\n\n # Random chi-square variates for diagonal elements\n variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5\n for i in range(dim)]].reshape((dim,) +\n shape[::-1]).T)\n\n # Create the A matri(ces) - lower triangular\n A = np.zeros(shape + (dim, dim))\n\n # Input the covariances\n size_idx = tuple([slice(None, None, None)]*len(shape))\n tril_idx = np.tril_indices(dim, k=-1)\n A[size_idx + tril_idx] = covariances\n\n # Input the variances\n diag_idx = np.diag_indices(dim)\n A[size_idx + diag_idx] = variances\n\n return A\n\n def _rvs(self, n, shape, dim, df, C, random_state):\n \"\"\"Draw random samples from a Wishart distribution.\n\n Parameters\n ----------\n n : integer\n Number of variates to generate\n shape : iterable\n Shape of the variates to generate\n dim : int\n Dimension of the scale matrix\n df : int\n Degrees of freedom\n C : ndarray\n Cholesky factorization of the scale matrix, lower triangular.\n 
%(_doc_random_state)s\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'rvs' instead.\n\n \"\"\"\n random_state = self._get_random_state(random_state)\n # Calculate the matrices A, which are actually lower triangular\n # Cholesky factorizations of a matrix B such that B ~ W(df, I)\n A = self._standard_rvs(n, shape, dim, df, random_state)\n\n # Calculate SA = C A A' C', where SA ~ W(df, scale)\n # Note: this is the product of a (lower) (lower) (lower)' (lower)'\n # or, denoting B = AA', it is C B C' where C is the lower\n # triangular Cholesky factorization of the scale matrix.\n # this appears to conflict with the instructions in [1]_, which\n # suggest that it should be D' B D where D is the lower\n # triangular factorization of the scale matrix. However, it is\n # meant to refer to the Bartlett (1933) representation of a\n # Wishart random variate as L A A' L' where L is lower triangular\n # so it appears that understanding D' to be upper triangular\n # is either a typo in or misreading of [1]_.\n for index in np.ndindex(shape):\n CA = np.dot(C, A[index])\n A[index] = np.dot(CA, CA.T)\n\n return A\n\n def rvs(self, df, scale, size=1, random_state=None):\n \"\"\"Draw random samples from a Wishart distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n size : integer or iterable of integers, optional\n Number of samples to draw (default 1).\n %(_doc_random_state)s\n\n Returns\n -------\n rvs : ndarray\n Random variates of shape (`size`) + (`dim`, `dim), where `dim` is\n the dimension of the scale matrix.\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n \"\"\"\n n, shape = self._process_size(size)\n dim, df, scale = self._process_parameters(df, scale)\n\n # Cholesky decomposition of scale\n C = scipy.linalg.cholesky(scale, lower=True)\n\n out = self._rvs(n, shape, dim, df, C, random_state)\n\n return _squeeze_output(out)\n\n def _entropy(self, dim, df, log_det_scale):\n \"\"\"Compute the differential entropy of the Wishart.\n\n Parameters\n ----------\n dim : int\n Dimension of the scale matrix\n df : int\n Degrees of freedom\n log_det_scale : float\n Logarithm of the determinant of the scale matrix\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'entropy' instead.\n\n \"\"\"\n return (\n 0.5 * (dim+1) * log_det_scale +\n 0.5 * dim * (dim+1) * _LOG_2 +\n multigammaln(0.5*df, dim) -\n 0.5 * (df - dim - 1) * np.sum(\n [psi(0.5*(df + 1 - (i+1))) for i in range(dim)]\n ) +\n 0.5 * df * dim\n )\n\n def entropy(self, df, scale):\n \"\"\"Compute the differential entropy of the Wishart.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n h : scalar\n Entropy of the Wishart distribution\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n \"\"\"\n dim, df, scale = self._process_parameters(df, scale)\n _, log_det_scale = self._cholesky_logdet(scale)\n return self._entropy(dim, df, log_det_scale)\n\n def _cholesky_logdet(self, scale):\n \"\"\"Compute Cholesky decomposition and determine (log(det(scale)).\n\n Parameters\n ----------\n scale : ndarray\n Scale matrix.\n\n Returns\n -------\n c_decomp : ndarray\n The Cholesky decomposition of `scale`.\n logdet : scalar\n The log of the determinant of `scale`.\n\n Notes\n -----\n This computation of ``logdet`` is equivalent to\n ``np.linalg.slogdet(scale)``. 
It is ~2x faster though.\n\n \"\"\"\n c_decomp = scipy.linalg.cholesky(scale, lower=True)\n logdet = 2 * np.sum(np.log(c_decomp.diagonal()))\n return c_decomp, logdet\n\n\nwishart = wishart_gen()\n\n\nclass wishart_frozen(multi_rv_frozen):\n \"\"\"Create a frozen Wishart distribution.\n\n Parameters\n ----------\n df : array_like\n Degrees of freedom of the distribution\n scale : array_like\n Scale matrix of the distribution\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance then\n that instance is used.\n\n \"\"\"\n def __init__(self, df, scale, seed=None):\n self._dist = wishart_gen(seed)\n self.dim, self.df, self.scale = self._dist._process_parameters(\n df, scale)\n self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)\n\n def logpdf(self, x):\n x = self._dist._process_quantiles(x, self.dim)\n\n out = self._dist._logpdf(x, self.dim, self.df, self.scale,\n self.log_det_scale, self.C)\n return _squeeze_output(out)\n\n def pdf(self, x):\n return np.exp(self.logpdf(x))\n\n def mean(self):\n out = self._dist._mean(self.dim, self.df, self.scale)\n return _squeeze_output(out)\n\n def mode(self):\n out = self._dist._mode(self.dim, self.df, self.scale)\n return _squeeze_output(out) if out is not None else out\n\n def var(self):\n out = self._dist._var(self.dim, self.df, self.scale)\n return _squeeze_output(out)\n\n def rvs(self, size=1, random_state=None):\n n, shape = self._dist._process_size(size)\n out = self._dist._rvs(n, shape, self.dim, self.df,\n self.C, random_state)\n return _squeeze_output(out)\n\n def entropy(self):\n return self._dist._entropy(self.dim, self.df, self.log_det_scale)\n\n\n# Set frozen generator docstrings from corresponding docstrings in\n# Wishart and fill in default strings in class docstrings\nfor name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:\n method = wishart_gen.__dict__[name]\n method_frozen = wishart_frozen.__dict__[name]\n method_frozen.__doc__ = doccer.docformat(\n method.__doc__, wishart_docdict_noparams)\n method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)\n\n\ndef _cho_inv_batch(a, check_finite=True):\n \"\"\"\n Invert the matrices a_i, using a Cholesky factorization of A, where\n a_i resides in the last two dimensions of a and the other indices describe\n the index i.\n\n Overwrites the data in a.\n\n Parameters\n ----------\n a : array\n Array of matrices to invert, where the matrices themselves are stored\n in the last two dimensions.\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n x : array\n Array of inverses of the matrices ``a_i``.\n\n See Also\n --------\n scipy.linalg.cholesky : Cholesky factorization of a matrix\n\n \"\"\"\n if check_finite:\n a1 = asarray_chkfinite(a)\n else:\n a1 = asarray(a)\n if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:\n raise ValueError('expected square matrix in last two dimensions')\n\n potrf, potri = get_lapack_funcs(('potrf', 'potri'), (a1,))\n\n triu_rows, triu_cols = np.triu_indices(a.shape[-2], k=1)\n for index in np.ndindex(a1.shape[:-2]):\n\n # Cholesky 
decomposition\n a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,\n clean=False)\n if info > 0:\n raise LinAlgError(\"%d-th leading minor not positive definite\"\n % info)\n if info < 0:\n raise ValueError('illegal value in %d-th argument of internal'\n ' potrf' % -info)\n # Inversion\n a1[index], info = potri(a1[index], lower=True, overwrite_c=False)\n if info > 0:\n raise LinAlgError(\"the inverse could not be computed\")\n if info < 0:\n raise ValueError('illegal value in %d-th argument of internal'\n ' potrf' % -info)\n\n # Make symmetric (dpotri only fills in the lower triangle)\n a1[index][triu_rows, triu_cols] = a1[index][triu_cols, triu_rows]\n\n return a1\n\n\nclass invwishart_gen(wishart_gen):\n r\"\"\"An inverse Wishart random variable.\n\n The `df` keyword specifies the degrees of freedom. The `scale` keyword\n specifies the scale matrix, which must be symmetric and positive definite.\n In this context, the scale matrix is often interpreted in terms of a\n multivariate normal covariance matrix.\n\n Methods\n -------\n pdf(x, df, scale)\n Probability density function.\n logpdf(x, df, scale)\n Log of the probability density function.\n rvs(df, scale, size=1, random_state=None)\n Draw random samples from an inverse Wishart distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n %(_doc_random_state)s\n\n Raises\n ------\n scipy.linalg.LinAlgError\n If the scale matrix `scale` is not positive definite.\n\n See Also\n --------\n wishart\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n The scale matrix `scale` must be a symmetric positive definite\n matrix. Singular matrices, including the symmetric positive semi-definite\n case, are not supported. Symmetry is not checked; only the lower triangular\n portion is used.\n\n The inverse Wishart distribution is often denoted\n\n .. math::\n\n W_p^{-1}(\\nu, \\Psi)\n\n where :math:`\\nu` is the degrees of freedom and :math:`\\Psi` is the\n :math:`p \\times p` scale matrix.\n\n The probability density function for `invwishart` has support over positive\n definite matrices :math:`S`; if :math:`S \\sim W^{-1}_p(\\nu, \\Sigma)`,\n then its PDF is given by:\n\n .. math::\n\n f(S) = \\frac{|\\Sigma|^\\frac{\\nu}{2}}{2^{ \\frac{\\nu p}{2} }\n |S|^{\\frac{\\nu + p + 1}{2}} \\Gamma_p \\left(\\frac{\\nu}{2} \\right)}\n \\exp\\left( -tr(\\Sigma S^{-1}) / 2 \\right)\n\n If :math:`S \\sim W_p^{-1}(\\nu, \\Psi)` (inverse Wishart) then\n :math:`S^{-1} \\sim W_p(\\nu, \\Psi^{-1})` (Wishart).\n\n If the scale matrix is 1-dimensional and equal to one, then the inverse\n Wishart distribution :math:`W_1(\\nu, 1)` collapses to the\n inverse Gamma distribution with parameters shape = :math:`\\frac{\\nu}{2}`\n and scale = :math:`\\frac{1}{2}`.\n\n .. versionadded:: 0.16.0\n\n References\n ----------\n .. [1] M.L. Eaton, \"Multivariate Statistics: A Vector Space Approach\",\n Wiley, 1983.\n .. [2] M.C. Jones, \"Generating Inverse Wishart Matrices\", Communications\n in Statistics - Simulation and Computation, vol. 
14.2, pp.511-514,\n 1985.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from scipy.stats import invwishart, invgamma\n >>> x = np.linspace(0.01, 1, 100)\n >>> iw = invwishart.pdf(x, df=6, scale=1)\n >>> iw[:3]\n array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])\n >>> ig = invgamma.pdf(x, 6/2., scale=1./2)\n >>> ig[:3]\n array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])\n >>> plt.plot(x, iw)\n >>> plt.show()\n\n The input quantiles can be any shape of array, as long as the last\n axis labels the components.\n\n Alternatively, the object may be called (as a function) to fix the degrees\n of freedom and scale parameters, returning a \"frozen\" inverse Wishart\n random variable:\n\n >>> rv = invwishart(df=1, scale=1)\n >>> # Frozen object with the same methods but holding the given\n >>> # degrees of freedom and scale fixed.\n\n \"\"\"\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)\n\n def __call__(self, df=None, scale=None, seed=None):\n \"\"\"Create a frozen inverse Wishart distribution.\n\n See `invwishart_frozen` for more information.\n\n \"\"\"\n return invwishart_frozen(df, scale, seed)\n\n def _logpdf(self, x, dim, df, scale, log_det_scale):\n \"\"\"Log of the inverse Wishart probability density function.\n\n Parameters\n ----------\n x : ndarray\n Points at which to evaluate the log of the probability\n density function.\n dim : int\n Dimension of the scale matrix\n df : int\n Degrees of freedom\n scale : ndarray\n Scale matrix\n log_det_scale : float\n Logarithm of the determinant of the scale matrix\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'logpdf' instead.\n\n \"\"\"\n log_det_x = np.empty(x.shape[-1])\n x_inv = np.copy(x).T\n if dim > 1:\n _cho_inv_batch(x_inv) # works in-place\n else:\n x_inv = 1./x_inv\n tr_scale_x_inv = np.empty(x.shape[-1])\n\n for i in range(x.shape[-1]):\n C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)\n\n log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))\n\n tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()\n\n # Log PDF\n out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -\n (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -\n multigammaln(0.5*df, dim))\n\n return out\n\n def logpdf(self, x, df, scale):\n \"\"\"Log of the inverse Wishart probability density function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n Each quantile must be a symmetric positive definite matrix.\n %(_doc_default_callparams)s\n\n Returns\n -------\n pdf : ndarray\n Log of the probability density function evaluated at `x`\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n \"\"\"\n dim, df, scale = self._process_parameters(df, scale)\n x = self._process_quantiles(x, dim)\n _, log_det_scale = self._cholesky_logdet(scale)\n out = self._logpdf(x, dim, df, scale, log_det_scale)\n return _squeeze_output(out)\n\n def pdf(self, x, df, scale):\n \"\"\"Inverse Wishart probability density function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n Each quantile must be a symmetric positive definite matrix.\n %(_doc_default_callparams)s\n\n Returns\n -------\n pdf : ndarray\n Probability density function evaluated at `x`\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n \"\"\"\n return np.exp(self.logpdf(x, df, scale))\n\n def _mean(self, dim, df, scale):\n 
\"\"\"Mean of the inverse Wishart distribution.\n\n Parameters\n ----------\n dim : int\n Dimension of the scale matrix\n %(_doc_default_callparams)s\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'mean' instead.\n\n \"\"\"\n if df > dim + 1:\n out = scale / (df - dim - 1)\n else:\n out = None\n return out\n\n def mean(self, df, scale):\n \"\"\"Mean of the inverse Wishart distribution.\n\n Only valid if the degrees of freedom are greater than the dimension of\n the scale matrix plus one.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n mean : float or None\n The mean of the distribution\n\n \"\"\"\n dim, df, scale = self._process_parameters(df, scale)\n out = self._mean(dim, df, scale)\n return _squeeze_output(out) if out is not None else out\n\n def _mode(self, dim, df, scale):\n \"\"\"Mode of the inverse Wishart distribution.\n\n Parameters\n ----------\n dim : int\n Dimension of the scale matrix\n %(_doc_default_callparams)s\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'mode' instead.\n\n \"\"\"\n return scale / (df + dim + 1)\n\n def mode(self, df, scale):\n \"\"\"Mode of the inverse Wishart distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n mode : float\n The Mode of the distribution\n\n \"\"\"\n dim, df, scale = self._process_parameters(df, scale)\n out = self._mode(dim, df, scale)\n return _squeeze_output(out)\n\n def _var(self, dim, df, scale):\n \"\"\"Variance of the inverse Wishart distribution.\n\n Parameters\n ----------\n dim : int\n Dimension of the scale matrix\n %(_doc_default_callparams)s\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'var' instead.\n\n \"\"\"\n if df > dim + 3:\n var = (df - dim + 1) * scale**2\n diag = scale.diagonal() # 1 x dim array\n var += (df - dim - 1) * np.outer(diag, diag)\n var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)\n else:\n var = None\n return var\n\n def var(self, df, scale):\n \"\"\"Variance of the inverse Wishart distribution.\n\n Only valid if the degrees of freedom are greater than the dimension of\n the scale matrix plus three.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n var : float\n The variance of the distribution\n \"\"\"\n dim, df, scale = self._process_parameters(df, scale)\n out = self._var(dim, df, scale)\n return _squeeze_output(out) if out is not None else out\n\n def _rvs(self, n, shape, dim, df, C, random_state):\n \"\"\"Draw random samples from an inverse Wishart distribution.\n\n Parameters\n ----------\n n : integer\n Number of variates to generate\n shape : iterable\n Shape of the variates to generate\n dim : int\n Dimension of the scale matrix\n df : int\n Degrees of freedom\n C : ndarray\n Cholesky factorization of the scale matrix, lower triagular.\n %(_doc_random_state)s\n\n Notes\n -----\n As this function does no argument checking, it should not be\n called directly; use 'rvs' instead.\n\n \"\"\"\n random_state = self._get_random_state(random_state)\n # Get random draws A such that A ~ W(df, I)\n A = super()._standard_rvs(n, shape, dim, df, random_state)\n\n # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)\n eye = np.eye(dim)\n trtrs = get_lapack_funcs(('trtrs'), (A,))\n\n for index in np.ndindex(A.shape[:-2]):\n # Calculate CA\n CA = np.dot(C, A[index])\n # Get (C A)^{-1} via triangular solver\n if dim > 1:\n CA, 
info = trtrs(CA, eye, lower=True)\n if info > 0:\n raise LinAlgError(\"Singular matrix.\")\n if info < 0:\n raise ValueError('Illegal value in %d-th argument of'\n ' internal trtrs' % -info)\n else:\n CA = 1. / CA\n # Get SA\n A[index] = np.dot(CA.T, CA)\n\n return A\n\n def rvs(self, df, scale, size=1, random_state=None):\n \"\"\"Draw random samples from an inverse Wishart distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n size : integer or iterable of integers, optional\n Number of samples to draw (default 1).\n %(_doc_random_state)s\n\n Returns\n -------\n rvs : ndarray\n Random variates of shape (`size`) + (`dim`, `dim), where `dim` is\n the dimension of the scale matrix.\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n \"\"\"\n n, shape = self._process_size(size)\n dim, df, scale = self._process_parameters(df, scale)\n\n # Invert the scale\n eye = np.eye(dim)\n L, lower = scipy.linalg.cho_factor(scale, lower=True)\n inv_scale = scipy.linalg.cho_solve((L, lower), eye)\n # Cholesky decomposition of inverted scale\n C = scipy.linalg.cholesky(inv_scale, lower=True)\n\n out = self._rvs(n, shape, dim, df, C, random_state)\n\n return _squeeze_output(out)\n\n def entropy(self):\n # Need to find reference for inverse Wishart entropy\n raise AttributeError\n\n\ninvwishart = invwishart_gen()\n\n\nclass invwishart_frozen(multi_rv_frozen):\n def __init__(self, df, scale, seed=None):\n \"\"\"Create a frozen inverse Wishart distribution.\n\n Parameters\n ----------\n df : array_like\n Degrees of freedom of the distribution\n scale : array_like\n Scale matrix of the distribution\n seed : {None, int, `numpy.random.Generator`}, optional\n If `seed` is None the `numpy.random.Generator` singleton is used.\n If `seed` is an int, a new ``Generator`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` instance then that instance is\n used.\n\n \"\"\"\n self._dist = invwishart_gen(seed)\n self.dim, self.df, self.scale = self._dist._process_parameters(\n df, scale\n )\n\n # Get the determinant via Cholesky factorization\n C, lower = scipy.linalg.cho_factor(self.scale, lower=True)\n self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))\n\n # Get the inverse using the Cholesky factorization\n eye = np.eye(self.dim)\n self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)\n\n # Get the Cholesky factorization of the inverse scale\n self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)\n\n def logpdf(self, x):\n x = self._dist._process_quantiles(x, self.dim)\n out = self._dist._logpdf(x, self.dim, self.df, self.scale,\n self.log_det_scale)\n return _squeeze_output(out)\n\n def pdf(self, x):\n return np.exp(self.logpdf(x))\n\n def mean(self):\n out = self._dist._mean(self.dim, self.df, self.scale)\n return _squeeze_output(out) if out is not None else out\n\n def mode(self):\n out = self._dist._mode(self.dim, self.df, self.scale)\n return _squeeze_output(out)\n\n def var(self):\n out = self._dist._var(self.dim, self.df, self.scale)\n return _squeeze_output(out) if out is not None else out\n\n def rvs(self, size=1, random_state=None):\n n, shape = self._dist._process_size(size)\n\n out = self._dist._rvs(n, shape, self.dim, self.df,\n self.C, random_state)\n\n return _squeeze_output(out)\n\n def entropy(self):\n # Need to find reference for inverse Wishart entropy\n raise AttributeError\n\n\n# Set frozen generator docstrings from corresponding docstrings in\n# inverse Wishart and fill in default strings in class docstrings\nfor name in ['logpdf', 'pdf', 'mean', 
'mode', 'var', 'rvs']:\n method = invwishart_gen.__dict__[name]\n method_frozen = wishart_frozen.__dict__[name]\n method_frozen.__doc__ = doccer.docformat(\n method.__doc__, wishart_docdict_noparams)\n method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)\n\n_multinomial_doc_default_callparams = \"\"\"\\\nn : int\n Number of trials\np : array_like\n Probability of a trial falling into each category; should sum to 1\n\"\"\"\n\n_multinomial_doc_callparams_note = \"\"\"\\\n`n` should be a positive integer. Each element of `p` should be in the\ninterval :math:`[0,1]` and the elements should sum to 1. If they do not sum to\n1, the last element of the `p` array is not used and is replaced with the\nremaining probability left over from the earlier elements.\n\"\"\"\n\n_multinomial_doc_frozen_callparams = \"\"\n\n_multinomial_doc_frozen_callparams_note = \"\"\"\\\nSee class definition for a detailed description of parameters.\"\"\"\n\nmultinomial_docdict_params = {\n '_doc_default_callparams': _multinomial_doc_default_callparams,\n '_doc_callparams_note': _multinomial_doc_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\nmultinomial_docdict_noparams = {\n '_doc_default_callparams': _multinomial_doc_frozen_callparams,\n '_doc_callparams_note': _multinomial_doc_frozen_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\n\nclass multinomial_gen(multi_rv_generic):\n r\"\"\"A multinomial random variable.\n\n Methods\n -------\n pmf(x, n, p)\n Probability mass function.\n logpmf(x, n, p)\n Log of the probability mass function.\n rvs(n, p, size=1, random_state=None)\n Draw random samples from a multinomial distribution.\n entropy(n, p)\n Compute the entropy of the multinomial distribution.\n cov(n, p)\n Compute the covariance matrix of the multinomial distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n %(_doc_random_state)s\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n The probability mass function for `multinomial` is\n\n .. math::\n\n f(x) = \\frac{n!}{x_1! \\cdots x_k!} p_1^{x_1} \\cdots p_k^{x_k},\n\n supported on :math:`x=(x_1, \\ldots, x_k)` where each :math:`x_i` is a\n nonnegative integer and their sum is :math:`n`.\n\n .. versionadded:: 0.19.0\n\n Examples\n --------\n\n >>> from scipy.stats import multinomial\n >>> rv = multinomial(8, [0.3, 0.2, 0.5])\n >>> rv.pmf([1, 3, 4])\n 0.042000000000000072\n\n The multinomial distribution for :math:`k=2` is identical to the\n corresponding binomial distribution (tiny numerical differences\n notwithstanding):\n\n >>> from scipy.stats import binom\n >>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])\n 0.29030399999999973\n >>> binom.pmf(3, 7, 0.4)\n 0.29030400000000012\n\n The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support\n broadcasting, under the convention that the vector parameters (``x`` and\n ``p``) are interpreted as if each row along the last axis is a single\n object. For instance:\n\n >>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])\n array([0.2268945, 0.25412184])\n\n Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,\n but following the rules mentioned above they behave as if the rows\n ``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single\n object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and\n ``p.shape = ()``. 
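The broadcast result therefore has shape ``(2,)``; as a small illustrative check, the corresponding log-probabilities (via the ``logpmf`` method listed above) come back with the same shape:\n\n    >>> multinomial.logpmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7]).shape\n    (2,)\n\n    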
To obtain the individual elements without broadcasting,\n we would do this:\n\n >>> multinomial.pmf([3, 4], n=7, p=[.3, .7])\n 0.2268945\n >>> multinomial.pmf([3, 5], 8, p=[.3, .7])\n 0.25412184\n\n This broadcasting also works for ``cov``, where the output objects are\n square matrices of size ``p.shape[-1]``. For example:\n\n >>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])\n array([[[ 0.84, -0.84],\n [-0.84, 0.84]],\n [[ 1.2 , -1.2 ],\n [-1.2 , 1.2 ]]])\n\n In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and\n following the rules above, these broadcast as if ``p.shape == (2,)``.\n Thus the result should also be of shape ``(2,)``, but since each output is\n a :math:`2 \\times 2` matrix, the result in fact has shape ``(2, 2, 2)``,\n where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and\n ``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.\n\n Alternatively, the object may be called (as a function) to fix the `n` and\n `p` parameters, returning a \"frozen\" multinomial random variable:\n\n >>> rv = multinomial(n=7, p=[.3, .7])\n >>> # Frozen object with the same methods but holding the given\n >>> # degrees of freedom and scale fixed.\n\n See also\n --------\n scipy.stats.binom : The binomial distribution.\n numpy.random.Generator.multinomial : Sampling from the multinomial distribution.\n scipy.stats.multivariate_hypergeom :\n The multivariate hypergeometric distribution.\n \"\"\" # noqa: E501\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = \\\n doccer.docformat(self.__doc__, multinomial_docdict_params)\n\n def __call__(self, n, p, seed=None):\n \"\"\"Create a frozen multinomial distribution.\n\n See `multinomial_frozen` for more information.\n \"\"\"\n return multinomial_frozen(n, p, seed)\n\n def _process_parameters(self, n, p):\n \"\"\"Returns: n_, p_, npcond.\n\n n_ and p_ are arrays of the correct shape; npcond is a boolean array\n flagging values out of the domain.\n \"\"\"\n p = np.array(p, dtype=np.float64, copy=True)\n p[..., -1] = 1. - p[..., :-1].sum(axis=-1)\n\n # true for bad p\n pcond = np.any(p < 0, axis=-1)\n pcond |= np.any(p > 1, axis=-1)\n\n n = np.array(n, dtype=np.int_, copy=True)\n\n # true for bad n\n ncond = n <= 0\n\n return n, p, ncond | pcond\n\n def _process_quantiles(self, x, n, p):\n \"\"\"Returns: x_, xcond.\n\n x_ is an int array; xcond is a boolean array flagging values out of the\n domain.\n \"\"\"\n xx = np.asarray(x, dtype=np.int_)\n\n if xx.ndim == 0:\n raise ValueError(\"x must be an array.\")\n\n if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:\n raise ValueError(\"Size of each quantile should be size of p: \"\n \"received %d, but expected %d.\" %\n (xx.shape[-1], p.shape[-1]))\n\n # true for x out of the domain\n cond = np.any(xx != x, axis=-1)\n cond |= np.any(xx < 0, axis=-1)\n cond = cond | (np.sum(xx, axis=-1) != n)\n\n return xx, cond\n\n def _checkresult(self, result, cond, bad_value):\n result = np.asarray(result)\n\n if cond.ndim != 0:\n result[cond] = bad_value\n elif cond:\n if result.ndim == 0:\n return bad_value\n result[...] 
= bad_value\n return result\n\n def _logpmf(self, x, n, p):\n return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)\n\n def logpmf(self, x, n, p):\n \"\"\"Log of the Multinomial probability mass function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n %(_doc_default_callparams)s\n\n Returns\n -------\n logpmf : ndarray or scalar\n Log of the probability mass function evaluated at `x`\n\n Notes\n -----\n %(_doc_callparams_note)s\n \"\"\"\n n, p, npcond = self._process_parameters(n, p)\n x, xcond = self._process_quantiles(x, n, p)\n\n result = self._logpmf(x, n, p)\n\n # replace values for which x was out of the domain; broadcast\n # xcond to the right shape\n xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)\n result = self._checkresult(result, xcond_, np.NINF)\n\n # replace values bad for n or p; broadcast npcond to the right shape\n npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)\n return self._checkresult(result, npcond_, np.NAN)\n\n def pmf(self, x, n, p):\n \"\"\"Multinomial probability mass function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n %(_doc_default_callparams)s\n\n Returns\n -------\n pmf : ndarray or scalar\n Probability density function evaluated at `x`\n\n Notes\n -----\n %(_doc_callparams_note)s\n \"\"\"\n return np.exp(self.logpmf(x, n, p))\n\n def mean(self, n, p):\n \"\"\"Mean of the Multinomial distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n mean : float\n The mean of the distribution\n \"\"\"\n n, p, npcond = self._process_parameters(n, p)\n result = n[..., np.newaxis]*p\n return self._checkresult(result, npcond, np.NAN)\n\n def cov(self, n, p):\n \"\"\"Covariance matrix of the multinomial distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n cov : ndarray\n The covariance matrix of the distribution\n \"\"\"\n n, p, npcond = self._process_parameters(n, p)\n\n nn = n[..., np.newaxis, np.newaxis]\n result = nn * np.einsum('...j,...k->...jk', -p, p)\n\n # change the diagonal\n for i in range(p.shape[-1]):\n result[..., i, i] += n*p[..., i]\n\n return self._checkresult(result, npcond, np.nan)\n\n def entropy(self, n, p):\n r\"\"\"Compute the entropy of the multinomial distribution.\n\n The entropy is computed using this expression:\n\n .. math::\n\n f(x) = - \\log n! 
- n\\sum_{i=1}^k p_i \\log p_i +\n \\sum_{i=1}^k \\sum_{x=0}^n \\binom n x p_i^x(1-p_i)^{n-x} \\log x!\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n h : scalar\n Entropy of the Multinomial distribution\n\n Notes\n -----\n %(_doc_callparams_note)s\n \"\"\"\n n, p, npcond = self._process_parameters(n, p)\n\n x = np.r_[1:np.max(n)+1]\n\n term1 = n*np.sum(entr(p), axis=-1)\n term1 -= gammaln(n+1)\n\n n = n[..., np.newaxis]\n new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1\n x.shape += (1,)*new_axes_needed\n\n term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),\n axis=(-1, -1-new_axes_needed))\n\n return self._checkresult(term1 + term2, npcond, np.nan)\n\n def rvs(self, n, p, size=None, random_state=None):\n \"\"\"Draw random samples from a Multinomial distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n size : integer or iterable of integers, optional\n Number of samples to draw (default 1).\n %(_doc_random_state)s\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of shape (`size`, `len(p)`)\n\n Notes\n -----\n %(_doc_callparams_note)s\n \"\"\"\n n, p, npcond = self._process_parameters(n, p)\n random_state = self._get_random_state(random_state)\n return random_state.multinomial(n, p, size)\n\n\nmultinomial = multinomial_gen()\n\n\nclass multinomial_frozen(multi_rv_frozen):\n r\"\"\"Create a frozen Multinomial distribution.\n\n Parameters\n ----------\n n : int\n number of trials\n p: array_like\n probability of a trial falling into each category; should sum to 1\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance then\n that instance is used.\n \"\"\"\n def __init__(self, n, p, seed=None):\n self._dist = multinomial_gen(seed)\n self.n, self.p, self.npcond = self._dist._process_parameters(n, p)\n\n # monkey patch self._dist\n def _process_parameters(n, p):\n return self.n, self.p, self.npcond\n\n self._dist._process_parameters = _process_parameters\n\n def logpmf(self, x):\n return self._dist.logpmf(x, self.n, self.p)\n\n def pmf(self, x):\n return self._dist.pmf(x, self.n, self.p)\n\n def mean(self):\n return self._dist.mean(self.n, self.p)\n\n def cov(self):\n return self._dist.cov(self.n, self.p)\n\n def entropy(self):\n return self._dist.entropy(self.n, self.p)\n\n def rvs(self, size=1, random_state=None):\n return self._dist.rvs(self.n, self.p, size, random_state)\n\n\n# Set frozen generator docstrings from corresponding docstrings in\n# multinomial and fill in default strings in class docstrings\nfor name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:\n method = multinomial_gen.__dict__[name]\n method_frozen = multinomial_frozen.__dict__[name]\n method_frozen.__doc__ = doccer.docformat(\n method.__doc__, multinomial_docdict_noparams)\n method.__doc__ = doccer.docformat(method.__doc__,\n multinomial_docdict_params)\n\n\nclass special_ortho_group_gen(multi_rv_generic):\n r\"\"\"A Special Orthogonal matrix (SO(N)) random variable.\n\n Return a random rotation matrix, drawn from the Haar distribution\n (the only uniform distribution on SO(N)) with a determinant of +1.\n\n The `dim` keyword specifies the dimension N.\n\n Methods\n -------\n rvs(dim=None, size=1, random_state=None)\n Draw random samples from SO(N).\n\n Parameters\n ----------\n dim : 
scalar\n Dimension of matrices\n seed : {None, int, np.random.RandomState, np.random.Generator}, optional\n Used for drawing random variates.\n If `seed` is `None`, the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is `None`.\n\n Notes\n -----\n This class is wrapping the random_rot code from the MDP Toolkit,\n https://github.com/mdp-toolkit/mdp-toolkit\n\n Return a random rotation matrix, drawn from the Haar distribution\n (the only uniform distribution on SO(N)).\n The algorithm is described in the paper\n Stewart, G.W., \"The efficient generation of random orthogonal\n matrices with an application to condition estimators\", SIAM Journal\n on Numerical Analysis, 17(3), pp. 403-409, 1980.\n For more information see\n https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization\n\n See also the similar `ortho_group`. For a random rotation in three\n dimensions, see `scipy.spatial.transform.Rotation.random`.\n\n Examples\n --------\n >>> from scipy.stats import special_ortho_group\n >>> x = special_ortho_group.rvs(3)\n\n >>> np.dot(x, x.T)\n array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],\n [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],\n [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])\n\n >>> import scipy.linalg\n >>> scipy.linalg.det(x)\n 1.0\n\n This generates one random matrix from SO(3). It is orthogonal and\n has a determinant of 1.\n\n Alternatively, the object may be called (as a function) to fix the `dim`\n parameter, returning a \"frozen\" special_ortho_group random variable:\n\n >>> rv = special_ortho_group(5)\n >>> # Frozen object with the same methods but holding the\n >>> # dimension parameter fixed.\n\n See Also\n --------\n ortho_group, scipy.spatial.transform.Rotation.random\n\n \"\"\"\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__)\n\n def __call__(self, dim=None, seed=None):\n \"\"\"Create a frozen SO(N) distribution.\n\n See `special_ortho_group_frozen` for more information.\n \"\"\"\n return special_ortho_group_frozen(dim, seed=seed)\n\n def _process_parameters(self, dim):\n \"\"\"Dimension N must be specified; it cannot be inferred.\"\"\"\n if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):\n raise ValueError(\"\"\"Dimension of rotation must be specified,\n and must be a scalar greater than 1.\"\"\")\n\n return dim\n\n def rvs(self, dim, size=1, random_state=None):\n \"\"\"Draw random samples from SO(N).\n\n Parameters\n ----------\n dim : integer\n Dimension of rotation space (N).\n size : integer, optional\n Number of samples to draw (default 1).\n\n Returns\n -------\n rvs : ndarray or scalar\n Random size N-dimensional matrices, dimension (size, dim, dim)\n\n \"\"\"\n random_state = self._get_random_state(random_state)\n\n size = int(size)\n if size > 1:\n return np.array([self.rvs(dim, size=1, random_state=random_state)\n for i in range(size)])\n\n dim = self._process_parameters(dim)\n\n H = np.eye(dim)\n D = np.empty((dim,))\n for n in range(dim-1):\n x = random_state.normal(size=(dim-n,))\n norm2 = np.dot(x, x)\n x0 = x[0].item()\n D[n] = np.sign(x[0]) if x[0] != 0 else 1\n x[0] += D[n]*np.sqrt(norm2)\n x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)\n # Householder transformation\n H[:, n:] -= np.outer(np.dot(H[:, n:], x), x)\n D[-1] = (-1)**(dim-1)*D[:-1].prod()\n # Equivalent 
to np.dot(np.diag(D), H) but faster, apparently\n H = (D*H.T).T\n return H\n\n\nspecial_ortho_group = special_ortho_group_gen()\n\n\nclass special_ortho_group_frozen(multi_rv_frozen):\n def __init__(self, dim=None, seed=None):\n \"\"\"Create a frozen SO(N) distribution.\n\n Parameters\n ----------\n dim : scalar\n Dimension of matrices\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance\n then that instance is used.\n\n Examples\n --------\n >>> from scipy.stats import special_ortho_group\n >>> g = special_ortho_group(5)\n >>> x = g.rvs()\n\n \"\"\"\n self._dist = special_ortho_group_gen(seed)\n self.dim = self._dist._process_parameters(dim)\n\n def rvs(self, size=1, random_state=None):\n return self._dist.rvs(self.dim, size, random_state)\n\n\nclass ortho_group_gen(multi_rv_generic):\n r\"\"\"An Orthogonal matrix (O(N)) random variable.\n\n Return a random orthogonal matrix, drawn from the O(N) Haar\n distribution (the only uniform distribution on O(N)).\n\n The `dim` keyword specifies the dimension N.\n\n Methods\n -------\n rvs(dim=None, size=1, random_state=None)\n Draw random samples from O(N).\n\n Parameters\n ----------\n dim : scalar\n Dimension of matrices\n seed : {None, int, np.random.RandomState, np.random.Generator}, optional\n Used for drawing random variates.\n If `seed` is `None`, the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is `None`.\n\n Notes\n -----\n This class is closely related to `special_ortho_group`.\n\n Some care is taken to avoid numerical error, as per the paper by Mezzadri.\n\n References\n ----------\n .. [1] F. Mezzadri, \"How to generate random matrices from the classical\n compact groups\", :arXiv:`math-ph/0609050v2`.\n\n Examples\n --------\n >>> from scipy.stats import ortho_group\n >>> x = ortho_group.rvs(3)\n\n >>> np.dot(x, x.T)\n array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],\n [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],\n [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])\n\n >>> import scipy.linalg\n >>> np.fabs(scipy.linalg.det(x))\n 1.0\n\n This generates one random matrix from O(3). 
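As a further illustrative sketch, several matrices can be requested in one call by passing ``size`` to ``rvs``; independent draws are simply stacked along the first axis, so only the shape of the result is deterministic:\n\n    >>> xs = ortho_group.rvs(3, size=5)\n    >>> xs.shape\n    (5, 3, 3)\n\n    Each draw, like the single matrix ``x`` above, has the defining properties of O(3): 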
It is orthogonal and\n has a determinant of +1 or -1.\n\n Alternatively, the object may be called (as a function) to fix the `dim`\n parameter, returning a \"frozen\" ortho_group random variable:\n\n >>> rv = ortho_group(5)\n >>> # Frozen object with the same methods but holding the\n >>> # dimension parameter fixed.\n\n See Also\n --------\n special_ortho_group\n \"\"\"\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__)\n\n def __call__(self, dim=None, seed=None):\n \"\"\"Create a frozen O(N) distribution.\n\n See `ortho_group_frozen` for more information.\n \"\"\"\n return ortho_group_frozen(dim, seed=seed)\n\n def _process_parameters(self, dim):\n \"\"\"Dimension N must be specified; it cannot be inferred.\"\"\"\n if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):\n raise ValueError(\"Dimension of rotation must be specified,\"\n \"and must be a scalar greater than 1.\")\n\n return dim\n\n def rvs(self, dim, size=1, random_state=None):\n \"\"\"Draw random samples from O(N).\n\n Parameters\n ----------\n dim : integer\n Dimension of rotation space (N).\n size : integer, optional\n Number of samples to draw (default 1).\n\n Returns\n -------\n rvs : ndarray or scalar\n Random size N-dimensional matrices, dimension (size, dim, dim)\n\n \"\"\"\n random_state = self._get_random_state(random_state)\n\n size = int(size)\n if size > 1:\n return np.array([self.rvs(dim, size=1, random_state=random_state)\n for i in range(size)])\n\n dim = self._process_parameters(dim)\n\n H = np.eye(dim)\n for n in range(dim):\n x = random_state.normal(size=(dim-n,))\n norm2 = np.dot(x, x)\n x0 = x[0].item()\n # random sign, 50/50, but chosen carefully to avoid roundoff error\n D = np.sign(x[0]) if x[0] != 0 else 1\n x[0] += D * np.sqrt(norm2)\n x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)\n # Householder transformation\n H[:, n:] = -D * (H[:, n:] - np.outer(np.dot(H[:, n:], x), x))\n return H\n\n\northo_group = ortho_group_gen()\n\n\nclass ortho_group_frozen(multi_rv_frozen):\n def __init__(self, dim=None, seed=None):\n \"\"\"Create a frozen O(N) distribution.\n\n Parameters\n ----------\n dim : scalar\n Dimension of matrices\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance\n then that instance is used.\n\n Examples\n --------\n >>> from scipy.stats import ortho_group\n >>> g = ortho_group(5)\n >>> x = g.rvs()\n\n \"\"\"\n self._dist = ortho_group_gen(seed)\n self.dim = self._dist._process_parameters(dim)\n\n def rvs(self, size=1, random_state=None):\n return self._dist.rvs(self.dim, size, random_state)\n\n\nclass random_correlation_gen(multi_rv_generic):\n r\"\"\"A random correlation matrix.\n\n Return a random correlation matrix, given a vector of eigenvalues.\n\n The `eigs` keyword specifies the eigenvalues of the correlation matrix,\n and implies the dimension.\n\n Methods\n -------\n rvs(eigs=None, random_state=None)\n Draw random correlation matrices, all with eigenvalues eigs.\n\n Parameters\n ----------\n eigs : 1d ndarray\n Eigenvalues of correlation matrix\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an 
int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance\n then that instance is used.\n tol : float, optional\n Tolerance for input parameter checks\n diag_tol : float, optional\n Tolerance for deviation of the diagonal of the resulting\n matrix. Default: 1e-7\n\n Raises\n ------\n RuntimeError\n Floating point error prevented generating a valid correlation\n matrix.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random size N-dimensional matrices, dimension (size, dim, dim),\n each having eigenvalues eigs.\n\n Notes\n -----\n\n Generates a random correlation matrix following a numerically stable\n algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)\n similarity transformation to construct a symmetric positive semi-definite\n matrix, and applies a series of Givens rotations to scale it to have ones\n on the diagonal.\n\n References\n ----------\n\n .. [1] Davies, Philip I; Higham, Nicholas J; \"Numerically stable generation\n of correlation matrices and their factors\", BIT 2000, Vol. 40,\n No. 4, pp. 640 651\n\n Examples\n --------\n >>> from scipy.stats import random_correlation\n >>> rng = np.random.default_rng()\n >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng)\n >>> x\n array([[ 1. , -0.07198934, -0.20411041, -0.24385796],\n [-0.07198934, 1. , 0.12968613, -0.29471382],\n [-0.20411041, 0.12968613, 1. , 0.2828693 ],\n [-0.24385796, -0.29471382, 0.2828693 , 1. ]])\n >>> import scipy.linalg\n >>> e, v = scipy.linalg.eigh(x)\n >>> e\n array([ 0.5, 0.8, 1.2, 1.5])\n\n \"\"\"\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__)\n\n def __call__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7):\n \"\"\"Create a frozen random correlation matrix.\n\n See `random_correlation_frozen` for more information.\n \"\"\"\n return random_correlation_frozen(eigs, seed=seed, tol=tol,\n diag_tol=diag_tol)\n\n def _process_parameters(self, eigs, tol):\n eigs = np.asarray(eigs, dtype=float)\n dim = eigs.size\n\n if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:\n raise ValueError(\"Array 'eigs' must be a vector of length \"\n \"greater than 1.\")\n\n if np.fabs(np.sum(eigs) - dim) > tol:\n raise ValueError(\"Sum of eigenvalues must equal dimensionality.\")\n\n for x in eigs:\n if x < -tol:\n raise ValueError(\"All eigenvalues must be non-negative.\")\n\n return dim, eigs\n\n def _givens_to_1(self, aii, ajj, aij):\n \"\"\"Computes a 2x2 Givens matrix to put 1's on the diagonal.\n\n The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].\n\n The output matrix g is a 2x2 anti-symmetric matrix of the form\n [ c s ; -s c ]; the elements c and s are returned.\n\n Applying the output matrix to the input matrix (as b=g.T M g)\n results in a matrix with bii=1, provided tr(M) - det(M) >= 1\n and floating point issues do not occur. Otherwise, some other\n valid rotation is returned. When tr(M)==2, also bjj=1.\n\n \"\"\"\n aiid = aii - 1.\n ajjd = ajj - 1.\n\n if ajjd == 0:\n # ajj==1, so swap aii and ajj to avoid division by zero\n return 0., 1.\n\n dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))\n\n # The choice of t should be chosen to avoid cancellation [1]\n t = (aij + math.copysign(dd, aij)) / ajjd\n c = 1. / math.sqrt(1. 
+ t*t)\n if c == 0:\n # Underflow\n s = 1.0\n else:\n s = c*t\n return c, s\n\n def _to_corr(self, m):\n \"\"\"\n Given a psd matrix m, rotate to put one's on the diagonal, turning it\n into a correlation matrix. This also requires the trace equal the\n dimensionality. Note: modifies input matrix\n \"\"\"\n # Check requirements for in-place Givens\n if not (m.flags.c_contiguous and m.dtype == np.float64 and\n m.shape[0] == m.shape[1]):\n raise ValueError()\n\n d = m.shape[0]\n for i in range(d-1):\n if m[i, i] == 1:\n continue\n elif m[i, i] > 1:\n for j in range(i+1, d):\n if m[j, j] < 1:\n break\n else:\n for j in range(i+1, d):\n if m[j, j] > 1:\n break\n\n c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])\n\n # Use BLAS to apply Givens rotations in-place. Equivalent to:\n # g = np.eye(d)\n # g[i, i] = g[j,j] = c\n # g[j, i] = -s; g[i, j] = s\n # m = np.dot(g.T, np.dot(m, g))\n mv = m.ravel()\n drot(mv, mv, c, -s, n=d,\n offx=i*d, incx=1, offy=j*d, incy=1,\n overwrite_x=True, overwrite_y=True)\n drot(mv, mv, c, -s, n=d,\n offx=i, incx=d, offy=j, incy=d,\n overwrite_x=True, overwrite_y=True)\n\n return m\n\n def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):\n \"\"\"Draw random correlation matrices.\n\n Parameters\n ----------\n eigs : 1d ndarray\n Eigenvalues of correlation matrix\n tol : float, optional\n Tolerance for input parameter checks\n diag_tol : float, optional\n Tolerance for deviation of the diagonal of the resulting\n matrix. Default: 1e-7\n\n Raises\n ------\n RuntimeError\n Floating point error prevented generating a valid correlation\n matrix.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random size N-dimensional matrices, dimension (size, dim, dim),\n each having eigenvalues eigs.\n\n \"\"\"\n dim, eigs = self._process_parameters(eigs, tol=tol)\n\n random_state = self._get_random_state(random_state)\n\n m = ortho_group.rvs(dim, random_state=random_state)\n m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m\n m = self._to_corr(m) # Carefully rotate to unit diagonal\n\n # Check diagonal\n if abs(m.diagonal() - 1).max() > diag_tol:\n raise RuntimeError(\"Failed to generate a valid correlation matrix\")\n\n return m\n\n\nrandom_correlation = random_correlation_gen()\n\n\nclass random_correlation_frozen(multi_rv_frozen):\n def __init__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7):\n \"\"\"Create a frozen random correlation matrix distribution.\n\n Parameters\n ----------\n eigs : 1d ndarray\n Eigenvalues of correlation matrix\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance\n then that instance is used.\n tol : float, optional\n Tolerance for input parameter checks\n diag_tol : float, optional\n Tolerance for deviation of the diagonal of the resulting\n matrix. 
Default: 1e-7\n\n Raises\n ------\n RuntimeError\n Floating point error prevented generating a valid correlation\n matrix.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random size N-dimensional matrices, dimension (size, dim, dim),\n each having eigenvalues eigs.\n \"\"\"\n\n self._dist = random_correlation_gen(seed)\n self.tol = tol\n self.diag_tol = diag_tol\n _, self.eigs = self._dist._process_parameters(eigs, tol=self.tol)\n\n def rvs(self, random_state=None):\n return self._dist.rvs(self.eigs, random_state=random_state,\n tol=self.tol, diag_tol=self.diag_tol)\n\n\nclass unitary_group_gen(multi_rv_generic):\n r\"\"\"A matrix-valued U(N) random variable.\n\n Return a random unitary matrix.\n\n The `dim` keyword specifies the dimension N.\n\n Methods\n -------\n rvs(dim=None, size=1, random_state=None)\n Draw random samples from U(N).\n\n Parameters\n ----------\n dim : scalar\n Dimension of matrices\n seed : {None, int, np.random.RandomState, np.random.Generator}, optional\n Used for drawing random variates.\n If `seed` is `None`, the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is `None`.\n\n Notes\n -----\n This class is similar to `ortho_group`.\n\n References\n ----------\n .. [1] F. Mezzadri, \"How to generate random matrices from the classical\n compact groups\", :arXiv:`math-ph/0609050v2`.\n\n Examples\n --------\n >>> from scipy.stats import unitary_group\n >>> x = unitary_group.rvs(3)\n\n >>> np.dot(x, x.conj().T)\n array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],\n [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],\n [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])\n\n This generates one random matrix from U(3). 
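As a compact, illustrative restatement of the check printed above (``np.allclose`` is used here only for the comparison):\n\n    >>> np.allclose(np.dot(x, x.conj().T), np.eye(3))\n    True\n\n    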
The dot product confirms that\n it is unitary up to machine precision.\n\n Alternatively, the object may be called (as a function) to fix the `dim`\n parameter, return a \"frozen\" unitary_group random variable:\n\n >>> rv = unitary_group(5)\n\n See Also\n --------\n ortho_group\n\n \"\"\"\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__)\n\n def __call__(self, dim=None, seed=None):\n \"\"\"Create a frozen (U(N)) n-dimensional unitary matrix distribution.\n\n See `unitary_group_frozen` for more information.\n \"\"\"\n return unitary_group_frozen(dim, seed=seed)\n\n def _process_parameters(self, dim):\n \"\"\"Dimension N must be specified; it cannot be inferred.\"\"\"\n if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):\n raise ValueError(\"Dimension of rotation must be specified,\"\n \"and must be a scalar greater than 1.\")\n\n return dim\n\n def rvs(self, dim, size=1, random_state=None):\n \"\"\"Draw random samples from U(N).\n\n Parameters\n ----------\n dim : integer\n Dimension of space (N).\n size : integer, optional\n Number of samples to draw (default 1).\n\n Returns\n -------\n rvs : ndarray or scalar\n Random size N-dimensional matrices, dimension (size, dim, dim)\n\n \"\"\"\n random_state = self._get_random_state(random_state)\n\n size = int(size)\n if size > 1:\n return np.array([self.rvs(dim, size=1, random_state=random_state)\n for i in range(size)])\n\n dim = self._process_parameters(dim)\n\n z = 1/math.sqrt(2)*(random_state.normal(size=(dim, dim)) +\n 1j*random_state.normal(size=(dim, dim)))\n q, r = scipy.linalg.qr(z)\n d = r.diagonal()\n q *= d/abs(d)\n return q\n\n\nunitary_group = unitary_group_gen()\n\n\nclass unitary_group_frozen(multi_rv_frozen):\n def __init__(self, dim=None, seed=None):\n \"\"\"Create a frozen (U(N)) n-dimensional unitary matrix distribution.\n\n Parameters\n ----------\n dim : scalar\n Dimension of matrices\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance\n then that instance is used.\n\n Examples\n --------\n >>> from scipy.stats import unitary_group\n >>> x = unitary_group(3)\n >>> x.rvs()\n\n \"\"\"\n self._dist = unitary_group_gen(seed)\n self.dim = self._dist._process_parameters(dim)\n\n def rvs(self, size=1, random_state=None):\n return self._dist.rvs(self.dim, size, random_state)\n\n\n_mvt_doc_default_callparams = \"\"\"\\\nloc : array_like, optional\n Location of the distribution. (default ``0``)\nshape : array_like, optional\n Positive semidefinite matrix of the distribution. (default ``1``)\ndf : float, optional\n Degrees of freedom of the distribution; must be greater than zero.\n If ``np.inf`` then results are multivariate normal. The default is ``1``.\nallow_singular : bool, optional\n Whether to allow a singular matrix. (default ``False``)\n\"\"\"\n\n_mvt_doc_callparams_note = \"\"\"\\\nSetting the parameter `loc` to ``None`` is equivalent to having `loc`\nbe the zero-vector. 
The parameter `shape` can be a scalar, in which case\nthe shape matrix is the identity times that value, a vector of\ndiagonal entries for the shape matrix, or a two-dimensional array_like.\n\"\"\"\n\n_mvt_doc_frozen_callparams_note = \"\"\"\\\nSee class definition for a detailed description of parameters.\"\"\"\n\nmvt_docdict_params = {\n '_mvt_doc_default_callparams': _mvt_doc_default_callparams,\n '_mvt_doc_callparams_note': _mvt_doc_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\nmvt_docdict_noparams = {\n '_mvt_doc_default_callparams': \"\",\n '_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\n\nclass multivariate_t_gen(multi_rv_generic):\n r\"\"\"A multivariate t-distributed random variable.\n\n The `loc` parameter specifies the location. The `shape` parameter specifies\n the positive semidefinite shape matrix. The `df` parameter specifies the\n degrees of freedom.\n\n In addition to calling the methods below, the object itself may be called\n as a function to fix the location, shape matrix, and degrees of freedom\n parameters, returning a \"frozen\" multivariate t-distribution random.\n\n Methods\n -------\n pdf(x, loc=None, shape=1, df=1, allow_singular=False)\n Probability density function.\n logpdf(x, loc=None, shape=1, df=1, allow_singular=False)\n Log of the probability density function.\n rvs(loc=None, shape=1, df=1, size=1, random_state=None)\n Draw random samples from a multivariate t-distribution.\n\n Parameters\n ----------\n %(_mvt_doc_default_callparams)s\n %(_doc_random_state)s\n\n Notes\n -----\n %(_mvt_doc_callparams_note)s\n The matrix `shape` must be a (symmetric) positive semidefinite matrix. The\n determinant and inverse of `shape` are computed as the pseudo-determinant\n and pseudo-inverse, respectively, so that `shape` does not need to have\n full rank.\n\n The probability density function for `multivariate_t` is\n\n .. math::\n\n f(x) = \\frac{\\Gamma(\\nu + p)/2}{\\Gamma(\\nu/2)\\nu^{p/2}\\pi^{p/2}|\\Sigma|^{1/2}}\n \\left[1 + \\frac{1}{\\nu} (\\mathbf{x} - \\boldsymbol{\\mu})^{\\top}\n \\boldsymbol{\\Sigma}^{-1}\n (\\mathbf{x} - \\boldsymbol{\\mu}) \\right]^{-(\\nu + p)/2},\n\n where :math:`p` is the dimension of :math:`\\mathbf{x}`,\n :math:`\\boldsymbol{\\mu}` is the :math:`p`-dimensional location,\n :math:`\\boldsymbol{\\Sigma}` the :math:`p \\times p`-dimensional shape\n matrix, and :math:`\\nu` is the degrees of freedom.\n\n .. 
versionadded:: 1.6.0\n\n Examples\n --------\n The object may be called (as a function) to fix the `loc`, `shape`,\n `df`, and `allow_singular` parameters, returning a \"frozen\"\n multivariate_t random variable:\n\n >>> from scipy.stats import multivariate_t\n >>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)\n >>> # Frozen object with the same methods but holding the given location,\n >>> # scale, and degrees of freedom fixed.\n\n Create a contour plot of the PDF.\n\n >>> import matplotlib.pyplot as plt\n >>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01]\n >>> pos = np.dstack((x, y))\n >>> fig, ax = plt.subplots(1, 1)\n >>> ax.set_aspect('equal')\n >>> plt.contourf(x, y, rv.pdf(pos))\n\n \"\"\"\n\n def __init__(self, seed=None):\n \"\"\"Initialize a multivariate t-distributed random variable.\n\n Parameters\n ----------\n seed : Random state.\n\n \"\"\"\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)\n self._random_state = check_random_state(seed)\n\n def __call__(self, loc=None, shape=1, df=1, allow_singular=False,\n seed=None):\n \"\"\"Create a frozen multivariate t-distribution.\n\n See `multivariate_t_frozen` for parameters.\n \"\"\"\n if df == np.inf:\n return multivariate_normal_frozen(mean=loc, cov=shape,\n allow_singular=allow_singular,\n seed=seed)\n return multivariate_t_frozen(loc=loc, shape=shape, df=df,\n allow_singular=allow_singular, seed=seed)\n\n def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):\n \"\"\"Multivariate t-distribution probability density function.\n\n Parameters\n ----------\n x : array_like\n Points at which to evaluate the probability density function.\n %(_mvt_doc_default_callparams)s\n\n Returns\n -------\n pdf : Probability density function evaluated at `x`.\n\n Examples\n --------\n >>> from scipy.stats import multivariate_t\n >>> x = [0.4, 5]\n >>> loc = [0, 1]\n >>> shape = [[1, 0.1], [0.1, 1]]\n >>> df = 7\n >>> multivariate_t.pdf(x, loc, shape, df)\n array([0.00075713])\n\n \"\"\"\n dim, loc, shape, df = self._process_parameters(loc, shape, df)\n x = self._process_quantiles(x, dim)\n shape_info = _PSD(shape, allow_singular=allow_singular)\n logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,\n dim, shape_info.rank)\n return np.exp(logpdf)\n\n def logpdf(self, x, loc=None, shape=1, df=1):\n \"\"\"Log of the multivariate t-distribution probability density function.\n\n Parameters\n ----------\n x : array_like\n Points at which to evaluate the log of the probability density\n function.\n %(_mvt_doc_default_callparams)s\n\n Returns\n -------\n logpdf : Log of the probability density function evaluated at `x`.\n\n Examples\n --------\n >>> from scipy.stats import multivariate_t\n >>> x = [0.4, 5]\n >>> loc = [0, 1]\n >>> shape = [[1, 0.1], [0.1, 1]]\n >>> df = 7\n >>> multivariate_t.logpdf(x, loc, shape, df)\n array([-7.1859802])\n\n See Also\n --------\n pdf : Probability density function.\n\n \"\"\"\n dim, loc, shape, df = self._process_parameters(loc, shape, df)\n x = self._process_quantiles(x, dim)\n shape_info = _PSD(shape)\n return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,\n shape_info.rank)\n\n def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):\n \"\"\"Utility method `pdf`, `logpdf` for parameters.\n\n Parameters\n ----------\n x : ndarray\n Points at which to evaluate the log of the probability density\n function.\n loc : ndarray\n Location of the distribution.\n prec_U : ndarray\n A decomposition such that `np.dot(prec_U, 
prec_U.T)` is the inverse\n of the shape matrix.\n log_pdet : float\n Logarithm of the determinant of the shape matrix.\n df : float\n Degrees of freedom of the distribution.\n dim : int\n Dimension of the quantiles x.\n rank : int\n Rank of the shape matrix.\n\n Notes\n -----\n As this function does no argument checking, it should not be called\n directly; use 'logpdf' instead.\n\n \"\"\"\n if df == np.inf:\n return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)\n\n dev = x - loc\n maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)\n\n t = 0.5 * (df + dim)\n A = gammaln(t)\n B = gammaln(0.5 * df)\n C = dim/2. * np.log(df * np.pi)\n D = 0.5 * log_pdet\n E = -t * np.log(1 + (1./df) * maha)\n\n return _squeeze_output(A - B - C - D + E)\n\n def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):\n \"\"\"Draw random samples from a multivariate t-distribution.\n\n Parameters\n ----------\n %(_mvt_doc_default_callparams)s\n size : integer, optional\n Number of samples to draw (default 1).\n %(_doc_random_state)s\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of size (`size`, `P`), where `P` is the\n dimension of the random variable.\n\n Examples\n --------\n >>> from scipy.stats import multivariate_t\n >>> x = [0.4, 5]\n >>> loc = [0, 1]\n >>> shape = [[1, 0.1], [0.1, 1]]\n >>> df = 7\n >>> multivariate_t.rvs(loc, shape, df)\n array([[0.93477495, 3.00408716]])\n\n \"\"\"\n # For implementation details, see equation (3):\n #\n # Hofert, \"On Sampling from the Multivariatet Distribution\", 2013\n # http://rjournal.github.io/archive/2013-2/hofert.pdf\n #\n dim, loc, shape, df = self._process_parameters(loc, shape, df)\n if random_state is not None:\n rng = check_random_state(random_state)\n else:\n rng = self._random_state\n\n if np.isinf(df):\n x = np.ones(size)\n else:\n x = rng.chisquare(df, size=size) / df\n\n z = rng.multivariate_normal(np.zeros(dim), shape, size=size)\n samples = loc + z / np.sqrt(x)[..., None]\n return _squeeze_output(samples)\n\n def _process_quantiles(self, x, dim):\n \"\"\"\n Adjust quantiles array so that last axis labels the components of\n each data point.\n \"\"\"\n x = np.asarray(x, dtype=float)\n if x.ndim == 0:\n x = x[np.newaxis]\n elif x.ndim == 1:\n if dim == 1:\n x = x[:, np.newaxis]\n else:\n x = x[np.newaxis, :]\n return x\n\n def _process_parameters(self, loc, shape, df):\n \"\"\"\n Infer dimensionality from location array and shape matrix, handle\n defaults, and ensure compatible dimensions.\n \"\"\"\n if loc is None and shape is None:\n loc = np.asarray(0, dtype=float)\n shape = np.asarray(1, dtype=float)\n dim = 1\n elif loc is None:\n shape = np.asarray(shape, dtype=float)\n if shape.ndim < 2:\n dim = 1\n else:\n dim = shape.shape[0]\n loc = np.zeros(dim)\n elif shape is None:\n loc = np.asarray(loc, dtype=float)\n dim = loc.size\n shape = np.eye(dim)\n else:\n shape = np.asarray(shape, dtype=float)\n loc = np.asarray(loc, dtype=float)\n dim = loc.size\n\n if dim == 1:\n loc = loc.reshape(1)\n shape = shape.reshape(1, 1)\n\n if loc.ndim != 1 or loc.shape[0] != dim:\n raise ValueError(\"Array 'loc' must be a vector of length %d.\" %\n dim)\n if shape.ndim == 0:\n shape = shape * np.eye(dim)\n elif shape.ndim == 1:\n shape = np.diag(shape)\n elif shape.ndim == 2 and shape.shape != (dim, dim):\n rows, cols = shape.shape\n if rows != cols:\n msg = (\"Array 'cov' must be square if it is two dimensional,\"\n \" but cov.shape = %s.\" % str(shape.shape))\n else:\n msg = (\"Dimension mismatch: array 'cov' is of shape 
%s,\"\n \" but 'loc' is a vector of length %d.\")\n msg = msg % (str(shape.shape), len(loc))\n raise ValueError(msg)\n elif shape.ndim > 2:\n raise ValueError(\"Array 'cov' must be at most two-dimensional,\"\n \" but cov.ndim = %d\" % shape.ndim)\n\n # Process degrees of freedom.\n if df is None:\n df = 1\n elif df <= 0:\n raise ValueError(\"'df' must be greater than zero.\")\n elif np.isnan(df):\n raise ValueError(\"'df' is 'nan' but must be greater than zero or 'np.inf'.\")\n\n return dim, loc, shape, df\n\n\nclass multivariate_t_frozen(multi_rv_frozen):\n\n def __init__(self, loc=None, shape=1, df=1, allow_singular=False,\n seed=None):\n \"\"\"Create a frozen multivariate t distribution.\n\n Parameters\n ----------\n %(_mvt_doc_default_callparams)s\n\n Examples\n --------\n >>> loc = np.zeros(3)\n >>> shape = np.eye(3)\n >>> df = 10\n >>> dist = multivariate_t(loc, shape, df)\n >>> dist.rvs()\n array([[ 0.81412036, -1.53612361, 0.42199647]])\n >>> dist.pdf([1, 1, 1])\n array([0.01237803])\n\n \"\"\"\n self._dist = multivariate_t_gen(seed)\n dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)\n self.dim, self.loc, self.shape, self.df = dim, loc, shape, df\n self.shape_info = _PSD(shape, allow_singular=allow_singular)\n\n def logpdf(self, x):\n x = self._dist._process_quantiles(x, self.dim)\n U = self.shape_info.U\n log_pdet = self.shape_info.log_pdet\n return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim,\n self.shape_info.rank)\n\n def pdf(self, x):\n return np.exp(self.logpdf(x))\n\n def rvs(self, size=1, random_state=None):\n return self._dist.rvs(loc=self.loc,\n shape=self.shape,\n df=self.df,\n size=size,\n random_state=random_state)\n\n\nmultivariate_t = multivariate_t_gen()\n\n\n# Set frozen generator docstrings from corresponding docstrings in\n# matrix_normal_gen and fill in default strings in class docstrings\nfor name in ['logpdf', 'pdf', 'rvs']:\n method = multivariate_t_gen.__dict__[name]\n method_frozen = multivariate_t_frozen.__dict__[name]\n method_frozen.__doc__ = doccer.docformat(method.__doc__,\n mvt_docdict_noparams)\n method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)\n\n\n_mhg_doc_default_callparams = \"\"\"\\\nm : array_like\n The number of each type of object in the population.\n That is, :math:`m[i]` is the number of objects of\n type :math:`i`.\nn : array_like\n The number of samples taken from the population.\n\"\"\"\n\n_mhg_doc_callparams_note = \"\"\"\\\n`m` must be an array of positive integers. If the quantile\n:math:`i` contains values out of the range :math:`[0, m_i]`\nwhere :math:`m_i` is the number of objects of type :math:`i`\nin the population or if the parameters are inconsistent with one\nanother (e.g. ``x.sum() != n``), methods return the appropriate\nvalue (e.g. ``0`` for ``pmf``). 
If `m` or `n` contain negative\nvalues, the result will contain ``nan`` there.\n\"\"\"\n\n_mhg_doc_frozen_callparams = \"\"\n\n_mhg_doc_frozen_callparams_note = \"\"\"\\\nSee class definition for a detailed description of parameters.\"\"\"\n\nmhg_docdict_params = {\n '_doc_default_callparams': _mhg_doc_default_callparams,\n '_doc_callparams_note': _mhg_doc_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\nmhg_docdict_noparams = {\n '_doc_default_callparams': _mhg_doc_frozen_callparams,\n '_doc_callparams_note': _mhg_doc_frozen_callparams_note,\n '_doc_random_state': _doc_random_state\n}\n\n\nclass multivariate_hypergeom_gen(multi_rv_generic):\n r\"\"\"A multivariate hypergeometric random variable.\n\n Methods\n -------\n pmf(x, m, n)\n Probability mass function.\n logpmf(x, m, n)\n Log of the probability mass function.\n rvs(m, n, size=1, random_state=None)\n Draw random samples from a multivariate hypergeometric\n distribution.\n mean(m, n)\n Mean of the multivariate hypergeometric distribution.\n var(m, n)\n Variance of the multivariate hypergeometric distribution.\n cov(m, n)\n Compute the covariance matrix of the multivariate\n hypergeometric distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n %(_doc_random_state)s\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n The probability mass function for `multivariate_hypergeom` is\n\n .. math::\n\n P(X_1 = x_1, X_2 = x_2, \\ldots, X_k = x_k) = \\frac{\\binom{m_1}{x_1}\n \\binom{m_2}{x_2} \\cdots \\binom{m_k}{x_k}}{\\binom{M}{n}}, \\\\ \\quad\n (x_1, x_2, \\ldots, x_k) \\in \\mathbb{N}^k \\text{ with }\n \\sum_{i=1}^k x_i = n\n\n where :math:`m_i` are the number of objects of type :math:`i`, :math:`M`\n is the total number of objects in the population (sum of all the\n :math:`m_i`), and :math:`n` is the size of the sample to be taken\n from the population.\n\n .. versionadded:: 1.6.0\n\n Examples\n --------\n To evaluate the probability mass function of the multivariate\n hypergeometric distribution, with a dichotomous population of size\n :math:`10` and :math:`20`, at a sample of size :math:`12` with\n :math:`8` objects of the first type and :math:`4` objects of the\n second type, use:\n\n >>> from scipy.stats import multivariate_hypergeom\n >>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12)\n 0.0025207176631464523\n\n The `multivariate_hypergeom` distribution is identical to the\n corresponding `hypergeom` distribution (tiny numerical differences\n notwithstanding) when only two types (good and bad) of objects\n are present in the population as in the example above. Consider\n another example for a comparison with the hypergeometric distribution:\n\n >>> from scipy.stats import hypergeom\n >>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)\n 0.4395604395604395\n >>> hypergeom.pmf(k=3, M=15, n=4, N=10)\n 0.43956043956044005\n\n The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs``\n support broadcasting, under the convention that the vector parameters\n (``x``, ``m``, and ``n``) are interpreted as if each row along the last\n axis is a single object. For instance, we can combine the previous two\n calls to `multivariate_hypergeom` as\n\n >>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]],\n ... n=[12, 4])\n array([0.00252072, 0.43956044])\n\n This broadcasting also works for ``cov``, where the output objects are\n square matrices of size ``m.shape[-1]``. 
For example:\n\n >>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])\n array([[[ 1.05, -1.05],\n [-1.05, 1.05]],\n [[ 1.56, -1.56],\n [-1.56, 1.56]]])\n\n That is, ``result[0]`` is equal to\n ``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal\n to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``.\n\n Alternatively, the object may be called (as a function) to fix the `m`\n and `n` parameters, returning a \"frozen\" multivariate hypergeometric\n random variable.\n\n >>> rv = multivariate_hypergeom(m=[10, 20], n=12)\n >>> rv.pmf(x=[8, 4])\n 0.0025207176631464523\n\n See Also\n --------\n scipy.stats.hypergeom : The hypergeometric distribution.\n scipy.stats.multinomial : The multinomial distribution.\n\n References\n ----------\n .. [1] The Multivariate Hypergeometric Distribution,\n http://www.randomservices.org/random/urn/MultiHypergeometric.html\n .. [2] Thomas J. Sargent and John Stachurski, 2020,\n Multivariate Hypergeometric Distribution\n https://python.quantecon.org/_downloads/pdf/multi_hyper.pdf\n \"\"\"\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params)\n\n def __call__(self, m, n, seed=None):\n \"\"\"Create a frozen multivariate_hypergeom distribution.\n\n See `multivariate_hypergeom_frozen` for more information.\n \"\"\"\n return multivariate_hypergeom_frozen(m, n, seed=seed)\n\n def _process_parameters(self, m, n):\n m = np.asarray(m)\n n = np.asarray(n)\n if m.size == 0:\n m = m.astype(int)\n if n.size == 0:\n n = n.astype(int)\n if not np.issubdtype(m.dtype, np.integer):\n raise TypeError(\"'m' must an array of integers.\")\n if not np.issubdtype(n.dtype, np.integer):\n raise TypeError(\"'n' must an array of integers.\")\n if m.ndim == 0:\n raise ValueError(\"'m' must be an array with\"\n \" at least one dimension.\")\n\n # check for empty arrays\n if m.size != 0:\n n = n[..., np.newaxis]\n\n m, n = np.broadcast_arrays(m, n)\n\n # check for empty arrays\n if m.size != 0:\n n = n[..., 0]\n\n mcond = m < 0\n\n M = m.sum(axis=-1)\n\n ncond = (n < 0) | (n > M)\n return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond\n\n def _process_quantiles(self, x, M, m, n):\n x = np.asarray(x)\n if not np.issubdtype(x.dtype, np.integer):\n raise TypeError(\"'x' must an array of integers.\")\n if x.ndim == 0:\n raise ValueError(\"'x' must be an array with\"\n \" at least one dimension.\")\n if not x.shape[-1] == m.shape[-1]:\n raise ValueError(f\"Size of each quantile must be size of 'm': \"\n f\"received {x.shape[-1]}, \"\n f\"but expected {m.shape[-1]}.\")\n\n # check for empty arrays\n if m.size != 0:\n n = n[..., np.newaxis]\n M = M[..., np.newaxis]\n\n x, m, n, M = np.broadcast_arrays(x, m, n, M)\n\n # check for empty arrays\n if m.size != 0:\n n, M = n[..., 0], M[..., 0]\n\n xcond = (x < 0) | (x > m)\n return (x, M, m, n, xcond,\n np.any(xcond, axis=-1) | (x.sum(axis=-1) != n))\n\n def _checkresult(self, result, cond, bad_value):\n result = np.asarray(result)\n if cond.ndim != 0:\n result[cond] = bad_value\n elif cond:\n return bad_value\n if result.ndim == 0:\n return result[()]\n return result\n\n def _logpmf(self, x, M, m, n, mxcond, ncond):\n # This equation of the pmf comes from the relation,\n # n combine r = beta(n+1, 1) / beta(r+1, n-r+1)\n num = np.zeros_like(m, dtype=np.float_)\n den = np.zeros_like(n, dtype=np.float_)\n m, x = m[~mxcond], x[~mxcond]\n M, n = M[~ncond], n[~ncond]\n num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1))\n den[~ncond] = (betaln(M+1, 1) - 
betaln(n+1, M-n+1))\n num[mxcond] = np.nan\n den[ncond] = np.nan\n num = num.sum(axis=-1)\n return num - den\n\n def logpmf(self, x, m, n):\n \"\"\"Log of the multivariate hypergeometric probability mass function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n %(_doc_default_callparams)s\n\n Returns\n -------\n logpmf : ndarray or scalar\n Log of the probability mass function evaluated at `x`\n\n Notes\n -----\n %(_doc_callparams_note)s\n \"\"\"\n M, m, n, mcond, ncond, mncond = self._process_parameters(m, n)\n (x, M, m, n, xcond,\n xcond_reduced) = self._process_quantiles(x, M, m, n)\n mxcond = mcond | xcond\n ncond = ncond | np.zeros(n.shape, dtype=np.bool_)\n\n result = self._logpmf(x, M, m, n, mxcond, ncond)\n\n # replace values for which x was out of the domain; broadcast\n # xcond to the right shape\n xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_)\n result = self._checkresult(result, xcond_, np.NINF)\n\n # replace values bad for n or m; broadcast\n # mncond to the right shape\n mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_)\n return self._checkresult(result, mncond_, np.nan)\n\n def pmf(self, x, m, n):\n \"\"\"Multivariate hypergeometric probability mass function.\n\n Parameters\n ----------\n x : array_like\n Quantiles, with the last axis of `x` denoting the components.\n %(_doc_default_callparams)s\n\n Returns\n -------\n pmf : ndarray or scalar\n Probability density function evaluated at `x`\n\n Notes\n -----\n %(_doc_callparams_note)s\n \"\"\"\n out = np.exp(self.logpmf(x, m, n))\n return out\n\n def mean(self, m, n):\n \"\"\"Mean of the multivariate hypergeometric distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n mean : array_like or scalar\n The mean of the distribution\n \"\"\"\n M, m, n, _, _, mncond = self._process_parameters(m, n)\n # check for empty arrays\n if m.size != 0:\n M, n = M[..., np.newaxis], n[..., np.newaxis]\n cond = (M == 0)\n M = np.ma.masked_array(M, mask=cond)\n mu = n*(m/M)\n if m.size != 0:\n mncond = (mncond[..., np.newaxis] |\n np.zeros(mu.shape, dtype=np.bool_))\n return self._checkresult(mu, mncond, np.nan)\n\n def var(self, m, n):\n \"\"\"Variance of the multivariate hypergeometric distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n array_like\n The variances of the components of the distribution. 
This is\n the diagonal of the covariance matrix of the distribution\n \"\"\"\n M, m, n, _, _, mncond = self._process_parameters(m, n)\n # check for empty arrays\n if m.size != 0:\n M, n = M[..., np.newaxis], n[..., np.newaxis]\n cond = (M == 0) & (M-1 == 0)\n M = np.ma.masked_array(M, mask=cond)\n output = n * m/M * (M-m)/M * (M-n)/(M-1)\n if m.size != 0:\n mncond = (mncond[..., np.newaxis] |\n np.zeros(output.shape, dtype=np.bool_))\n return self._checkresult(output, mncond, np.nan)\n\n def cov(self, m, n):\n \"\"\"Covariance matrix of the multivariate hypergeometric distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n\n Returns\n -------\n cov : array_like\n The covariance matrix of the distribution\n \"\"\"\n # see [1]_ for the formula and [2]_ for implementation\n # cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2)\n M, m, n, _, _, mncond = self._process_parameters(m, n)\n # check for empty arrays\n if m.size != 0:\n M = M[..., np.newaxis, np.newaxis]\n n = n[..., np.newaxis, np.newaxis]\n cond = (M == 0) & (M-1 == 0)\n M = np.ma.masked_array(M, mask=cond)\n output = (-n * (M-n)/(M-1) *\n np.einsum(\"...i,...j->...ij\", m, m) / (M**2))\n # check for empty arrays\n if m.size != 0:\n M, n = M[..., 0, 0], n[..., 0, 0]\n cond = cond[..., 0, 0]\n dim = m.shape[-1]\n # diagonal entries need to be computed differently\n for i in range(dim):\n output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i]))\n output[..., i, i] = output[..., i, i] / (M-1)\n output[..., i, i] = output[..., i, i] / (M**2)\n if m.size != 0:\n mncond = (mncond[..., np.newaxis, np.newaxis] |\n np.zeros(output.shape, dtype=np.bool_))\n return self._checkresult(output, mncond, np.nan)\n\n def rvs(self, m, n, size=None, random_state=None):\n \"\"\"Draw random samples from a multivariate hypergeometric distribution.\n\n Parameters\n ----------\n %(_doc_default_callparams)s\n size : integer or iterable of integers, optional\n Number of samples to draw. 
Default is ``None``, in which case a\n single variate is returned as an array with shape ``m.shape``.\n %(_doc_random_state)s\n\n Returns\n -------\n rvs : array_like\n Random variates of shape ``size`` or ``m.shape``\n (if ``size=None``).\n\n Notes\n -----\n %(_doc_callparams_note)s\n\n Also note that NumPy's `multivariate_hypergeometric` sampler is not\n used as it doesn't support broadcasting.\n \"\"\"\n M, m, n, _, _, _ = self._process_parameters(m, n)\n\n random_state = self._get_random_state(random_state)\n\n if size is not None and isinstance(size, int):\n size = (size, )\n\n if size is None:\n rvs = np.empty(m.shape, dtype=m.dtype)\n else:\n rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype)\n rem = M\n\n # This sampler has been taken from numpy gh-13794\n # https://github.com/numpy/numpy/pull/13794\n for c in range(m.shape[-1] - 1):\n rem = rem - m[..., c]\n n0mask = n == 0\n rvs[..., c] = (~n0mask *\n random_state.hypergeometric(m[..., c],\n rem + n0mask,\n n + n0mask,\n size=size))\n n = n - rvs[..., c]\n rvs[..., m.shape[-1] - 1] = n\n\n return rvs\n\n\nmultivariate_hypergeom = multivariate_hypergeom_gen()\n\n\nclass multivariate_hypergeom_frozen(multi_rv_frozen):\n def __init__(self, m, n, seed=None):\n self._dist = multivariate_hypergeom_gen(seed)\n (self.M, self.m, self.n,\n self.mcond, self.ncond,\n self.mncond) = self._dist._process_parameters(m, n)\n\n # monkey patch self._dist\n def _process_parameters(m, n):\n return (self.M, self.m, self.n,\n self.mcond, self.ncond,\n self.mncond)\n self._dist._process_parameters = _process_parameters\n\n def logpmf(self, x):\n return self._dist.logpmf(x, self.m, self.n)\n\n def pmf(self, x):\n return self._dist.pmf(x, self.m, self.n)\n\n def mean(self):\n return self._dist.mean(self.m, self.n)\n\n def var(self):\n return self._dist.var(self.m, self.n)\n\n def cov(self):\n return self._dist.cov(self.m, self.n)\n\n def rvs(self, size=1, random_state=None):\n return self._dist.rvs(self.m, self.n,\n size=size,\n random_state=random_state)\n\n\n# Set frozen generator docstrings from corresponding docstrings in\n# multivariate_hypergeom and fill in default strings in class docstrings\nfor name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']:\n method = multivariate_hypergeom_gen.__dict__[name]\n method_frozen = multivariate_hypergeom_frozen.__dict__[name]\n method_frozen.__doc__ = doccer.docformat(\n method.__doc__, mhg_docdict_noparams)\n method.__doc__ = doccer.docformat(method.__doc__,\n mhg_docdict_params)\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"numpy.einsum",
"numpy.asarray",
"numpy.issubdtype",
"numpy.vstack",
"numpy.max",
"scipy._lib.doccer.docformat",
"numpy.any",
"numpy.zeros_like",
"numpy.moveaxis",
"numpy.linalg.LinAlgError",
"numpy.exp",
"numpy.square",
"scipy.special.entr",
"numpy.tril_indices",
"numpy.linalg.slogdet",
"numpy.eye",
"scipy.linalg.blas.drot",
"scipy.special.multigammaln",
"numpy.full",
"numpy.finfo",
"scipy._lib._util.check_random_state",
"numpy.apply_along_axis",
"numpy.copy",
"scipy.linalg.lapack.get_lapack_funcs",
"numpy.outer",
"numpy.repeat",
"numpy.zeros",
"numpy.log",
"numpy.min",
"numpy.isnan",
"numpy.append",
"scipy.special.betaln",
"numpy.identity",
"scipy.special.gammaln",
"numpy.broadcast_arrays",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"scipy.special.xlogy",
"numpy.asarray_chkfinite",
"scipy.special.psi",
"numpy.triu_indices",
"scipy.linalg._misc.LinAlgError",
"numpy.ones",
"numpy.sign",
"numpy.isscalar",
"numpy.ma.masked_array",
"numpy.diag_indices",
"numpy.ndindex",
"numpy.isinf",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
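An illustrative usage sketch of the `scipy.stats.multivariate_hypergeom` interface implemented in the record above; the example values are taken from that record's own docstring, and SciPy >= 1.6 is assumed.

# Sketch only: exercises the multivariate_hypergeom API shown in the record above.
from scipy.stats import multivariate_hypergeom

# pmf of drawing 8 objects of type 1 and 4 of type 2 from a population of 10 + 20
print(multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12))   # ~0.00252

# Frozen distribution: fix m and n, then evaluate moments and sample.
rv = multivariate_hypergeom(m=[10, 20], n=12)
print(rv.mean())                       # expected counts per type
print(rv.cov())                        # 2x2 covariance matrix
print(rv.rvs(size=5, random_state=0))  # five random draws of shape (5, 2)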
Steakkk/bioptim-1 | [
"ccced00a581d9bf469631cc8a186c055f2e567e1"
] | [
"examples/getting_started/custom_bounds.py"
] | [
"import numpy as np\nimport biorbd\n\nfrom bioptim import (\n Node,\n OptimalControlProgram,\n Dynamics,\n DynamicsFcn,\n Objective,\n ObjectiveFcn,\n ConstraintList,\n ConstraintFcn,\n Bounds,\n InitialGuess,\n ShowResult,\n InterpolationType,\n)\n\n\ndef custom_x_bounds_min(current_shooting_point, n_elements, nb_shooting):\n my_values = np.array([[-10, -5]] * n_elements)\n # Linear interpolation created with custom function\n return my_values[:, 0] + (my_values[:, -1] - my_values[:, 0]) * current_shooting_point / nb_shooting\n\n\ndef custom_x_bounds_max(current_shooting_point, n_elements, nb_shooting):\n my_values = np.array([[10, 5]] * n_elements)\n # Linear interpolation created with custom function\n return my_values[:, 0] + (my_values[:, -1] - my_values[:, 0]) * current_shooting_point / nb_shooting\n\n\ndef custom_u_bounds_min(current_shooting_point, n_elements, nb_shooting):\n my_values = np.array([[-20, -10]] * n_elements)\n # Linear interpolation created with custom function\n return my_values[:, 0] + (my_values[:, -1] - my_values[:, 0]) * current_shooting_point / nb_shooting\n\n\ndef custom_u_bounds_max(current_shooting_point, n_elements, nb_shooting):\n my_values = np.array([[20, 10]] * n_elements)\n # Linear interpolation created with custom function\n return my_values[:, 0] + (my_values[:, -1] - my_values[:, 0]) * current_shooting_point / nb_shooting\n\n\ndef prepare_ocp(\n biorbd_model_path,\n number_shooting_points,\n final_time,\n interpolation_type=InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT,\n):\n # --- Options --- #\n # Model path\n biorbd_model = biorbd.Model(biorbd_model_path)\n nq = biorbd_model.nbQ()\n nqdot = biorbd_model.nbQdot()\n ntau = biorbd_model.nbGeneralizedTorque()\n tau_min, tau_max, tau_init = -100, 100, 0\n\n # Add objective functions\n objective_functions = Objective(ObjectiveFcn.Lagrange.MINIMIZE_TORQUE)\n\n # Dynamics\n dynamics = Dynamics(DynamicsFcn.TORQUE_DRIVEN)\n\n # Constraints\n constraints = ConstraintList()\n constraints.add(ConstraintFcn.ALIGN_MARKERS, node=Node.START, first_marker_idx=0, second_marker_idx=1)\n constraints.add(ConstraintFcn.ALIGN_MARKERS, node=Node.END, first_marker_idx=0, second_marker_idx=2)\n\n # Path constraints\n if interpolation_type == InterpolationType.CONSTANT:\n x_min = [-100] * (nq + nqdot)\n x_max = [100] * (nq + nqdot)\n x_bounds = Bounds(x_min, x_max, interpolation=InterpolationType.CONSTANT)\n u_min = [tau_min] * ntau\n u_max = [tau_max] * ntau\n u_bounds = Bounds(u_min, u_max, interpolation=InterpolationType.CONSTANT)\n elif interpolation_type == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT:\n x_min = np.random.random((6, 3)) * (-10) - 5\n x_max = np.random.random((6, 3)) * 10 + 5\n x_bounds = Bounds(x_min, x_max, interpolation=InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT)\n u_min = np.random.random((3, 3)) * tau_min + tau_min / 2\n u_max = np.random.random((3, 3)) * tau_max + tau_max / 2\n u_bounds = Bounds(u_min, u_max, interpolation=InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT)\n elif interpolation_type == InterpolationType.LINEAR:\n x_min = np.random.random((6, 2)) * (-10) - 5\n x_max = np.random.random((6, 2)) * 10 + 5\n x_bounds = Bounds(x_min, x_max, interpolation=InterpolationType.LINEAR)\n u_min = np.random.random((3, 2)) * tau_min + tau_min / 2\n u_max = np.random.random((3, 2)) * tau_max + tau_max / 2\n u_bounds = Bounds(u_min, u_max, interpolation=InterpolationType.LINEAR)\n elif interpolation_type == InterpolationType.EACH_FRAME:\n x_min = 
np.random.random((nq + nqdot, number_shooting_points + 1)) * (-10) - 5\n x_max = np.random.random((nq + nqdot, number_shooting_points + 1)) * 10 + 5\n x_bounds = Bounds(x_min, x_max, interpolation=InterpolationType.EACH_FRAME)\n u_min = np.random.random((ntau, number_shooting_points)) * tau_min + tau_min / 2\n u_max = np.random.random((ntau, number_shooting_points)) * tau_max + tau_max / 2\n u_bounds = Bounds(u_min, u_max, interpolation=InterpolationType.EACH_FRAME)\n elif interpolation_type == InterpolationType.SPLINE:\n spline_time = np.hstack((0, np.sort(np.random.random((3,)) * final_time), final_time))\n x_min = np.random.random((nq + nqdot, 5)) * (-10) - 5\n x_max = np.random.random((nq + nqdot, 5)) * 10 + 5\n u_min = np.random.random((ntau, 5)) * tau_min + tau_min / 2\n u_max = np.random.random((ntau, 5)) * tau_max + tau_max / 2\n x_bounds = Bounds(x_min, x_max, interpolation=InterpolationType.SPLINE, t=spline_time)\n u_bounds = Bounds(u_min, u_max, interpolation=InterpolationType.SPLINE, t=spline_time)\n elif interpolation_type == InterpolationType.CUSTOM:\n # The custom functions refer to the ones at the beginning of the file.\n # For this particular instance, they emulate a Linear interpolation\n extra_params_x = {\"n_elements\": nq + nqdot, \"nb_shooting\": number_shooting_points}\n extra_params_u = {\"n_elements\": ntau, \"nb_shooting\": number_shooting_points}\n x_bounds = Bounds(\n custom_x_bounds_min, custom_x_bounds_max, interpolation=InterpolationType.CUSTOM, **extra_params_x\n )\n u_bounds = Bounds(\n custom_u_bounds_min, custom_u_bounds_max, interpolation=InterpolationType.CUSTOM, **extra_params_u\n )\n else:\n raise NotImplementedError(\"Not implemented yet\")\n\n # Initial guess\n x_init = InitialGuess([0] * (nq + nqdot))\n u_init = InitialGuess([tau_init] * ntau)\n # ------------- #\n\n return OptimalControlProgram(\n biorbd_model,\n dynamics,\n number_shooting_points,\n final_time,\n x_init,\n u_init,\n x_bounds,\n u_bounds,\n objective_functions,\n constraints,\n )\n\n\nif __name__ == \"__main__\":\n print(f\"Show the bounds\")\n for interpolation_type in InterpolationType:\n print(f\"Solving problem using {interpolation_type} bounds\")\n ocp = prepare_ocp(\"cube.bioMod\", number_shooting_points=30, final_time=2, interpolation_type=interpolation_type)\n sol = ocp.solve()\n print(\"\\n\")\n\n # Print the last solution\n result_plot = ShowResult(ocp, sol)\n result_plot.graphs(adapt_graph_size_to_bounds=True)\n\n for interpolation_type in InterpolationType:\n print(f\"Solving problem using {interpolation_type} bounds\")\n ocp = prepare_ocp(\"cube.bioMod\", number_shooting_points=30, final_time=2, interpolation_type=interpolation_type)\n sol = ocp.solve()\n print(\"\\n\")\n\n # Print the last solution\n result_plot = ShowResult(ocp, sol)\n result_plot.graphs(adapt_graph_size_to_bounds=False)\n"
] | [
[
"numpy.array",
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
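A minimal sketch of the linear-interpolation idea behind the custom bound callbacks in the bioptim example above; it reuses the callback verbatim with plain NumPy, so no bioptim installation is assumed.

import numpy as np

def custom_x_bounds_min(current_shooting_point, n_elements, nb_shooting):
    # Copied from the record above: a linear ramp from -10 (first node) to -5 (last node).
    my_values = np.array([[-10, -5]] * n_elements)
    return my_values[:, 0] + (my_values[:, -1] - my_values[:, 0]) * current_shooting_point / nb_shooting

nb_shooting = 30
print(custom_x_bounds_min(0, n_elements=6, nb_shooting=nb_shooting))            # all -10 at the first node
print(custom_x_bounds_min(nb_shooting, n_elements=6, nb_shooting=nb_shooting))  # all -5 at the last node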
Loopdiloop/IntpartSummerSchool2019 | [
"d3439e782472a7b66c8faaf3df31d0411722b405"
] | [
"talys_exercises/ex5/converted/gsf_empirical_to_file.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.append(\"../../../talys_import_nld_gsf\")\nfrom convert_talys import gen_nld_table, sigma2, log_interp1d\n\n# commonly used const. strength_factor, convert in mb^(-1) MeV^(-2)\nstrength_factor = 8.6737E-08\n\n\ndef SLO(E, E0, Gamma0, sigma0):\n # Special Lorentzian,\n # adapted from Kopecky & Uhl (1989) eq. (2.1)\n f = strength_factor * sigma0 * E * Gamma0**2 / \\\n ((E**2 - E0**2)**2 + E**2 * Gamma0**2)\n return f\n\n\ndef GLO(E, E0, Gamma0, sigma0, T):\n # Generalized Lorentzian,\n # adapted from Kopecky & Uhl (1989) eq. (2.3-2.4)\n Gamma = Gamma0 * (E**2 + 4 * np.pi**2 * T**2) / E0**2\n f1 = (E * Gamma) / ((E**2 - E0**2)**2 + E**2 * Gamma**2)\n f2 = 0.7 * Gamma0 * 4 * np.pi**2 * T**2 / E0**5\n\n f = strength_factor * sigma0 * Gamma0 * (f1 + f2)\n return f\n\n\n# Constants\nSn = 7.558\nA = 192\nZ = 76\n\n# parameters\npGLO = {\"E0\": 13.2, \"Gamma0\": 2.8, \"sigma0\": 615, \"T\": 1.2 }\npSLO = {\"E0\": 7.1, \"Gamma0\": 4, \"sigma0\": 2.1}\n\n# For this exercise we don't read the data from file,\n# but create an equivalent type of array\n# gsf = np.loadtxt(fn_gsf)\nx = np.linspace(0, Sn+1)\ngsf = np.zeros((len(x), 3))\ngsf[:, 0] = x\ngsf[:, 1] = GLO(x, **pGLO)\ngsf[:, 2] = SLO(x, **pSLO)\n\n# The file is/should be writen in [MeV] [MeV^-3] [MeV^-3]\nif gsf[0, 0] == 0:\n gsf = gsf[1:, :]\nEgsf = gsf[:, 0]\ngsfE1 = gsf[:, 1]\ngsfM1 = gsf[:, 2]\n\n# REMEMBER that the TALYS functions are given in mb/MeV (Goriely's tables)\n# so we must convert it (simple factor)\nfactor_from_mb = 8.6737E-08 # const. factor in mb^(-1) MeV^(-2)\n\nfE1 = log_interp1d(Egsf, gsfE1, fill_value=\"extrapolate\")\nfM1 = log_interp1d(Egsf, gsfM1, fill_value=\"extrapolate\")\n\nEgsf_out = np.arange(0.1, 30.1, 0.1)\n\nfn_gsf_outE1 = \"data/gsfE1.dat\"\nfn_gsf_outM1 = \"data/gsfM1.dat\"\nheader = f\" Z= {Z} A= {A}\\n\" + \" U[MeV] fE1[mb/MeV]\"\n# gsfE1 /= factor_from_mb\nnp.savetxt(fn_gsf_outE1, np.c_[Egsf_out, fE1(Egsf_out)/factor_from_mb],\n fmt=\"%9.3f%12.3E\", header=header)\n# gsfM1 /= factor_from_mb\nnp.savetxt(fn_gsf_outM1, np.c_[Egsf_out, fM1(Egsf_out)/factor_from_mb],\n fmt=\"%9.3f%12.3E\", header=header)\n\nfig, ax = plt.subplots()\nax.semilogy(Egsf_out, fE1(Egsf_out), \"--\", label=\"E1\")\nax.semilogy(Egsf_out, fM1(Egsf_out), \"--\", label=\"M1\")\nax.semilogy(Egsf_out, fE1(Egsf_out)+fM1(Egsf_out),\n \"-\", label=\"sum\")\nax.axvspan(Egsf[-1], Egsf_out[-1], alpha=0.1, label=\"extrapolation\")\n\n# plot together with the output from talys if existent\ntry:\n talys_out = np.loadtxt(\"data/talys_output.txt\", skiprows=2)\n ax.plot(talys_out[:, 0], talys_out[:, 1], \"-.\",\n label=\"talys_output M1\")\n ax.plot(talys_out[:, 0], talys_out[:, 2], \"--\",\n label=\"talys_output E1\")\nexcept OSError:\n pass\nax.legend()\nplt.show()\n"
] | [
[
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
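A small sketch evaluating the SLO Lorentzian strength function from the record above at a single gamma-ray energy; the constant and the M1 parameter values are copied from that file.

# Sketch only: Special Lorentzian (Kopecky & Uhl 1989, eq. 2.1) as defined in the record above.
strength_factor = 8.6737E-08  # mb^(-1) MeV^(-2)

def SLO(E, E0, Gamma0, sigma0):
    return strength_factor * sigma0 * E * Gamma0**2 / ((E**2 - E0**2)**2 + E**2 * Gamma0**2)

# M1 parameters used in the record: E0 = 7.1 MeV, Gamma0 = 4 MeV, sigma0 = 2.1 mb
print(SLO(E=5.0, E0=7.1, Gamma0=4, sigma0=2.1))  # gamma-ray strength in MeV^-3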
zoonn1788/TensorflowLiteDemo | [
"dee02e2a8216232dee25a7d9587dc7039c6e8c37"
] | [
"tensorflow/python/kernel_tests/tensor_array_ops_test.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.tensor_array_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import gen_data_flow_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import tensor_array_grad\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n\ndef _make_converter(tf_dtype):\n def _converter(x):\n if tf_dtype == dtypes.string:\n # In Python3, np.str is unicode, while we always want bytes\n return np.asarray(x).astype(\"|S\")\n x = np.asarray(x).astype(tf_dtype.as_numpy_dtype)\n if tf_dtype.is_complex:\n # Add a non-zero imaginary component to x.\n x -= 1j * x\n return x\n return _converter\n\n\ndef _make_ta(size, name, dtype=dtypes.float32, infer_shape=False):\n return tensor_array_ops.TensorArray(\n dtype=dtype, tensor_array_name=name, size=size, infer_shape=infer_shape)\n\n\n@test_util.run_all_in_graph_and_eager_modes\n@test_util.with_control_flow_v2\nclass TensorArrayTest(test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TensorArrayTest, cls).setUpClass()\n cls._workers, _ = test.create_local_cluster(num_workers=3, num_ps=0)\n\n @classmethod\n def tearDownClass(cls):\n super(TensorArrayTest, cls).tearDownClass()\n session_lib.Session.reset(cls._workers[0].target)\n\n @test_util.run_in_graph_and_eager_modes\n def testTensorArrayWriteRead(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=3,\n infer_shape=False)\n\n w0 = ta.write(0, [[4.0, 5.0]])\n w1 = w0.write(1, [[1.0]])\n w2 = w1.write(2, -3.0)\n\n r0 = 
w2.read(0)\n r1 = w2.read(1)\n r2 = w2.read(2)\n\n d0, d1, d2 = self.evaluate([r0, r1, r2])\n self.assertAllEqual([[4.0, 5.0]], d0)\n self.assertAllEqual([[1.0]], d1)\n self.assertAllEqual(-3.0, d2)\n\n def _testTensorArrayWritePack(self, tf_dtype):\n with self.cached_session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=tf_dtype, tensor_array_name=\"foo\", size=3)\n\n convert = _make_converter(tf_dtype)\n\n w0 = ta.write(0, convert([[4.0, 5.0]]))\n w1 = w0.write(1, convert([[6.0, 7.0]]))\n w2 = w1.write(2, convert([[8.0, 9.0]]))\n\n c0 = w2.stack()\n\n c0 = self.evaluate(c0)\n self.assertAllEqual(\n convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0)\n\n def _testTensorArrayWritePackMaybeLegacy(self):\n self._testTensorArrayWritePack(dtypes.float32)\n self._testTensorArrayWritePack(dtypes.float64)\n self._testTensorArrayWritePack(dtypes.int32)\n self._testTensorArrayWritePack(dtypes.int64)\n self._testTensorArrayWritePack(dtypes.complex64)\n self._testTensorArrayWritePack(dtypes.complex128)\n self._testTensorArrayWritePack(dtypes.string)\n\n def testTensorArrayWritePack(self):\n self._testTensorArrayWritePackMaybeLegacy()\n\n def testEmptyTensorArrayPack(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=3)\n\n empty_element = np.zeros((0, 1), dtype=np.float32)\n w0 = ta.write(0, empty_element)\n w1 = w0.write(1, empty_element)\n w2 = w1.write(2, empty_element)\n\n c0 = w2.stack()\n\n c0 = self.evaluate(c0)\n self.assertAllEqual([3, 0, 1], c0.shape)\n\n def _testTensorArrayWriteConcat(self, tf_dtype):\n with self.cached_session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=tf_dtype, tensor_array_name=\"foo\", size=3, infer_shape=False)\n\n convert = _make_converter(tf_dtype)\n\n w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))\n w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))\n w2 = w1.write(2, convert([[8.0, 9.0]]))\n\n c0 = w2.concat()\n\n c0 = self.evaluate(c0)\n self.assertAllEqual(\n convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0], [6.0, 7.0],\n [106.0, 107.0], [8.0, 9.0]]), c0)\n\n @test_util.deprecated_graph_mode_only\n def testTensorArrayWriteConcat(self):\n self._testTensorArrayWriteConcat(dtypes.float32)\n self._testTensorArrayWriteConcat(dtypes.float64)\n self._testTensorArrayWriteConcat(dtypes.int32)\n self._testTensorArrayWriteConcat(dtypes.int64)\n self._testTensorArrayWriteConcat(dtypes.complex64)\n self._testTensorArrayWriteConcat(dtypes.complex128)\n self._testTensorArrayWriteConcat(dtypes.string)\n\n def _testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):\n with self.cached_session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=3,\n element_shape=tensor_shape.TensorShape([1, 2]))\n self.assertAllEqual([[0.0, 0.0]], self.evaluate(ta.read(0)))\n self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],\n self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))\n self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],\n self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))\n\n @test_util.run_v1_only(\"b/122324791\")\n def testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):\n self._testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros()\n\n def _testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=3)\n 
self.assertAllEqual(\n [[0.0, 0.0]], self.evaluate(ta.write(1, [[4.0, 5.0]]).read(0)))\n self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],\n self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))\n self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],\n self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))\n\n @test_util.run_v1_only(\"b/122324791\")\n def testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):\n self._testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros()\n\n @test_util.run_v1_only(\"Uses placeholders\")\n def testSkipEagerTensorArrayReadUninitializedInferShapeFillsZeros(self):\n with self.cached_session(use_gpu=True) as sess:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=3)\n val = array_ops.placeholder(dtypes.float32)\n self.assertAllEqual(\n [[0.0, 0.0]], sess.run(ta.write(1, val).read(0), {val: [[4.0, 5.0]]}))\n\n def _testTensorArrayUnpackRead(self, tf_dtype):\n with self.cached_session(use_gpu=True):\n convert = _make_converter(tf_dtype)\n\n ta = _make_ta(3, \"foo\", dtype=tf_dtype)\n # Unpack a vector into scalars\n w0 = ta.unstack(convert([1.0, 2.0, 3.0]))\n r0 = w0.read(0)\n r1 = w0.read(1)\n r2 = w0.read(2)\n\n d0, d1, d2 = self.evaluate([r0, r1, r2])\n self.assertAllEqual(convert(1.0), d0)\n self.assertAllEqual(convert(2.0), d1)\n self.assertAllEqual(convert(3.0), d2)\n\n # Unpack a matrix into vectors\n w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))\n r0 = w1.read(0)\n r1 = w1.read(1)\n r2 = w1.read(2)\n\n d0, d1, d2 = self.evaluate([r0, r1, r2])\n self.assertAllEqual(convert([1.0, 1.1]), d0)\n self.assertAllEqual(convert([2.0, 2.1]), d1)\n self.assertAllEqual(convert([3.0, 3.1]), d2)\n\n # Try unpacking an empty matrix, which should not cause an error.\n w2 = ta.unstack(convert([[], [], []]))\n r0 = w2.read(0)\n r1 = w2.read(1)\n r2 = w2.read(2)\n\n d0, d1, d2 = self.evaluate([r0, r1, r2])\n self.assertAllEqual(convert([]), d0)\n self.assertAllEqual(convert([]), d1)\n self.assertAllEqual(convert([]), d2)\n\n def _testTensorArrayUnpackReadMaybeLegacy(self):\n self._testTensorArrayUnpackRead(dtypes.float32)\n self._testTensorArrayUnpackRead(dtypes.float64)\n self._testTensorArrayUnpackRead(dtypes.int32)\n self._testTensorArrayUnpackRead(dtypes.int64)\n self._testTensorArrayUnpackRead(dtypes.complex64)\n self._testTensorArrayUnpackRead(dtypes.complex128)\n self._testTensorArrayUnpackRead(dtypes.string)\n\n def testTensorArrayUnpackRead(self):\n self._testTensorArrayUnpackReadMaybeLegacy()\n\n def _testTensorArraySplitRead(self, tf_dtype):\n with self.cached_session(use_gpu=True):\n convert = _make_converter(tf_dtype)\n\n # Split an empty vector\n ta = _make_ta(3, \"foo\", dtype=tf_dtype)\n lengths = constant_op.constant([0, 0, 0])\n w0 = ta.split(convert([]), lengths=lengths)\n r0 = w0.read(0)\n r1 = w0.read(1)\n r2 = w0.read(2)\n\n d0, d1, d2 = self.evaluate([r0, r1, r2])\n self.assertAllEqual(convert([]), d0)\n self.assertAllEqual(convert([]), d1)\n self.assertAllEqual(convert([]), d2)\n\n # Split a vector\n lengths = constant_op.constant([2, 0, 1])\n w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)\n r0 = w0.read(0)\n r1 = w0.read(1)\n r2 = w0.read(2)\n\n d0, d1, d2 = self.evaluate([r0, r1, r2])\n self.assertAllEqual(convert([1.0, 2.0]), d0)\n self.assertAllEqual(convert([]), d1)\n self.assertAllEqual(convert([3.0]), d2)\n\n # Split a matrix\n lengths = constant_op.constant([2, 0, 1])\n w0 = ta.split(\n convert([[1.0, 101.0], [2.0, 201.0], [3.0, 
301.0]]), lengths=lengths)\n r0 = w0.read(0)\n r1 = w0.read(1)\n r2 = w0.read(2)\n\n d0, d1, d2 = self.evaluate([r0, r1, r2])\n self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)\n self.assertAllEqual(convert([]).reshape(0, 2), d1)\n self.assertAllEqual(convert([[3.0, 301.0]]), d2)\n\n @test_util.deprecated_graph_mode_only\n def testTensorArraySplitRead(self):\n self._testTensorArraySplitRead(dtypes.float32)\n self._testTensorArraySplitRead(dtypes.float64)\n self._testTensorArraySplitRead(dtypes.int32)\n self._testTensorArraySplitRead(dtypes.int64)\n self._testTensorArraySplitRead(dtypes.complex64)\n self._testTensorArraySplitRead(dtypes.complex128)\n self._testTensorArraySplitRead(dtypes.string)\n\n @test_util.disable_control_flow_v2(\"v2 does not support TensorArray.grad.\")\n @test_util.run_v1_only(\"v2 does not support TensorArray.grad.\")\n def testSkipEagerTensorGradArrayWriteRead(self):\n with self.session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=3,\n infer_shape=False)\n g_ta = ta.grad(\"grad\")\n\n w0 = ta.write(0, [[4.0, 5.0]])\n w1 = w0.write(1, [[1.0]])\n w2 = w1.write(2, -3.0)\n\n g_w0 = g_ta.write(0, [[5.0, 6.0]])\n g_w1 = g_w0.write(1, [[2.0]])\n g_w2 = g_w1.write(2, -2.0)\n\n r0 = w2.read(0)\n r1 = w2.read(1)\n r2 = w2.read(2)\n\n g_r0 = g_w2.read(0)\n g_r1 = g_w2.read(1)\n g_r2 = g_w2.read(2)\n\n d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])\n self.assertAllEqual([[4.0, 5.0]], d0)\n self.assertAllEqual([[1.0]], d1)\n self.assertAllEqual(-3.0, d2)\n self.assertAllEqual([[5.0, 6.0]], g_d0)\n self.assertAllEqual([[2.0]], g_d1)\n self.assertAllEqual(-2.0, g_d2)\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerTensorArrayGradGrad(self):\n if not control_flow_util.ENABLE_CONTROL_FLOW_V2:\n self.skipTest(\"Legacy TensorArray does not support double derivatives.\")\n with self.test_session(use_gpu=True) as session:\n x = constant_op.constant(4.0)\n\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=1,\n infer_shape=False)\n w0 = ta.write(0, x)\n r0 = w0.read(0)\n y = r0 * r0\n\n g1 = gradients_impl.gradients(ys=[y], xs=[x])\n g2 = gradients_impl.gradients(ys=[g1], xs=[x])\n self.assertAllEqual([2.0], session.run(g2))\n\n @test_util.disable_control_flow_v2(\"v2 does not support TensorArray.grad.\")\n @test_util.run_v1_only(\"v2 does not support TensorArray.grad.\")\n def testSkipEagerTensorGradArrayDynamicWriteRead(self):\n with self.session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=0,\n dynamic_size=True,\n infer_shape=False)\n\n w0 = ta.write(0, [[4.0, 5.0]])\n w1 = w0.write(1, [[1.0]])\n w2 = w1.write(2, -3.0)\n\n g_ta = w2.grad(\"grad\") # Get gradient array here so we know the shape\n\n s = w2.size()\n g_s = g_ta.size()\n\n g_w0 = g_ta.write(0, [[5.0, 6.0]])\n g_w1 = g_w0.write(1, [[2.0]])\n g_w2 = g_w1.write(2, -2.0)\n\n r0 = w2.read(0)\n r1 = w2.read(1)\n r2 = w2.read(2)\n\n g_r0 = g_w2.read(0)\n g_r1 = g_w2.read(1)\n g_r2 = g_w2.read(2)\n\n d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(\n [r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])\n self.assertAllEqual([[4.0, 5.0]], d0)\n self.assertAllEqual([[1.0]], d1)\n self.assertAllEqual(-3.0, d2)\n self.assertAllEqual([[5.0, 6.0]], g_d0)\n self.assertAllEqual([[2.0]], g_d1)\n self.assertAllEqual(-2.0, g_d2)\n self.assertAllEqual(3, vs)\n self.assertAllEqual(3, g_vs)\n\n 
@test_util.disable_control_flow_v2(\"v2 does not support TensorArray.grad.\")\n @test_util.run_v1_only(\"v2 does not support TensorArray.grad.\")\n def testSkipEagerTensorGradAccessTwiceReceiveSameObject(self):\n with self.session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=3)\n g_ta_0 = ta.grad(\"grad\")\n g_ta_1 = ta.grad(\"grad\")\n\n with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):\n # Write with one gradient handle, read with another copy of it\n r1_0 = g_ta_1.read(0)\n\n t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(\n [g_ta_0.handle.op, g_ta_1.handle.op, r1_0])\n self.assertAllEqual(t_g_ta_0, t_g_ta_1)\n self.assertAllEqual([[4.0, 5.0]], d_r1_0)\n\n def testTensorArrayWriteWrongIndexOrDataTypeFails(self):\n with self.session(use_gpu=True):\n ta = _make_ta(3, \"foo\", dtype=dtypes.float32)\n # TODO(b/129870929): Remove the last 2 checks (runtime checks) after\n # back back from preferred_dtype= to dtype= in convert_to_tensor. Also\n # restrict error check to only TypeError.\n error_msg_regex = (\n \"(\"\n \"Expected float32, got 'wrong_type_scalar' of type 'str' instead.\"\n \"|\"\n \"Cannot convert provided value to EagerTensor. Provided value: \"\n \"wrong_type_scalar Requested dtype: float\"\n \"|\"\n \"TensorArray dtype is float.* but Op is trying to write dtype string\"\n \"|\"\n \"Invalid data types; op elements string but list elements float\"\n \")\")\n with self.assertRaisesRegexp(\n (TypeError, errors.InvalidArgumentError), error_msg_regex):\n self.evaluate(ta.write(0, \"wrong_type_scalar\").flow)\n\n if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and\n not context.executing_eagerly()):\n error_msg = \"Trying to modify element -1 in a list with 3 elements.\"\n else:\n error_msg = \"index -1\"\n with self.assertRaisesOpError(error_msg):\n self.evaluate(ta.write(-1, 3.0).flow)\n\n if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and\n not context.executing_eagerly()):\n error_msg = \"Trying to modify element 3 in a list with 3 elements\"\n else:\n error_msg = (\"Tried to write to index 3 but array is not \"\n \"resizeable and size is: 3\")\n # Test reading from too large an index\n with self.assertRaisesOpError(error_msg):\n self.evaluate(ta.write(3, 3.0).flow)\n\n def testTensorArrayReadWrongIndexOrDataTypeFails(self):\n with self.session(use_gpu=True):\n ta = _make_ta(3, \"foo\", dtype=dtypes.float32)\n\n w0 = ta.write(0, [[4.0, 5.0]])\n\n # Test reading wrong datatype (only possible when constructing graphs).\n if (not context.executing_eagerly() and\n not control_flow_util.ENABLE_CONTROL_FLOW_V2):\n r0_bad = gen_data_flow_ops.tensor_array_read_v3(\n handle=w0.handle, index=0, dtype=dtypes.float64, flow_in=w0.flow)\n with self.assertRaisesOpError(\n \"TensorArray dtype is float but Op requested dtype double.\"):\n self.evaluate(r0_bad)\n\n if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and\n not context.executing_eagerly()):\n error_msg = \"Trying to access element -1 in a list with 3 elements.\"\n else:\n error_msg = \"index -1\"\n # Test reading from a negative index, which is not allowed\n with self.assertRaisesOpError(error_msg):\n self.evaluate(ta.read(-1))\n\n if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and\n not context.executing_eagerly()):\n error_msg = \"Trying to access element 3 in a list with 3 elements.\"\n else:\n error_msg = \"Tried to read from index 3 but array size is: 3\"\n # Test reading from too large an index\n with self.assertRaisesOpError(error_msg):\n 
self.evaluate(ta.read(3))\n\n @test_util.disable_control_flow_v2(\"v2 allows multiple writes.\")\n @test_util.run_v1_only(\"v2 allows multiple writes.\")\n def testSkipEagerTensorArrayWriteMultipleFails(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=3)\n\n with self.assertRaisesOpError(\n \"Could not write to TensorArray index 2 because \"\n \"it has already been written to.\"):\n self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)\n\n def testTensorArrayConcatIncompatibleShapesFails(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=3,\n infer_shape=False)\n\n w1 = ta.write(0, 3.0)\n w2 = w1.write(1, 4.0)\n w3 = w2.write(2, [3.0])\n\n with self.assertRaisesOpError(\n \"Concat saw a scalar shape at index 0 but requires at least vectors\"):\n self.evaluate(w3.concat())\n\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=3,\n infer_shape=False)\n\n w1 = ta.write(0, [3.0])\n w2 = w1.write(1, [4.0])\n w3 = w2.write(2, [[3.0]])\n\n # The exact error messages differ between eager execution and graph\n # construction as the former bubbles up the error from array_op.concat.\n error_msg = (\"Incompatible ranks\"\n if control_flow_util.ENABLE_CONTROL_FLOW_V2 and\n not context.executing_eagerly() else \"shape\")\n with self.assertRaisesRegexp(errors.InvalidArgumentError, error_msg):\n self.evaluate(w3.concat())\n\n def testTensorArraySplitIncompatibleShapesFails(self):\n with self.session(use_gpu=True):\n in_eager_mode = context.executing_eagerly()\n ta = _make_ta(3, \"foo\")\n with self.assertRaisesOpError(\n r\"Expected lengths to be a vector, received shape: \\[\\]\"):\n if in_eager_mode:\n self.evaluate(ta.split([1.0, 2.0, 3.0], 1))\n else:\n lengths = array_ops.placeholder(dtypes.int64)\n ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})\n\n error_msg = (\"Unused values in tensor. Length of tensor: 3 Values used: 1\"\n if control_flow_util.ENABLE_CONTROL_FLOW_V2 and\n not in_eager_mode else\n r\"Expected sum of lengths to be equal to values.shape\\[0\\], \"\n r\"but sum of lengths is 1 and value's shape is: \\[3\\]\")\n with self.assertRaisesOpError(error_msg):\n self.evaluate(ta.split([1.0, 2.0, 3.0], [1]).flow)\n\n ta = _make_ta(1, \"baz\")\n if control_flow_util.ENABLE_CONTROL_FLOW_V2 and not in_eager_mode:\n with self.assertRaisesRegexp(\n ValueError, \"Shape must be at least rank 1 but is rank 0\"):\n self.evaluate(ta.split(1.0, [1]).flow)\n else:\n with self.assertRaisesOpError(\n r\"Expected value to be at least a vector, but received shape: \\[\\]\"\n ):\n self.evaluate(ta.split(1.0, [1]).flow)\n\n if not control_flow_util.ENABLE_CONTROL_FLOW_V2 or in_eager_mode:\n ta = _make_ta(2, \"buz\")\n with self.assertRaisesOpError(\n r\"TensorArray's size is not equal to the size of lengths \"\n r\"\\(2 vs. 
1\\), and the TensorArray is not marked as \"\n r\"dynamically resizeable\"):\n self.evaluate(ta.split([1.0], [1]).flow)\n\n def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):\n with self.cached_session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtype, tensor_array_name=\"foo\", size=3, infer_shape=False)\n ta_grad = ta.grad(\"grad\")\n\n c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)\n\n w0 = ta.write(2, c(3.0))\n w1 = w0.write(2, c(4.0))\n\n w0_grad = ta_grad.write(2, c(3.0))\n w1_grad = w0_grad.write(2, c(4.0))\n w2_grad = w1_grad.write(2, c(5.0))\n\n # Assert that aggregation works correctly\n self.assertAllEqual(c(12.00), w2_grad.read(2).eval())\n\n # Assert that if multiple_writes_aggregate is not enabled,\n # multiple writes raise an exception.\n with self.assertRaisesOpError(\n r\"TensorArray foo_.*: Could not write to TensorArray index 2 because \"\n r\"it has already been written to.\"):\n w1.flow.eval()\n\n # Using differing shapes causes an exception\n wb0_grad = ta_grad.write(1, c(1.0))\n wb1_grad = wb0_grad.write(1, c([1.0]))\n\n with self.assertRaisesOpError(\n r\"Could not aggregate to TensorArray index 1 because the \"\n r\"existing shape is \\[\\] but the new input shape is \\[1\\]\"):\n wb1_grad.flow.eval()\n\n @test_util.disable_control_flow_v2(\"v2 does not support TensorArray.grad.\")\n @test_util.run_v1_only(\"v2 does not support TensorArray.grad.\")\n def testSkipEagerTensorArrayWriteGradientAddMultipleAdds(self):\n for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,\n dtypes.complex64, dtypes.complex128):\n self._testTensorArrayWriteGradientAddMultipleAdds(dtype)\n\n @test_util.disable_control_flow_v2(\"Low level legacy TA op test.\")\n @test_util.run_v1_only(\"Low level legacy TA op test.\")\n def testSkipEagerTensorArrayGradWithShapeKnownElementShape(self):\n with self.session(use_gpu=True) as sess:\n ta = tensor_array_ops.TensorArray(\n size=3,\n dtype=dtypes.float32,\n element_shape=tensor_shape.TensorShape([2, 3]))\n handle, flow = data_flow_ops.tensor_array_grad_with_shape(\n handle=ta.handle,\n flow_in=ta.flow,\n shape_to_prepend=tensor_shape.TensorShape([4, 5]),\n source=\"source\")\n ta_grad = tensor_array_ops.TensorArray(\n dtypes.float32, handle=handle, flow=flow)\n value = array_ops.placeholder(dtypes.float32)\n ta_grad = ta_grad.write(0, value)\n read_value = ta_grad.read(0)\n\n # Make sure shape inference worked.\n self.assertAllEqual([None, None, 2, 3], read_value.shape.as_list())\n # Writing with wrong shape should not work.\n with self.assertRaisesRegexp(errors.InvalidArgumentError,\n \"Could not write to TensorArray\"):\n fed_value = np.random.random([2, 3])\n sess.run(read_value, feed_dict={value: fed_value})\n # Writing with correct shape should work.\n fed_value = np.random.random([4, 5, 2, 3])\n self.assertAllClose(fed_value,\n sess.run(read_value, feed_dict={value: fed_value}))\n\n @test_util.disable_control_flow_v2(\"Low level legacy TA op test.\")\n @test_util.run_v1_only(\"Low level legacy TA op test.\")\n def testSkipEagerTensorArrayGradWithShapeUnknownElementShape(self):\n with self.session(use_gpu=True) as sess:\n ta = tensor_array_ops.TensorArray(\n size=3, dtype=dtypes.float32,\n element_shape=None) # Note that element_shape is unknown\n handle, flow = data_flow_ops.tensor_array_grad_with_shape(\n handle=ta.handle,\n flow_in=ta.flow,\n shape_to_prepend=tensor_shape.TensorShape([4, 5]),\n source=\"source\")\n ta_grad = tensor_array_ops.TensorArray(\n dtypes.float32, 
handle=handle, flow=flow)\n value = array_ops.placeholder(dtypes.float32)\n ta_grad = ta_grad.write(0, value)\n read_value = ta_grad.read(0)\n\n # Make sure shape inference worked.\n self.assertIsNone(read_value.shape.ndims)\n # Write with some shape and check read value.\n fed_value = np.random.random([4, 5, 7])\n self.assertAllClose(fed_value,\n sess.run(read_value, feed_dict={value: fed_value}))\n\n def testMultiTensorArray(self):\n with self.session(use_gpu=True):\n h1 = tensor_array_ops.TensorArray(\n size=1, dtype=dtypes.float32, tensor_array_name=\"foo\")\n w1 = h1.write(0, 4.0)\n r1 = w1.read(0)\n\n h2 = tensor_array_ops.TensorArray(\n size=1, dtype=dtypes.float32, tensor_array_name=\"bar\")\n\n w2 = h2.write(0, 5.0)\n r2 = w2.read(0)\n r = r1 + r2\n val = self.evaluate(r)\n self.assertAllClose(9.0, val)\n\n def _testTensorArrayGradientWriteReadType(self, dtype):\n with self.cached_session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.as_dtype(dtype),\n tensor_array_name=\"foo\",\n size=3,\n infer_shape=False)\n\n c = lambda x: np.array(x, dtype=dtype)\n\n value_0 = constant_op.constant(c([[4.0, 5.0]]))\n value_1 = constant_op.constant(c(3.0))\n\n w0 = ta.write(0, value_0)\n w1 = w0.write(1, value_1)\n r0 = w1.read(0)\n r1 = w1.read(1)\n r0_2 = w1.read(0)\n\n # Test individual components' gradients\n grad_just_r0 = gradients_impl.gradients(\n ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])\n grad_just_r0_vals = session.run(grad_just_r0)\n self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])\n\n grad_r0_r0_2 = gradients_impl.gradients(\n ys=[r0, r0_2],\n xs=[value_0],\n grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])\n grad_r0_r0_2_vals = session.run(grad_r0_r0_2)\n self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])\n\n grad_just_r1 = gradients_impl.gradients(\n ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])\n grad_just_r1_vals = session.run(grad_just_r1)\n self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])\n\n # Test combined gradients\n grad = gradients_impl.gradients(\n ys=[r0, r0_2, r1],\n xs=[value_0, value_1],\n grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])\n grad_vals = session.run(grad)\n self.assertEqual(len(grad_vals), 2)\n self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])\n self.assertAllEqual(c(-2.0), grad_vals[1])\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerTensorArrayGradientWriteRead(self):\n for dtype in (np.float32, np.float64, np.complex64, np.complex128):\n self._testTensorArrayGradientWriteReadType(dtype)\n\n def _testTensorArrayGradientWritePackConcatAndRead(self):\n with self.cached_session(use_gpu=True) as sess:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=2,\n clear_after_read=False)\n\n value_0 = constant_op.constant([-1.0, 1.0])\n value_1 = constant_op.constant([-10.0, 10.0])\n\n w0 = ta.write(0, value_0)\n w1 = w0.write(1, value_1)\n p0 = w1.stack()\n r0 = w1.read(0)\n s0 = w1.concat()\n\n # Test gradient accumulation between read(0), pack(), and concat()\n with ops.control_dependencies([p0, r0, s0]):\n grad_r = gradients_impl.gradients(\n ys=[p0, r0, s0],\n xs=[value_0, value_1],\n grad_ys=[\n [[2.0, 3.0], [4.0, 5.0]], # pack gradient\n [-0.5, 1.5], # read(0) gradient\n [20.0, 30.0, 40.0, 50.0]\n ]) # concat gradient\n grad_vals = self.evaluate(grad_r) # 2 + 2 entries\n\n self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])\n self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])\n\n 
@test_util.deprecated_graph_mode_only\n def testSkipEagerTensorArrayGradientWritePackConcatAndRead(self):\n self._testTensorArrayGradientWritePackConcatAndRead()\n\n @test_util.disable_control_flow_v2(\"v2 does not support clear_after_read.\")\n @test_util.run_v1_only(\"v2 does not support clear_after_read.\")\n def testTensorArrayReadTwice(self):\n with self.session(use_gpu=True):\n value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])\n\n ta_readonce = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=2)\n\n w_readonce = ta_readonce.unstack(value)\n r0_readonce = w_readonce.read(0)\n\n with self.assertRaisesOpError(\n r\"Could not read index 0 twice because it was cleared after a \"\n r\"previous read \\(perhaps try setting clear_after_read = false\\?\\)\"):\n with ops.control_dependencies([r0_readonce]):\n self.evaluate(w_readonce.read(0))\n\n ta_readtwice = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=2,\n clear_after_read=False)\n w_readtwice = ta_readtwice.unstack(value)\n r0_readtwice = w_readtwice.read(0)\n with ops.control_dependencies([r0_readtwice]):\n r1_readtwice = w_readtwice.read(0)\n\n self.assertAllEqual([1.0, -1.0], self.evaluate(r1_readtwice))\n\n def _testTensorArrayGradientUnpackRead(self):\n with self.cached_session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=2,\n clear_after_read=False)\n\n value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])\n\n w = ta.unstack(value)\n r0 = w.read(0)\n r0_1 = w.read(0)\n r1 = w.read(1)\n\n # Test combined gradients + aggregation of read(0)\n grad = gradients_impl.gradients(\n ys=[r0, r0_1, r1],\n xs=[value],\n grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])\n grad_vals = session.run(grad)\n\n self.assertEqual(len(grad_vals), 1)\n self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerTensorArrayGradientUnpackRead(self):\n self._testTensorArrayGradientUnpackRead()\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerTensorArrayGradientSplitConcat(self):\n with self.session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=2,\n infer_shape=False)\n\n value = constant_op.constant(\n [[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])\n\n w = ta.split(value, [2, 1])\n r = w.concat()\n\n # Test combined gradients\n grad = gradients_impl.gradients(\n ys=[r],\n xs=[value],\n grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])\n grad_vals = session.run(grad)\n\n self.assertEqual(len(grad_vals), 1)\n self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]],\n grad_vals[0])\n\n def _testTensorArrayGradientDynamicUnpackRead(self):\n with self.cached_session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=0,\n dynamic_size=True)\n\n value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])\n\n w = ta.unstack(value)\n r0 = w.read(0)\n r1 = w.read(1)\n\n # Test combined gradients + aggregation of read(0)\n grad = gradients_impl.gradients(\n ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])\n grad_vals = session.run(grad)\n\n self.assertEqual(len(grad_vals), 1)\n self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerTensorArrayGradientDynamicUnpackRead(self):\n 
self._testTensorArrayGradientDynamicUnpackRead()\n\n def testCloseTensorArray(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=3)\n self.evaluate(ta.close())\n\n def testSizeTensorArray(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=3)\n s = ta.size()\n self.assertAllEqual(3, self.evaluate(s))\n\n def testWriteCloseTensorArray(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=3,\n infer_shape=False)\n w0 = ta.write(0, [[4.0, 5.0]])\n w1 = w0.write(1, [3.0])\n self.evaluate(w1.close()) # Expected to run without problems\n\n def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):\n np_dtype = dtype.as_numpy_dtype\n with self.cached_session(use_gpu=True):\n def func(v0, state0, var):\n ta = tensor_array_ops.TensorArray(\n dtype=dtype,\n tensor_array_name=\"foo\",\n size=0 if dynamic_size else 3,\n dynamic_size=dynamic_size)\n time_0 = array_ops.identity(0)\n\n def body(time, ta_t, state):\n sliced = array_ops.slice(\n v0, begin=array_ops.stack([time, 0]), size=[1, -1])\n sliced = array_ops.squeeze(sliced)\n out = sliced + var + state\n state += sliced\n ta_t = ta_t.write(time, out)\n return (time + 1, ta_t, state)\n\n (unused_0, h_final, unused_2) = control_flow_ops.while_loop(\n cond=lambda time, unused_1, unused_2: time < 3,\n body=body,\n loop_vars=(time_0, ta, state0),\n shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),\n tensor_shape.unknown_shape()),\n parallel_iterations=3)\n vout = h_final.stack()\n return vout\n\n v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))\n state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))\n init_val = np.arange(100, 105, dtype=np_dtype)\n var = variable_scope.get_variable(\n \"var\",\n shape=init_val.shape,\n dtype=np_dtype,\n initializer=init_ops.constant_initializer(init_val))\n\n vout = func(v0, state0, var)\n grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)\n if context.executing_eagerly():\n grad_fn = backprop.gradients_function(func)\n v0_grad, state0_grad, var_grad = grad_fn(v0, state0, var, dy=grad_val)\n else:\n v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]\n state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]\n var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]\n self.evaluate(variables.global_variables_initializer())\n\n state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (\n self.evaluate(\n ([state0, var, v0, vout, v0_grad, var_grad, state0_grad])))\n just_v0_grad_t = self.evaluate(v0_grad)\n\n # state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]\n # vout = [ v0[0] + var + state[0] |\n # v0[1] + var + state[1] |\n # v0[2] + var + state[2] ]\n # = [ v0[0] + var + state0 |\n # v0[1] + var + state0 + v0[0] |\n # v0[2] + var + state0 + v0[0] + v0[1] ]\n #\n # d(vout[0])/d(v0) = [1 | 0 | 0 ]\n # d(vout[1])/d(v0) = [1 | 1 | 0 ]\n # d(vout[2])/d(v0) = [1 | 1 | 1 ]\n # d(vout)/d(var) = [1 | 1 | 1]\n # d(vout)/d(state0) = [ 1 | 1 | 1 ]\n\n state_per_time = np.array(\n [state0_t, state0_t + v0_t[0, :], state0_t + v0_t[0, :] + v0_t[1, :]])\n\n # Compare forward prop\n self.assertAllClose(v0_t + var_t + state_per_time, vout_t)\n\n # Compare backward prop\n expected_v0_grad_t = np.array([\n grad_val[0, :] + grad_val[1, :] + grad_val[2, :],\n 
grad_val[1, :] + grad_val[2, :], grad_val[2, :]\n ])\n\n self.assertAllEqual(expected_v0_grad_t, v0_grad_t)\n self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)\n self.assertAllClose(grad_val.sum(axis=0), var_grad_t)\n self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)\n\n def testWhileLoopWritePackGradients(self):\n self._testWhileLoopWritePackGradients(\n dynamic_size=False, dtype=dtypes.float32)\n # TODO(ebrevdo): re-enable when While supports non-float32 gradients.\n # self._testWhileLoopWritePackGradients(\n # dynamic_size=False, dtype=tf.int64)\n\n @test_util.run_v1_only(\"b/117943489\")\n def testSkipEagerWhileLoopDynamicWritePackGradients(self):\n self._testWhileLoopWritePackGradients(\n dynamic_size=True, dtype=dtypes.float32)\n\n def testGradSerialTwoLoops(self):\n with self.session(use_gpu=True):\n def loop(x):\n num_steps = 100\n acc = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n size=num_steps,\n clear_after_read=False,\n element_shape=tensor_shape.TensorShape([]))\n i = constant_op.constant(0, name=\"i\")\n\n c = lambda i, acc: i < 5\n\n def b(i, acc):\n x1 = control_flow_ops.cond(\n math_ops.equal(i, 0), lambda: x,\n lambda: math_ops.multiply(acc.read(i - 1), 2.0))\n return i + 1, acc.write(i, x1)\n\n i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])\n\n z = constant_op.constant(0.0)\n\n def fn(i, acc):\n return i + 1, acc.write(i, z)\n\n _, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,\n [i1, acc1])\n\n r = acc2.stack()\n return r\n\n x = constant_op.constant(2.0, name=\"x\")\n if context.executing_eagerly():\n grad = backprop.gradients_function(loop)(x)[0]\n else:\n grad = gradients_impl.gradients(loop(x), [x])[0]\n self.assertAllClose(31.0, self.evaluate(grad))\n\n def testShapeAfterWhileLoop(self):\n size = 10\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=size)\n _, ta = control_flow_ops.while_loop(\n lambda i, _: i < size,\n lambda i, ta: (i + 1, ta.write(i, [[0.]])), [0, ta],\n parallel_iterations=1)\n self.assertIsNotNone(ta.element_shape.dims)\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerSumOfTwoReadVariablesWithoutRepeatGrad(self):\n with self.session(use_gpu=True) as session:\n a = array_ops.identity(\n np.arange(\n 3 * 5, dtype=np.float32).reshape(3, 5) + 1)\n b = array_ops.identity(\n np.arange(\n 3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)\n ta = ta.write(0, a, name=\"write_a\")\n ta = ta.write(1, b, name=\"write_b\")\n c = (\n ta.read(\n 0, name=\"read_a_0\") + # a + b\n ta.read(\n 1, name=\"read_b_0\"))\n g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)\n grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1\n grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1\n\n # Test gradients calculated individually\n grad_a_t, = session.run([grad_a])\n self.assertAllEqual(grad_a_t, g0)\n\n grad_b_t, = session.run([grad_b])\n self.assertAllEqual(grad_b_t, g0)\n\n # Test gradients calculated jointly\n joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])\n self.assertAllEqual(joint_grad_a_t, g0)\n self.assertAllEqual(joint_grad_b_t, g0)\n\n def _grad_source_for_name(self, name):\n return tensor_array_grad._GetGradSource(constant_op.constant(0, name=name))\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerGetGradSource_Invalid(self):\n with self.assertRaises(ValueError):\n self._grad_source_for_name(\"\")\n with self.assertRaises(ValueError):\n 
self._grad_source_for_name(\"foo\")\n with self.assertRaises(ValueError):\n self._grad_source_for_name(\"foo/bar\")\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerGetGradSource_NoEnclosingScope(self):\n self.assertEqual(\"gradients:0\", self._grad_source_for_name(\"gradients\"))\n self.assertEqual(\"gradients_0:0\", self._grad_source_for_name(\"gradients_0\"))\n self.assertEqual(\"gradients\", self._grad_source_for_name(\"gradients/foo\"))\n self.assertEqual(\"gradients_0\",\n self._grad_source_for_name(\"gradients_0/foo\"))\n self.assertEqual(\"gradients\",\n self._grad_source_for_name(\"gradients/foo/bar\"))\n self.assertEqual(\"gradients_0\",\n self._grad_source_for_name(\"gradients_0/foo/bar\"))\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerGetGradSource_EnclosingScope(self):\n self.assertEqual(\"foo/gradients:0\",\n self._grad_source_for_name(\"foo/gradients\"))\n self.assertEqual(\"foo/gradients_0:0\",\n self._grad_source_for_name(\"foo/gradients_0\"))\n self.assertEqual(\"foo/gradients\",\n self._grad_source_for_name(\"foo/gradients/bar\"))\n self.assertEqual(\"foo/gradients_0\",\n self._grad_source_for_name(\"foo/gradients_0/bar\"))\n self.assertEqual(\"foo/bar/gradients\",\n self._grad_source_for_name(\"foo/bar/gradients/baz\"))\n self.assertEqual(\"foo/bar/gradients_0\",\n self._grad_source_for_name(\"foo/bar/gradients_0/baz\"))\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerGetGradSource_NestedUsesInnermost(self):\n self.assertEqual(\n \"foo/gradients/bar/gradients_0\",\n self._grad_source_for_name(\"foo/gradients/bar/gradients_0/baz\"))\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerWriteShape(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=3)\n c0 = constant_op.constant([4.0, 5.0])\n w0 = ta.write(0, c0)\n r0 = w0.read(0)\n self.assertAllEqual(c0.get_shape(), r0.get_shape())\n\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=3)\n c1 = constant_op.constant([6.0, 7.0])\n w1 = w0.write(1, c1)\n r0 = w1.read(0)\n r1 = w1.read(1)\n self.assertAllEqual(c0.get_shape(), r0.get_shape())\n self.assertAllEqual(c1.get_shape(), r1.get_shape())\n\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=3)\n c2 = constant_op.constant([4.0, 5.0, 6.0])\n with self.assertRaises(ValueError):\n w0.write(0, c2)\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerPartlyUnknownShape(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=6)\n\n c0 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])\n w0 = ta.write(0, c0)\n r0 = w0.read(0)\n self.assertAllEqual([None, None, None, 3], r0.get_shape().as_list())\n\n c1 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])\n w1 = w0.write(1, c1)\n r1 = w1.read(0)\n self.assertAllEqual([None, None, None, 3], r1.get_shape().as_list())\n\n # Writing less specific shape (doesn't change type.)\n c2 = array_ops.placeholder(dtypes.float32, [None, None, None, None])\n w2 = w1.write(2, c2)\n r2 = w2.read(0)\n self.assertAllEqual([None, None, None, 3], r2.get_shape().as_list())\n\n # Writing more specific shape in one dimension and less specific in\n # another.\n c3 = array_ops.placeholder(dtypes.float32, [None, None, 2, None])\n w3 = w2.write(3, c3)\n r3 = w3.read(0)\n self.assertAllEqual([None, None, 2, 3], r3.get_shape().as_list())\n\n 
# Writing partly defined shape using TensorArray.scatter.\n c4 = array_ops.placeholder(dtypes.float32, [2, None, 4, 2, 3])\n w4 = w3.scatter([4, 5], c4)\n r4 = w4.read(0)\n self.assertAllEqual([None, 4, 2, 3], r4.get_shape().as_list())\n\n # Writing fully defined shape using TensorArray.split.\n c5 = array_ops.placeholder(dtypes.float32, [10, 4, 2, 3])\n w5 = w4.split(c5, constant_op.constant([5, 5]))\n r5 = w5.read(0)\n self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())\n\n def _testUnpackShape(self):\n with self.cached_session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=0,\n dynamic_size=True,\n infer_shape=True)\n value = constant_op.constant(\n [[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])\n w0 = ta.unstack(value)\n r0 = w0.read(0)\n self.assertAllEqual((2,), r0.get_shape())\n\n c1 = constant_op.constant([4.0, 5.0])\n w1 = w0.write(3, c1)\n\n if not control_flow_util.ENABLE_CONTROL_FLOW_V2:\n # TensorArray v2 does not support clear_after_read.\n with self.assertRaisesOpError(\n r\"Could not read index 0 twice because it was cleared after a \"\n r\"previous read \\(perhaps try setting clear_after_read = false\\?\\)\"\n ):\n with ops.control_dependencies([r0]):\n self.evaluate(w1.read(0))\n\n r1 = w1.read(1)\n self.assertAllEqual(c1.get_shape(), r1.shape)\n\n c2 = constant_op.constant([4.0, 5.0, 6.0])\n with self.assertRaises(ValueError):\n w1.write(4, c2)\n\n @test_util.run_v1_only(\"b/117943489\")\n def testUnpackShape(self):\n self._testUnpackShape()\n\n @test_util.deprecated_graph_mode_only\n def testSplitShape(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=0,\n dynamic_size=True,\n infer_shape=True)\n value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])\n w0 = ta.split(value, [1, 1, 1])\n r0 = w0.read(0)\n self.assertAllEqual((1, 2), r0.get_shape())\n\n ta1 = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo1\",\n size=0,\n dynamic_size=True,\n infer_shape=True)\n w0 = ta1.split(value, [1, 2])\n r0 = w0.read(0)\n if context.executing_eagerly():\n self.assertEqual((1, 2), r0.get_shape())\n self.assertEqual((2, 2), w0.read(1).get_shape())\n else:\n self.assertEqual(r0.get_shape().ndims, None)\n if not control_flow_util.ENABLE_CONTROL_FLOW_V2:\n self.assertEqual(\n tensor_shape.TensorShape(\n ta1.handle.op.get_attr(\"element_shape\")).ndims, None)\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerWriteUnknownShape(self):\n with self.session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=3,\n infer_shape=True)\n c0 = array_ops.placeholder(dtypes.float32)\n w0 = ta.write(0, c0)\n r0 = w0.read(0)\n self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())\n\n def _testGradientWhenNotAllComponentsRead(self):\n with self.cached_session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)\n x = constant_op.constant([2.0, 3.0])\n w = ta.unstack(x)\n r0 = w.read(0)\n # calculate (dr0/dx0, dr0/dx1). 
since r0 = x0, gradients are (1, 0).\n grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])\n grad_r0_vals = session.run(grad_r0)[0]\n self.assertAllEqual(grad_r0_vals, [1.0, 0.0])\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerGradientWhenNotAllComponentsRead(self):\n self._testGradientWhenNotAllComponentsRead()\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerWriteButNotAllComponentsReadGrad(self):\n with self.cached_session(use_gpu=True) as session:\n x0 = constant_op.constant(5.0)\n x1 = constant_op.constant(10.0)\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=2).write(0, x0).write(1, x1)\n r0 = ta.read(0)\n # calculate (dr0/dx0, dr0/dx1). since r0 = x0, gradients are (1, 0).\n grad_r0_x1 = gradients_impl.gradients(ys=[r0], xs=[x0, x1], grad_ys=[1.0])\n grad_r0_x1_vals = session.run(grad_r0_x1)\n self.assertAllEqual(grad_r0_x1_vals, [1.0, 0.0])\n\n def _testTensorArrayUnpackDynamic(self):\n with self.cached_session(use_gpu=True) as sess:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=3, dynamic_size=True)\n x = constant_op.constant([1.0, 2.0, 3.0])\n w0 = ta.unstack(x)\n w1 = w0.write(3, 4.0)\n r = w1.stack()\n self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))\n grad = gradients_impl.gradients(ys=[r], xs=[x])\n self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])\n\n @test_util.run_v1_only(\"b/117943489\")\n def testSkipEagerTensorArrayUnpackDynamic(self):\n self._testTensorArrayUnpackDynamic()\n\n @test_util.run_v1_only(\"b/117943489\")\n def testSkipEagerTensorArraySplitDynamic(self):\n with self.session(use_gpu=True) as sess:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=3, dynamic_size=True)\n x = constant_op.constant([1.0, 2.0, 3.0])\n w0 = ta.split(x, [1, 1, 1])\n w1 = w0.write(3, [4.0])\n r = w1.concat()\n self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))\n grad = gradients_impl.gradients(ys=[r], xs=[x])\n self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])\n\n def testStackShape(self):\n\n @def_function.function\n def ta_stack():\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)\n x = constant_op.constant([1.0, 2.0, 3.0])\n ta = ta.write(0, x)\n t = ta.stack()\n self.assertEqual(t.shape.as_list(), [None, 3])\n return t\n\n ta_stack()\n\n def testReadShape(self):\n\n @def_function.function\n def ta_read():\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)\n x = constant_op.constant([1.0, 2.0, 3.0])\n ta = ta.write(0, x)\n t = ta.read(0)\n self.assertEqual(t.shape.as_list(), [3])\n return t\n\n ta_read()\n\n def testGatherShape(self):\n\n def ta_gather(indices):\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)\n x = constant_op.constant([1.0, 2.0, 3.0])\n ta = ta.write(0, x)\n t = ta.gather(indices)\n self.assertEqual(t.shape.as_list(), [first_dim, 3])\n return t\n\n # This propagates shape of `indices` when compiling ta_gather.\n ta_gather_with_known_indices_shape = def_function.function(ta_gather)\n first_dim = 1\n ta_gather_with_known_indices_shape([0])\n\n # Here were force the shape of `indices` to be [None] during ta_gather's\n # compilation.\n ta_gather_with_unknown_indices_shape = def_function.function(\n ta_gather,\n input_signature=[\n tensor_spec.TensorSpec(dtype=dtypes.int32, shape=[None])\n ])\n first_dim = None\n ta_gather_with_unknown_indices_shape([0])\n\n def _testTensorArrayEvalEmpty(self):\n with self.cached_session(use_gpu=True):\n ta = 
tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)\n v2_msg = (\"Tried to stack elements of an empty list with \"\n \"non-fully-defined element_shape\")\n v1_msg = (\n \"TensorArray has size zero, but element shape <unknown> is not \"\n \"fully defined. Currently only static shapes are supported when \"\n \"packing zero-size TensorArrays.\")\n with self.assertRaisesOpError(\n v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):\n ta.stack().eval()\n\n @test_util.run_v1_only(\"b/120545219\")\n def testSkipEagerTensorArrayEvalEmpty(self):\n self._testTensorArrayEvalEmpty()\n\n # this test is ill-defined for Eager mode --- unpacking an empty tensor\n # gives an empty list / there is not equivalent of \"mark_used\" in Eager\n def _testTensorArrayEvalEmptyWithDefault(self):\n with self.cached_session(use_gpu=True):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)\n self.assertEqual(0, ta.size().eval())\n # Don't actually perform the pack. This stores the static shape.\n if control_flow_util.ENABLE_CONTROL_FLOW_V2:\n ta = ta.unstack(array_ops.zeros([0, 3, 5]))\n else:\n ta.unstack(array_ops.zeros([0, 3, 5])).mark_used()\n packed = ta.stack()\n concatenated = ta.concat()\n self.assertAllEqual([0, 3, 5], self.evaluate(packed).shape)\n # Concatenating zero tensors along their first dimension gives a\n # first dimension of zero\n self.assertAllEqual([0, 5], self.evaluate(concatenated).shape)\n\n @test_util.run_v1_only(\"b/117943489\")\n def testSkipEagerTensorArrayEvalEmptyWithDefault(self):\n self._testTensorArrayEvalEmptyWithDefault()\n\n @test_util.run_v1_only(\"b/117943489\")\n def testSkipEagerTensorArrayScatterReadAndGradients(self):\n with self.session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=0,\n dynamic_size=True)\n\n indices = constant_op.constant([1, 8])\n value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])\n\n w = ta.scatter(indices, value)\n r0 = w.read(1)\n r1 = w.read(8)\n\n # Test combined gradients + aggregation of read(0)\n grad = gradients_impl.gradients(\n ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])\n read_vals, grad_vals = session.run([[r0, r1], grad])\n\n self.assertEqual(len(read_vals), 2)\n self.assertEqual(len(grad_vals), 1)\n self.assertAllEqual([1.0, -1.0], read_vals[0])\n self.assertAllEqual([10.0, -10.0], read_vals[1])\n self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])\n\n @test_util.run_v1_only(\"b/117943489\")\n def testSkipEagerTensorArrayScatterPartialReadAndGradients(self):\n with self.session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=0,\n dynamic_size=True)\n\n indices = constant_op.constant([1, 8])\n value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])\n\n w = ta.scatter(indices, value)\n r0 = w.read(1)\n\n # Test combined gradients + aggregation of read(0)\n grad = gradients_impl.gradients(\n ys=[r0], xs=[value], grad_ys=[[2.0, 3.0]])[0]\n read_val, grad_val = session.run([r0, grad])\n\n self.assertAllEqual([1.0, -1.0], read_val)\n self.assertAllEqual([[2.0, 3.0], [0.0, 0.0]], grad_val)\n\n def testScatterIntoExistingList(self):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, tensor_array_name=\"foo\", size=5)\n\n ta = ta.scatter(indices=[3, 4], value=array_ops.ones([2]))\n self.assertAllEqual(ta.stack(), [0., 0., 0., 1., 1.])\n\n ta 
= ta.scatter(indices=[1], value=array_ops.ones([1]))\n self.assertAllEqual(ta.stack(), [0., 1., 0., 1., 1.])\n\n ta = ta.scatter(indices=[0, 2], value=[5., 6.])\n self.assertAllEqual(ta.stack(), [5., 1., 6., 1., 1.])\n\n @test_util.run_v1_only(\"b/118890905\")\n def testTensorArrayWriteGatherAndGradients(self):\n with self.session(use_gpu=True) as session:\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32,\n tensor_array_name=\"foo\",\n size=0,\n dynamic_size=True)\n\n def func(values):\n indices = constant_op.constant([1, 8])\n w = ta.unstack(values)\n g = w.gather(indices)\n return g\n\n values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])\n g = func(values)\n grad_ys = [[[2.0, 3.0], [4.0, 5.0]]]\n # Test combined gradients + aggregation of read(0)\n if context.executing_eagerly():\n g_vals = [g]\n grad_vals = backprop.gradients_function(func)(\n values, dy=constant_op.constant(grad_ys[0], dtype=dtypes.float32))\n else:\n grad = gradients_impl.gradients(ys=[g], xs=[values], grad_ys=grad_ys)\n g_vals, grad_vals = session.run([[g], grad])\n\n # Gradients for 8 of the 10 unread components are zero.\n expected_grad = np.zeros((10, 2))\n expected_grad[1] = [2.0, 3.0]\n expected_grad[8] = [4.0, 5.0]\n\n self.assertEqual(len(g_vals), 1)\n self.assertEqual(len(grad_vals), 1)\n self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])\n self.assertAllEqual(expected_grad, grad_vals[0])\n\n @test_util.disable_control_flow_v2(\"colocate_with not supported in v2.\")\n @test_util.run_v1_only(\"b/120545219\")\n def testSkipEagerTensorArrayGetsDeviceFromFirstWrite(self):\n with ops.device(\"/job:worker/task:0/cpu:0\"):\n # this initial device will be ignored.\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)\n with ops.device(\"/job:worker/task:1/cpu:0\"):\n # the first write sets the op's device.\n ta = ta.write(0, 1.0)\n with ops.device(\"/job:worker/task:2/cpu:0\"):\n # subsequent writes do not modify the op's device.\n ta = ta.write(1, 1.0)\n\n # The gradient TA will sit on the same device as the forward TA.\n ta_grad = ta.grad(\"grad\")\n flows = [ta.flow, ta_grad.flow]\n\n # Similar tests for unpack and split\n with ops.device(\"/job:worker/task:0/cpu:0\"):\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)\n with ops.device(\"/job:worker/task:1/cpu:0\"):\n ta = ta.unstack([1.0, 2.0])\n with ops.device(\"/job:worker/task:2/cpu:0\"):\n ta = ta.write(2, 3.0)\n flows.append(ta.flow)\n\n with ops.device(\"/job:worker/task:0/cpu:0\"):\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)\n with ops.device(\"/job:worker/task:1/cpu:0\"):\n ta = ta.split([1.0, 2.0], [1, 1])\n flows.append(ta.flow)\n\n session = session_lib.Session(self._workers[0].target)\n\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n session.run(flows, options=run_options, run_metadata=run_metadata)\n self.assertTrue(run_metadata.HasField(\"step_stats\"))\n dev_stats = {d.device: d.node_stats\n for d in run_metadata.step_stats.dev_stats}\n for d in dev_stats:\n if \"/task:1/\" in d:\n self.assertTrue(\n [s for s in dev_stats[d] if \"/TensorArray\" in s.node_name])\n elif \"/host:CPU\" not in d:\n self.assertFalse(\n [s for s in dev_stats[d] if \"/TensorArray\" in s.node_name])\n\n @test_util.disable_control_flow_v2(\"colocate_with not supported in v2.\")\n @test_util.run_v1_only(\"b/120545219\")\n def testSkipEagerTensorArrayGetsDeviceFromFirstWriteInWhileLoop(self):\n with 
ops.device(\"/job:worker/task:0/cpu:0\"):\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)\n\n def _body(i, ta_i):\n with ops.device(\"/job:worker/task:1/cpu:0\"):\n return i + 1, ta_i.write(i, constant_op.constant(0.0))\n\n _, ta_out = control_flow_ops.while_loop(\n lambda i, ta: i < 2, _body, loop_vars=[0, ta])\n\n session = session_lib.Session(self._workers[0].target)\n\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)\n self.assertTrue(run_metadata.HasField(\"step_stats\"))\n dev_stats = {d.device: d.node_stats\n for d in run_metadata.step_stats.dev_stats}\n for d in dev_stats:\n if \"/task:1/\" in d:\n self.assertTrue(\n [s for s in dev_stats[d] if \"TensorArray\" == s.node_name])\n else:\n self.assertFalse(\n [s for s in dev_stats[d] if \"TensorArray\" == s.node_name])\n\n @test_util.disable_control_flow_v2(\"colocate_with not supported in v2.\")\n @test_util.run_v1_only(\"b/120545219\")\n def testSkipEagerTensorArrayDisabledColocateWithFirstWriteCall(self):\n with ops.device(\"/job:worker/task:0/cpu:0\"):\n ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=2, colocate_with_first_write_call=False)\n\n def _body(i, ta_i):\n with ops.device(\"/job:worker/task:1/cpu:0\"):\n return i + 1, ta_i.write(i, constant_op.constant(0.0))\n\n _, ta_out = control_flow_ops.while_loop(\n lambda i, ta: i < 2, _body, loop_vars=[0, ta])\n\n session = session_lib.Session(self._workers[0].target)\n\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)\n self.assertTrue(run_metadata.HasField(\"step_stats\"))\n dev_stats = {d.device: list(d.node_stats)\n for d in run_metadata.step_stats.dev_stats}\n for d in dev_stats:\n if \"/task:0/\" in d and \"CPU\" in d: # Skip any GPU node stats\n self.assertTrue(\n [s for s in dev_stats[d] if \"TensorArray\" == s.node_name])\n else:\n self.assertFalse(\n [s for s in dev_stats[d] if \"TensorArray\" == s.node_name])\n\n def testTensorArrayIdentity(self):\n with self.session(use_gpu=True):\n ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,\n infer_shape=False)\n ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,\n infer_shape=True)\n\n ta0 = ta0.write(0, 0.)\n ta1 = ta1.write(0, 1)\n\n v0 = variable_scope.get_variable(\n \"v0\", shape=(), initializer=init_ops.zeros_initializer())\n v1 = variable_scope.get_variable(\n \"v1\", shape=(), initializer=init_ops.zeros_initializer())\n\n with ops.control_dependencies([v0.assign_add(1)]):\n ta0 = ta0.identity()\n\n with ops.control_dependencies([v1.assign_add(1)]):\n ta1 = ta1.identity()\n\n read0 = ta0.read(0)\n read1 = ta1.read(0)\n\n size0 = ta0.size()\n size1 = ta1.size()\n\n # Tests correct properties on new TensorArrays.\n self.assertEqual(dtypes.float32, ta0.dtype)\n self.assertEqual(dtypes.int32, ta1.dtype)\n if context.executing_eagerly():\n self.assertEqual(tensor_shape.TensorShape([]), read0.get_shape())\n else:\n self.assertEqual(tensor_shape.unknown_shape(), read0.get_shape())\n self.assertEqual(tensor_shape.TensorShape([]), read1.get_shape())\n\n if not context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n\n read0_v, read1_v, size0_v, size1_v = self.evaluate((read0, read1, size0,\n size1))\n\n # Tests that the control 
dependencies was added and executed.\n self.assertEqual(1, self.evaluate(v0))\n self.assertEqual(1, self.evaluate(v1))\n\n # Tests correct TensorArray.\n self.assertEqual(read0_v, 0)\n self.assertEqual(read1_v, 1)\n self.assertEqual(size0_v, 2)\n self.assertEqual(size1_v, 4)\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerTensorArrayGradYsInCorrectScope(self):\n n_time = 1\n n_dim = 1\n x = constant_op.constant([[1.42]])\n dy = constant_op.constant([[2.42]])\n\n ta = tensor_array_ops.TensorArray(\n dtypes.float32, size=n_time, element_shape=[n_dim])\n for t in range(n_time):\n ta = ta.write(index=t, value=x[t])\n y = ta.stack()\n # dy is outside of the gradients name scope; tf.gradients must\n # wrap it in the correct name scope.\n dx, = gradients_impl.gradients(ys=[y], xs=[x], grad_ys=[dy])\n with self.cached_session(use_gpu=True) as sess:\n vdx, vdy = self.evaluate([dx, dy])\n self.assertAllClose(vdx, vdy)\n\n @test_util.deprecated_graph_mode_only\n def testSkipEagerTensorArrayInt64GPU(self):\n if not test.is_gpu_available():\n return\n with self.session(use_gpu=True, force_gpu=True) as sess:\n value = array_ops.placeholder(dtypes.int64)\n ta = tensor_array_ops.TensorArray(dtype=dtypes.int64, size=2)\n ta = ta.scatter([0, 1], value)\n r0 = ta.read(0)\n r1 = ta.read(1)\n v0, v1 = sess.run([r0, r1], feed_dict={value: [-3, 100]})\n self.assertAllEqual(v0, -3)\n self.assertAllEqual(v1, 100)\n\n\nclass TensorArrayBenchmark(test.Benchmark):\n\n def _tensorArrayWriteInWhile(self):\n size = 10000\n ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=size)\n (_, ta) = control_flow_ops.while_loop(\n lambda i, _: i < size,\n lambda i, ta: (i + 1, ta.write(i, 0.)), [0, ta],\n parallel_iterations=1)\n return ta.stack()\n\n def _benchmarkWriteInWhile(self):\n ops.reset_default_graph()\n op = self._tensorArrayWriteInWhile()\n self.run_op_benchmark(session_lib.Session(), op)\n\n def benchmarkWriteInWhile(self):\n self._benchmarkWriteInWhile()\n\n @test_util.enable_control_flow_v2\n def benchmarkWriteInWhileWithControlFlowV2(self):\n self._benchmarkWriteInWhile()\n\n def benchmarkWriteInDatasetMapFn(self):\n ds = dataset_ops.Dataset.from_tensors(array_ops.zeros([10])).repeat()\n ds = ds.map(lambda _: self._tensorArrayWriteInWhile())\n op = ds.make_one_shot_iterator().get_next()\n self.run_op_benchmark(session_lib.Session(), op)\n\n def benchmarkWriteInDatasetParallelMapFn(self):\n ds = dataset_ops.Dataset.from_tensors(array_ops.zeros([10])).repeat()\n ds = ds.map(lambda _: self._tensorArrayWriteInWhile(), num_parallel_calls=2)\n op = ds.make_one_shot_iterator().get_next()\n self.run_op_benchmark(session_lib.Session(), op)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.framework.tensor_shape.TensorShape",
"numpy.asarray",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.gen_data_flow_ops.tensor_array_read_v3",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.framework.test_util.disable_control_flow_v2",
"tensorflow.python.framework.test_util.run_v1_only",
"numpy.arange",
"tensorflow.python.platform.test.create_local_cluster",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.framework.ops.control_dependencies",
"numpy.zeros",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.client.session.Session",
"tensorflow.python.client.session.Session.reset",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"numpy.array",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.eager.backprop.gradients_function",
"numpy.random.random",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
4kssoft/unilm | [
"bc98ced9414bfba9ba453a8368caedae6f9d578b"
] | [
"unilm-v1/src/pytorch_pretrained_bert/optimization.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch optimization for BERT model.\"\"\"\n\nimport math\nimport torch\nfrom torch.optim import Optimizer\nfrom torch.optim.optimizer import required\nfrom torch.nn.utils import clip_grad_norm_\n\nfrom collections import defaultdict\nfrom torch._six import container_abcs\nfrom copy import deepcopy\nfrom itertools import chain\n\n\ndef warmup_cosine(x, warmup=0.002):\n if x < warmup:\n return x/warmup\n return 0.5 * (1.0 + torch.cos(math.pi * x))\n\n\ndef warmup_constant(x, warmup=0.002):\n if x < warmup:\n return x/warmup\n return 1.0\n\n\ndef warmup_linear(x, warmup=0.002):\n if x < warmup:\n return x/warmup\n return max((x-1.)/(warmup-1.), 0)\n\n\nSCHEDULES = {\n 'warmup_cosine': warmup_cosine,\n 'warmup_constant': warmup_constant,\n 'warmup_linear': warmup_linear,\n}\n\n\nclass BertAdam(Optimizer):\n \"\"\"Implements BERT version of Adam algorithm with weight decay fix.\n Params:\n lr: learning rate\n warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1\n t_total: total number of training steps for the learning\n rate schedule, -1 means constant learning rate. Default: -1\n schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'\n b1: Adams b1. Default: 0.9\n b2: Adams b2. Default: 0.999\n e: Adams epsilon. Default: 1e-6\n weight_decay: Weight decay. Default: 0.01\n max_grad_norm: Maximum norm for the gradients (-1 means no clipping). 
Default: 1.0\n \"\"\"\n\n def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0):\n if lr is not required and lr < 0.0:\n raise ValueError(\n \"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if schedule not in SCHEDULES:\n raise ValueError(\"Invalid schedule parameter: {}\".format(schedule))\n if not 0.0 <= warmup < 1.0 and not warmup == -1:\n raise ValueError(\n \"Invalid warmup: {} - should be in [0.0, 1.0[ or -1\".format(warmup))\n if not 0.0 <= b1 < 1.0:\n raise ValueError(\n \"Invalid b1 parameter: {} - should be in [0.0, 1.0[\".format(b1))\n if not 0.0 <= b2 < 1.0:\n raise ValueError(\n \"Invalid b2 parameter: {} - should be in [0.0, 1.0[\".format(b2))\n if not e >= 0.0:\n raise ValueError(\n \"Invalid epsilon value: {} - should be >= 0.0\".format(e))\n defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,\n b1=b1, b2=b2, e=e, weight_decay=weight_decay,\n max_grad_norm=max_grad_norm)\n super(BertAdam, self).__init__(params, defaults)\n\n def get_lr(self):\n lr = []\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n if len(state) == 0:\n return [0]\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n lr_scheduled = group['lr'] * schedule_fct(\n state['step']/group['t_total'], group['warmup'])\n else:\n lr_scheduled = group['lr']\n lr.append(lr_scheduled)\n return lr\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n 'Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(1 - beta1, grad)\n next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n update = next_m / (next_v.sqrt() + group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n update += group['weight_decay'] * p.data\n\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n lr_scheduled = group['lr'] * schedule_fct(\n state['step']/group['t_total'], group['warmup'])\n else:\n lr_scheduled = group['lr']\n\n update_with_lr = lr_scheduled * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1\n # No bias correction\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n return loss\n\n\nclass BertAdamFineTune(BertAdam):\n def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0):\n self.init_param_group = []\n super(BertAdamFineTune, self).__init__(params, lr, warmup,\n t_total, schedule, b1, b2, e, weight_decay, max_grad_norm)\n\n def save_init_param_group(self, param_groups, name_groups, missing_keys):\n self.init_param_group = []\n for group, name in zip(param_groups, name_groups):\n if group['weight_decay'] > 0.0:\n init_p_list = []\n for p, n in zip(group['params'], name):\n init_p = p.data.clone().detach()\n if any(mk in n for mk in missing_keys):\n print(\"[no finetuning weight decay]\", n)\n # should use the original weight decay\n init_p.zero_()\n init_p_list.append(init_p)\n self.init_param_group.append(init_p_list)\n else:\n # placeholder\n self.init_param_group.append([])\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for i_group, group in enumerate(self.param_groups):\n for i_p, p in enumerate(group['params']):\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n 'Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(1 - beta1, grad)\n next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n update = next_m / (next_v.sqrt() + group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n if self.init_param_group:\n update += group['weight_decay'] * \\\n (2.0 * p.data -\n self.init_param_group[i_group][i_p])\n else:\n update += group['weight_decay'] * p.data\n\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n lr_scheduled = group['lr'] * schedule_fct(\n state['step']/group['t_total'], group['warmup'])\n else:\n lr_scheduled = group['lr']\n\n update_with_lr = lr_scheduled * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1\n # No bias correction\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n return loss\n\n def load_state_dict_subset_finetune(self, state_dict, num_load_group):\n r\"\"\"Loads the optimizer state.\n\n Arguments:\n state_dict (dict): optimizer state. Should be an object returned\n from a call to :meth:`state_dict`.\n \"\"\"\n # deepcopy, to be consistent with module API\n state_dict = deepcopy(state_dict)\n # Validate the state_dict\n groups = self.param_groups\n saved_groups = state_dict['param_groups']\n\n if len(groups) < num_load_group or len(saved_groups) < num_load_group:\n raise ValueError(\"loaded state dict has a different number of \"\n \"parameter groups\")\n param_lens = (len(g['params']) for g in groups[:num_load_group])\n saved_lens = (len(g['params']) for g in saved_groups[:num_load_group])\n if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):\n raise ValueError(\"loaded state dict contains a parameter group \"\n \"that doesn't match the size of optimizer's group\")\n\n # Update the state\n id_map = {old_id: p for old_id, p in\n zip(chain(*(g['params'] for g in saved_groups[:num_load_group])),\n chain(*(g['params'] for g in groups[:num_load_group])))}\n\n def cast(param, value):\n r\"\"\"Make a deep copy of value, casting all tensors to device of param.\"\"\"\n if isinstance(value, torch.Tensor):\n # Floating-point types are a bit special here. 
They are the only ones\n # that are assumed to always match the type of params.\n if param.is_floating_point():\n value = value.to(param.dtype)\n value = value.to(param.device)\n return value\n elif isinstance(value, dict):\n return {k: cast(param, v) for k, v in value.items()}\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(cast(param, v) for v in value)\n else:\n return value\n\n # Copy state assigned to params (and cast tensors to appropriate types).\n # State that is not assigned to params is copied as is (needed for\n # backward compatibility).\n state = defaultdict(dict)\n for k, v in state_dict['state'].items():\n if k in id_map:\n param = id_map[k]\n state[param] = cast(param, v)\n else:\n state[k] = v\n # handle additional params\n for k, v in self.state:\n if k not in state:\n state[k] = v\n\n # do not change groups: {'weight_decay': 0.01, 'lr': 9.995e-06, 'schedule': 'warmup_linear', 'warmup': 0.1, 't_total': 400000, 'b1': 0.9, 'b2': 0.999, 'e': 1e-06, 'max_grad_norm': 1.0, 'params': [...]}\n # # Update parameter groups, setting their 'params' value\n # def update_group(group, new_group):\n # new_group['params'] = group['params']\n # return new_group\n # param_groups = [\n # update_group(g, ng) for g, ng in zip(groups[:num_load_group], saved_groups[:num_load_group])]\n # # handle additional params\n # param_groups.extend(groups[num_load_group:])\n\n self.__setstate__({'state': state, 'param_groups': groups})\n\n\ndef find_state_dict_subset_finetune(org_state_dict, org_name_list, no_decay, param_optimizer):\n # only use the bert encoder and embeddings\n want_name_set = set()\n for n in org_name_list:\n if ('bert.encoder' in n) or ('bert.embeddings' in n):\n want_name_set.add(n)\n # original: name to pid, pid to name\n org_grouped_names = [[n for n in org_name_list if not any(nd in n for nd in no_decay)],\n [n for n in org_name_list if any(nd in n for nd in no_decay)]]\n org_n2id, org_id2n = {}, {}\n for ng, pg in zip(org_grouped_names, org_state_dict['param_groups']):\n for n, pid in zip(ng, pg['params']):\n org_n2id[n] = pid\n org_id2n[pid] = n\n # group by: whether pretrained; whether weight decay\n g_np_list = [\n [(n, p) for n, p in param_optimizer if n in want_name_set and not any(\n nd in n for nd in no_decay)],\n [(n, p) for n, p in param_optimizer if n in want_name_set and any(\n nd in n for nd in no_decay)],\n [(n, p) for n, p in param_optimizer if n not in want_name_set and not any(\n nd in n for nd in no_decay)],\n [(n, p) for n, p in param_optimizer if n not in want_name_set and any(\n nd in n for nd in no_decay)],\n ]\n optimizer_grouped_parameters = [\n {'params': [p for n, p in g_np_list[0]], 'weight_decay': 0.01},\n {'params': [p for n, p in g_np_list[1]], 'weight_decay': 0.0},\n {'params': [p for n, p in g_np_list[2]], 'weight_decay': 0.01},\n {'params': [p for n, p in g_np_list[3]], 'weight_decay': 0.0}\n ]\n new_state_dict = {}\n # regroup the original state_dict\n new_state_dict['state'] = {pid: v for pid, v in org_state_dict['state'].items(\n ) if pid not in org_id2n or org_id2n[pid] in want_name_set}\n # reset step count to 0\n for pid, st in new_state_dict['state'].items():\n st['step'] = 0\n\n def _filter_group(group, g_np_list, i, org_n2id):\n packed = {k: v for k, v in group.items() if k != 'params'}\n packed['params'] = [pid for pid in group['params']\n if pid in org_id2n and org_id2n[pid] in want_name_set]\n assert len(g_np_list[i]) == len(packed['params'])\n # keep them the same order\n packed['params'] = [org_n2id[n] for n, p 
in g_np_list[i]]\n return packed\n new_state_dict['param_groups'] = [_filter_group(\n g, g_np_list, i, org_n2id) for i, g in enumerate(org_state_dict['param_groups'])]\n return new_state_dict, optimizer_grouped_parameters\n"
] | [
[
"torch.nn.utils.clip_grad_norm_",
"torch.zeros_like",
"torch.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
spotify-companion/CLI | [
"29934cc715922263652235d0a9a4f0d8f57ecec5"
] | [
"data_analysis.py"
] | [
"from click import style\r\nimport requests\r\nimport pandas as pd\r\nimport spotipy\r\nimport spotipy.util as util\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import preprocessing\r\nimport matplotlib.pyplot as plt\r\nimport spotify_client as spotify\r\nimport os\r\nfrom datetime import datetime\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport printing_essentials\r\nfrom alive_progress import alive_bar\r\n\r\n\r\nclass AnalysePlaylist:\r\n\r\n\r\n def __init__(self):\r\n self.client = spotify.SpotifyLogin().get_instance()\r\n self.client.refresh()\r\n self.sp = self.client.login()\r\n self.username = self.client.username\r\n self.client_id = self.client.CLIENT_ID\r\n self.client_secret = self.client.CLIENT_SECRET\r\n self.name = self.sp.current_user()['display_name']\r\n sns.set_theme(style='darkgrid')\r\n self.console = printing_essentials.Printer().get_instance().console\r\n self.printer = printing_essentials.Printer().get_instance()\r\n\r\n\r\n\r\n def analyse_playlists(self, playlist_id):\r\n df = pd.DataFrame(columns = ['Name', 'Album', 'Artist', 'Year', 'Popularity', 'Duration','Tempo','Key', 'Valence', 'Liveness','Danceability', 'Instrumentalness', 'Acousticness', 'Energy'])\r\n self.data = df\r\n track_ids = []\r\n playlist = self.sp.playlist(playlist_id)\r\n name = self.sp.playlist(playlist_id)['name']\r\n # self.console.print(name)\r\n \r\n tracks = playlist['tracks']['items']\r\n offset = playlist['tracks']\r\n try:\r\n while offset['next']:\r\n offset = self.sp.next(offset)\r\n tracks.extend(offset['items'])\r\n except:\r\n self.console.print()\r\n length = len(tracks)\r\n self.console.print(\"There are \" + str(len(tracks)) + \" Tracks in \" + name)\r\n for i in tracks:\r\n track_ids.append(i['track']['id'])\r\n\r\n with alive_bar(2, title = 'Fetching all songs',manual=True) as bar:\r\n count = 0 \r\n for i in track_ids:\r\n count+=1\r\n percentage = (count/length)\r\n bar(percentage)\r\n try:\r\n if i is not None:\r\n meta = self.sp.track(i)\r\n features = self.sp.audio_features(i)\r\n features = features[0];\r\n # self.console.print(features) \r\n track_dict = {\r\n\r\n 'Name' : meta['name'], \r\n 'Album' : meta['album']['name'], \r\n 'Artist' : meta['album']['artists'][0]['name'],\r\n 'Year' : meta['album']['release_date'][0:4], \r\n 'Popularity' : meta['popularity'],\r\n 'Duration' : meta['duration_ms'] * 0.001 ,\r\n 'Danceability' : features['danceability'],\r\n 'Energy' : features['energy'],\r\n 'Key' : features['key'],\r\n 'Instrumentalness' : features['instrumentalness'],\r\n 'Valence' : features['valence'],\r\n 'Tempo' : features['tempo'],\r\n 'Liveness' : features['liveness'],\r\n 'Acousticness' : features['acousticness'],\r\n\r\n } \r\n # self.console.print(track_dict) if count == 0 else self.console.print() \r\n df = df.append(track_dict, ignore_index = True, sort = False)\r\n \r\n except: continue\r\n bar(1.0)\r\n # self.console.print(df)\r\n self.get_graphs(df, name)\r\n \r\n \r\n def get_graphs(self, df, name= \"Default\"):\r\n df = df.sort_values('Popularity', ascending = False)\r\n feature_list = ['Year', 'Popularity','Tempo','Key', 'Valence', 'Liveness','Danceability', 'Instrumentalness', 'Acousticness', 'Energy']\r\n numeric_data = df[feature_list]\r\n numeric_data = numeric_data.apply(pd.to_numeric)\r\n for i in feature_list:\r\n headers = ['Data', 'Value']\r\n title = \"Analysis by \" + i + \" for \" + name\r\n data = []\r\n data = []\r\n data.append([\"Mean \" + i, str(numeric_data[i].mean())[:5]])\r\n 
data.append([\"Variance in \" + i, str(numeric_data[i].var())]) \r\n self.printer.printPlayList(data, headers, title)\r\n title = \"Top songs by \" + i\r\n top_df = df.sort_values(i, ascending = False)\r\n headers = ['Song', i]\r\n data = []\r\n # self.save_data(top_df, name, 'Top songs by' + i)\r\n for index, row in top_df.iterrows():\r\n data.append([row['Name'], str(row[i])])\r\n\r\n self.printer.printPlayList(data,headers,title)\r\n by_year = numeric_data.groupby('Year').mean().sort_values('Year').reset_index()\r\n # self.save_data(by_year, name,'Averages by Year')\r\n\r\n plt.figure(figsize=(12,8))\r\n sns.heatmap(numeric_data, annot = True)\r\n self.save_data(df,name)\r\n path = os.path.dirname(os.path.abspath(__file__))\r\n os.chdir(path + '/User Data Analysis')\r\n plt.savefig(name + '/' + 'Heatmap' + '.jpg')\r\n plt.savefig(name + '/' + 'Heatmap' + '.svg')\r\n sns.pairplot(data = numeric_data)\r\n plt.savefig(name + '/' + 'PairPlot' + '.jpg')\r\n plt.savefig(name + '/' + 'PairPlot' + '.svg')\r\n\r\n \r\n \r\n\r\n def analyse_user(self):\r\n playlists = self.sp.user_playlists(self.username)\r\n playlist_indices = {}\r\n index = 1\r\n playlist_header = ['Sl. No', 'Playlist Name']\r\n playlist_data_print = []\r\n for playlist in playlists['items']:\r\n playlist_data_print.append([str(index),playlist['name']])\r\n playlist_indices[index] = playlist\r\n index+=1\r\n self.printer.printPlayList(playlist_data_print, playlist_header, self.name)\r\n self.console.print(\"Select a playlist to be analysed:\")\r\n n = int(input())\r\n while True:\r\n if n < 0 or n > index:\r\n self.console.print(\"Invalid input, please enter a correct number\")\r\n else:\r\n break\r\n to_analyse = playlist_indices[n]\r\n self.console.print(to_analyse)\r\n self.analyse_playlists(to_analyse['uri'].split(':')[2])\r\n\r\n def save_data(self,df, name = \"Default\", sub_folder = \"\"):\r\n if name == 'r/listentothis': \r\n name = 'listentothis'\r\n path = os.path.dirname(os.path.abspath(__file__))\r\n if not os.path.exists('User Data Analysis'):\r\n os.mkdir('User Data Analysis')\r\n os.chdir(path + '/User Data Analysis')\r\n if not os.path.exists(name):\r\n os.mkdir(name)\r\n if sub_folder == \"\":\r\n df.to_csv(name + '/' + self.name + \".csv\")\r\n else:\r\n if not os.path.exists(os.curdir + '/' + sub_folder):\r\n os.mkdir(os.curdir + '/' + sub_folder)\r\n print(os.curdir)\r\n df.to_csv(name + '/' + sub_folder + '/' + self.name + '.csv')\r\n os.chdir(path)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n console = printing_essentials.Printer().get_instance().console\r\n console.rule(\"This is the data analysis section\", style=\"green\")\r\n console.print(\"Do you want to do an analysis of a public playlist or your own? Select 1 for public playlist, and 2 to list your playlists\")\r\n n = int(input())\r\n if(n == 1):\r\n console.print(\"Paste a playlist uri to get data analysis done, preferably one with more than 50 songs:\")\r\n uri = str(input())\r\n analyse = AnalysePlaylist()\r\n # analyse.jlt()\r\n analyse.analyse_playlists(playlist_id = uri[34:])\r\n elif(n == 2):\r\n console.print(\"Getting All your playlists.....\")\r\n analyse = AnalysePlaylist()\r\n analyse.analyse_user()\r\n"
] | [
[
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
castacks/tartan_drive | [
"c731ca65381f4a169a7ce7bcc02e8b1e68c407f4"
] | [
"rosbag_to_dataset/rosbag_to_dataset/visualization/traj_and_image_viz.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.animation import FuncAnimation\n\ndef make_plot(traj, odom_topic='/odom', image_topics=['/multisense/left/image_rect_color', '/multisense/depth'], t=0, fig=None, axs=None):\n \"\"\"\n Side-by side plot showing the traj and action sequences as well as the image at the current timestep.\n \"\"\"\n naxs = 2 + len(image_topics)\n\n if fig is None or axs is None:\n fig, axs = plt.subplots(1, naxs, figsize=(4 * naxs + 1, 4))\n\n for ax in axs:\n ax.cla()\n\n axs[0].set_title(\"Position\")\n axs[1].set_title(\"Action\")\n for i, topic in enumerate(image_topics):\n axs[i+2].set_title(topic)\n\n axs[0].set_xlabel('X(m)')\n axs[0].set_ylabel('Y(m)')\n axs[1].set_xlabel('T')\n \n axs[0].plot(traj['observation'][odom_topic][:, 0], traj['observation'][odom_topic][:, 1], label='Traj')\n axs[0].scatter(traj['observation'][odom_topic][t, 0], traj['observation'][odom_topic][t, 1], c='r', marker='x', label='Current')\n\n T = np.arange(traj['action'].shape[0])\n\n for i in range(traj['action'].shape[1]):\n axs[1].plot(T, traj['action'][:, i])\n axs[1].scatter(T[t], traj['action'][t, i])\n axs[1].axvline(t, color='k', linestyle='dotted')\n\n for i, topic in enumerate(image_topics):\n img = np.moveaxis(traj['observation'][topic][t], 0, 2)\n if img.shape[2] != 3:\n img = img[:, :, -1]\n\n axs[i+2].imshow(img)\n\n return fig, axs\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_fp', type=str, required=True, help='Data to visualize')\n args = parser.parse_args()\n\n dataset = np.load(args.data_fp, allow_pickle=True)\n dataset = {'observation':dataset['observation'].item(), 'action':dataset['action'], 'dt':dataset['dt']}\n \n fig, axs = plt.subplots(1, 4, figsize=(4 * 4 + 1, 4))\n anim = FuncAnimation(fig, func = lambda t:make_plot(dataset, odom_topic='/mocap_node/mushr/Odom', image_topics=[], t=t, fig=fig, axs=axs), frames=np.arange(dataset['action'].shape[0]), interval=dataset['dt']*1000)\n plt.show()\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.moveaxis",
"numpy.load",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
paulorauber/rlnn | [
"49f6b380921690a2c4b1d10db5c2c3055f161f7a"
] | [
"examples/tmaze.py"
] | [
"from sklearn.utils import check_random_state\n\n\nclass TMaze:\n def __init__(self, length=4, random_state=None):\n if length < 0:\n raise Exception('Invalid corridor length')\n\n self.length = length\n self.n_actions = 4\n self.random_state = check_random_state(random_state)\n\n def start(self):\n self.go_up = self.random_state.randint(2)\n self.pos = 0\n\n return self.observation()\n\n def move(self, a):\n \"\"\"a: up, down, left, right\"\"\"\n if a < 0 or a > 3:\n raise Exception('Invalid action')\n\n if 0 <= self.pos <= self.length - 1:\n # On corridor\n if a == 2 and self.pos > 0:\n # Valid left\n self.pos -= 1\n elif a == 3:\n # Right\n self.pos += 1\n elif self.pos == self.length:\n # On intersection\n if a == 2 and self.pos > 0:\n # Valid left\n self.pos -= 1\n elif a == 0:\n # Up\n self.pos += 1\n elif a == 1:\n # Down\n self.pos += 2\n\n def next_state_reward(self, a):\n self.move(a)\n\n if self.won():\n return self.observation(), 100.\n\n return self.observation(), 0.0\n\n def ended(self):\n return self.pos > self.length\n\n def won(self):\n return self.ended() and (self.go_up == (self.pos == self.length + 1))\n\n def observation(self):\n return None\n\n def __repr__(self):\n down, up = 'G', 'T'\n if self.go_up:\n up, down = down, up\n\n l1 = ['#']*(self.length + 3)\n l1[-2] = up\n if self.pos == self.length + 1:\n l1[-2] = '@'\n\n l2 = ['#'] + ['.']*(self.length + 1) + ['#']\n if self.pos <= self.length:\n l2[self.pos + 1] = '@'\n\n l3 = ['#']*(self.length + 3)\n l3[-2] = down\n if self.pos == self.length + 2:\n l3[-2] = '@'\n\n return ''.join(l1) + '\\n' + ''.join(l2) + '\\n' + ''.join(l3) + '\\n'\n\n\ndef play(maze):\n maze.start()\n\n udlr = ['w', 's', 'a', 'd']\n while not maze.ended():\n print(maze)\n\n c = raw_input('Move:')\n if c not in udlr:\n break\n\n maze.move(udlr.index(c))\n\n print('Won' if maze.won() else 'Lost')\n\n\ndef main():\n maze = TMaze(8)\n play(maze)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.utils.check_random_state"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aliyun/3D-Local-CNN-for-Gait-Recognition | [
"ebff3fd395c5e864ebd881a1ff06aa8b36682929"
] | [
"solvers/c3d.py"
] | [
"#! /usr/bin/env python\nimport os\nimport pdb\nimport time\nimport yaml\nimport json\nimport pickle\nimport random\nimport shutil\nimport argparse\nimport numpy as np\nfrom collections import defaultdict\n\n# torch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils import AverageMeter, LearningRate, accuracy, LossWeightDecay\nfrom solvers import BaselineSolver\n\n\nclass C3D_Solver(BaselineSolver):\n def build_optimizer(self):\n if self.cfg.optimizer == 'SGD':\n self.optimizer_backbone = self._build_sgd(\n self.model.module.backbone,\n self.model.module.spatial_pool,\n self.model.module.temporal_pool,\n self.model.module.hpm,\n )\n self.optimizer_top = self._build_sgd(\n self.model.module.compact_block, self.model.module.classifier)\n\n elif self.cfg.optimizer == 'Adam':\n self.optimizer_backbone = self._build_adam(\n self.model.module.backbone,\n self.model.module.spatial_pool,\n self.model.module.temporal_pool,\n self.model.module.hpm,\n )\n self.optimizer_top = self._build_adam(\n self.model.module.compact_block,\n self.model.module.classifier,\n )\n\n else:\n raise ValueError()\n self.lr_scheduler_backbone = LearningRate(self.optimizer_backbone,\n **self.cfg.lr_decay_backbone)\n self.lr_scheduler_top = LearningRate(self.optimizer_top,\n **self.cfg.lr_decay_top)\n\n def save_checkpoint(self, filename):\n state = {\n 'iteration': self.iter,\n 'model': self.model.module.state_dict(),\n 'optimizer_backbone': self.optimizer_backbone.state_dict(),\n 'optimizer_top': self.optimizer_top.state_dict(),\n }\n torch.save(state, filename)\n self.print_log('Save checkpoint to {}'.format(filename))\n return self.iter\n\n def load_checkpoint(self, filename, optim=True):\n state = torch.load(filename)\n iter = state['iteration']\n self.model.module.load_state_dict(state['model'])\n if optim:\n self.optimizer_backbone.load_state_dict(\n state['optimizer_backbone'])\n self.optimizer_top.load_state_dict(state['optimizer_top'])\n self.print_log('Load weights and optim from {}'.format(filename))\n else:\n self.print_log('Load weights from {}'.format(filename))\n return iter\n\n def build_loss(self):\n self.criterion_early = self._build_one_loss(self.cfg.early_loss,\n self.cfg.early_loss_args)\n self.criterion_mid = self._build_one_loss(self.cfg.mid_loss,\n self.cfg.mid_loss_args)\n self.criterion_late = self._build_one_loss(self.cfg.late_loss,\n self.cfg.late_loss_args)\n self.early_loss_weight = LossWeightDecay(**self.cfg.early_loss_weight)\n self.mid_loss_weight = LossWeightDecay(**self.cfg.mid_loss_weight)\n self.late_loss_weight = LossWeightDecay(**self.cfg.late_loss_weight)\n\n def train(self):\n self.build_data()\n self.build_model()\n self.build_optimizer()\n self.build_loss()\n start_time = time.time()\n self.iter = 0\n\n # Print out configurations\n self.print_log('{} samples in train set'.format(\n len(self.trainloader.dataset)))\n self.print_log('{} samples in test set'.format(\n len(self.testloader.dataset)))\n if self.cfg.print_model:\n self.print_log('Architecture:\\n{}'.format(self.model))\n num_params = sum(p.numel() for p in self.model.parameters()\n if p.requires_grad)\n self.print_log('Parameters: {}'.format(num_params))\n self.print_log('Configurations:\\n{}\\n'.format(\n json.dumps(vars(self.cfg), indent=4)))\n\n # Load from previous checkpoints\n self.load()\n\n # Meters\n self.best_acc, self.best_iter = [0], -1\n meters = defaultdict(lambda: AverageMeter())\n\n end = time.time()\n for seq, view, seq_type, label in self.trainloader:\n 
self.model.train()\n meters['dataTime'].update(time.time() - end)\n end = time.time()\n\n # Learning rate and loss weights decay\n lr_backbone = self.lr_scheduler_backbone.step(self.iter)\n lr_top = self.lr_scheduler_top.step(self.iter)\n lw_early = self.early_loss_weight.step(self.iter)\n lw_mid = self.mid_loss_weight.step(self.iter)\n lw_late = self.late_loss_weight.step(self.iter)\n self.iter += 1\n\n seq, label = seq.float().cuda(), label.long().cuda()\n\n # forward and calculate loss\n out1, out2, preds = self.model(seq)\n early_loss, loss_num = self.criterion_early(out1, label)\n mid_loss, mid_acc = self.criterion_mid(out2, label)\n late_loss = self.criterion_late(preds, label)\n prec, = accuracy(preds, label, topk=(1, ))\n loss = lw_early * early_loss + lw_mid * mid_loss + lw_late * late_loss\n\n # backward\n self.optimizer_top.zero_grad()\n self.optimizer_backbone.zero_grad()\n loss.backward()\n self.optimizer_top.step()\n self.optimizer_backbone.step()\n\n # record loss\n meters['modelTime'].update(time.time() - end)\n meters['earlyLoss'].update(early_loss)\n meters['midLoss'].update(mid_loss)\n meters['lateLoss'].update(late_loss)\n meters['lossNum'].update(loss_num)\n meters['Acc'].update(prec)\n meters['midAcc'].update(mid_acc)\n\n # show log info\n if self.iter % self.cfg.log_interval == 0:\n self.print_log(\n 'Iter: {}/{}'.format(self.iter, self.cfg.num_iter) +\n ' - Data: {:.0f}s'.format(meters['dataTime'].sum) +\n ' - Model: {:.0f}s'.format(meters['modelTime'].sum) +\n ' - Backbone: {:.2e}'.format(lr_backbone) +\n ' - Top: {:.2e}'.format(lr_top) +\n ' - W_Early: {:.2f}'.format(lw_early) +\n ' - W_Mid: {:.2f}'.format(lw_mid) +\n ' - W_Late: {:.2f}'.format(lw_late) +\n ' - Num: {:.2e}'.format(meters['lossNum'].avg) +\n ' - Loss_Mid: {:.2f}'.format(meters['midLoss'].avg) +\n ' - Loss_Late: {:.2f}'.format(meters['lateLoss'].avg) +\n ' - MidAcc: {:.2%}'.format(meters['midAcc'].avg) +\n ' - Acc: {:.2%}'.format(meters['Acc'].avg))\n\n for i in [\n 'earlyLoss', 'lossNum', 'midLoss', 'lateLoss',\n 'midAcc', 'Acc'\n ]:\n self.writer.add_scalar('train/{}'.format(i), meters[i].avg,\n self.iter)\n\n for m in meters.values():\n m.reset()\n\n # show distributions of weights and grads\n self.show_info()\n\n # save checkpoints\n self.save()\n\n # test\n if self.iter % self.cfg.test_interval == 0:\n acc = self._test()\n self.collect(acc)\n\n if self.iter == self.cfg.num_iter:\n self.print_log('\\nBest Acc: {}'.format(self.best_acc) +\n '\\nIter: {}'.format(self.best_iter) +\n '\\nDir: {}'.format(self.work_dir) +\n '\\nTime: {}'.format(\n self._convert_time(time.time() -\n start_time)))\n return\n end = time.time()\n\n def _test(self):\n self.model.eval()\n\n feature_list1 = list()\n feature_list2 = list()\n view_list = list()\n seq_type_list = list()\n label_list = list()\n\n for i, x in enumerate(self.testloader):\n seq, view, seq_type, label = x\n seq = seq.float().cuda()\n\n out1, out2 = self.model(seq)\n n = out1.size(0)\n feature_list1.append(out1.view(n, -1).data.cpu().numpy())\n feature_list2.append(out2.view(n, -1).data.cpu().numpy())\n view_list += view\n seq_type_list += seq_type\n label_list.append(label.item())\n\n self.print_log('Full Euclidean')\n acc_full_euc = self._compute_accuracy(feature_list1,\n view_list,\n seq_type_list,\n label_list,\n metric='euclidean')\n self.print_log('Compact Euclidean')\n acc_compact_euc = self._compute_accuracy(feature_list2,\n view_list,\n seq_type_list,\n label_list,\n metric='euclidean')\n self.print_log('Full Cosine')\n acc_full_cos = 
self._compute_accuracy(feature_list1,\n view_list,\n seq_type_list,\n label_list,\n metric='cosine')\n self.print_log('Compact Cosine')\n acc_compact_cos = self._compute_accuracy(feature_list2,\n view_list,\n seq_type_list,\n label_list,\n metric='cosine')\n\n if len(acc_compact_euc) > 1:\n self.writer.add_scalar('test_fullEuc/AccNM', acc_full_euc[0],\n self.iter)\n self.writer.add_scalar('test_fullEuc/AccBG', acc_full_euc[1],\n self.iter)\n self.writer.add_scalar('test_fullEuc/AccCL', acc_full_euc[2],\n self.iter)\n self.writer.add_scalar('test_compactEuc/AccNM', acc_compact_euc[0],\n self.iter)\n self.writer.add_scalar('test_compactEuc/AccBG', acc_compact_euc[1],\n self.iter)\n self.writer.add_scalar('test_compactEuc/AccCL', acc_compact_euc[2],\n self.iter)\n self.writer.add_scalar('test_fullCos/AccNM', acc_full_cos[0],\n self.iter)\n self.writer.add_scalar('test_fullCos/AccBG', acc_full_cos[1],\n self.iter)\n self.writer.add_scalar('test_fullCos/AccCL', acc_full_cos[2],\n self.iter)\n self.writer.add_scalar('test_compactCos/AccNM', acc_compact_cos[0],\n self.iter)\n self.writer.add_scalar('test_compactCos/AccBG', acc_compact_cos[1],\n self.iter)\n self.writer.add_scalar('test_compactCos/AccCL', acc_compact_cos[2],\n self.iter)\n else:\n self.writer.add_scalar('test/fullEucAcc', acc_full_euc[0],\n self.iter)\n self.writer.add_scalar('test/compactEucAcc', acc_compact_euc[0],\n self.iter)\n self.writer.add_scalar('test/fullCosAcc', acc_full_cos[0],\n self.iter)\n self.writer.add_scalar('test/compactCosAcc', acc_compact_cos[0],\n self.iter)\n target_acc = getattr(self.cfg, 'target_acc', 'full_euc')\n accs = {\n 'full_euc': acc_full_euc,\n 'full_cos': acc_full_cos,\n 'compact_euc': acc_compact_euc,\n 'compact_cos': acc_compact_cos\n }\n return accs[target_acc]\n\n def all_test(self):\n self.build_data()\n self.build_model()\n\n if self.cfg.pretrained is None:\n raise ValueError('Please appoint --pretrained.')\n self.load_checkpoint(self.cfg.pretrained, optim=False)\n self.model.eval()\n\n feature_list1 = list()\n feature_list2 = list()\n view_list = list()\n seq_type_list = list()\n label_list = list()\n\n for i, x in enumerate(self.testloader):\n seq, view, seq_type, label = x\n seq = seq.float().cuda()\n\n out1, out2 = self.model(seq)\n n = out1.size(0)\n feature_list1.append(out1.view(n, -1).data.cpu().numpy())\n feature_list2.append(out2.view(n, -1).data.cpu().numpy())\n view_list += view\n seq_type_list += seq_type\n label_list.append(label.item())\n\n self.print_log('Full Euclidean')\n acc1 = self._compute_accuracy(feature_list1,\n view_list,\n seq_type_list,\n label_list,\n metric='euclidean')\n self.print_log('Compact Euclidean')\n acc2 = self._compute_accuracy(feature_list2,\n view_list,\n seq_type_list,\n label_list,\n metric='euclidean')\n self.print_log('Full Cosine')\n acc3 = self._compute_accuracy(feature_list1,\n view_list,\n seq_type_list,\n label_list,\n metric='cosine')\n self.print_log('Compact Cosine')\n acc4 = self._compute_accuracy(feature_list2,\n view_list,\n seq_type_list,\n label_list,\n metric='cosine')\n"
] | [
[
"torch.load",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
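The C3D_Solver record above imports `AverageMeter`, `accuracy`, `LearningRate` and `LossWeightDecay` from a `utils` module that is not included in the record. A minimal sketch, assuming nothing beyond how the training loop calls them, of what the meter and top-k accuracy helpers would need to look like (hypothetical stand-ins, not the repository's `utils.py`):

```python
# Hypothetical stand-ins for two of the helpers imported from `utils` above.
# The record does not include utils.py, so these are assumptions, not the
# repository's implementations.
import torch


class AverageMeter:
    """Running sum/count/average, matching how the loop reads .sum and .avg."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0

    def update(self, value, n=1):
        value = float(value)          # works for Python floats and 0-dim tensors
        self.sum += value * n
        self.count += n
        self.avg = self.sum / max(self.count, 1)


def accuracy(output, target, topk=(1,)):
    """Top-k accuracy as fractions, mirroring `prec, = accuracy(preds, label, topk=(1,))`."""
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()                                   # (maxk, batch)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [correct[:k].reshape(-1).float().mean().item() for k in topk]


if __name__ == "__main__":
    logits, labels = torch.randn(8, 5), torch.randint(0, 5, (8,))
    top1, = accuracy(logits, labels, topk=(1,))
    meter = AverageMeter()
    meter.update(top1)
    print(meter.avg)
```

Returning fractions (rather than percentages) is consistent with the `{:.2%}` formatting used in the solver's log lines.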
edaaydinea/CSMM102X-ML | [
"d1701a0638538e3332f1fae442f9de14349768c8"
] | [
"Week 6 - Classification/hw2_classification.py"
] | [
"from __future__ import division\r\nimport numpy as np\r\nimport sys\r\n\r\nX_train = np.genfromtxt(sys.argv[1], delimiter=\",\")\r\ny_train = np.genfromtxt(sys.argv[2])\r\nX_test = np.genfromtxt(sys.argv[3], delimiter=\",\")\r\n\r\nX_train = np.genfromtxt(\"X_train.csv\", delimiter=\",\")\r\ny_train = np.genfromtxt(\"y_train.csv\", delimiter=\",\")\r\nX_test = np.genfromtxt(\"X_test.csv\", delimiter=\",\")\r\ny_test = np.genfromtxt(\"y_test.csv\", delimiter=\",\")\r\n\r\n## can make more functions if required\r\ndef Countclasses(y_train): # just a counter per class\r\n Prior = []\r\n total = len(y_train)\r\n K_classes = np.unique(y_train)\r\n for i in K_classes:\r\n Prior.append(np.uint8(y_train==i).sum()/total)\r\n return Prior\r\n\r\ndef Probability(x, u, D): # Gaussian Distribution for MLE\r\n exponential_term = np.exp(-0.5 * (np.matmul((x-u) , np.linalg.pinv(D)) * (x-u)).sum(-1) )\r\n return ( exponential_term / np.sqrt(np.linalg.det(D)) ).squeeze() \r\n\r\ndef ClassConditionalDensity(X_train, y_train): # \r\n K_classes = np.unique(y_train)\r\n mean_y = []\r\n cov_y = []\r\n for i in K_classes:\r\n mask = y_train==i\r\n mean_y.append( X_train[mask].sum(0)/len(X_train[mask]) )\r\n cov_y.append( np.matmul( (X_train[mask]-mean_y[-1]).T , (X_train[mask]-mean_y[-1]) )/len(X_train[mask] ) )\r\n\r\n return mean_y, cov_y\r\n\r\n## can make more functions if required\r\ndef pluginClassifier(X_train, y_train, X_test): \r\n # this function returns the required output\r\n Prior = Countclasses(y_train) # Prior Distribution\r\n mean_y, cov_y = ClassConditionalDensity(X_train, y_train) # u and Cov parameters\r\n Likelihood = np.zeros([X_test.shape[0], len(Prior)])\r\n for k in range(len(Prior)):\r\n Likelihood[:,k] = Prior[k] * Probability(X_test, mean_y[k], cov_y[k]) # computing the Likelihood for Bayes Classifier\r\n Prob = Likelihood/Likelihood.sum(1)[:,None]\r\n return Prob\r\n\r\nfinal_outputs = pluginClassifier(X_train, y_train, X_test) # assuming final_outputs is returned from function\r\n\r\ny_ = final_outputs.argmax(1)\r\nm = confusion_matrix(y_test,y_)\r\nprint('Bayes Classifier')\r\nprint(m)\r\n\r\nnp.savetxt(\"probs_test.csv\", final_outputs, delimiter=\",\") # write output to file"
] | [
[
"numpy.unique",
"numpy.uint8",
"numpy.matmul",
"numpy.genfromtxt",
"numpy.linalg.det",
"numpy.linalg.pinv",
"numpy.savetxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
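The hw2_classification.py record above only runs once the four CSV files are present next to it. The same plug-in (Gaussian class-conditional) Bayes classifier can be exercised end to end on synthetic data; the sketch below is illustrative only and is not part of the record:

```python
# Self-contained sketch of a plug-in Gaussian Bayes classifier on synthetic data.
# All data and names here are illustrative; scikit-learn is assumed to be installed.
import numpy as np
from sklearn.metrics import confusion_matrix

rng = np.random.default_rng(0)

# Two 2-D Gaussian classes.
X0 = rng.normal(loc=[0.0, 0.0], scale=1.0, size=(100, 2))
X1 = rng.normal(loc=[3.0, 3.0], scale=1.0, size=(100, 2))
X_train = np.vstack([X0, X1])
y_train = np.array([0] * 100 + [1] * 100)
X_test, y_test = X_train, y_train          # reuse the training set just to exercise the code

classes = np.unique(y_train)
priors = np.array([(y_train == k).mean() for k in classes])
means = [X_train[y_train == k].mean(axis=0) for k in classes]
covs = [np.cov(X_train[y_train == k].T, bias=True) for k in classes]


def gaussian_density(X, mu, cov):
    """Unnormalised multivariate normal density (shared constants cancel in the posterior)."""
    diff = X - mu
    expo = -0.5 * np.einsum('ij,jk,ik->i', diff, np.linalg.pinv(cov), diff)
    return np.exp(expo) / np.sqrt(np.linalg.det(cov))


likelihood = np.column_stack(
    [priors[idx] * gaussian_density(X_test, means[idx], covs[idx])
     for idx in range(len(classes))])
posterior = likelihood / likelihood.sum(axis=1, keepdims=True)
y_pred = posterior.argmax(axis=1)

print(confusion_matrix(y_test, y_pred))
```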
mgvalverde/prince | [
"e5a705befbd122df33a0fefc4b26d63c96cf0552"
] | [
"prince/ca.py"
] | [
"\"\"\"Correspondence Analysis (CA)\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom scipy import sparse\nfrom sklearn import base\nfrom sklearn import utils\n\nfrom . import plot\nfrom . import util\nfrom . import svd\n\n\nclass CA(base.BaseEstimator, base.TransformerMixin):\n\n def __init__(self, n_components=2, n_iter=10, copy=True, check_input=True, benzecri=False,\n random_state=None, engine='auto'):\n self.n_components = n_components\n self.n_iter = n_iter\n self.copy = copy\n self.check_input = check_input\n self.random_state = random_state\n self.benzecri = benzecri\n self.engine = engine\n\n def fit(self, X, y=None):\n\n # Check input\n if self.check_input:\n utils.check_array(X)\n\n # Check all values are positive\n if (X < 0).any().any():\n raise ValueError(\"All values in X should be positive\")\n\n _, row_names, _, col_names = util.make_labels_and_names(X)\n\n if isinstance(X, pd.DataFrame):\n X = X.to_numpy()\n\n if self.copy:\n X = np.copy(X)\n\n # Compute the correspondence matrix which contains the relative frequencies\n X /= np.sum(X)\n\n # Compute row and column masses\n self.row_masses_ = pd.Series(X.sum(axis=1), index=row_names)\n self.col_masses_ = pd.Series(X.sum(axis=0), index=col_names)\n\n # Compute standardised residuals\n r = self.row_masses_.to_numpy()\n c = self.col_masses_.to_numpy()\n S = sparse.diags(r ** -.5) @ (X - np.outer(r, c)) @ sparse.diags(c ** -.5)\n\n # Compute SVD on the standardised residuals\n self.U_, self.s_, self.V_ = svd.compute_svd(\n X=S,\n n_components=self.n_components,\n n_iter=self.n_iter,\n random_state=self.random_state,\n engine=self.engine\n )\n\n # Compute total inertia\n self.total_inertia_ = np.einsum('ij,ji->', S, S.T)\n\n return self\n\n def transform(self, X):\n \"\"\"Computes the row principal coordinates of a dataset.\n\n Same as calling `row_coordinates`. In most cases you should be using the same\n dataset as you did when calling the `fit` method. You might however also want to included\n supplementary data.\n\n \"\"\"\n utils.validation.check_is_fitted(self)\n if self.check_input:\n utils.check_array(X)\n return self.row_coordinates(X)\n\n @property\n def eigenvalues_(self):\n \"\"\"The eigenvalues associated with each principal component.\n\n Benzecri correction is applied if specified.\n\n \"\"\"\n utils.validation.check_is_fitted(self)\n\n K = len(self.col_masses_)\n\n if self.benzecri:\n return [\n (K / (K - 1.) * (s - 1. / K)) ** 2\n if s > 1. 
/ K else 0\n for s in np.square(self.s_)\n ]\n\n return np.square(self.s_).tolist()\n\n @property\n def explained_inertia_(self):\n \"\"\"The percentage of explained inertia per principal component.\"\"\"\n utils.validation.check_is_fitted(self)\n return [eig / self.total_inertia_ for eig in self.eigenvalues_]\n\n def row_coordinates(self, X):\n \"\"\"The row principal coordinates.\"\"\"\n utils.validation.check_is_fitted(self)\n\n _, row_names, _, _ = util.make_labels_and_names(X)\n\n if isinstance(X, pd.DataFrame):\n try:\n X = X.sparse.to_coo().astype(float)\n except AttributeError:\n X = X.to_numpy()\n\n if self.copy:\n X = X.copy()\n\n # Normalise the rows so that they sum up to 1\n if isinstance(X, np.ndarray):\n X = X / X.sum(axis=1)[:, None]\n else:\n X = X / X.sum(axis=1)\n\n return pd.DataFrame(\n data=X @ sparse.diags(self.col_masses_.to_numpy() ** -0.5) @ self.V_.T,\n index=row_names\n )\n\n def column_coordinates(self, X):\n \"\"\"The column principal coordinates.\"\"\"\n utils.validation.check_is_fitted(self)\n\n _, _, _, col_names = util.make_labels_and_names(X)\n\n if isinstance(X, pd.DataFrame):\n is_sparse = X.dtypes.apply(pd.api.types.is_sparse).all()\n if is_sparse:\n X = X.sparse.to_coo()\n else:\n X = X.to_numpy()\n\n if self.copy:\n X = X.copy()\n\n # Transpose and make sure the rows sum up to 1\n if isinstance(X, np.ndarray):\n X = X.T / X.T.sum(axis=1)[:, None]\n else:\n X = X.T / X.T.sum(axis=1)\n\n return pd.DataFrame(\n data=X @ sparse.diags(self.row_masses_.to_numpy() ** -0.5) @ self.U_,\n index=col_names\n )\n\n def plot_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1,\n show_row_labels=True, show_col_labels=True, **kwargs):\n \"\"\"Plot the principal coordinates.\"\"\"\n\n utils.validation.check_is_fitted(self)\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n # Add style\n ax = plot.stylize_axis(ax)\n\n # Get labels and names\n row_label, row_names, col_label, col_names = util.make_labels_and_names(X)\n\n # Plot row principal coordinates\n row_coords = self.row_coordinates(X)\n ax.scatter(\n row_coords[x_component],\n row_coords[y_component],\n **kwargs,\n label=row_label\n )\n\n # Plot column principal coordinates\n col_coords = self.column_coordinates(X)\n ax.scatter(\n col_coords[x_component],\n col_coords[y_component],\n **kwargs,\n label=col_label\n )\n\n # Add row labels\n if show_row_labels:\n x = row_coords[x_component]\n y = row_coords[y_component]\n for xi, yi, label in zip(x, y, row_names):\n ax.annotate(label, (xi, yi))\n\n # Add column labels\n if show_col_labels:\n x = col_coords[x_component]\n y = col_coords[y_component]\n for xi, yi, label in zip(x, y, col_names):\n ax.annotate(label, (xi, yi))\n\n # Legend\n ax.legend()\n\n # Text\n ax.set_title('Principal coordinates')\n ei = self.explained_inertia_\n ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, 100 * ei[x_component]))\n ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, 100 * ei[y_component]))\n\n return ax\n"
] | [
[
"numpy.square",
"sklearn.utils.validation.check_is_fitted",
"numpy.einsum",
"sklearn.utils.check_array",
"scipy.sparse.diags",
"matplotlib.pyplot.subplots",
"numpy.copy",
"numpy.outer",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
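The prince/ca.py record above defines `fit`, `row_coordinates` and `column_coordinates`; a short usage sketch follows. It assumes the surrounding `prince` package (which provides the `plot`, `util` and `svd` modules imported at the top of the file) is installed, and the table values are made up. Because `fit` divides its input in place, the frame is cast to float first:

```python
# Illustrative usage of the CA class shown above; assumes the `prince`
# package that hosts this module is installed.  Data values are made up.
import pandas as pd
from prince.ca import CA

# A small contingency table: rows and columns are arbitrary category labels.
X = pd.DataFrame(
    [[68, 119, 26, 7],
     [20, 84, 17, 94],
     [15, 54, 14, 10],
     [5, 29, 14, 16]],
    index=['r1', 'r2', 'r3', 'r4'],
    columns=['c1', 'c2', 'c3', 'c4'],
).astype(float)                      # fit() divides in place, so pass floats

ca = CA(n_components=2, n_iter=10, random_state=42).fit(X)

print(ca.eigenvalues_)               # principal inertias
print(ca.explained_inertia_)         # share of total inertia per component
print(ca.row_coordinates(X))         # row principal coordinates
print(ca.column_coordinates(X))      # column principal coordinates
```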
keerthan2/NYSE_Prediction | [
"b91a6c5bf1ef3431d0f7482cc77ac81e3dde664e"
] | [
"nyse.py"
] | [
"import numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport matplotlib.pyplot as plt\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.utils.data import DataLoader, random_split, TensorDataset\r\nfrom torch.autograd import Variable\r\n\r\nfrom utils import train, test, load_data\r\nfrom model import RNNModel\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nseed = 0\r\ntorch.manual_seed(seed)\r\nif torch.cuda.is_available():\r\n torch.cuda.manual_seed_all(seed)\r\n\r\nprint(\"Reading CSV file\")\r\ndf = pd.read_csv(\"data/prices-split-adjusted.csv\", index_col = 0)\r\ndf_stock = df[df.symbol == 'EQIX'].copy()\r\ndf_stock.drop(['symbol'],1,inplace=True)\r\ndf_stock.drop(['volume'],1,inplace=True)\r\ndf_stock = df_stock.copy()\r\n\r\nseq_len = 50 \r\nvalid_set_size_percentage = 10 \r\ntest_set_size_percentage = 10 \r\nprint(\"Making train-validation-test data\")\r\nx_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock, seq_len, valid_set_size_percentage, test_set_size_percentage)\r\n\r\nbatch_size = 50\r\nn_epochs = 50 \r\n\r\ntensor_x = torch.Tensor(x_train) \r\ntensor_y = torch.Tensor(y_train) \r\nmy_dataset = TensorDataset(tensor_x,tensor_y) \r\ndataloader_train = DataLoader(my_dataset, batch_size=batch_size,shuffle=True,num_workers=4,pin_memory=False) \r\n\r\ntensor_x = torch.Tensor(x_valid) \r\ntensor_y = torch.Tensor(y_valid) \r\nmy_dataset = TensorDataset(tensor_x,tensor_y) \r\ndataloader_val = DataLoader(my_dataset, batch_size=batch_size,shuffle=True,num_workers=4,pin_memory=False) \r\n\r\ntensor_x = torch.Tensor(x_test) \r\ntensor_y = torch.Tensor(y_test) \r\nmy_dataset = TensorDataset(tensor_x,tensor_y) \r\ndataloader_test = DataLoader(my_dataset, batch_size=batch_size,shuffle=True,num_workers=4,pin_memory=False) \r\n\r\nhidden_dim = 200\r\nnum_layers = 4\r\nlr = 1e-3\r\nlog_dir = './ckpt'\r\nmodel_name = 'model.pth'\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nprint(f\"Found {device} ...\")\r\nprint(\"Instantiating RNN Model\")\r\n\r\nif not os.path.exists(log_dir):\r\n os.mkdir(log_dir)\r\nmodel_save_path = os.path.join(log_dir,model_name)\r\nmodel = RNNModel(x_train.shape[-1],hidden_dim,num_layers,y_train.shape[-1]).to(device)\r\noptimizer = optim.Adam(model.parameters(),lr=lr)\r\ncriterion = nn.MSELoss()\r\n\r\nprint(\"< Training starts >\")\r\nmodel = train(model,dataloader_train,dataloader_val,device,criterion,optimizer,n_epochs,model_save_path)\r\n\r\n\r\nprint(\"Testing on test data-set \")\r\nlog_dir = './ckpt'\r\nmodel_name = 'model.pth'\r\nmodel_save_path = os.path.join(log_dir,model_name)\r\noutput_dim = 4\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\nmodel = RNNModel(x_test.shape[-1],hidden_dim,num_layers,output_dim).to(device)\r\ny_test_pred = test(x_test,model,model_save_path,device)\r\nft_dict = {\r\n 0: 'open',\r\n 1: 'high',\r\n 2: 'low',\r\n 3: 'close', \r\n}\r\nfig = plt.figure(figsize=(15,10))\r\nfor i in range(4):\r\n n = len(y_test_pred)\r\n ax = fig.add_subplot(2,2,i+1)\r\n ax.plot(range(n),y_test[:,i],range(n),y_test_pred[:,i])\r\n ax.legend(['test','test_predicted'])\r\n ax.set_ylabel('Price')\r\n ax.set_xlabel('Time (days)')\r\n ax.set_title(f'Prediction of future stock prices - {ft_dict[i]} category - (on test-set)')\r\nplt.show()\r\n\r\n\r\n"
] | [
[
"pandas.read_csv",
"torch.Tensor",
"torch.manual_seed",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"matplotlib.pyplot.show",
"torch.nn.MSELoss",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
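The nyse.py record above depends on `load_data`, `train` and `test` from `utils` and on `RNNModel` from `model`, none of which are included here. Purely as an assumption about the shape contract the script relies on (sliding windows of `seq_len` rows, targets with the same feature width, chronological percentage splits), a `load_data` helper could look roughly like this:

```python
# Hypothetical sketch of the `load_data` helper imported from utils in the
# script above; the record does not include utils.py, so this is an assumption.
import numpy as np
import pandas as pd


def load_data(df_stock: pd.DataFrame, seq_len: int,
              valid_pct: float, test_pct: float):
    data = df_stock.to_numpy(dtype=np.float32)

    # Each sample is a window of `seq_len` rows; the target is the row that follows.
    windows = np.array([data[i:i + seq_len + 1]
                        for i in range(len(data) - seq_len)])
    x = windows[:, :-1, :]   # (num_samples, seq_len, num_features)
    y = windows[:, -1, :]    # (num_samples, num_features)

    # Chronological split: train first, then validation, then test.
    n = len(x)
    n_valid = int(round(n * valid_pct / 100))
    n_test = int(round(n * test_pct / 100))
    n_train = n - n_valid - n_test

    x_train, y_train = x[:n_train], y[:n_train]
    x_valid, y_valid = x[n_train:n_train + n_valid], y[n_train:n_train + n_valid]
    x_test, y_test = x[n_train + n_valid:], y[n_train + n_valid:]
    return x_train, y_train, x_valid, y_valid, x_test, y_test
```

This layout is consistent with how the script later uses `x_train.shape[-1]` as the RNN input size and `y_train.shape[-1]` as its output size.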
lauri-codes/matminer | [
"d19e0c921dda9c8eba78c07aeadbecaeadc51b23"
] | [
"matminer/featurizers/composition.py"
] | [
"from __future__ import division\n\nimport collections\nimport itertools\nimport os\nfrom functools import reduce, lru_cache\nfrom warnings import warn\n\nimport numpy as np\nimport pandas as pd\nfrom pymatgen import Element, MPRester\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.core.molecular_orbitals import MolecularOrbitals\nfrom pymatgen.core.periodic_table import get_el_sp\nfrom sklearn.neighbors.unsupervised import NearestNeighbors\n\nfrom matminer.featurizers.base import BaseFeaturizer\nfrom matminer.featurizers.utils.stats import PropertyStats\nfrom matminer.utils.data import DemlData, MagpieData, PymatgenData, \\\n CohesiveEnergyData, MixingEnthalpy, MatscholarElementData, MEGNetElementData\n\n__author__ = 'Logan Ward, Jiming Chen, Ashwin Aggarwal, Kiran Mathew, ' \\\n 'Saurabh Bajaj, Qi Wang, Maxwell Dylla, Anubhav Jain'\n\nmodule_dir = os.path.dirname(os.path.abspath(__file__))\ndata_dir = os.path.join(module_dir, \"..\", \"utils\", \"data_files\")\n\n\n# Utility operations\ndef has_oxidation_states(comp):\n \"\"\"Check if a composition object has oxidation states for each element\n\n TODO: Does this make sense to add to pymatgen? -wardlt\n\n Args:\n comp (Composition): Composition to check\n Returns:\n (boolean) Whether this composition object contains oxidation states\n \"\"\"\n for el in comp.elements:\n if not hasattr(el, \"oxi_state\") or el.oxi_state is None:\n return False\n return True\n\n\ndef is_ionic(comp):\n \"\"\"Determines whether a compound is an ionic compound.\n\n Looks at the oxidation states of each site and checks if both anions and cations exist\n\n Args:\n comp (Composition): Composition to check\n Returns:\n (bool) Whether the composition describes an ionic compound\n \"\"\"\n\n has_cations = False\n has_anions = False\n\n for el in comp.elements:\n if el.oxi_state < 0:\n has_anions = True\n if el.oxi_state > 0:\n has_cations = True\n if has_anions and has_cations:\n return True\n return False\n\n\nclass ElementProperty(BaseFeaturizer):\n \"\"\"\n Class to calculate elemental property attributes.\n\n To initialize quickly, use the from_preset() method.\n\n Features: Based on the statistics of the data_source chosen, computed\n by element stoichiometry. 
The format generally is:\n\n \"{data source} {statistic} {property}\"\n\n For example:\n\n \"PymetgenData range X\" # Range of electronegativity from Pymatgen data\n\n For a list of all statistics, see the PropertyStats documentation; for a\n list of all attributes available for a given data_source, see the\n documentation for the data sources (e.g., PymatgenData, MagpieData,\n MatscholarElementData, etc.).\n\n Args:\n data_source (AbstractData or str): source from which to retrieve\n element property data (or use str for preset: \"pymatgen\",\n \"magpie\", or \"deml\")\n features (list of strings): List of elemental properties to use\n (these must be supported by data_source)\n stats (list of strings): a list of weighted statistics to compute to for each\n property (see PropertyStats for available stats)\n \"\"\"\n\n def __init__(self, data_source, features, stats):\n if data_source == \"pymatgen\":\n self.data_source = PymatgenData()\n elif data_source == \"magpie\":\n self.data_source = MagpieData()\n elif data_source == \"deml\":\n self.data_source = DemlData()\n elif data_source == \"matscholar_el\":\n self.data_source = MatscholarElementData()\n elif data_source == \"megnet_el\":\n self.data_source = MEGNetElementData()\n else:\n self.data_source = data_source\n\n self.features = features\n self.stats = stats\n # Initialize stats computer\n self.pstats = PropertyStats()\n\n @classmethod\n def from_preset(cls, preset_name):\n \"\"\"\n Return ElementProperty from a preset string\n Args:\n preset_name: (str) can be one of \"magpie\", \"deml\", \"matminer\",\n \"matscholar_el\", or \"megnet_el\".\n\n Returns:\n ElementProperty based on the preset name.\n \"\"\"\n if preset_name == \"magpie\":\n data_source = \"magpie\"\n features = [\"Number\", \"MendeleevNumber\", \"AtomicWeight\",\n \"MeltingT\",\n \"Column\", \"Row\", \"CovalentRadius\",\n \"Electronegativity\", \"NsValence\", \"NpValence\",\n \"NdValence\", \"NfValence\", \"NValence\",\n \"NsUnfilled\", \"NpUnfilled\", \"NdUnfilled\", \"NfUnfilled\",\n \"NUnfilled\", \"GSvolume_pa\",\n \"GSbandgap\", \"GSmagmom\", \"SpaceGroupNumber\"]\n stats = [\"minimum\", \"maximum\", \"range\", \"mean\", \"avg_dev\", \"mode\"]\n\n elif preset_name == \"deml\":\n data_source = \"deml\"\n stats = [\"minimum\", \"maximum\", \"range\", \"mean\", \"std_dev\"]\n features = [\"atom_num\", \"atom_mass\", \"row_num\", \"col_num\",\n \"atom_radius\", \"molar_vol\", \"heat_fusion\",\n \"melting_point\", \"boiling_point\", \"heat_cap\",\n \"first_ioniz\", \"electronegativity\",\n \"electric_pol\", \"GGAU_Etot\", \"mus_fere\",\n \"FERE correction\"]\n\n elif preset_name == \"matminer\":\n data_source = \"pymatgen\"\n stats = [\"minimum\", \"maximum\", \"range\", \"mean\", \"std_dev\"]\n features = [\"X\", \"row\", \"group\", \"block\", \"atomic_mass\",\n \"atomic_radius\", \"mendeleev_no\",\n \"electrical_resistivity\", \"velocity_of_sound\",\n \"thermal_conductivity\", \"melting_point\",\n \"bulk_modulus\",\n \"coefficient_of_linear_thermal_expansion\"]\n\n elif preset_name == \"matscholar_el\":\n data_source = \"matscholar_el\"\n stats = [\"minimum\", \"maximum\", \"range\", \"mean\", \"std_dev\"]\n features = MatscholarElementData().prop_names\n\n elif preset_name == \"megnet_el\":\n data_source = \"megnet_el\"\n stats = [\"minimum\", \"maximum\", \"range\", \"mean\", \"std_dev\"]\n features = MEGNetElementData().prop_names\n\n else:\n raise ValueError(\"Invalid preset_name specified!\")\n\n return cls(data_source, features, stats)\n\n def featurize(self, 
comp):\n \"\"\"\n Get elemental property attributes\n\n Args:\n comp: Pymatgen composition object\n\n Returns:\n all_attributes: Specified property statistics of features\n \"\"\"\n\n all_attributes = []\n\n # Get the element names and fractions\n elements, fractions = zip(*comp.element_composition.items())\n\n for attr in self.features:\n elem_data = [self.data_source.get_elemental_property(e, attr) for e in elements]\n\n for stat in self.stats:\n all_attributes.append(self.pstats.calc_stat(elem_data, stat, fractions))\n\n return all_attributes\n\n def feature_labels(self):\n labels = []\n for attr in self.features:\n src = self.data_source.__class__.__name__\n for stat in self.stats:\n labels.append(\"{} {} {}\".format(src, stat, attr))\n return labels\n\n def citations(self):\n if self.data_source.__class__.__name__ == \"MagpieData\":\n citation = [\n \"@article{ward_agrawal_choudary_wolverton_2016, title={A general-purpose \"\n \"machine learning framework for predicting properties of inorganic materials}, \"\n \"volume={2}, DOI={10.1038/npjcompumats.2017.28}, number={1}, journal={npj \"\n \"Computational Materials}, author={Ward, Logan and Agrawal, Ankit and Choudhary, \"\n \"Alok and Wolverton, Christopher}, year={2016}}\"]\n elif self.data_source.__class__.__name__ == \"DemlData\":\n citation = [\n \"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density \"\n \"functional theory total energies and enthalpies of formation of metal-nonmetal \"\n \"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, \"\n \"number={44}, journal={ChemInform}, author={Deml, Ann M. and Ohayre, Ryan and \"\n \"Wolverton, Chris and Stevanovic, Vladan}, year={2016}}\"]\n elif self.data_source.__class__.__name__ == \"PymatgenData\":\n citation = [\n \"@article{Ong2013, author = {Ong, Shyue Ping and Richards, William Davidson and Jain, Anubhav and Hautier, \"\n \"Geoffroy and Kocher, Michael and Cholia, Shreyas and Gunter, Dan and Chevrier, Vincent L. and Persson, \"\n \"Kristin A. 
and Ceder, Gerbrand}, doi = {10.1016/j.commatsci.2012.10.028}, issn = {09270256}, \"\n \"journal = {Computational Materials Science}, month = {feb}, pages = {314--319}, \"\n \"publisher = {Elsevier B.V.}, title = {{Python Materials Genomics (pymatgen): A robust, open-source python \"\n \"library for materials analysis}}, url = {http://linkinghub.elsevier.com/retrieve/pii/S0927025612006295}, \"\n \"volume = {68}, year = {2013} } \"]\n elif self.data_source.__class__.__name__ == \"MEGNetElementData\":\n # TODO: Cite MEGNet publication (not preprint) once released!\n citation = [\n \"@ARTICLE{2018arXiv181205055C,\"\n \"author = {{Chen}, Chi and {Ye}, Weike and {Zuo}, Yunxing and {Zheng}, Chen and {Ong}, Shyue Ping},\"\n \"title = '{Graph Networks as a Universal Machine Learning Framework for Molecules and Crystals}',\"\n \"journal = {arXiv e-prints},\"\n \"keywords = {Condensed Matter - Materials Science, Physics - Computational Physics},\"\n \"year = '2018',\"\n \"month = 'Dec',\"\n \"eid = {arXiv:1812.05055},\"\n \"pages = {arXiv:1812.05055},\"\n \"archivePrefix = {arXiv},\"\n \"eprint = {1812.05055},\"\n \"primaryClass = {cond-mat.mtrl-sci},\"\n \"adsurl = {https://ui.adsabs.harvard.edu/\\#abs/2018arXiv181205055C},\"\n \"adsnote = {Provided by the SAO/NASA Astrophysics Data System}}\"]\n else:\n citation = []\n return citation\n\n def implementors(self):\n return [\"Jiming Chen\", \"Logan Ward\", \"Anubhav Jain\", \"Alex Dunn\"]\n\nclass Meredig(BaseFeaturizer):\n \"\"\"\n Class to calculate features as defined in Meredig et. al.\n\n Features:\n Atomic fraction of each of the first 103 elements, in order of atomic number.\n 17 statistics of elemental properties;\n Mean atomic weight of constituent elements\n Mean periodic table row and column number\n Mean and range of atomic number\n Mean and range of atomic radius\n Mean and range of electronegativity\n Mean number of valence electrons in each orbital\n Fraction of total valence electrons in each orbital\n\n \"\"\"\n\n def __init__(self):\n self.data_source = MagpieData()\n\n #The labels for statistics on element properties\n self._element_property_feature_labels = [\"mean AtomicWeight\", \"mean Column\", \"mean Row\", \"range Number\", \"mean Number\",\n \"range AtomicRadius\", \"mean AtomicRadius\", \"range Electronegativity\", \"mean Electronegativity\"]\n # Initialize stats computer\n self.pstats = PropertyStats()\n\n def featurize(self, comp):\n \"\"\"\n Get elemental property attributes\n\n Args:\n comp: Pymatgen composition object\n\n Returns:\n all_attributes: Specified property statistics of features\n \"\"\"\n\n #First 103 features are element fractions, we can get these from the ElementFraction featurizer\n element_fraction_features = ElementFraction().featurize(comp)\n\n\n #Next 9 features are statistics on elemental properties\n elements, fractions = zip(*comp.element_composition.items())\n element_property_features = [0] * len(self._element_property_feature_labels)\n\n for i,feat in enumerate(self._element_property_feature_labels):\n stat = feat.split(\" \")[0]\n attr = \" \".join(feat.split(\" \")[1:])\n\n elem_data = [self.data_source.get_elemental_property(e, attr) for e in elements]\n element_property_features[i] = self.pstats.calc_stat(elem_data, stat, fractions)\n\n #Final 8 features are statistics on valence orbitals, available from the ValenceOrbital featurizer\n valence_orbital_features = ValenceOrbital(orbitals=(\"s\", \"p\", \"d\", \"f\"), props=(\"avg\", \"frac\")).featurize(comp)\n\n return 
element_fraction_features+element_property_features+valence_orbital_features\n\n def feature_labels(self):\n #Since we have more features than just element fractions, append 'fraction' to element symbols for clarity\n element_fraction_features = [e + \" fraction\" for e in ElementFraction().feature_labels()]\n valence_orbital_features = ValenceOrbital().feature_labels()\n return element_fraction_features+self._element_property_feature_labels+valence_orbital_features\n\n def citations(self):\n citation = [\n \"@article{meredig_agrawal_kirklin_saal_doak_thompson_zhang_choudhary_wolverton_2014, title={Combinatorial \"\n \"screening for new materials in unconstrained composition space with machine learning}, \"\n \"volume={89}, DOI={10.1103/PhysRevB.89.094104}, number={1}, journal={Physical \"\n \"Review B}, author={B. Meredig, A. Agrawal, S. Kirklin, J. E. Saal, J. W. Doak, A. Thompson, \"\n \"K. Zhang, A. Choudhary, and C. Wolverton}, year={2014}}\"]\n return citation\n\n def implementors(self):\n return [\"Amalie Trewartha\"]\n\n\nclass CationProperty(ElementProperty):\n \"\"\"\n Features based on properties of cations in a material\n\n Requires that oxidation states have already been determined. Property\n statistics weighted by composition.\n\n Features: Based on the statistics of the data_source chosen, computed\n by element stoichiometry. The format generally is:\n\n \"{data source} {statistic} {property}\"\n\n For example:\n\n \"DemlData range magn_moment\" # Range of magnetic moment via Deml et al. data\n\n For a list of all statistics, see the PropertyStats documentation; for a\n list of all attributes available for a given data_source, see the\n documentation for the data sources (e.g., PymatgenData, MagpieData,\n MatscholarElementData, etc.).\n \"\"\"\n\n @classmethod\n def from_preset(cls, preset_name):\n if preset_name == \"deml\":\n data_source = \"deml\"\n features = [\"total_ioniz\", \"xtal_field_split\", \"magn_moment\",\n \"so_coupling\", \"sat_magn\"]\n stats = [\"minimum\", \"maximum\", \"range\", \"mean\", \"std_dev\"]\n else:\n raise ValueError('Preset \"%s\" not found' % preset_name)\n return cls(data_source, features, stats)\n\n def feature_labels(self):\n return [f + \" of cations\" for f in super().feature_labels()]\n\n def featurize(self, comp):\n # Check if oxidation states are present\n if not has_oxidation_states(comp):\n raise ValueError('Oxidation states have not been determined')\n if not is_ionic(comp):\n raise ValueError('Composition is not ionic')\n\n # Prepare to store the attributes\n all_attributes = []\n\n # Initialize stats computer\n pstats = PropertyStats()\n\n # Get the cation species and fractions\n cations, fractions = zip(*[(s, f) for s, f in comp.items() if s.oxi_state > 0])\n\n for attr in self.features:\n elem_data = [self.data_source.get_charge_dependent_property_from_specie(c, attr)\n for c in cations]\n\n for stat in self.stats:\n all_attributes.append(pstats.calc_stat(elem_data, stat, fractions))\n\n return all_attributes\n\n def citations(self):\n return [\n \"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density \"\n \"functional theory total energies and enthalpies of formation of metal-nonmetal \"\n \"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, \"\n \"number={44}, journal={ChemInform}, author={Deml, Ann M. 
and Ohayre, Ryan and \"\n \"Wolverton, Chris and Stevanovic, Vladan}, year={2016}}\"]\n\n\nclass OxidationStates(BaseFeaturizer):\n \"\"\"\n Statistics about the oxidation states for each specie.\n Features are concentration-weighted statistics of the oxidation states.\n \"\"\"\n\n def __init__(self, stats=None):\n \"\"\"\n\n Args:\n stats - (list of string), which statistics compute\n \"\"\"\n self.stats = stats or [\"minimum\", \"maximum\", \"range\", \"std_dev\"]\n\n @classmethod\n def from_preset(cls, preset_name):\n if preset_name == \"deml\":\n stats = [\"minimum\", \"maximum\", \"range\", \"std_dev\"]\n else:\n ValueError('Preset \"%s\" not found' % preset_name)\n return cls(stats=stats)\n\n def featurize(self, comp):\n # Check if oxidation states are present\n if not has_oxidation_states(comp):\n raise ValueError('Oxidation states have not been determined')\n\n # Get the oxidation states and their proportions\n oxid_states, fractions = zip(*[(s.oxi_state, f) for s, f in comp.items()])\n\n # Compute statistics\n return [PropertyStats.calc_stat(oxid_states, s, fractions) for s in self.stats]\n\n def feature_labels(self):\n return [\"%s oxidation state\"%s for s in self.stats]\n\n def citations(self):\n return [\"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density \"\n \"functional theory total energies and enthalpies of formation of metal-nonmetal \"\n \"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, \"\n \"number={44}, journal={ChemInform}, author={Deml, Ann M. and Ohayre, Ryan and \"\n \"Wolverton, Chris and Stevanovic, Vladan}, year={2016}}\"]\n\n def implementors(self):\n return ['Logan Ward']\n\n\nclass AtomicOrbitals(BaseFeaturizer):\n \"\"\"\n Determine HOMO/LUMO features based on a composition.\n\n The highest occupied molecular orbital (HOMO) and lowest unoccupied\n molecular orbital (LUMO) are estiated from the atomic orbital energies\n of the composition. The atomic orbital energies are from NIST:\n https://www.nist.gov/pml/data/atomic-reference-data-electronic-structure-calculations\n\n Warning:\n For compositions with inter-species fractions greater than 10,000 (e.g.\n dilute alloys such as FeC0.00001) the composition will be truncated (to Fe\n in this example). In such extreme cases, the truncation likely reflects the\n true physics of the situation (i.e. 
that the dilute element does not\n significantly contribute orbital character to the band structure), but the\n user should be aware of this behavior.\n \"\"\"\n\n def featurize(self, comp):\n \"\"\"\n Args:\n comp: (Composition)\n pymatgen Composition object\n\n Returns:\n HOMO_character: (str) orbital symbol ('s', 'p', 'd', or 'f')\n HOMO_element: (str) symbol of element for HOMO\n HOMO_energy: (float in eV) absolute energy of HOMO\n LUMO_character: (str) orbital symbol ('s', 'p', 'd', or 'f')\n LUMO_element: (str) symbol of element for LUMO\n LUMO_energy: (float in eV) absolute energy of LUMO\n gap_AO: (float in eV)\n the estimated bandgap from HOMO and LUMO energeis\n \"\"\"\n\n integer_comp, factor = comp.get_integer_formula_and_factor()\n\n # warning message if composition is dilute and truncated\n if not (len(Composition(comp).elements) ==\n len(Composition(integer_comp).elements)):\n warn('AtomicOrbitals: {} truncated to {}'.format(comp,\n integer_comp))\n\n homo_lumo = MolecularOrbitals(integer_comp).band_edges\n\n feat = collections.OrderedDict()\n for edge in ['HOMO', 'LUMO']:\n feat['{}_character'.format(edge)] = homo_lumo[edge][1][-1]\n feat['{}_element'.format(edge)] = homo_lumo[edge][0]\n feat['{}_energy'.format(edge)] = homo_lumo[edge][2]\n feat['gap_AO'] = feat['LUMO_energy'] - feat['HOMO_energy']\n\n return list(feat.values())\n\n def feature_labels(self):\n feat = []\n for edge in ['HOMO', 'LUMO']:\n feat.extend(['{}_character'.format(edge),\n '{}_element'.format(edge),\n '{}_energy'.format(edge)])\n feat.append(\"gap_AO\")\n return feat\n\n def citations(self):\n return [\n \"@article{PhysRevA.55.191,\"\n \"title = {Local-density-functional calculations of the energy of atoms},\"\n \"author = {Kotochigova, Svetlana and Levine, Zachary H. and Shirley, \"\n \"Eric L. and Stiles, M. D. and Clark, Charles W.},\"\n \"journal = {Phys. Rev. 
A}, volume = {55}, issue = {1}, pages = {191--199},\"\n \"year = {1997}, month = {Jan}, publisher = {American Physical Society},\"\n \"doi = {10.1103/PhysRevA.55.191}, \"\n \"url = {https://link.aps.org/doi/10.1103/PhysRevA.55.191}}\"]\n\n def implementors(self):\n return ['Maxwell Dylla', 'Anubhav Jain']\n\n\nclass BandCenter(BaseFeaturizer):\n \"\"\"\n Estimation of absolute position of band center using electronegativity.\n\n Features\n - Band center\n \"\"\"\n\n def featurize(self, comp):\n \"\"\"\n (Rough) estimation of absolution position of band center using\n geometric mean of electronegativity.\n\n Args:\n comp (Composition).\n\n Returns:\n (float) band center.\n\n \"\"\"\n gmean = 1.0\n sumamt = sum(comp.get_el_amt_dict().values())\n for el, amt in comp.get_el_amt_dict().items():\n gmean *= Element(el).X**(amt/sumamt)\n return [-gmean]\n\n def feature_labels(self):\n return [\"band center\"]\n\n def citations(self):\n return [\n \"@article{Butler1978, author = {Butler, M A and Ginley, D S}, \"\n \"doi = {10.1149/1.2131419}, isbn = {0013-4651}, issn = {00134651}, \"\n \"journal = {Journal of The Electrochemical Society}, month = {feb},\"\n \" number = {2}, pages = {228--232}, title = {{Prediction of \"\n \"Flatband Potentials at Semiconductor-Electrolyte Interfaces from \"\n \"Atomic Electronegativities}}, url = \"\n \"{http://jes.ecsdl.org/content/125/2/228}, volume = {125}, \"\n \"year = {1978} } \"]\n\n def implementors(self):\n return [\"Anubhav Jain\"]\n\n\nclass ElectronegativityDiff(BaseFeaturizer):\n \"\"\"\n Features from electronegativity differences between anions and cations.\n\n These features are computed by first determining the concentration-weighted\n average electronegativity of the anions. For example, the average\n electronegativity of the anions in CaCoSO is equal to 1/2 of that of S and 1/2 of that of O.\n We then compute the difference between the electronegativity of each cation\n and the average anion electronegativity.\n\n The feature values are then determined based on the concentration-weighted statistics\n in the same manner as ElementProperty features. 
For example, one value could be\n the mean electronegativity difference over all the anions.\n\n Parameters:\n data_source (data class): source from which to retrieve element data\n stats: Property statistics to compute\n\n Generates average electronegativity difference between cations and anions\n \"\"\"\n\n def __init__(self, stats=None):\n if stats == None:\n self.stats = [\"minimum\", \"maximum\", \"range\", \"mean\", \"std_dev\"]\n else:\n self.stats = stats\n\n def featurize(self, comp):\n \"\"\"\n Args:\n comp: Pymatgen Composition object\n\n Returns:\n en_diff_stats (list of floats): Property stats of electronegativity difference\n \"\"\"\n\n # Check if oxidation states have been determined\n if not has_oxidation_states(comp):\n raise ValueError('Oxidation states have not yet been determined')\n if not is_ionic(comp):\n raise ValueError('Composition is not ionic')\n\n # Determine the average anion EN\n anions, anion_fractions = zip(*[(s, x) for s, x in comp.items() if s.oxi_state < 0])\n\n # If there are no anions, raise an Exception\n if len(anions) == 0:\n raise Exception('Features not applicable: Compound contains no anions')\n\n anion_en = [s.element.X for s in anions]\n mean_anion_en = PropertyStats.mean(anion_en, anion_fractions)\n\n # Determine the EN difference for each cation\n cations, cation_fractions = zip(*[(s, x) for s, x in comp.items() if s.oxi_state > 0])\n\n # If there are no cations, raise an Exception\n # It is possible to construct a non-charge-balanced Composition,\n # so we have to check for both the presence of anions and cations\n if len(cations) == 0:\n raise Exception('Features not applicable: Compound contains no cations')\n\n en_difference = [mean_anion_en - s.element.X for s in cations]\n\n # Compute the statistics\n return [\n PropertyStats.calc_stat(en_difference, stat, cation_fractions) for stat in self.stats\n ]\n\n def feature_labels(self):\n labels = []\n for stat in self.stats:\n labels.append(\"%s EN difference\" % stat)\n return labels\n\n def citations(self):\n citation = [\"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density \"\n \"functional theory total energies and enthalpies of formation of metal-nonmetal \"\n \"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, \"\n \"number={44}, journal={ChemInform}, author={Deml, Ann M. 
and Ohayre, Ryan and \"\n \"Wolverton, Chris and Stevanovic, Vladan}, year={2016}}\"]\n return citation\n\n def implementors(self):\n return [\"Jiming Chen\", \"Logan Ward\"]\n\n\nclass ElectronAffinity(BaseFeaturizer):\n \"\"\"\n Calculate average electron affinity times formal charge of anion elements.\n Note: The formal charges must already be computed before calling `featurize`.\n Generates average (electron affinity*formal charge) of anions.\n \"\"\"\n\n def __init__(self):\n self.data_source = DemlData()\n\n def featurize(self, comp):\n \"\"\"\n Args:\n comp: (Composition) Composition to be featurized\n\n Returns:\n avg_anion_affin (single-element list): average electron affinity*formal charge of anions\n \"\"\"\n\n # Check if oxidation states have been computed\n if not has_oxidation_states(comp):\n raise ValueError('Composition lacks oxidation states')\n\n # Get the species and fractions\n species, fractions = zip(*comp.items())\n\n # Determine which species are anions\n anions, fractions = zip(*[(s, f) for s, f in zip(species, fractions) if s.oxi_state < 0])\n\n # Compute the electron_affinity*formal_charge for each anion\n electron_affin = [\n self.data_source.get_elemental_property(s.element, \"electron_affin\") * s.oxi_state\n for s in anions\n ]\n\n # Compute the average affinity\n avg_anion_affin = PropertyStats.mean(electron_affin, fractions)\n\n return [avg_anion_affin]\n\n def feature_labels(self):\n return [\"avg anion electron affinity\"]\n\n def citations(self):\n citation = [\n \"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density \"\n \"functional theory total energies and enthalpies of formation of metal-nonmetal \"\n \"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, \"\n \"number={44}, journal={ChemInform}, author={Deml, Ann M. 
and Ohayre, Ryan and \"\n \"Wolverton, Chris and Stevanovic, Vladan}, year={2016}}\"]\n return citation\n\n def implementors(self):\n return [\"Jiming Chen\", \"Logan Ward\"]\n\n\nclass Stoichiometry(BaseFeaturizer):\n \"\"\"\n Calculate norms of stoichiometric attributes.\n\n Parameters:\n p_list (list of ints): list of norms to calculate\n num_atoms (bool): whether to return number of atoms per formula unit\n \"\"\"\n\n def __init__(self, p_list=(0, 2, 3, 5, 7, 10), num_atoms=False):\n self.p_list = p_list\n self.num_atoms = num_atoms\n\n def featurize(self, comp):\n \"\"\"\n Get stoichiometric attributes\n Args:\n comp: Pymatgen composition object\n p_list (list of ints)\n\n Returns:\n p_norm (list of floats): Lp norm-based stoichiometric attributes.\n Returns number of atoms if no p-values specified.\n \"\"\"\n\n el_amt = comp.get_el_amt_dict()\n\n # Compute the number of atoms per formula unit\n n_atoms_per_unit = comp.num_atoms / \\\n comp.get_integer_formula_and_factor()[1]\n\n if self.p_list is None:\n stoich_attr = [n_atoms_per_unit] # return num atoms if no norms specified\n else:\n p_norms = [0] * len(self.p_list)\n n_atoms = sum(el_amt.values())\n\n for i in range(len(self.p_list)):\n if self.p_list[i] < 0:\n raise ValueError(\"p-norm not defined for p < 0\")\n if self.p_list[i] == 0:\n p_norms[i] = len(el_amt.values())\n else:\n for j in el_amt:\n p_norms[i] += (el_amt[j] / n_atoms) ** self.p_list[i]\n p_norms[i] = p_norms[i] ** (1.0 / self.p_list[i])\n\n if self.num_atoms:\n stoich_attr = [n_atoms_per_unit] + p_norms\n else:\n stoich_attr = p_norms\n\n return stoich_attr\n\n def feature_labels(self):\n labels = []\n if self.num_atoms:\n labels.append(\"num atoms\")\n\n if self.p_list != None:\n for p in self.p_list:\n labels.append(\"%d-norm\" % p)\n\n return labels\n\n def citations(self):\n citation = [\n \"@article{ward_agrawal_choudary_wolverton_2016, title={A general-purpose \"\n \"machine learning framework for predicting properties of inorganic materials}, \"\n \"volume={2}, DOI={10.1038/npjcompumats.2017.28}, number={1}, journal={npj \"\n \"Computational Materials}, author={Ward, Logan and Agrawal, Ankit and Choudhary, \"\n \"Alok and Wolverton, Christopher}, year={2016}}\"]\n return citation\n\n def implementors(self):\n return [\"Jiming Chen\", \"Logan Ward\"]\n\n\nclass ValenceOrbital(BaseFeaturizer):\n \"\"\"\n Attributes of valence orbital shells\n\n Args:\n data_source (data object): source from which to retrieve element data\n orbitals (list): orbitals to calculate\n props (list): specifies whether to return average number of electrons in each orbital,\n fraction of electrons in each orbital, or both\n \"\"\"\n\n def __init__(self, orbitals=(\"s\", \"p\", \"d\", \"f\"), props=(\"avg\", \"frac\")):\n self.data_source = MagpieData()\n self.orbitals = orbitals\n self.props = props\n\n def featurize(self, comp):\n \"\"\"Weighted fraction of valence electrons in each orbital\n\n Args:\n comp: Pymatgen composition object\n\n Returns:\n valence_attributes (list of floats): Average number and/or\n fraction of valence electrons in specfied orbitals\n \"\"\"\n\n elements, fractions = zip(*comp.element_composition.items())\n\n # Get the mean number of electrons in each shell\n avg = [\n PropertyStats.mean(\n self.data_source.get_elemental_properties(elements, \"N%sValence\" % orb),\n weights=fractions)\n for orb in self.orbitals\n ]\n\n # If needed, get fraction of electrons in each shell\n if \"frac\" in self.props:\n avg_total_valence = PropertyStats.mean(\n 
self.data_source.get_elemental_properties(elements, \"NValence\"),\n weights=fractions)\n frac = [a / avg_total_valence for a in avg]\n\n # Get the desired attributes\n valence_attributes = []\n for prop in self.props:\n valence_attributes += locals()[prop]\n\n return valence_attributes\n\n def feature_labels(self):\n labels = []\n for prop in self.props:\n for orb in self.orbitals:\n labels.append(\"%s %s valence electrons\" % (prop, orb))\n\n return labels\n\n def citations(self):\n ward_citation = (\n \"@article{ward_agrawal_choudary_wolverton_2016, title={A general-purpose \"\n \"machine learning framework for predicting properties of inorganic materials}, \"\n \"volume={2}, DOI={10.1038/npjcompumats.2017.28}, number={1}, journal={npj \"\n \"Computational Materials}, author={Ward, Logan and Agrawal, Ankit and Choudhary, \"\n \"Alok and Wolverton, Christopher}, year={2016}}\")\n deml_citation = (\n \"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density \"\n \"functional theory total energies and enthalpies of formation of metal-nonmetal \"\n \"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, \"\n \"number={44}, journal={ChemInform}, author={Deml, Ann M. and Ohayre, Ryan and \"\n \"Wolverton, Chris and Stevanovic, Vladan}, year={2016}}\")\n citations = [ward_citation, deml_citation]\n return citations\n\n def implementors(self):\n return [\"Jiming Chen\", \"Logan Ward\"]\n\n\nclass IonProperty(BaseFeaturizer):\n \"\"\"\n Ionic property attributes. Similar to ElementProperty.\n \"\"\"\n\n def __init__(self, data_source=PymatgenData(), fast=False):\n \"\"\"\n\n Args:\n data_source - (OxidationStateMixin) - A AbstractData class that supports\n the `get_oxidation_state` method.\n fast - (boolean) whether to assume elements exist in a single oxidation state,\n which can dramatically accelerate the calculation of whether an ionic compound\n is possible, but will miss heterovalent compounds like Fe3O4.\n \"\"\"\n self.data_source = data_source\n self.fast = fast\n\n def featurize(self, comp):\n \"\"\"\n Ionic character attributes\n\n Args:\n comp: (Composition) Composition to be featurized\n\n Returns:\n cpd_possible (bool): Indicates if a neutral ionic compound is possible\n max_ionic_char (float): Maximum ionic character between two atoms\n avg_ionic_char (float): Average ionic character\n \"\"\"\n\n elements, fractions = zip(*comp.element_composition.items())\n\n if len(elements) < 2: # Single element\n cpd_possible = True\n max_ionic_char = 0\n avg_ionic_char = 0\n else:\n # Get magpie data for each element\n elec = self.data_source.get_elemental_properties(elements, \"X\")\n\n # Determine if neutral compound is possible\n if has_oxidation_states(comp):\n charges, fractions = zip(*[(s.oxi_state, f) for s, f in comp.items()])\n cpd_possible = np.isclose(np.dot(charges, fractions), 0)\n else:\n oxidation_states = [self.data_source.get_oxidation_states(e) for e in elements]\n if self.fast:\n # Assume each element can have only 1 oxidation state\n cpd_possible = False\n for ox in itertools.product(*oxidation_states):\n if np.isclose(np.dot(ox, fractions), 0):\n cpd_possible = True\n break\n else:\n # Use pymatgen's oxidation state checker which\n # can detect whether an takes >1 oxidation state (as in Fe3O4)\n oxi_state_dict = dict(zip([e.symbol for e in elements],\n oxidation_states))\n cpd_possible = len(comp.oxi_state_guesses(oxi_states_override=oxi_state_dict)) > 0\n\n # Ionic character attributes\n atom_pairs = 
itertools.combinations(range(len(elements)), 2)\n el_frac = list(np.true_divide(fractions, sum(fractions)))\n\n ionic_char = []\n avg_ionic_char = 0\n\n for pair in atom_pairs:\n XA = elec[pair[0]]\n XB = elec[pair[1]]\n ionic_char.append(1.0 - np.exp(-0.25 * ((XA - XB) ** 2)))\n avg_ionic_char += el_frac[pair[0]] * el_frac[pair[1]] * \\\n ionic_char[-1]\n\n max_ionic_char = np.max(ionic_char)\n\n return [cpd_possible, max_ionic_char, avg_ionic_char]\n\n def feature_labels(self):\n labels = [\"compound possible\", \"max ionic char\", \"avg ionic char\"]\n return labels\n\n def citations(self):\n citation = [\n \"@article{ward_agrawal_choudary_wolverton_2016, title={A general-purpose \"\n \"machine learning framework for predicting properties of inorganic materials}, \"\n \"volume={2}, DOI={10.1038/npjcompumats.2017.28}, number={1}, journal={npj \"\n \"Computational Materials}, author={Ward, Logan and Agrawal, Ankit and Choudhary, \"\n \"Alok and Wolverton, Christopher}, year={2016}}\"]\n return citation\n\n def implementors(self):\n return [\"Jiming Chen\", \"Logan Ward\"]\n\n\nclass ElementFraction(BaseFeaturizer):\n \"\"\"\n Class to calculate the atomic fraction of each element in a composition.\n\n Generates a vector where each index represents an element in atomic number order.\n \"\"\"\n\n def __init__(self):\n pass\n\n def featurize(self, comp):\n \"\"\"\n Args:\n comp: Pymatgen Composition object\n\n Returns:\n vector (list of floats): fraction of each element in a composition\n \"\"\"\n\n vector = [0] * 103\n el_list = list(comp.element_composition.fractional_composition.items())\n for el in el_list:\n obj = el\n atomic_number_i = obj[0].number - 1\n vector[atomic_number_i] = obj[1]\n return vector\n\n def feature_labels(self):\n labels = []\n for i in range(1, 104):\n labels.append(Element.from_Z(i).symbol)\n return labels\n\n def implementors(self):\n return [\"Ashwin Aggarwal\", \"Logan Ward\"]\n\n def citations(self):\n return []\n\n\nclass TMetalFraction(BaseFeaturizer):\n \"\"\"\n Class to calculate fraction of magnetic transition metals in a composition.\n\n Parameters:\n data_source (data class): source from which to retrieve element data\n\n Generates: Fraction of magnetic transition metal atoms in a compound\n \"\"\"\n\n def __init__(self):\n self.magn_elem = ['Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Nb',\n 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Ta', 'W', 'Re',\n 'Os', 'Ir', 'Pt']\n\n def featurize(self, comp):\n \"\"\"\n Args:\n comp: Pymatgen Composition object\n\n Returns:\n frac_magn_atoms (single-element list): fraction of magnetic transitional metal atoms in a compound\n \"\"\"\n\n el_amt = comp.get_el_amt_dict()\n\n frac_magn_atoms = 0\n for el in el_amt:\n if el in self.magn_elem:\n frac_magn_atoms += el_amt[el]\n\n frac_magn_atoms /= sum(el_amt.values())\n\n return [frac_magn_atoms]\n\n def feature_labels(self):\n labels = [\"transition metal fraction\"]\n return labels\n\n def citations(self):\n citation = [\n \"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density \"\n \"functional theory total energies and enthalpies of formation of metal-nonmetal \"\n \"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, \"\n \"number={44}, journal={ChemInform}, author={Deml, Ann M. 
and Ohayre, Ryan and \"\n \"Wolverton, Chris and Stevanovic, Vladan}, year={2016}}\"]\n return citation\n\n def implementors(self):\n return [\"Jiming Chen, Logan Ward\"]\n\n\nclass CohesiveEnergy(BaseFeaturizer):\n \"\"\"\n Cohesive energy per atom using elemental cohesive energies and\n formation energy.\n\n Get cohesive energy per atom of a compound by adding known\n elemental cohesive energies from the formation energy of the\n compound.\n\n Parameters:\n mapi_key (str): Materials API key for looking up formation energy\n by composition alone (if you don't set the formation energy\n yourself).\n \"\"\"\n\n def __init__(self, mapi_key=None):\n self.mapi_key = mapi_key\n\n def featurize(self, comp, formation_energy_per_atom=None):\n \"\"\"\n\n Args:\n comp: (str) compound composition, eg: \"NaCl\"\n formation_energy_per_atom: (float) the formation energy per atom of\n your compound. If not set, will look up the most stable\n formation energy from the Materials Project database.\n \"\"\"\n comp = comp.reduced_composition\n el_amt_dict = comp.get_el_amt_dict()\n\n formation_energy_per_atom = formation_energy_per_atom or None\n\n if not formation_energy_per_atom:\n # Get formation energy of most stable structure from MP\n struct_lst = MPRester(self.mapi_key).get_data(comp.reduced_formula)\n if len(struct_lst) > 0:\n most_stable_entry = sorted(struct_lst, key=lambda e: e['energy_per_atom'])[0]\n formation_energy_per_atom = most_stable_entry['formation_energy_per_atom']\n else:\n raise ValueError('No structure found in MP for {}'.format(comp))\n\n # Subtract elemental cohesive energies from formation energy\n cohesive_energy = -formation_energy_per_atom * comp.num_atoms\n for el in el_amt_dict:\n cohesive_energy += el_amt_dict[el] * \\\n CohesiveEnergyData().get_elemental_property(el)\n\n cohesive_energy_per_atom = cohesive_energy / comp.num_atoms\n\n return [cohesive_energy_per_atom]\n\n def feature_labels(self):\n return [\"cohesive energy\"]\n\n def implementors(self):\n return [\"Saurabh Bajaj\", \"Anubhav Jain\"]\n\n def citations(self):\n # Cohesive energy values for the elements are taken from the\n # Knowledgedoor web site, which obtained those values from Kittel.\n # We include both citations.\n return [\n \"@misc{, title = {{Knowledgedoor Cohesive energy handbook}}, \"\n \"url = {http://www.knowledgedoor.com/2/elements{\\_}handbook/cohesive{\\_}energy.html}}\",\n \"@book{Kittel, author = {Kittel, C}, isbn = {978-0-471-41526-8}, \"\n \"publisher = {Wiley}, title = {{Introduction to Solid State \"\n \"Physics, 8th Edition}}, year = {2005}}\"]\n\n\nclass Miedema(BaseFeaturizer):\n \"\"\"\n Formation enthalpies of intermetallic compounds, from Miedema et al.\n\n Calculate the formation enthalpies of the intermetallic compound,\n solid solution and amorphous phase of a given composition, based on\n semi-empirical Miedema model (and some extensions), particularly for\n transitional metal alloys.\n\n Support elemental, binary and multicomponent alloys.\n For elemental/binary alloys, the formulation is based on the original\n works by Miedema et al. in 1980s;\n For multicomponent alloys, the formulation is basically the linear\n combination of sub-binary systems. 
This is reported to work well for\n ternary alloys, but needs to be careful with quaternary alloys and more.\n\n Args:\n struct_types (str or [str]): default='all'\n 'inter': intermetallic compound; 'ss': solid solution\n 'amor': amorphous phase; 'all': same for ['inter', 'ss', 'amor']\n ['inter', 'ss']: amorphous phase and solid solution\n ss_types (str or [str]): only for ss, default='min'\n 'fcc': fcc solid solution; 'bcc': bcc solid solution\n 'hcp': hcp solid solution;\n 'no_latt': solid solution with no specific structure type\n 'min': min value of ['fcc', 'bcc', 'hcp', 'no_latt']\n 'all': same for ['fcc', 'bcc', 'hcp', 'no_latt']\n ['fcc', 'bcc']: fcc and bcc solid solutions\n data_source (str): source of dataset, default='Miedema'\n 'Miedema': 'Miedema.csv' placed in \"matminer/utils/data_files/\",\n containing the following model parameters for 73 elements:\n 'molar_volume', 'electron_density', 'electronegativity'\n 'valence_electrons', 'a_const', 'R_const', 'H_trans'\n 'compressibility', 'shear_modulus', 'melting_point'\n 'structural_stability'. Please see the references for details.\n Returns:\n (list of floats) Miedema formation enthalpies (eV/atom) for input\n struct_types:\n -Miedema_deltaH_inter: for intermetallic compound\n -Miedema_deltaH_ss: for solid solution, can include 'fcc', 'bcc',\n 'hcp', 'no_latt', 'min' based on input ss_types\n -Miedema_deltaH_amor: for amorphous phase\n \"\"\"\n\n def __init__(self, struct_types='all', ss_types='min',\n data_source='Miedema'):\n if isinstance(struct_types, list):\n self.struct_types = struct_types\n else:\n if struct_types == 'all':\n self.struct_types = ['inter', 'amor', 'ss']\n else:\n self.struct_types = [struct_types]\n\n if isinstance(ss_types, list):\n self.ss_types = ss_types\n else:\n if ss_types == 'all':\n self.ss_types = ['fcc', 'bcc', 'hcp', 'no_latt']\n else:\n self.ss_types = [ss_types]\n\n self.data_source = data_source\n if self.data_source == 'Miedema':\n self.df_dataset = pd.read_csv(\n os.path.join(data_dir, 'Miedema.csv'), index_col='element')\n else:\n raise NotImplementedError('data_source {} not implemented yet'.\n format(self, data_source))\n\n self.element_list = [Element(estr) for estr in self.df_dataset.index]\n\n def precheck(self, c: Composition) -> bool:\n \"\"\"\n Precheck a single entry. 
Miedema does not work for compositons\n containing any elments for which the Miedema model has no parameters.\n To precheck an entire dataframe (qnd automatically gather\n the fraction of structures that will pass the precheck), please use\n precheck_dataframe.\n\n Args:\n c (pymatgen.Composition): The composition to precheck.\n\n Returns:\n (bool): If True, s passed the precheck; otherwise, it failed.\n \"\"\"\n return all([e in self.element_list\n for e in c.element_composition.elements])\n\n def deltaH_chem(self, elements, fracs, struct):\n \"\"\"\n Chemical term of formation enthalpy\n Args:\n elements (list of str): list of elements\n fracs (list of floats): list of atomic fractions\n struct (str): 'inter', 'ss' or 'amor'\n Returns:\n deltaH_chem (float): chemical term of formation enthalpy\n \"\"\"\n if any([el not in self.df_dataset.index for el in elements]):\n return np.nan\n df_el = self.df_dataset.loc[elements]\n v_molar = np.array(df_el['molar_volume'])\n n_ws = np.array(df_el['electron_density'])\n elec = np.array(df_el['electronegativity'])\n val = np.array(df_el['valence_electrons'])\n a = np.array(df_el['a_const'])\n r = np.array(df_el['R_const'])\n h_trans = np.array(df_el['H_trans'])\n\n if struct == 'inter':\n gamma = 8\n elif struct == 'amor':\n gamma = 5\n else:\n gamma = 0\n\n c_sf = (fracs * np.power(v_molar, 2 / 3) / np.dot(fracs, np.power(v_molar, 2 / 3)))\n f = (c_sf * (1 + gamma * np.power(np.multiply.reduce(c_sf, 0), 2)))[::-1]\n v_a = np.array([np.power(v_molar[0], 2 / 3) * (1 + a[0] * f[0] * (elec[0] - elec[1])),\n np.power(v_molar[1], 2 / 3) * (1 + a[1] * f[1] * (elec[1] - elec[0]))])\n c_sf_a = fracs * v_a / np.dot(fracs, v_a)\n f_a = (c_sf_a * (1 + gamma * np.power(np.multiply.reduce\n (c_sf_a, 0), 2)))[::-1]\n\n threshold = range(3, 12)\n if (val[0] in threshold) and (val[1] in threshold):\n p = 14.1\n r = 0.\n elif (val[0] not in threshold) and (val[1] not in threshold):\n p = 10.7\n r = 0.\n else:\n p = 12.35\n r = np.multiply.reduce(r, 0) * p\n q = p * 9.4\n\n eta_ab = (2 * (-p * np.power(elec[0] - elec[1], 2) - r +\n q * np.power(np.power(n_ws[0], 1 / 3) -\n np.power(n_ws[1], 1 / 3), 2)) /\n reduce(lambda x, y: 1 / x + 1 / y, np.power(n_ws, 1 / 3)))\n\n deltaH_chem = (f_a[0] * fracs[0] * v_a[0] * eta_ab +\n np.dot(fracs, h_trans))\n return deltaH_chem\n\n def deltaH_elast(self, elements, fracs):\n \"\"\"\n Elastic term of formation enthalpy\n Args:\n elements (list of str): list of elements\n fracs (list of floats): list of atomic fractions\n Returns:\n deltaH_elastic (float): elastic term of formation enthalpy\n \"\"\"\n if any([el not in self.df_dataset.index for el in elements]):\n return np.nan\n df_el = self.df_dataset.loc[elements]\n v_molar = np.array(df_el['molar_volume'])\n n_ws = np.array(df_el['electron_density'])\n elec = np.array(df_el['electronegativity'])\n compr = np.array(df_el['compressibility'])\n shear_mod = np.array(df_el['shear_modulus'])\n\n alp = (np.multiply(1.5, np.power(v_molar, 2 / 3)) /\n reduce(lambda x, y: 1 / x + 1 / y, np.power(n_ws, 1 / 3)))\n v_a = (v_molar + np.array([alp[0] * (elec[0] - elec[1]) / n_ws[0],\n alp[1] * (elec[1] - elec[0]) / n_ws[1]]))\n alp_a = (np.multiply(1.5, np.power(v_a, 2 / 3)) /\n reduce(lambda x, y: 1 / x + 1 / y, np.power(n_ws, 1 / 3)))\n\n # effective volume in alloy\n vab_a = (v_molar[0] +\n np.array([alp_a[0] * (elec[0] - elec[1]) / n_ws[0],\n alp_a[1] * (elec[1] - elec[0]) / n_ws[0]]))\n vba_a = (v_molar[1] +\n np.array([alp_a[0] * (elec[0] - elec[1]) / n_ws[1],\n alp_a[1] * (elec[1] - 
elec[0]) / n_ws[1]]))\n\n # H_elast A in B\n hab_elast = ((2 * compr[0] * shear_mod[1] *\n np.power((vab_a[0] - vba_a[0]), 2)) /\n (4 * shear_mod[1] * vab_a[0] +\n 3 * compr[0] * vba_a[0]))\n # H_elast B in A\n hba_elast = ((2 * compr[1] * shear_mod[0] *\n np.power((vba_a[1] - vab_a[1]), 2)) /\n (4 * shear_mod[0] * vba_a[1] +\n 3 * compr[1] * vab_a[1]))\n\n deltaH_elast = (np.multiply.reduce(fracs, 0) *\n (fracs[1] * hab_elast + fracs[0] * hba_elast))\n return deltaH_elast\n\n def deltaH_struct(self, elements, fracs, latt):\n \"\"\"\n Structural term of formation enthalpy, only for solid solution\n Args:\n elements (list of str): list of elements\n fracs (list of floats): list of atomic fractions\n latt (str): 'fcc', 'bcc', 'hcp' or 'no_latt'\n Returns:\n deltaH_struct (float): structural term of formation enthalpy\n \"\"\"\n if any([el not in self.df_dataset.index for el in elements]):\n return np.nan\n df_el = self.df_dataset.loc[elements]\n val = np.array(df_el['valence_electrons'])\n struct_stab = np.array(df_el['structural_stability'])\n\n if latt == 'fcc':\n latt_stab_dict = {0.: 0., 1.: 0, 2.: 0, 3.: -2, 4.: -1.5,\n 5.: 9., 5.5: 14., 6.: 11., 7.: -3., 8.: -9.5,\n 8.5: -11., 9.: -9., 10.: -2., 11.: 1.5,\n 12.: 0., 13.: 0., 14.: 0., 15.: 0.}\n elif latt == 'bcc':\n latt_stab_dict = {0.: 0., 1.: 0., 2.: 0., 3.: 2.2, 4.: 2.,\n 5.: -9.5, 5.5: -14.5, 6.: -12., 7.: 4.,\n 8.: 10., 8.5: 11., 9.: 8.5, 10.: 1.5,\n 11.: 1.5, 12.: 0., 13.: 0., 14.: 0., 15.: 0.}\n elif latt == 'hcp':\n latt_stab_dict = {0.: 0., 1.: 0., 2.: 0., 3.: -2.5, 4.: -2.5,\n 5.: 10., 5.5: 15., 6.: 13., 7.: -5.,\n 8.: -10.5, 8.5: -11., 9.: -8., 10.: -1.,\n 11.: 2.5, 12.: 0., 13.: 0., 14.: 0., 15.: 0.}\n else:\n return 0\n latt_stab_dict = collections.OrderedDict(sorted(latt_stab_dict.items(),\n key=lambda t: t[0]))\n # lattice stability of different lattice_types\n val_avg = np.dot(fracs, val)\n val_bd_lower, val_bd_upper = 0, 0\n for key in latt_stab_dict.keys():\n if val_avg - key <= 0:\n val_bd_upper = key\n break\n else:\n val_bd_lower = key\n\n latt_stab = ((val_avg - val_bd_lower) * latt_stab_dict[val_bd_upper] /\n (val_bd_upper - val_bd_lower) +\n (val_bd_upper - val_avg) * latt_stab_dict[val_bd_lower] /\n (val_bd_upper - val_bd_lower))\n\n deltaH_struct = latt_stab - np.dot(fracs, struct_stab)\n return deltaH_struct\n\n def deltaH_topo(self, elements, fracs):\n \"\"\"\n Topological term of formation enthalpy, only for amorphous phase\n Args:\n elements (list of str): list of elements\n fracs (list of floats): list of atomic fractions\n Returns:\n deltaH_topo (float): topological term of formation enthalpy\n \"\"\"\n if any([el not in self.df_dataset.index for el in elements]):\n return np.nan\n df_el = self.df_dataset.loc[elements]\n melt_point = np.array(df_el['melting_point'])\n\n deltaH_topo = 3.5 * np.dot(fracs, melt_point) / 1000\n return deltaH_topo\n\n def featurize(self, comp):\n \"\"\"\n Get Miedema formation enthalpies of target structures: inter, amor,\n ss (can be further divided into 'min', 'fcc', 'bcc', 'hcp', 'no_latt'\n for different lattice_types)\n Args:\n comp: Pymatgen composition object\n Returns:\n miedema (list of floats): formation enthalpies of target structures\n \"\"\"\n el_amt = comp.fractional_composition.get_el_amt_dict()\n elements = sorted(el_amt.keys(), key=lambda sym: get_el_sp(sym).X)\n fracs = [el_amt[el] for el in elements]\n el_num = len(elements)\n # divide into a list of sub-binaries\n el_bins = []\n frac_bins = []\n for i in range(el_num - 1):\n for j in range(i + 1, 
el_num):\n el_bins.append([elements[i], elements[j]])\n frac_bins.append([fracs[i], fracs[j]])\n\n miedema = []\n for struct_type in self.struct_types:\n # inter: intermetallic compound\n if struct_type == 'inter':\n deltaH_chem_inter = 0\n for i_inter, el_bin in enumerate(el_bins):\n deltaH_chem_inter += self.deltaH_chem(el_bin,\n frac_bins[i_inter],\n 'inter')\n miedema.append(deltaH_chem_inter)\n # ss: solid solution\n elif struct_type == 'ss':\n deltaH_chem_ss = 0\n deltaH_elast_ss = 0\n for sub_bin, el_bin in enumerate(el_bins):\n deltaH_chem_ss += self.deltaH_chem(el_bin, frac_bins[sub_bin], 'ss')\n deltaH_elast_ss += self.deltaH_elast(el_bin, frac_bins[sub_bin])\n\n for ss_type in self.ss_types:\n if ss_type == 'min':\n deltaH_ss_all = []\n for latt in ['fcc', 'bcc', 'hcp', 'no_latt']:\n deltaH_ss_all.append(\n deltaH_chem_ss + deltaH_elast_ss +\n self.deltaH_struct(elements, fracs, latt))\n deltaH_ss_min = min(deltaH_ss_all)\n miedema.append(deltaH_ss_min)\n else:\n deltaH_struct_ss = self.deltaH_struct(elements,\n fracs, ss_type)\n miedema.append(deltaH_chem_ss + deltaH_elast_ss +\n deltaH_struct_ss)\n # amor: amorphous phase\n elif struct_type == 'amor':\n deltaH_chem_amor = 0\n deltaH_topo_amor = self.deltaH_topo(elements, fracs)\n for sub_bin, el_bin in enumerate(el_bins):\n deltaH_chem_amor += self.deltaH_chem(el_bin,\n frac_bins[sub_bin],\n 'amor')\n miedema.append(deltaH_chem_amor + deltaH_topo_amor)\n\n # convert kJ/mol to eV/atom. The original Miedema model is in kJ/mol.\n miedema = [deltaH / 96.4853 for deltaH in miedema]\n return miedema\n\n def feature_labels(self):\n labels = []\n for struct_type in self.struct_types:\n if struct_type == 'ss':\n for ss_type in self.ss_types:\n labels.append('Miedema_deltaH_ss_' + ss_type)\n else:\n labels.append('Miedema_deltaH_' + struct_type)\n return labels\n\n def citations(self):\n miedema_citation = (\n '@article{miedema_1988, '\n 'title={Cohesion in metals},'\n 'author={De Boer, Frank R and Mattens, WCM '\n 'and Boom, R and Miedema, AR and Niessen, AK},'\n 'year={1988}}')\n zhang_citation = (\n '@article{miedema_zhang_2016, '\n 'title={Miedema Calculator: A thermodynamic platform '\n 'for predicting formation enthalpies of alloys within '\n 'framework of Miedema\\'s Theory},'\n 'author={R.F. Zhang, S.H. Zhang, Z.J. He, J. Jing and S.H. Sheng},'\n 'journal={Computer Physics Communications}'\n 'year={2016}}')\n ternary_citation = (\n '@article{miedema_alonso_1990, '\n 'title={Glass formation in ternary transition metal alloys},'\n 'author={L J Gallego, J A Somoza and J A Alonso},'\n 'journal={Journal of Physics: Condensed Matter}'\n 'year={1990}}')\n return [miedema_citation, zhang_citation, ternary_citation]\n\n def implementors(self):\n return ['Qi Wang', 'Alireza Faghaninia']\n\n\nclass YangSolidSolution(BaseFeaturizer):\n \"\"\"\n Mixing thermochemistry and size mismatch terms of Yang and Zhang (2012)\n\n This featurizer returns two different features developed by\n .. Yang and Zhang `https://linkinghub.elsevier.com/retrieve/pii/S0254058411009357`\n to predict whether metal alloys will form metallic glasses,\n crystalline solid solutions, or intermetallics.\n The first, Omega, is related to the balance between the mixing entropy and\n mixing enthalpy of the liquid phase. The second, delta, is related to the\n atomic size mismatch between the different elements of the material.\n\n Features\n Yang omega - Mixing thermochemistry feature, Omega\n Yang delta - Atomic size mismatch term\n\n References:\n .. 
Yang and Zhang (2012) `https://linkinghub.elsevier.com/retrieve/pii/S0254058411009357`.\n \"\"\"\n\n def __init__(self):\n # Load in the mixing enthalpy data\n # Creates a lookup table of the liquid mixing enthalpies\n self.dhf_mix = MixingEnthalpy()\n\n # Load in a table of elemental properties\n self.elem_data = MagpieData()\n\n def precheck(self, c: Composition) -> bool:\n \"\"\"\n Precheck a single entry. YangSolidSolution does not work for compositons\n containing any binary elment combinations for which the model has no\n parameters. We can nearly equivalently approximate this by checking\n against the unary element list.\n\n To precheck an entire dataframe (qnd automatically gather\n the fraction of structures that will pass the precheck), please use\n precheck_dataframe.\n\n Args:\n c (pymatgen.Composition): The composition to precheck.\n\n Returns:\n (bool): If True, s passed the precheck; otherwise, it failed.\n \"\"\"\n return all([e in self.dhf_mix.valid_element_list\n for e in c.element_composition.elements])\n\n def featurize(self, comp):\n return [self.compute_omega(comp), self.compute_delta(comp)]\n\n def compute_omega(self, comp):\n \"\"\"Compute Yang's mixing thermodynamics descriptor\n\n :math:`\\\\frac{T_m \\Delta S_{mix}}{ | \\Delta H_{mix} | }`\n\n Where :math:`T_m` is average melting temperature,\n :math:`\\Delta S_{mix}` is the ideal mixing entropy,\n and :math:`\\Delta H_{mix}` is the average mixing enthalpies\n of all pairs of elements in the alloy\n\n Args:\n comp (Composition) - Composition to featurizer\n Returns:\n (float) Omega\n \"\"\"\n\n # Special case: Elemental compound (entropy == 0 -> Omega == 1)\n if len(comp) == 1:\n return 0\n\n # Get the element names and fractions\n elements, fractions = zip(*comp.element_composition.fractional_composition.items())\n\n # Get the mean melting temperature\n mean_Tm = PropertyStats.mean(\n self.elem_data.get_elemental_properties(elements, \"MeltingT\"),\n fractions\n )\n\n # Get the mixing entropy\n entropy = np.dot(fractions, np.log(fractions)) * 8.314 / 1000\n\n # Get the mixing enthalpy\n enthalpy = 0\n for i, (e1, f1) in enumerate(zip(elements, fractions)):\n for e2, f2 in zip(elements[:i], fractions):\n enthalpy += f1 * f2 * self.dhf_mix.get_mixing_enthalpy(e1, e2)\n enthalpy *= 4\n\n # Make sure the enthalpy is nonzero\n # The limit as dH->0 of omega is +\\inf. A very small positive dH will approximate\n # this limit without causing issues with infinite features\n enthalpy = max(1e-6, abs(enthalpy))\n\n return abs(mean_Tm * entropy / enthalpy)\n\n def compute_delta(self, comp):\n \"\"\"Compute Yang's delta parameter\n\n :math:`\\sqrt{\\sum^n_{i=1} c_i \\left( 1 - \\\\frac{r_i}{\\\\bar{r}} \\\\right)^2 }`\n\n where :math:`c_i` and :math:`r_i` are the fraction and radius of\n element :math:`i`, and :math:`\\\\bar{r}` is the fraction-weighted\n average of the radii. We use the radii compiled by\n .. Miracle et al. 
`https://www.tandfonline.com/doi/ref/10.1179/095066010X12646898728200?scroll=top`.\n\n Args:\n comp (Composition) - Composition to assess\n Returns:\n (float) delta\n\n \"\"\"\n\n elements, fractions = zip(*comp.element_composition.items())\n\n # Get the radii of elements\n radii = self.elem_data.get_elemental_properties(elements,\n \"MiracleRadius\")\n mean_r = PropertyStats.mean(radii, fractions)\n\n # Compute the mean (1 - r/\\\\bar{r})^2\n r_dev = np.power(1.0 - np.divide(radii, mean_r), 2)\n return np.sqrt(PropertyStats.mean(r_dev, fractions))\n\n def feature_labels(self):\n return ['Yang omega', 'Yang delta']\n\n def citations(self):\n return [\"@article{Yang2012,\"\n \"author = {Yang, X. and Zhang, Y.},\"\n \"doi = {10.1016/j.matchemphys.2011.11.021},\"\n \"journal = {Materials Chemistry and Physics},\"\n \"number = {2-3},\"\n \"pages = {233--238},\"\n \"title = {{Prediction of high-entropy stabilized solid-solution in multi-component alloys}},\"\n \"url = {http://dx.doi.org/10.1016/j.matchemphys.2011.11.021},\"\n \"volume = {132},year = {2012}}\"]\n\n def implementors(self):\n return ['Logan Ward']\n\n\nclass AtomicPackingEfficiency(BaseFeaturizer):\n \"\"\"\n Packing efficiency based on a geometric theory of the amorphous packing\n of hard spheres.\n\n This featurizer computes two different kinds of the features. The first\n relate to the distance between a composition and the composition of\n the clusters of atoms expected to be efficiently packed based on a\n theory from\n `Laws et al.<http://www.nature.com/doifinder/10.1038/ncomms9123>`_.\n The second corresponds to the packing efficiency of a system if all atoms\n in the alloy are simultaneously as efficiently-packed as possible.\n\n The packing efficiency in these models is based on the Atomic Packing\n Efficiency (APE), which measures the difference between the ratio of\n the radii of the central atom to its neighbors and the ideal ratio\n of a cluster with the same number of atoms that has optimal packing\n efficiency. If the difference between the ratios is too large, the APE is\n positive. If the difference is too small, the APE is negative.\n\n Features:\n dist from {k} clusters |APE| < {thr} - The distance between an\n alloy composition and the k clusters that have a packing efficiency\n below thr from ideal\n mean simul. packing efficiency - Mean packing efficiency of all atoms.\n The packing efficiency is measured with respect to ideal (0)\n mean abs simul. packing efficiency - Mean absolute value of the\n packing efficiencies. Closer to zero is more efficiently packed\n\n References:\n [1] K.J. Laws, D.B. Miracle, M. Ferry, A predictive structural model\n for bulk metallic glasses, Nat. Commun. 6 (2015) 8123. doi:10.1038/ncomms9123.\n \"\"\"\n\n def __init__(self, threshold=0.01, n_nearest=(1, 3, 5), max_types=6):\n \"\"\"\n Initialize the featurizer\n\n Args:\n threshold (float):Threshold to use for determining whether\n a cluster is efficiently packed.\n n_nearest ({int}): Number of nearest clusters to use when considering features\n max_types (int): Maximum number of atom types to consider when\n looking for efficient clusters. 
The process for finding\n efficient clusters very expensive for large numbers of types\n \"\"\"\n\n # Store the options\n self.threshold = threshold\n self.n_nearest = n_nearest\n self.max_types = max_types\n\n # Tool to convert composition objects to fractions as a vector\n self._el_frac = ElementFraction()\n\n # Get the number of elements in the output of `_el_frac`\n self._n_elems = len(self._el_frac.featurize(Composition('H')))\n\n # Tool for looking up radii\n self._data_source = MagpieData()\n\n # Lookup table of ideal radius ratios\n self.ideal_ratio = dict(\n [(3, 0.154701), (4, 0.224745), (5, 0.361654), (6, 0.414214),\n (7, 0.518145), (8, 0.616517), (9, 0.709914), (10, 0.798907),\n (11, 0.884003), (12, 0.902113), (13, 0.976006), (14, 1.04733),\n (15, 1.11632), (16, 1.18318), (17, 1.2481), (18, 1.31123),\n (19, 1.37271), (20, 1.43267), (21, 1.49119), (22, 1.5484),\n (23, 1.60436), (24, 1.65915)])\n\n def __hash__(self):\n return hash(self.threshold)\n\n def __eq__(self, other):\n if isinstance(other, AtomicPackingEfficiency):\n return self.get_params() == other.get_params()\n\n def featurize(self, comp):\n return list(self.compute_simultaneous_packing_efficiency(comp)) + \\\n self.compute_nearest_cluster_distance(comp)\n\n def feature_labels(self):\n return ['mean simul. packing efficiency',\n 'mean abs simul. packing efficiency'] + [\n 'dist from {} clusters |APE| < {:.3f}'.format(k,\n self.threshold)\n for k in self.n_nearest]\n\n def citations(self):\n return [\"@article{Laws2015,\"\n \"author = {Laws, K. J. and Miracle, D. B. and Ferry, M.},\"\n \"doi = {10.1038/ncomms9123},\"\n \"journal = {Nature Communications},\"\n \"pages = {8123},\"\n \"title = {{A predictive structural model for bulk metallic glasses}},\"\n \"url = {http://www.nature.com/doifinder/10.1038/ncomms9123},\"\n \"volume = {6},\"\n \"year = {2015}\"]\n\n def implementors(self):\n return ['Logan Ward']\n\n def compute_simultaneous_packing_efficiency(self, comp):\n \"\"\"Compute the packing efficiency of the system when the neighbor\n shell of each atom has the same composition as the alloy. When this\n criterion is satisfied, it is possible for every atom in this system\n to be simultaneously as efficiently-packed as possible.\n\n Args:\n comp (Composition): Composition to be assessed\n Returns\n (float) Average APE of all atoms\n (float) Average deviation of the APE of each atom from ideal (0)\n \"\"\"\n\n # Compute the average atomic radius of the system\n elements, fractions = zip(*comp.element_composition.items())\n radii = self._data_source.get_elemental_properties(elements,\n 'MiracleRadius')\n mean_radius = PropertyStats.mean(radii, fractions)\n\n # Compute the APE for each cluster\n best_ape = [\n self.find_ideal_cluster_size(r / mean_radius)[1] for r in radii\n ]\n\n # Return the averages\n return PropertyStats.mean(best_ape, fractions), \\\n PropertyStats.mean(np.abs(best_ape), fractions)\n\n def compute_nearest_cluster_distance(self, comp):\n \"\"\"Compute the distance between a composition and that the nearest\n efficiently-packed clusters.\n\n Measures the mean :math:`L_2` distance between the alloy composition\n and the :math:`k`-nearest clusters with Atomic Packing Efficiencies\n within the user-specified tolerance of 1. 
:math:`k` is any of the\n numbers defined in the \"n_nearest\" parameter of this class.\n\n If there are less than `k` efficient clusters in the system, we use\n the maximum distance betweeen any two compositions (1) for the\n unmatched neighbors.\n\n Args:\n comp (Composition): Composition of material to evaluate\n Return:\n [float] Average distances\n \"\"\"\n\n # Get the most common elements\n elems, _ = zip(*sorted(comp.element_composition.items(),\n key=lambda x: x[1], reverse=True))\n\n # Get the cluster lookup tool using the most common elements\n cluster_lookup = self.create_cluster_lookup_tool(\n elems[:self.max_types]\n )\n\n # Compute the composition vector\n comp_vec = self._el_frac.featurize(comp)\n\n # Compute the distances\n means = []\n for k in self.n_nearest:\n # Get the nearest clusters\n if cluster_lookup is None:\n dists = (np.array([]),)\n to_lookup = 0\n else:\n to_lookup = min(cluster_lookup._fit_X.shape[0], k)\n dists, _ = cluster_lookup.kneighbors([comp_vec], to_lookup)\n\n # Pad the list with 1's\n dists = dists[0].tolist() + [1]*(k - to_lookup)\n\n # Compute the average\n means.append(np.mean(dists))\n\n return means\n\n def create_cluster_lookup_tool(self, elements):\n \"\"\"\n Get the compositions of efficiently-packed clusters in a certain system\n of elements\n\n Args:\n elements ([Element]): Elements in system\n Return:\n (NearNeighbors): Tool to find nearby clusters in this system. None\n if there are no efficiently-packed clusters for this combination of elements\n \"\"\"\n elements = list(set(elements))\n return self._create_cluster_lookup_tool(tuple(sorted(elements)))\n\n @lru_cache()\n def _create_cluster_lookup_tool(self, elements):\n \"\"\"\n Cached version of `create_cluster_lookup_tool`. Assumes that the\n elements are passed as sorted tuple with no duplicates\n\n Args:\n elements ([Element]): Elements in system\n Return:\n (NearNeighbors): Tool to find nearby clusters in this system. 
If\n there are no clusters, this class returns None\n \"\"\"\n\n # Get the radii\n radii = self._data_source.get_elemental_properties(elements,\n \"MiracleRadius\")\n\n # Get the maximum and minimum cluster sizes\n max_size = self.find_ideal_cluster_size(max(radii) / min(radii))[0]\n min_size = self.find_ideal_cluster_size(min(radii) / max(radii))[0]\n\n # Prepare a list to hold all possible clusters\n eff_clusters = []\n\n # Loop through all possible neighbor shells\n for size in range(min_size, max_size + 1):\n # Get the ideal radius ratio for a cluster of this size\n ideal_ratio = self.get_ideal_radius_ratio(size)\n\n # Get the mean radii and compositions of all possible\n # combinations of elements in the neighbor shell\n s_radii = itertools.combinations_with_replacement(radii, size)\n s_elems = itertools.combinations_with_replacement(elements, size)\n\n # Put the results in arrays for fast indexing\n mean_radii = np.array(list(s_radii)).mean(axis=1)\n s_elems = np.array(list(s_elems))\n\n # For each type of central atom, determine which have an APE\n # within `self.threshold` of 1\n for center_radius, center_elem in zip(radii, elements):\n # Compute the APE of each cluster\n ape = 1 - np.divide(ideal_ratio, np.divide(center_radius,\n mean_radii))\n\n # Get those which are within the threshold of 0\n # and add their composition to the list of OK elements\n for hit in s_elems[np.abs(ape) < self.threshold]:\n eff_clusters.append([center_elem] + hit.tolist())\n\n # Compute the composition vectors for all of the efficient clusters\n comps = np.zeros((len(eff_clusters), self._n_elems))\n for i, elems in enumerate(eff_clusters):\n for elem in elems:\n comps[i, elem.Z - 1] += 1\n comps = np.divide(comps, comps.sum(axis=1)[:, None])\n\n # Return tool to quickly determine distance from efficient clusters\n # NearNeighbors requires at least 1 entry, so we return None if\n # there are no nearby clusters\n return NearestNeighbors().fit(comps) if len(comps) > 0 else None\n\n def find_ideal_cluster_size(self, radius_ratio):\n \"\"\"\n Get the optimal cluster size for a certain radius ratio\n\n Finds the number of nearest neighbors :math:`n` that minimizes\n :math:`|1 - rp(n)/r|`, where :math:`rp(n)` is the ideal radius\n ratio for a certain :math:`n` and :math:`r` is the actual ratio.\n\n Args:\n radius_ratio (float): :math:`r / r_{neighbor}`\n Returns:\n (int) number of neighboring atoms for that will be the most\n efficiently packed.\n (float) Optimal APE\n \"\"\"\n\n # Loop through cluster sizes from 3 to 24\n best_ape = np.inf\n best_n = None\n for n in range(3, 25):\n # Compute APE, check if it is the best\n ape = 1 - self.get_ideal_radius_ratio(n) / radius_ratio\n if abs(ape) < abs(best_ape):\n best_ape = ape\n best_n = n\n\n # If the APE is negative, this is either the best APE or\n # We have already passed it\n if ape < 0:\n return best_n, best_ape\n\n return best_n, best_ape\n\n def get_ideal_radius_ratio(self, n_neighbors):\n \"\"\"Compute the idea ratio between the central atom and neighboring\n atoms for a neighbor with a certain number of nearest neighbors.\n\n Based on work by `Miracle, Lord, and Ranganathan\n <https://www.jstage.jst.go.jp/article/matertrans/47/7/47_7_1737/_article/-char/en>`_.\n\n Args:\n n_neighbors (int): Number of atoms in 1st NN shell\n Return:\n (float) ideal radius ratio :math:`r / r_{neighbor}`\n \"\"\"\n\n # NN must be in [3, 24]\n n = max(3, min(n_neighbors, 24))\n\n return self.ideal_ratio[n]\n"
] | [
[
"numpy.dot",
"numpy.multiply.reduce",
"numpy.log",
"numpy.abs",
"numpy.power",
"numpy.max",
"sklearn.neighbors.unsupervised.NearestNeighbors",
"numpy.mean",
"numpy.exp",
"numpy.array",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
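The YangSolidSolution featurizer in the record above documents delta as sqrt(sum_i c_i (1 - r_i / r_bar)^2), with r_bar the fraction-weighted mean radius. A minimal standalone sketch of that formula follows, assuming plain lists of fractions and radii as inputs; the function name yang_delta and the radii values are illustrative placeholders, not matminer's API (matminer looks the radii up from its MiracleRadius table).

import numpy as np

def yang_delta(fractions, radii):
    # delta = sqrt( sum_i c_i * (1 - r_i / r_bar)^2 ), with r_bar = sum_i c_i * r_i
    fractions = np.asarray(fractions, dtype=float)
    radii = np.asarray(radii, dtype=float)
    mean_r = np.dot(fractions, radii)
    return float(np.sqrt(np.dot(fractions, (1.0 - radii / mean_r) ** 2)))

# Made-up radii for a hypothetical equiatomic ternary alloy:
print(yang_delta([1 / 3, 1 / 3, 1 / 3], [1.28, 1.32, 1.46]))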
azedarach/matrix-factorization-case-studies | [
"b689c8af677c378bad75f68e56671a5c6f6589ec"
] | [
"tests/test_simplex_projection.py"
] | [
"\"\"\"\nProvides unit tests for simplex projections.\n\"\"\"\n\n# License: MIT\n\n\nimport numpy as np\n\nfrom convex_dim_red.simplex_projection import (simplex_project_rows,\n simplex_project_vector)\n\ndef test_correct_projection_for_1D_vector():\n \"\"\"Test single value correctly projected.\"\"\"\n\n x = np.array([-0.5])\n\n expected = np.array([1.])\n\n x = simplex_project_vector(x)\n\n assert np.all(x == expected)\n\n\ndef test_1D_vector_in_simplex_invariant():\n \"\"\"Test does not change vector already in simplex.\"\"\"\n\n x = np.array([1.0])\n\n projection = simplex_project_vector(x)\n\n assert np.all(x == projection)\n\n\ndef test_returns_correct_projection_for_2D_vector():\n \"\"\"Test 2D vector is correctly projected.\"\"\"\n\n x = np.array([0.8, 0.8])\n expected = np.array([0.5, 0.5])\n\n x = simplex_project_vector(x)\n\n assert np.all(x == expected)\n\n x = np.array([0.0, 2.0])\n expected = np.array([0.0, 1.0])\n\n x = simplex_project_vector(x)\n\n assert np.all(x == expected)\n\n x = np.array([0.5, -0.5])\n expected = np.array([1.0, 0.0])\n\n x = simplex_project_vector(x)\n\n assert np.all(x == expected)\n\n\ndef test_2D_vector_in_simplex_invariant():\n \"\"\"Test does not change 2D vector already in simplex.\"\"\"\n\n x = np.array([0.4, 0.6])\n\n projection = simplex_project_vector(x)\n\n assert np.all(x == projection)\n\n\ndef test_5D_vector_in_simplex():\n \"\"\"Test 5D vector projected into simplex.\"\"\"\n\n n_features = 5\n tolerance = 1e-14\n\n x = np.random.uniform(size=(n_features,))\n x = simplex_project_vector(x)\n\n assert np.all(x >= 0)\n\n s = x.sum()\n\n assert np.abs(s - 1) < tolerance\n\n\ndef test_10D_vector_in_simplex():\n \"\"\"Test 10D vector projected into simplex.\"\"\"\n\n n_features = 10\n tolerance = 1e-14\n\n x = np.random.uniform(size=(n_features,))\n x = simplex_project_vector(x)\n\n assert np.all(x >= 0)\n\n s = x.sum()\n\n assert np.abs(s - 1) < tolerance\n\n\ndef test_100D_vector_in_simplex():\n \"\"\"Test 100D vector projected into simplex.\"\"\"\n\n n_features = 100\n tolerance = 1e-14\n\n x = np.random.uniform(size=(n_features,))\n x = simplex_project_vector(x)\n\n assert np.all(x >= 0)\n\n s = x.sum()\n\n assert np.abs(s - 1) < tolerance\n\n\ndef test_1D_rows_in_simplex_invariant():\n \"\"\"Test 1D rows in simplex unchanged.\"\"\"\n\n n_features = 1\n n_samples = 15\n\n X = np.ones((n_samples, n_features))\n\n projection = simplex_project_rows(X) # pylint: disable=no-value-for-parameter\n\n assert np.all(projection == X)\n\n\ndef test_correctly_projects_1D_rows():\n \"\"\"Test 1D rows correctly projected.\"\"\"\n\n n_features = 1\n n_samples = 50\n tolerance = 1e-15\n\n X = np.random.uniform(size=(n_samples, n_features))\n X = simplex_project_rows(X) # pylint: disable=no-value-for-parameter\n\n expected = np.ones((n_samples, n_features))\n\n assert np.allclose(X, expected, tolerance)\n\n\ndef test_2D_rows_in_simplex_invariant():\n \"\"\"Test 2D rows in simplex unchanged.\"\"\"\n\n n_features = 2\n n_samples = 10\n tolerance = 1e-15\n\n X = np.random.uniform(size=(n_samples, n_features))\n\n row_sums = X.sum(axis=1)\n X = X / row_sums[:, np.newaxis]\n\n row_sums = X.sum(axis=1)\n assert np.allclose(row_sums, 1)\n\n projection = simplex_project_rows(X) # pylint: disable=no-value-for-parameter\n\n assert np.allclose(X, projection, tolerance)\n\n\ndef test_correctly_projects_2D_rows():\n \"\"\"Test 2D rows correctly projected.\"\"\"\n\n tolerance = 1e-15\n\n X = np.array([[0.5, 0.5], [0.5, 1.0], [0.0, -0.5]])\n expected = np.array([[0.5, 
0.5], [0.25, 0.75], [0.75, 0.25]])\n\n X = simplex_project_rows(X) # pylint: disable=no-value-for-parameter\n\n assert np.allclose(X, expected, tolerance)\n\n\ndef test_5D_projection_in_simplex():\n \"\"\"Test 5D rows are projected into simplex.\"\"\"\n\n n_features = 5\n n_samples = 57\n tolerance = 1e-15\n\n X = np.random.uniform(size=(n_samples, n_features))\n X = simplex_project_rows(X) # pylint: disable=no-value-for-parameter\n\n assert np.all(X >= 0)\n\n row_sums = X.sum(axis=1)\n assert np.allclose(row_sums, 1, tolerance)\n\n\ndef test_317D_projection_in_simplex():\n \"\"\"Test 317D rows are projected into simplex.\"\"\"\n\n n_features = 317\n n_samples = 341\n tolerance = 1e-14\n\n X = np.random.uniform(size=(n_samples, n_features))\n X = simplex_project_rows(X) # pylint: disable=no-value-for-parameter\n\n assert np.all(X >= 0)\n\n row_sums = X.sum(axis=1)\n assert np.allclose(row_sums, 1, tolerance)\n"
] | [
[
"numpy.allclose",
"numpy.abs",
"numpy.ones",
"numpy.all",
"numpy.random.uniform",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
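The tests in the record above pin down the behaviour of simplex_project_vector through concrete expected values. One standard algorithm that reproduces all of them is the sort-based Euclidean projection onto the probability simplex; the sketch below is that textbook algorithm, not necessarily the implementation inside convex_dim_red.simplex_projection.

import numpy as np

def project_to_simplex(x):
    # argmin_w ||w - x||_2 subject to w >= 0 and sum(w) == 1
    u = np.sort(x)[::-1]                               # sort descending
    css = np.cumsum(u)
    ks = np.arange(1, len(x) + 1)
    rho = np.nonzero(u + (1.0 - css) / ks > 0)[0][-1]  # last index of the active support
    theta = (1.0 - css[rho]) / (rho + 1.0)
    return np.maximum(x + theta, 0.0)

print(project_to_simplex(np.array([0.5, -0.5])))   # [1. 0.], as in the 2D test above
print(project_to_simplex(np.array([0.8, 0.8])))    # [0.5 0.5]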
ksuderman/GalaxyMods | [
"c450a6e67d6ffed6bc0d3950d6c7fc20171bb1d4"
] | [
"tools/learning/ml_performance.py"
] | [
"import argparse\nimport pandas as pd\nimport plotly\nimport pickle\nimport plotly.graph_objs as go\nfrom sklearn.metrics import confusion_matrix, precision_recall_fscore_support,classification_report\nfrom sklearn.preprocessing import label_binarize\n\n\ndef main(infile_input, infile_output, outfile):\n \"\"\"\n Produce an interactive confusion matrix (heatmap), precision, recall, fscore\n Args:\n infile_input: str, input tabular file with true labels\n infile_output: str, input tabular file with predicted labels\n \"\"\"\n \n df_input = pd.read_csv(infile_input, sep='\\t', parse_dates=True)\n df_output = pd.read_csv(infile_output, sep='\\t', parse_dates=True)\n true_labels = df_input.iloc[:, -1].copy()\n predicted_labels = df_output.iloc[:, -1].copy()\n axis_labels = list(set(true_labels))\n c_matrix = confusion_matrix(true_labels, predicted_labels)\n\n #print(classification_report(true_labels, predicted_labels))\n #print(\"Confusion matrix:\\n\\n\",c_matrix)\n\n f = open(outfile, \"w\")\n f.write(classification_report(true_labels, predicted_labels))\n f.write(\"\\n\")\n f.write(\"Confusion Matrix:\\n\\n\")\n f.write(str(c_matrix))\n f.write(\"\\n\")\n f.close()\n\n '''\n data = [\n go.Heatmap(\n z=c_matrix,\n x=axis_labels,\n y=axis_labels,\n colorscale='Portland',\n )\n ]\n\n layout = go.Layout(\n title='Confusion Matrix between true and predicted class labels',\n xaxis=dict(title='Predicted class labels'),\n yaxis=dict(title='True class labels')\n )\n\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.plot(fig, filename=\"output_confusion.html\", auto_open=False)\n\n # plot precision, recall and f_score for each class label\n precision, recall, f_score, _ = precision_recall_fscore_support(true_labels, predicted_labels)\n\n trace_precision = go.Scatter(\n x=axis_labels,\n y=precision,\n mode='lines+markers',\n name='Precision'\n )\n\n trace_recall = go.Scatter(\n x=axis_labels,\n y=recall,\n mode='lines+markers',\n name='Recall'\n )\n\n trace_fscore = go.Scatter(\n x=axis_labels,\n y=f_score,\n mode='lines+markers',\n name='F-score'\n )\n\n layout_prf = go.Layout(\n title='Precision, recall and f-score of true and predicted class labels',\n xaxis=dict(title='Class labels'),\n yaxis=dict(title='Precision, recall and f-score')\n )\n\n data_prf = [trace_precision, trace_recall, trace_fscore]\n fig_prf = go.Figure(data=data_prf, layout=layout_prf)\n plotly.offline.plot(fig_prf, filename=\"output_prf.html\", auto_open=False)\n '''\n\nif __name__ == \"__main__\":\n aparser = argparse.ArgumentParser()\n aparser.add_argument(\"-t\", \"--true\", dest=\"infile_true\", required=True)\n aparser.add_argument(\"-p\", \"--predicted\", dest=\"infile_predicted\", required=True)\n aparser.add_argument(\"-o\", \"--output\", dest=\"outfile\", required=True)\n args = aparser.parse_args()\n main(args.infile_true, args.infile_predicted,args.outfile)\n\n\n"
] | [
[
"pandas.read_csv",
"sklearn.metrics.classification_report",
"sklearn.metrics.confusion_matrix"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
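ml_performance.py in the record above reduces to classification_report plus confusion_matrix applied to the last column of each tabular input. A minimal sketch of those two calls on toy labels (the labels here are placeholders standing in for the last columns of the input files):

from sklearn.metrics import classification_report, confusion_matrix

y_true = ["cat", "dog", "dog", "cat", "dog"]   # stands in for df_input.iloc[:, -1]
y_pred = ["cat", "dog", "cat", "cat", "dog"]   # stands in for df_output.iloc[:, -1]

print(classification_report(y_true, y_pred))
print(confusion_matrix(y_true, y_pred))

Per the argparse flags in the script, invocation would look like python ml_performance.py -t true_labels.tsv -p predicted_labels.tsv -o report.txt, where the file names are placeholders.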
snek5000/snek5000-cbox | [
"5e61897b762dde3858a08987bac26d6ace804556"
] | [
"doc/examples/run_side_simple.py"
] | [
"\"\"\"\nTo define environmental variable (in the terminal or in your .bashrc)\nuse something like: export FLUIDSIM_PATH=\"/fsnet/project/meige/2020/20CONVECTION/numerical/\"\n\n\"\"\"\n\nimport numpy as np\n\nfrom snek5000_cbox.solver import Simul\n\nparams = Simul.create_default_params()\n\naspect_ratio = params.oper.aspect_ratio = 1.0\nparams.prandtl = 0.71\n\n# The onset of oscillatory flow for aspect ration 1.0 is at Ra_c = 1.825e8\nparams.Ra_side = 1.86e8\n\nparams.output.sub_directory = \"examples_cbox/simple/SW\"\n\nparams.oper.dim = 2\n\nnb_elements = ny = 10\nparams.oper.ny = nb_elements\nnx = params.oper.nx = int(nb_elements / aspect_ratio)\nparams.oper.nz = int(nb_elements / aspect_ratio)\n\nLy = params.oper.Ly\nLx = params.oper.Lx = Ly / aspect_ratio\nLz = params.oper.Lz = Ly / aspect_ratio\n\n\norder = params.oper.elem.order = params.oper.elem.order_out = 10\n\nparams.oper.mesh_stretch_factor = 0.08 # zero means regular\n\nparams.short_name_type_run = f\"Ra{params.Ra_side:.3e}_{nx*order}x{ny*order}\"\n\n# creation of the coordinates of the points saved by history points\nn1d = 5\nsmall = Lx / 10\n\nxs = np.linspace(0, Lx, n1d)\nxs[0] = small\nxs[-1] = Lx - small\n\nys = np.linspace(0, Ly, n1d)\nys[0] = small\nys[-1] = Ly - small\n\ncoords = [(x, y) for x in xs for y in ys]\n\nif params.oper.dim == 3:\n\n zs = np.linspace(0, Lz, n1d)\n zs[0] = small\n zs[-1] = Lz - small\n\n coords = [(x, y, z) for x in xs for y in ys for z in zs]\n\n\nparams.output.history_points.coords = coords\nparams.oper.max.hist = len(coords) + 1\n\n# params.oper.enable_sfd = float(True)\n\nparams.nek.general.end_time = 800\nparams.nek.general.stop_at = \"endTime\"\nparams.nek.general.target_cfl = 2.0\nparams.nek.general.time_stepper = \"BDF3\"\nparams.nek.general.extrapolation = \"OIFS\"\n\nparams.nek.general.write_control = \"runTime\"\nparams.nek.general.write_interval = 10\n\nparams.output.history_points.write_interval = 10\n\nsim = Simul(params)\n\nsim.make.exec(\"run_fg\", resources={\"nproc\": 4})\n"
] | [
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
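The run script in the record above builds its history-points grid from np.linspace and then pulls the two boundary points inward by Lx / 10. A quick standalone check of that construction; the box lengths below are placeholders, since the real Lx and Ly come from params.oper.

import numpy as np

n1d, Lx, Ly = 5, 1.0, 1.0           # placeholder box lengths
small = Lx / 10

xs = np.linspace(0, Lx, n1d)
xs[0], xs[-1] = small, Lx - small   # keep probe points off the walls
ys = np.linspace(0, Ly, n1d)
ys[0], ys[-1] = small, Ly - small

coords = [(x, y) for x in xs for y in ys]
print(len(coords))                  # 25 probe points in the 2D case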
OSSome01/SLAM-Lecture | [
"c053eaa6fbe49b152af78dad5ff3f7626c9e88da"
] | [
"Unit_B/slam_04_c_estimate_transform_question.py"
] | [
"# For each cylinder in the scan, find its cartesian coordinates,\r\n# in the world coordinate system.\r\n# Find the closest pairs of cylinders from the scanner and cylinders\r\n# from the reference, and the optimal transformation which aligns them.\r\n# Then, output the scanned cylinders, using this transform.\r\n# 04_c_estimate_transform\r\n# Claus Brenner, 14 NOV 2012\r\nfrom lego_robot import *\r\nfrom slam_b_library import filter_step\r\nfrom slam_04_a_project_landmarks import\\\r\n compute_scanner_cylinders, write_cylinders\r\nfrom math import sqrt\r\nimport numpy as np\r\n\r\n# Given a list of \r\n# cylinders (points) and reference_cylinders:\r\n# For every cylinder, find the closest reference_cylinder and add\r\n# the index pair (i, j), where i is the index of the cylinder, and\r\n# j is the index of the reference_cylinder, to the result list.\r\n# This is the function developed in slam_04_b_find_cylinder_pairs.\r\ndef compute_dist(a, b):\r\n x = a[0] - b[0]\r\n y = a[1] - b[1]\r\n return np.sqrt(x*x + y*y)\r\n\r\ndef find_cylinder_pairs(cylinders, reference_cylinders, max_radius):\r\n cylinder_pairs = []\r\n\r\n # --->>> Enter your code here.\r\n # Make a loop over all cylinders and reference_cylinders.\r\n # In the loop, if cylinders[i] is closest to reference_cylinders[j],\r\n # and their distance is below max_radius, then add the\r\n # tuple (i,j) to cylinder_pairs, i.e., cylinder_pairs.append( (i,j) ).\r\n for i, c in enumerate(cylinders):\r\n for j, r in enumerate(reference_cylinders):\r\n if compute_dist(c, r) < max_radius:\r\n cylinder_pairs.append((i, j))\r\n\r\n return cylinder_pairs\r\n\r\n# Given a point list, return the center of mass.\r\ndef compute_center(point_list):\r\n # Safeguard against empty list.\r\n if not point_list:\r\n return (0.0, 0.0)\r\n # If not empty, sum up and divide.\r\n sx = sum([p[0] for p in point_list])\r\n sy = sum([p[1] for p in point_list])\r\n return (float(sx) / len(point_list), float(sy) / len(point_list))\r\n\r\n# Given a left_list of points and a right_list of points, compute\r\n# the parameters of a similarity transform: scale, rotation, translation.\r\n# If fix_scale is True, use the fixed scale of 1.0.\r\n# The returned value is a tuple of:\r\n# (scale, cos(angle), sin(angle), x_translation, y_translation)\r\n# i.e., the rotation angle is not given in radians, but rather in terms\r\n# of the cosine and sine.\r\ndef estimate_transform(left_list, right_list, fix_scale = False):\r\n \r\n # Compute left and right center.\r\n lc = compute_center(left_list)\r\n rc = compute_center(right_list)\r\n\r\n # --->>> Insert here your code to compute lambda, c, s and tx, ty.\r\n left_prime = []\r\n right_prime = []\r\n for l, r in zip(left_list, right_list):\r\n # l = (x, y) coordinate of i_th detected world cylinder\r\n # r = (x, y) coordinate of i_th reference cylinder\r\n l_prime = (l[0] - lc[0], l[1] - lc[1])\r\n r_prime = (r[0] - lc[0], r[1] - lc[1])\r\n left_prime.append(l_prime) # create list of tuples containing reduced coordinates of each cylinder in left_list\r\n right_prime.append(r_prime) # create list of tuples containing reduced coordinate of each cylinder in right_list\r\n\r\n n = len(left_list) # not right_list since not every detected cylinder finds a match\r\n cs, ss, rr, ll = 0, 0, 0, 0\r\n for i in range(n):\r\n cs += (right_prime[i][0] * left_prime[i][0] + right_prime[i][1] * left_prime[i][1])\r\n ss += (-right_prime[i][0] * left_prime[i][1] + right_prime[i][1] * left_prime[i][0])\r\n rr += (right_prime[i][0] * 
right_prime[i][0] + right_prime[i][1] * right_prime[i][1])\r\n ll += (left_prime[i][0] * left_prime[i][0] + left_prime[i][1] * left_prime[i][1])\r\n\r\n # safeguard against exceptionally high value of lambda\r\n if ((ll - 0.0) < 0.00001):\r\n return None\r\n la = np.sqrt(rr / ll)\r\n cs_sum = np.sqrt(cs * cs + ss * ss)\r\n c = cs / cs_sum\r\n s = ss / cs_sum\r\n tx = rc[0] - la * (c * lc[0] - s * lc[1])\r\n ty = rc[1] - la * (s * lc[0] + c * lc[1])\r\n return la, c, s, tx, ty # these values are returned as a tuple of 5 elements\r\n\r\n# Given a similarity transformation:\r\n# trafo = (scale, cos(angle), sin(angle), x_translation, y_translation)\r\n# and a point p = (x, y), return the transformed point.\r\ndef apply_transform(trafo, p):\r\n la, c, s, tx, ty = trafo\r\n lac = la * c\r\n las = la * s\r\n x = lac * p[0] - las * p[1] + tx\r\n y = las * p[0] + lac * p[1] + ty\r\n return (x, y)\r\n\r\n\r\nif __name__ == '__main__':\r\n # The constants we used for the filter_step.\r\n scanner_displacement = 30.0\r\n ticks_to_mm = 0.349\r\n robot_width = 150.0\r\n\r\n # The constants we used for the cylinder detection in our scan. \r\n minimum_valid_distance = 20.0\r\n depth_jump = 100.0\r\n cylinder_offset = 90.0\r\n\r\n # The maximum distance allowed for cylinder assignment.\r\n max_cylinder_distance = 300.0\r\n\r\n # The start pose we obtained miraculously.\r\n pose = (1850.0, 1897.0, 3.717551306747922)\r\n\r\n # Read the logfile which contains all scans.\r\n logfile = LegoLogfile()\r\n logfile.read(\"robot4_motors.txt\")\r\n logfile.read(\"robot4_scan.txt\")\r\n\r\n # Also read the reference cylinders (this is our map).\r\n logfile.read(\"robot_arena_landmarks.txt\")\r\n reference_cylinders = [l[1:3] for l in logfile.landmarks]\r\n\r\n out_file = file(\"estimate_transform.txt\", \"w\")\r\n for i in xrange(len(logfile.scan_data)):\r\n # Compute the new pose.\r\n pose = filter_step(pose, logfile.motor_ticks[i],\r\n ticks_to_mm, robot_width,\r\n scanner_displacement)\r\n\r\n # Extract cylinders, also convert them to world coordinates.\r\n cartesian_cylinders = compute_scanner_cylinders(\r\n logfile.scan_data[i],\r\n depth_jump, minimum_valid_distance, cylinder_offset)\r\n world_cylinders = [LegoLogfile.scanner_to_world(pose, c)\r\n for c in cartesian_cylinders]\r\n\r\n # For every cylinder, find the closest reference cylinder.\r\n cylinder_pairs = find_cylinder_pairs(\r\n world_cylinders, reference_cylinders, max_cylinder_distance)\r\n\r\n # Estimate a transformation using the cylinder pairs.\r\n trafo = estimate_transform(\r\n [world_cylinders[pair[0]] for pair in cylinder_pairs],\r\n [reference_cylinders[pair[1]] for pair in cylinder_pairs],\r\n fix_scale = True)\r\n # print \"trafo =\",trafo\r\n # Transform the cylinders using the estimated transform.\r\n transformed_world_cylinders = []\r\n if trafo:\r\n transformed_world_cylinders =\\\r\n [apply_transform(trafo, c) for c in\r\n [world_cylinders[pair[0]] for pair in cylinder_pairs]] \r\n # Write to file.\r\n # The pose.\r\n print >> out_file, \"F %f %f %f\" % pose\r\n # The detected cylinders in the scanner's coordinate system.\r\n write_cylinders(out_file, \"D C\", cartesian_cylinders)\r\n # The detected cylinders, transformed using the estimated trafo.\r\n write_cylinders(out_file, \"W C\", transformed_world_cylinders)\r\n out_file.close()\r\n"
] | [
[
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
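estimate_transform in the record above returns a (scale, cos, sin, tx, ty) tuple that apply_transform consumes as p' = lambda * R(angle) * p + t. A small standalone check of that convention, using a made-up 90-degree rotation with scale 2 and a shift of (10, 0):

import numpy as np

def apply_transform(trafo, p):
    # Same convention as the file above: p' = lambda * R(angle) * p + (tx, ty)
    la, c, s, tx, ty = trafo
    x = la * (c * p[0] - s * p[1]) + tx
    y = la * (s * p[0] + c * p[1]) + ty
    return (x, y)

trafo = (2.0, np.cos(np.pi / 2), np.sin(np.pi / 2), 10.0, 0.0)
print(apply_transform(trafo, (1.0, 0.0)))   # approximately (10.0, 2.0)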
NREL/wp3-precon | [
"c1c163007d16986d04bc34deefbf1c1e1c754aa8"
] | [
"test/unit/test_power_curve_toolkit.py"
] | [
"import unittest\n\nimport numpy as np\nimport pandas as pd\nfrom numpy import testing as nptest\n\nfrom operational_analysis.toolkits import power_curve\nfrom operational_analysis.toolkits.power_curve.parametric_forms import (\n logistic5param,\n logistic5param_capped,\n)\n\n\nnoise = 0.1\n\n\nclass TestPowerCurveFunctions(unittest.TestCase):\n def setUp(self):\n np.random.seed(42)\n params = [1300, -7, 11, 2, 0.5]\n self.x = pd.Series(np.random.random(100) * 30)\n self.y = pd.Series(logistic5param(self.x, *params) + np.random.random(100) * noise)\n\n # power curve source: https://github.com/NREL/turbine-models/blob/master/Offshore/2020ATB_NREL_Reference_15MW_240.csv\n self.nrel_15mw_wind = pd.Series(np.arange(4, 26))\n self.nrel_15mw_power = pd.Series(\n np.array(\n [\n 720,\n 1239,\n 2271,\n 3817,\n 5876,\n 8450,\n 11536,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 15000,\n 1500,\n ]\n )\n )\n\n def test_IEC(self):\n # Create test data using logistic5param form\n curve = power_curve.IEC(self.x, self.y)\n y_pred = curve(self.x)\n # Does the IEC power curve match the test data?\n nptest.assert_allclose(\n self.y, y_pred, rtol=1, atol=noise * 2, err_msg=\"Power curve did not properly fit.\"\n )\n\n def test_IEC_with_bounds(self):\n # Create the power curve with bounds at 4m/s adn 25m/s and bin width from power curve of 1m/s\n cut_in = 4\n cut_out = 25\n curve = power_curve.IEC(\n self.nrel_15mw_wind,\n self.nrel_15mw_power,\n windspeed_start=cut_in,\n windspeed_end=cut_out,\n bin_width=1,\n )\n\n # Create the test data\n test_windspeeds = np.arange(0, 31)\n test_power = curve(test_windspeeds)\n\n # Test all windspeeds outside of cut-in and cut-out windspeeds produce no power\n should_be_zeros = test_power[(test_windspeeds < cut_in) | (test_windspeeds > cut_out)]\n nptest.assert_array_equal(should_be_zeros, np.zeros(should_be_zeros.shape))\n\n # Test all the valid windspeeds are equal\n valid_power = test_power[(test_windspeeds >= cut_in) & (test_windspeeds <= cut_out)]\n nptest.assert_array_equal(self.nrel_15mw_power, valid_power)\n\n def test_logistic_5_param(self):\n # Create test data using logistic5param form\n curve = power_curve.logistic_5_parametric(self.x, self.y)\n y_pred = curve(self.x)\n # Does the logistic-5 power curve match the test data?\n nptest.assert_allclose(\n self.y, y_pred, rtol=1, atol=noise * 2, err_msg=\"Power curve did not properly fit.\"\n )\n\n def test_gam(self):\n # Create test data using logistic5param form\n curve = power_curve.gam(windspeed_column=self.x, power_column=self.y, n_splines=20)\n y_pred = curve(self.x)\n # Does the spline-fit power curve match the test data?\n nptest.assert_allclose(\n self.y, y_pred, rtol=0.05, atol=20, err_msg=\"Power curve did not properly fit.\"\n )\n\n def test_3paramgam(self):\n # Create test data using logistic5param form\n winddir = np.random.random(100)\n airdens = np.random.random(100)\n curve = power_curve.gam_3param(\n windspeed_column=self.x,\n winddir_column=winddir,\n airdens_column=airdens,\n power_column=self.y,\n n_splines=20,\n )\n y_pred = curve(self.x, winddir, airdens)\n # Does the spline-fit power curve match the test data?\n nptest.assert_allclose(\n self.y, y_pred, rtol=0.05, atol=20, err_msg=\"Power curve did not properly fit.\"\n )\n\n def tearDown(self):\n pass\n\n\nclass TestParametricForms(unittest.TestCase):\n def setUp(self):\n pass\n\n def test_logistic5parameter(self):\n y_pred = logistic5param(np.array([1.0, 2.0, 3.0]), 
*[1300.0, -7.0, 11.0, 2.0, 0.5])\n y = np.array([2.29403585, 5.32662505, 15.74992462])\n nptest.assert_allclose(y, y_pred, err_msg=\"Power curve did not properly fit.\")\n\n y_pred = logistic5param(np.array([1, 2, 3]), *[1300.0, -7.0, 11.0, 2.0, 0.5])\n y = np.array([2.29403585, 5.32662505, 15.74992462])\n nptest.assert_allclose(\n y, y_pred, err_msg=\"Power curve did not handle integer inputs properly.\"\n )\n\n y_pred = logistic5param(np.array([0.01, 0.0]), 1300, 7, 11, 2, 0.5)\n y = np.array([1300.0, 1300.0])\n nptest.assert_allclose(y, y_pred, err_msg=\"Power curve did not handle zero properly (b>0).\")\n\n y_pred = logistic5param(np.array([0.01, 0.0]), 1300, -7, 11, 2, 0.5)\n y = np.array([2.0, 2.0])\n nptest.assert_allclose(y, y_pred, err_msg=\"Power curve did not handle zero properly (b<0).\")\n\n def test_logistic5parameter_capped(self):\n # Numpy array + Lower Bound\n y_pred = logistic5param_capped(\n np.array([1.0, 2.0, 3.0]), *[1300.0, -7.0, 11.0, 2.0, 0.5], lower=5.0, upper=20.0\n )\n y = np.array([5.0, 5.32662505, 15.74992462])\n nptest.assert_allclose(y, y_pred, err_msg=\"Power curve did not properly fit.\")\n\n # Numpy array + Upper and Lower Bound\n y_pred = logistic5param_capped(\n np.array([1.0, 2.0, 3.0]), *[1300.0, -7.0, 11.0, 2.0, 0.5], lower=5.0, upper=10.0\n )\n y = np.array([5.0, 5.32662505, 10.0])\n nptest.assert_allclose(y, y_pred, err_msg=\"Power curve did not properly fit.\")\n\n # Pandas Series + Upper and Lower Bound\n y_pred = logistic5param_capped(\n pd.Series([1.0, 2.0, 3.0]), *[1300.0, -7.0, 11.0, 2.0, 0.5], lower=5.0, upper=20.0\n )\n y = pd.Series([5.0, 5.32662505, 15.74992462])\n nptest.assert_allclose(y, y_pred, err_msg=\"Power curve did not properly fit.\")\n\n # Pandas Series + Upper and Lower Bound\n y_pred = logistic5param_capped(\n pd.Series([1.0, 2.0, 3.0]), *[1300.0, -7.0, 11.0, 2.0, 0.5], lower=5.0, upper=10.0\n )\n y = pd.Series([5.0, 5.32662505, 10.0])\n nptest.assert_allclose(y, y_pred, err_msg=\"Power curve did not properly fit.\")\n\n def tearDown(self):\n pass\n"
] | [
[
"numpy.random.random",
"pandas.Series",
"numpy.random.seed",
"numpy.arange",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
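The expected values in TestParametricForms above are consistent with the usual 5-parameter logistic f(x) = d + (a - d) / (1 + (x / c)**b)**g. The sketch below reproduces the asserted numbers for params [1300, -7, 11, 2, 0.5]; the real operational_analysis parametric_forms module may add capping and extra edge-case handling (e.g. the capped variant and the x = 0 cases exercised in the tests).

import numpy as np

def logistic5param(x, a, b, c, d, g):
    # f(x) = d + (a - d) / (1 + (x / c)**b)**g
    x = np.asarray(x, dtype=float)
    return d + (a - d) / (1.0 + (x / c) ** b) ** g

print(logistic5param(np.array([1.0, 2.0, 3.0]), 1300.0, -7.0, 11.0, 2.0, 0.5))
# ~ [ 2.294  5.327 15.750], matching the expected array in test_logistic5parameter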
Chessmag/pymatgen | [
"61a4bb7a1792e1ea2379abd45b3c40efb816fd64"
] | [
"pymatgen/core/structure.py"
] | [
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nThis module provides classes used to define a non-periodic molecule and a\nperiodic structure.\n\"\"\"\n\nimport math\nimport os\nimport json\nimport collections\nimport itertools\nfrom abc import ABCMeta, abstractmethod\nimport random\nimport warnings\nfrom fnmatch import fnmatch\nimport re\nimport functools\nfrom typing import Dict, List, Tuple, Optional, Union, Iterator, Set, Sequence, Iterable\nimport numpy as np\n\nfrom tabulate import tabulate\n\nfrom monty.dev import deprecated\nfrom monty.io import zopen\nfrom monty.json import MSONable\n\nfrom pymatgen.core.operations import SymmOp\nfrom pymatgen.core.lattice import Lattice, get_points_in_spheres\nfrom pymatgen.core.periodic_table import Element, Species, get_el_sp, DummySpecies\nfrom pymatgen.core.sites import Site, PeriodicSite\nfrom pymatgen.core.bonds import CovalentBond, get_bond_length\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.util.coord import get_angle, all_distances, \\\n lattice_points_in_supercell\nfrom pymatgen.core.units import Mass, Length\n\n\nclass Neighbor(Site):\n \"\"\"\n Simple Site subclass to contain a neighboring atom that skips all the\n unnecessary checks for speed. Can be\n used as a fixed-length tuple of size 3 to retain backwards compatibility\n with past use cases.\n\n (site, nn_distance, index).\n\n In future, usage should be to call attributes, e.g., Neighbor.index,\n Neighbor.distance, etc.\n \"\"\"\n\n def __init__(self,\n species: Composition,\n coords: np.ndarray,\n properties: dict = None,\n nn_distance: float = 0.0,\n index: int = 0):\n \"\"\"\n :param species: Same as Site\n :param coords: Same as Site, but must be fractional.\n :param properties: Same as Site\n :param nn_distance: Distance to some other Site.\n :param index: Index within structure.\n \"\"\"\n self.coords = coords\n self._species = species\n self.properties = properties or {}\n self.nn_distance = nn_distance\n self.index = index\n\n def __len__(self):\n \"\"\"\n Make neighbor Tuple-like to retain backwards compatibility.\n \"\"\"\n return 3\n\n def __getitem__(self, i: int): # type: ignore\n \"\"\"\n Make neighbor Tuple-like to retain backwards compatibility.\n\n :param i:\n :return:\n \"\"\"\n return (self, self.nn_distance, self.index)[i]\n\n\nclass PeriodicNeighbor(PeriodicSite):\n \"\"\"\n Simple PeriodicSite subclass to contain a neighboring atom that skips all\n the unnecessary checks for speed. 
Can be used as a fixed-length tuple of\n size 4 to retain backwards compatibility with past use cases.\n\n (site, distance, index, image).\n\n In future, usage should be to call attributes, e.g., PeriodicNeighbor.index,\n PeriodicNeighbor.distance, etc.\n \"\"\"\n\n def __init__(self,\n species: Composition,\n coords: np.ndarray,\n lattice: Lattice,\n properties: dict = None,\n nn_distance: float = 0.0,\n index: int = 0,\n image: tuple = (0, 0, 0)):\n \"\"\"\n :param species: Same as PeriodicSite\n :param coords: Same as PeriodicSite, but must be fractional.\n :param lattice: Same as PeriodicSite\n :param properties: Same as PeriodicSite\n :param nn_distance: Distance to some other Site.\n :param index: Index within structure.\n :param image: PeriodicImage\n \"\"\"\n self._lattice = lattice\n self._frac_coords = coords\n self._species = species\n self.properties = properties or {}\n self.nn_distance = nn_distance\n self.index = index\n self.image = image\n\n @property # type: ignore\n def coords(self):\n \"\"\"\n :return: Cartesian coords.\n \"\"\"\n return self._lattice.get_cartesian_coords(self._frac_coords)\n\n def __len__(self):\n \"\"\"\n Make neighbor Tuple-like to retain backwards compatibility.\n \"\"\"\n return 4\n\n def __getitem__(self, i: int): # type: ignore\n \"\"\"\n Make neighbor Tuple-like to retain backwards compatibility.\n\n :param i:\n :return:\n \"\"\"\n return (self, self.nn_distance, self.index, self.image)[i]\n\n\nclass SiteCollection(collections.abc.Sequence, metaclass=ABCMeta):\n \"\"\"\n Basic SiteCollection. Essentially a sequence of Sites or PeriodicSites.\n This serves as a base class for Molecule (a collection of Site, i.e., no\n periodicity) and Structure (a collection of PeriodicSites, i.e.,\n periodicity). Not meant to be instantiated directly.\n \"\"\"\n\n # Tolerance in Angstrom for determining if sites are too close.\n DISTANCE_TOLERANCE = 0.5\n\n @property\n @abstractmethod\n def sites(self) -> Tuple[Union[Site, PeriodicSite]]:\n \"\"\"\n Returns a tuple of sites.\n \"\"\"\n\n @abstractmethod\n def get_distance(self, i: int, j: int) -> float:\n \"\"\"\n Returns distance between sites at index i and j.\n\n Args:\n i: Index of first site\n j: Index of second site\n\n Returns:\n Distance between sites at index i and index j.\n \"\"\"\n\n @property\n def distance_matrix(self) -> np.ndarray:\n \"\"\"\n Returns the distance matrix between all sites in the structure. 
For\n periodic structures, this is overwritten to return the nearest image\n distance.\n \"\"\"\n return all_distances(self.cart_coords, self.cart_coords)\n\n @property\n def species(self) -> List[Composition]:\n \"\"\"\n Only works for ordered structures.\n Disordered structures will raise an AttributeError.\n\n Returns:\n ([Species]) List of species at each site of the structure.\n \"\"\"\n return [site.specie for site in self]\n\n @property\n def species_and_occu(self) -> List[Composition]:\n \"\"\"\n List of species and occupancies at each site of the structure.\n \"\"\"\n return [site.species for site in self]\n\n @property\n def ntypesp(self) -> int:\n \"\"\"Number of types of atoms.\"\"\"\n return len(self.types_of_species)\n\n @property\n def types_of_species(self) -> Tuple[Union[Element, Species, DummySpecies]]:\n \"\"\"\n List of types of specie.\n \"\"\"\n # Cannot use set since we want a deterministic algorithm.\n types = [] # type: List[Union[Element, Species, DummySpecies]]\n for site in self:\n for sp, v in site.species.items():\n if v != 0:\n types.append(sp)\n return tuple(set(types)) # type: ignore\n\n @property\n def types_of_specie(self) -> Tuple[Union[Element, Species, DummySpecies]]:\n \"\"\"\n Specie->Species rename. Maintained for backwards compatibility.\n \"\"\"\n return self.types_of_species\n\n def group_by_types(self) -> Iterator[Union[Site, PeriodicSite]]:\n \"\"\"Iterate over species grouped by type\"\"\"\n for t in self.types_of_species:\n for site in self:\n if site.specie == t:\n yield site\n\n def indices_from_symbol(self, symbol: str) -> Tuple[int, ...]:\n \"\"\"\n Returns a tuple with the sequential indices of the sites\n that contain an element with the given chemical symbol.\n \"\"\"\n return tuple((i for i, specie in enumerate(self.species)\n if specie.symbol == symbol))\n\n @property\n def symbol_set(self) -> Tuple[str]:\n \"\"\"\n Tuple with the set of chemical symbols.\n Note that len(symbol_set) == len(types_of_specie)\n \"\"\"\n return tuple(sorted(specie.symbol for specie in self.types_of_species)) # type: ignore\n\n @property # type: ignore\n def atomic_numbers(self) -> Tuple[int]:\n \"\"\"List of atomic numbers.\"\"\"\n return tuple(site.specie.Z for site in self) # type: ignore\n\n @property\n def site_properties(self) -> Dict[str, List]:\n \"\"\"\n Returns the site properties as a dict of sequences. 
E.g.,\n {\"magmom\": (5,-5), \"charge\": (-4,4)}.\n \"\"\"\n props = {} # type: Dict[str, List]\n prop_keys = set() # type: Set[str]\n for site in self:\n prop_keys.update(site.properties.keys())\n\n for k in prop_keys:\n props[k] = [site.properties.get(k, None) for site in self]\n return props\n\n def __contains__(self, site):\n return site in self.sites\n\n def __iter__(self):\n return self.sites.__iter__()\n\n def __getitem__(self, ind):\n return self.sites[ind]\n\n def __len__(self):\n return len(self.sites)\n\n def __hash__(self):\n # for now, just use the composition hash code.\n return self.composition.__hash__()\n\n @property\n def num_sites(self) -> int:\n \"\"\"\n Number of sites.\n \"\"\"\n return len(self)\n\n @property\n def cart_coords(self):\n \"\"\"\n Returns a np.array of the cartesian coordinates of sites in the\n structure.\n \"\"\"\n return np.array([site.coords for site in self])\n\n @property\n def formula(self) -> str:\n \"\"\"\n (str) Returns the formula.\n \"\"\"\n return self.composition.formula\n\n @property\n def composition(self) -> Composition:\n \"\"\"\n (Composition) Returns the composition\n \"\"\"\n elmap = collections.defaultdict(float) # type: Dict[Species, float]\n for site in self:\n for species, occu in site.species.items():\n elmap[species] += occu\n return Composition(elmap)\n\n @property\n def charge(self) -> float:\n \"\"\"\n Returns the net charge of the structure based on oxidation states. If\n Elements are found, a charge of 0 is assumed.\n \"\"\"\n charge = 0\n for site in self:\n for specie, amt in site.species.items():\n charge += getattr(specie, \"oxi_state\", 0) * amt\n return charge\n\n @property\n def is_ordered(self) -> bool:\n \"\"\"\n Checks if structure is ordered, meaning no partial occupancies in any\n of the sites.\n \"\"\"\n return all((site.is_ordered for site in self))\n\n def get_angle(self, i: int, j: int, k: int) -> float:\n \"\"\"\n Returns angle specified by three sites.\n\n Args:\n i: Index of first site.\n j: Index of second site.\n k: Index of third site.\n\n Returns:\n Angle in degrees.\n \"\"\"\n v1 = self[i].coords - self[j].coords\n v2 = self[k].coords - self[j].coords\n return get_angle(v1, v2, units=\"degrees\")\n\n def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:\n \"\"\"\n Returns dihedral angle specified by four sites.\n\n Args:\n i: Index of first site\n j: Index of second site\n k: Index of third site\n l: Index of fourth site\n\n Returns:\n Dihedral angle in degrees.\n \"\"\"\n v1 = self[k].coords - self[l].coords\n v2 = self[j].coords - self[k].coords\n v3 = self[i].coords - self[j].coords\n v23 = np.cross(v2, v3)\n v12 = np.cross(v1, v2)\n return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23),\n np.dot(v12, v23)))\n\n def is_valid(self, tol: float = DISTANCE_TOLERANCE) -> bool:\n \"\"\"\n True if SiteCollection does not contain atoms that are too close\n together. Note that the distance definition is based on type of\n SiteCollection. Cartesian distances are used for non-periodic\n Molecules, while PBC is taken into account for periodic structures.\n\n Args:\n tol (float): Distance tolerance. 
Default is 0.5A.\n\n Returns:\n (bool) True if SiteCollection does not contain atoms that are too\n close together.\n \"\"\"\n if len(self.sites) == 1:\n return True\n all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]\n return bool(np.min(all_dists) > tol)\n\n @abstractmethod\n def to(self, fmt: str = None, filename: str = None):\n \"\"\"\n Generates well-known string representations of SiteCollections (e.g.,\n molecules / structures). Should return a string type or write to a file.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def from_str(cls, input_string: str, fmt: str):\n \"\"\"\n Reads in SiteCollection from a string.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def from_file(cls, filename: str):\n \"\"\"\n Reads in SiteCollection from a filename.\n \"\"\"\n\n def add_site_property(self, property_name: str, values: List):\n \"\"\"\n Adds a property to a site.\n\n Args:\n property_name (str): The name of the property to add.\n values (list): A sequence of values. Must be same length as\n number of sites.\n \"\"\"\n if len(values) != len(self.sites):\n raise ValueError(\"Values must be same length as sites.\")\n for site, val in zip(self.sites, values):\n site.properties[property_name] = val\n\n def remove_site_property(self, property_name):\n \"\"\"\n Adds a property to a site.\n\n Args:\n property_name (str): The name of the property to add.\n \"\"\"\n for site in self.sites:\n del site.properties[property_name]\n\n def replace_species(self, species_mapping: Dict[str, str]):\n \"\"\"\n Swap species.\n\n Args:\n species_mapping (dict): dict of species to swap. Species can be\n elements too. E.g., {Element(\"Li\"): Element(\"Na\")} performs\n a Li for Na substitution. The second species can be a\n sp_and_occu dict. For example, a site with 0.5 Si that is\n passed the mapping {Element('Si): {Element('Ge'):0.75,\n Element('C'):0.25} } will have .375 Ge and .125 C.\n \"\"\"\n\n species_mapping = {get_el_sp(k): v\n for k, v in species_mapping.items()}\n sp_to_replace = set(species_mapping.keys())\n sp_in_structure = set(self.composition.keys())\n if not sp_in_structure.issuperset(sp_to_replace):\n warnings.warn(\n \"Some species to be substituted are not present in \"\n \"structure. Pls check your input. 
Species to be \"\n \"substituted = %s; Species in structure = %s\"\n % (sp_to_replace, sp_in_structure))\n\n for site in self.sites:\n if sp_to_replace.intersection(site.species):\n c = Composition()\n for sp, amt in site.species.items():\n new_sp = species_mapping.get(sp, sp)\n try:\n c += Composition(new_sp) * amt\n except Exception:\n c += {new_sp: amt}\n site.species = c\n\n def add_oxidation_state_by_element(self, oxidation_states: Dict[str, float]):\n \"\"\"\n Add oxidation states.\n\n Args:\n oxidation_states (dict): Dict of oxidation states.\n E.g., {\"Li\":1, \"Fe\":2, \"P\":5, \"O\":-2}\n \"\"\"\n try:\n for site in self.sites:\n new_sp = {}\n for el, occu in site.species.items():\n sym = el.symbol\n new_sp[Species(sym, oxidation_states[sym])] = occu\n site.species = Composition(new_sp)\n except KeyError:\n raise ValueError(\"Oxidation state of all elements must be \"\n \"specified in the dictionary.\")\n\n def add_oxidation_state_by_site(self, oxidation_states: List[float]):\n \"\"\"\n Add oxidation states to a structure by site.\n\n Args:\n oxidation_states (list): List of oxidation states.\n E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]\n \"\"\"\n if len(oxidation_states) != len(self.sites):\n raise ValueError(\"Oxidation states of all sites must be \"\n \"specified.\")\n for site, ox in zip(self.sites, oxidation_states):\n new_sp = {}\n for el, occu in site.species.items():\n sym = el.symbol\n new_sp[Species(sym, ox)] = occu\n site.species = Composition(new_sp)\n\n def remove_oxidation_states(self):\n \"\"\"\n Removes oxidation states from a structure.\n \"\"\"\n for site in self.sites:\n new_sp = collections.defaultdict(float)\n for el, occu in site.species.items():\n sym = el.symbol\n new_sp[Element(sym)] += occu\n site.species = Composition(new_sp)\n\n def add_oxidation_state_by_guess(self, **kwargs):\n \"\"\"\n Decorates the structure with oxidation state, guessing\n using Composition.oxi_state_guesses()\n\n Args:\n **kwargs: parameters to pass into oxi_state_guesses()\n \"\"\"\n oxid_guess = self.composition.oxi_state_guesses(**kwargs)\n oxid_guess = oxid_guess or [{e.symbol: 0 for e in self.composition}]\n self.add_oxidation_state_by_element(oxid_guess[0])\n\n def add_spin_by_element(self, spins: Dict[str, float]):\n \"\"\"\n Add spin states to a structure.\n\n Args:\n spins (dict): Dict of spins associated with elements or species,\n e.g. 
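    # --- Editor's usage sketch (hedged addition; not part of the original module) ---
    # Decorating and stripping oxidation states per the methods above; `struct` is
    # the same hypothetical NaCl structure used in earlier sketches.
    #
    #     struct.add_oxidation_state_by_element({"Na": 1, "Cl": -1})
    #     struct.charge                          # 0.0 - the cell is charge neutral
    #     struct.remove_oxidation_states()       # back to plain Elements
    #     struct.add_oxidation_state_by_guess()  # let Composition guess the states
    # -------------------------------------------------------------------------------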
{\"Ni\":+5} or {\"Ni2+\":5}\n \"\"\"\n for site in self.sites:\n new_sp = {}\n for sp, occu in site.species.items():\n sym = sp.symbol\n oxi_state = getattr(sp, \"oxi_state\", None)\n new_sp[Species(sym, oxidation_state=oxi_state,\n properties={'spin': spins.get(str(sp), spins.get(sym, None))})] = occu\n site.species = Composition(new_sp)\n\n def add_spin_by_site(self, spins: List[float]):\n \"\"\"\n Add spin states to a structure by site.\n\n Args:\n spins (list): List of spins\n E.g., [+5, -5, 0, 0]\n \"\"\"\n if len(spins) != len(self.sites):\n raise ValueError(\"Spin of all sites must be \"\n \"specified in the dictionary.\")\n\n for site, spin in zip(self.sites, spins):\n new_sp = {}\n for sp, occu in site.species.items():\n sym = sp.symbol\n oxi_state = getattr(sp, \"oxi_state\", None)\n new_sp[Species(sym, oxidation_state=oxi_state,\n properties={'spin': spin})] = occu\n site.species = Composition(new_sp)\n\n def remove_spin(self):\n \"\"\"\n Removes spin states from a structure.\n \"\"\"\n for site in self.sites:\n new_sp = collections.defaultdict(float)\n for sp, occu in site.species.items():\n oxi_state = getattr(sp, \"oxi_state\", None)\n new_sp[Species(sp.symbol, oxidation_state=oxi_state)] += occu\n site.species = new_sp\n\n def extract_cluster(self, target_sites: List[Site], **kwargs):\n r\"\"\"\n Extracts a cluster of atoms based on bond lengths\n\n Args:\n target_sites ([Site]): List of initial sites to nucleate cluster.\n **kwargs: kwargs passed through to CovalentBond.is_bonded.\n\n Returns:\n [Site/PeriodicSite] Cluster of atoms.\n \"\"\"\n cluster = list(target_sites)\n others = [site for site in self if site not in cluster]\n size = 0\n while len(cluster) > size:\n size = len(cluster)\n new_others = []\n for site in others:\n for site2 in cluster:\n if CovalentBond.is_bonded(site, site2, **kwargs):\n cluster.append(site)\n break\n else:\n new_others.append(site)\n others = new_others\n return cluster\n\n\nclass IStructure(SiteCollection, MSONable):\n \"\"\"\n Basic immutable Structure object with periodicity. Essentially a sequence\n of PeriodicSites having a common lattice. IStructure is made to be\n (somewhat) immutable so that they can function as keys in a dict. To make\n modifications, use the standard Structure object instead. Structure\n extends Sequence and Hashable, which means that in many cases,\n it can be used like any Python sequence. Iterating through a\n structure is equivalent to going through the sites in sequence.\n \"\"\"\n\n def __init__(self,\n lattice: Union[List, np.ndarray, Lattice],\n species: Sequence[Union[str, Element, Species, DummySpecies, Composition]],\n coords: Sequence[Sequence[float]],\n charge: float = None,\n validate_proximity: bool = False,\n to_unit_cell: bool = False,\n coords_are_cartesian: bool = False,\n site_properties: dict = None):\n \"\"\"\n Create a periodic structure.\n\n Args:\n lattice (Lattice/3x3 array): The lattice, either as a\n :class:`pymatgen.core.lattice.Lattice` or\n simply as any 2D array. Each row should correspond to a lattice\n vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a\n lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].\n species ([Species]): Sequence of species on each site. Can take in\n flexible input, including:\n\n i. A sequence of element / species specified either as string\n symbols, e.g. [\"Li\", \"Fe2+\", \"P\", ...] or atomic numbers,\n e.g., (3, 56, ...) or actual Element or Species objects.\n\n ii. 
List of dict of elements/species and occupancies, e.g.,\n [{\"Fe\" : 0.5, \"Mn\":0.5}, ...]. This allows the setup of\n disordered structures.\n coords (Nx3 array): list of fractional/cartesian coordinates of\n each species.\n charge (int): overall charge of the structure. Defaults to behavior\n in SiteCollection where total charge is the sum of the oxidation\n states.\n validate_proximity (bool): Whether to check if there are sites\n that are less than 0.01 Ang apart. Defaults to False.\n to_unit_cell (bool): Whether to map all sites into the unit cell,\n i.e., fractional coords between 0 and 1. Defaults to False.\n coords_are_cartesian (bool): Set to True if you are providing\n coordinates in cartesian coordinates. Defaults to False.\n site_properties (dict): Properties associated with the sites as a\n dict of sequences, e.g., {\"magmom\":[5,5,5,5]}. The sequences\n have to be the same length as the atomic species and\n fractional_coords. Defaults to None for no properties.\n \"\"\"\n if len(species) != len(coords):\n raise StructureError(\"The list of atomic species must be of the\"\n \" same length as the list of fractional\"\n \" coordinates.\")\n\n if isinstance(lattice, Lattice):\n self._lattice = lattice\n else:\n self._lattice = Lattice(lattice)\n\n sites = []\n for i, sp in enumerate(species):\n prop = None\n if site_properties:\n prop = {k: v[i]\n for k, v in site_properties.items()}\n\n sites.append(\n PeriodicSite(sp, coords[i], self._lattice,\n to_unit_cell,\n coords_are_cartesian=coords_are_cartesian,\n properties=prop))\n self._sites = tuple(sites)\n if validate_proximity and not self.is_valid():\n raise StructureError((\"Structure contains sites that are \",\n \"less than 0.01 Angstrom apart!\"))\n self._charge = charge\n\n @classmethod\n def from_sites(cls,\n sites: List[PeriodicSite],\n charge: float = None,\n validate_proximity: bool = False,\n to_unit_cell: bool = False):\n \"\"\"\n Convenience constructor to make a Structure from a list of sites.\n\n Args:\n sites: Sequence of PeriodicSites. Sites must have the same\n lattice.\n charge: Charge of structure.\n validate_proximity (bool): Whether to check if there are sites\n that are less than 0.01 Ang apart. Defaults to False.\n to_unit_cell (bool): Whether to translate sites into the unit\n cell.\n\n Returns:\n (Structure) Note that missing properties are set as None.\n \"\"\"\n if len(sites) < 1:\n raise ValueError(\"You need at least one site to construct a %s\" %\n cls)\n prop_keys = [] # type: List[str]\n props = {}\n lattice = sites[0].lattice\n for i, site in enumerate(sites):\n if site.lattice != lattice:\n raise ValueError(\"Sites must belong to the same lattice\")\n for k, v in site.properties.items():\n if k not in prop_keys:\n prop_keys.append(k)\n props[k] = [None] * len(sites)\n props[k][i] = v\n for k, v in props.items():\n if any((vv is None for vv in v)):\n warnings.warn(\"Not all sites have property %s. 
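    # --- Editor's usage sketch (hedged addition; not part of the original module) ---
    # Constructing an immutable IStructure from a lattice, species and fractional
    # coordinates, and rebuilding it via from_sites(), per the constructor and
    # classmethod documented above. All numerical values are illustrative.
    #
    #     from pymatgen.core.structure import IStructure
    #     s = IStructure([[2.8, 0, 0], [0, 2.8, 0], [0, 0, 2.8]],
    #                    ["Fe", "Fe"], [[0, 0, 0], [0.5, 0.5, 0.5]],
    #                    site_properties={"magmom": [5, -5]})
    #     s2 = IStructure.from_sites(list(s))    # rebuilt from its own PeriodicSites
    # -------------------------------------------------------------------------------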
Missing values \"\n \"are set to None.\" % k)\n return cls(lattice, [site.species for site in sites],\n [site.frac_coords for site in sites],\n charge=charge,\n site_properties=props,\n validate_proximity=validate_proximity,\n to_unit_cell=to_unit_cell)\n\n @classmethod\n def from_spacegroup(cls,\n sg: str,\n lattice: Union[List, np.ndarray, Lattice],\n species: Sequence[Union[str, Element, Species, DummySpecies, Composition]],\n coords: Sequence[Sequence[float]],\n site_properties: Dict[str, Sequence] = None,\n coords_are_cartesian: bool = False,\n tol: float = 1e-5):\n \"\"\"\n Generate a structure using a spacegroup. Note that only symmetrically\n distinct species and coords should be provided. All equivalent sites\n are generated from the spacegroup operations.\n\n Args:\n sg (str/int): The spacegroup. If a string, it will be interpreted\n as one of the notations supported by\n pymatgen.symmetry.groups.Spacegroup. E.g., \"R-3c\" or \"Fm-3m\".\n If an int, it will be interpreted as an international number.\n lattice (Lattice/3x3 array): The lattice, either as a\n :class:`pymatgen.core.lattice.Lattice` or\n simply as any 2D array. Each row should correspond to a lattice\n vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a\n lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].\n Note that no attempt is made to check that the lattice is\n compatible with the spacegroup specified. This may be\n introduced in a future version.\n species ([Species]): Sequence of species on each site. Can take in\n flexible input, including:\n\n i. A sequence of element / species specified either as string\n symbols, e.g. [\"Li\", \"Fe2+\", \"P\", ...] or atomic numbers,\n e.g., (3, 56, ...) or actual Element or Species objects.\n\n ii. List of dict of elements/species and occupancies, e.g.,\n [{\"Fe\" : 0.5, \"Mn\":0.5}, ...]. This allows the setup of\n disordered structures.\n coords (Nx3 array): list of fractional/cartesian coordinates of\n each species.\n coords_are_cartesian (bool): Set to True if you are providing\n coordinates in cartesian coordinates. Defaults to False.\n site_properties (dict): Properties associated with the sites as a\n dict of sequences, e.g., {\"magmom\":[5,5,5,5]}. The sequences\n have to be the same length as the atomic species and\n fractional_coords. 
Defaults to None for no properties.\n tol (float): A fractional tolerance to deal with numerical\n precision issues in determining if orbits are the same.\n \"\"\"\n from pymatgen.symmetry.groups import SpaceGroup\n try:\n i = int(sg)\n sgp = SpaceGroup.from_int_number(i)\n except ValueError:\n sgp = SpaceGroup(sg)\n\n if isinstance(lattice, Lattice):\n latt = lattice\n else:\n latt = Lattice(lattice)\n\n if not sgp.is_compatible(latt):\n raise ValueError(\n \"Supplied lattice with parameters %s is incompatible with \"\n \"supplied spacegroup %s!\" % (latt.parameters, sgp.symbol)\n )\n\n if len(species) != len(coords):\n raise ValueError(\n \"Supplied species and coords lengths (%d vs %d) are \"\n \"different!\" % (len(species), len(coords))\n )\n\n frac_coords = np.array(coords, dtype=np.float) if not coords_are_cartesian else \\\n latt.get_fractional_coords(coords)\n\n props = {} if site_properties is None else site_properties\n\n all_sp = [] # type: List[Union[str, Element, Species, DummySpecies, Composition]]\n all_coords = [] # type: List[List[float]]\n all_site_properties = collections.defaultdict(list) # type: Dict[str, List]\n for i, (sp, c) in enumerate(zip(species, frac_coords)):\n cc = sgp.get_orbit(c, tol=tol)\n all_sp.extend([sp] * len(cc))\n all_coords.extend(cc)\n for k, v in props.items():\n all_site_properties[k].extend([v[i]] * len(cc))\n\n return cls(latt, all_sp, all_coords,\n site_properties=all_site_properties)\n\n @classmethod\n def from_magnetic_spacegroup(\n cls,\n msg: Union[str, 'MagneticSpaceGroup'], # type: ignore # noqa: F821\n lattice: Union[List, np.ndarray, Lattice],\n species: Sequence[Union[str, Element, Species, DummySpecies, Composition]],\n coords: Sequence[Sequence[float]],\n site_properties: Dict[str, Sequence],\n coords_are_cartesian: bool = False,\n tol: float = 1e-5):\n \"\"\"\n Generate a structure using a magnetic spacegroup. Note that only\n symmetrically distinct species, coords and magmoms should be provided.]\n All equivalent sites are generated from the spacegroup operations.\n\n Args:\n msg (str/list/:class:`pymatgen.symmetry.maggroups.MagneticSpaceGroup`):\n The magnetic spacegroup.\n If a string, it will be interpreted as one of the notations\n supported by MagneticSymmetryGroup, e.g., \"R-3'c\" or \"Fm'-3'm\".\n If a list of two ints, it will be interpreted as the number of\n the spacegroup in its Belov, Neronova and Smirnova (BNS) setting.\n lattice (Lattice/3x3 array): The lattice, either as a\n :class:`pymatgen.core.lattice.Lattice` or\n simply as any 2D array. Each row should correspond to a lattice\n vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a\n lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].\n Note that no attempt is made to check that the lattice is\n compatible with the spacegroup specified. This may be\n introduced in a future version.\n species ([Species]): Sequence of species on each site. Can take in\n flexible input, including:\n i. A sequence of element / species specified either as string\n symbols, e.g. [\"Li\", \"Fe2+\", \"P\", ...] or atomic numbers,\n e.g., (3, 56, ...) or actual Element or Species objects.\n\n ii. List of dict of elements/species and occupancies, e.g.,\n [{\"Fe\" : 0.5, \"Mn\":0.5}, ...]. This allows the setup of\n disordered structures.\n coords (Nx3 array): list of fractional/cartesian coordinates of\n each species.\n site_properties (dict): Properties associated with the sites as a\n dict of sequences, e.g., {\"magmom\":[5,5,5,5]}. 
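    # --- Editor's usage sketch (hedged addition; not part of the original module) ---
    # Generating a full cell from only the symmetrically distinct sites, per
    # from_spacegroup() above. Rock-salt NaCl in Fm-3m and the Lattice.cubic
    # convenience constructor are assumptions used purely for illustration.
    #
    #     from pymatgen.core.lattice import Lattice
    #     nacl = Structure.from_spacegroup(
    #         "Fm-3m", Lattice.cubic(5.692), ["Na", "Cl"],
    #         [[0, 0, 0], [0.5, 0.5, 0.5]])
    #     len(nacl)    # 8 - the orbit of each distinct site is expanded
    # -------------------------------------------------------------------------------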
The sequences\n have to be the same length as the atomic species and\n fractional_coords. Unlike Structure.from_spacegroup(),\n this argument is mandatory, since magnetic moment information\n has to be included. Note that the *direction* of the supplied\n magnetic moment relative to the crystal is important, even if\n the resulting structure is used for collinear calculations.\n coords_are_cartesian (bool): Set to True if you are providing\n coordinates in cartesian coordinates. Defaults to False.\n tol (float): A fractional tolerance to deal with numerical\n precision issues in determining if orbits are the same.\n \"\"\"\n from pymatgen.electronic_structure.core import Magmom\n from pymatgen.symmetry.maggroups import MagneticSpaceGroup\n\n if 'magmom' not in site_properties:\n raise ValueError('Magnetic moments have to be defined.')\n\n magmoms = [Magmom(m) for m in site_properties['magmom']]\n\n if not isinstance(msg, MagneticSpaceGroup):\n msg = MagneticSpaceGroup(msg) # type: ignore\n\n if isinstance(lattice, Lattice):\n latt = lattice\n else:\n latt = Lattice(lattice)\n\n if not msg.is_compatible(latt):\n raise ValueError(\n \"Supplied lattice with parameters %s is incompatible with \"\n \"supplied spacegroup %s!\" % (latt.parameters, msg.sg_symbol)\n )\n\n if len(species) != len(coords):\n raise ValueError(\n \"Supplied species and coords lengths (%d vs %d) are \"\n \"different!\" % (len(species), len(coords))\n )\n\n if len(species) != len(magmoms):\n raise ValueError(\n \"Supplied species and magmom lengths (%d vs %d) are \"\n \"different!\" % (len(species), len(magmoms))\n )\n\n frac_coords = coords if not coords_are_cartesian else latt.get_fractional_coords(coords)\n\n all_sp = [] # type: List[Union[str, Element, Species, DummySpecies, Composition]]\n all_coords = [] # type: List[List[float]]\n all_magmoms = [] # type: List[float]\n all_site_properties = collections.defaultdict(list) # type: Dict[str, List]\n for i, (sp, c, m) in enumerate(zip(species, frac_coords, magmoms)):\n cc, mm = msg.get_orbit(c, m, tol=tol)\n all_sp.extend([sp] * len(cc))\n all_coords.extend(cc)\n all_magmoms.extend(mm)\n for k, v in site_properties.items():\n if k != 'magmom':\n all_site_properties[k].extend([v[i]] * len(cc))\n\n all_site_properties['magmom'] = all_magmoms\n\n return cls(latt, all_sp, all_coords,\n site_properties=all_site_properties)\n\n @property\n def charge(self):\n \"\"\"\n Overall charge of the structure\n \"\"\"\n if self._charge is None:\n return super().charge\n return self._charge\n\n @property\n def distance_matrix(self):\n \"\"\"\n Returns the distance matrix between all sites in the structure. 
For\n periodic structures, this should return the nearest image distance.\n \"\"\"\n return self.lattice.get_all_distances(self.frac_coords,\n self.frac_coords)\n\n @property\n def sites(self):\n \"\"\"\n Returns an iterator for the sites in the Structure.\n \"\"\"\n return self._sites\n\n @property\n def lattice(self):\n \"\"\"\n Lattice of the structure.\n \"\"\"\n return self._lattice\n\n @property\n def density(self):\n \"\"\"\n Returns the density in units of g/cc\n \"\"\"\n m = Mass(self.composition.weight, \"amu\")\n return m.to(\"g\") / (self.volume * Length(1, \"ang\").to(\"cm\") ** 3)\n\n def get_space_group_info(self, symprec=1e-2, angle_tolerance=5.0):\n \"\"\"\n Convenience method to quickly get the spacegroup of a structure.\n\n Args:\n symprec (float): Same definition as in SpacegroupAnalyzer.\n Defaults to 1e-2.\n angle_tolerance (float): Same definition as in SpacegroupAnalyzer.\n Defaults to 5 degrees.\n\n Returns:\n spacegroup_symbol, international_number\n \"\"\"\n # Import within method needed to avoid cyclic dependency.\n from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n a = SpacegroupAnalyzer(self, symprec=symprec,\n angle_tolerance=angle_tolerance)\n return a.get_space_group_symbol(), a.get_space_group_number()\n\n def matches(self, other, anonymous=False, **kwargs):\n \"\"\"\n Check whether this structure is similar to another structure.\n Basically a convenience method to call structure matching.\n\n Args:\n other (IStructure/Structure): Another structure.\n **kwargs: Same **kwargs as in\n :class:`pymatgen.analysis.structure_matcher.StructureMatcher`.\n\n Returns:\n (bool) True is the structures are similar under some affine\n transformation.\n \"\"\"\n from pymatgen.analysis.structure_matcher import StructureMatcher\n m = StructureMatcher(**kwargs)\n if not anonymous:\n return m.fit(self, other)\n return m.fit_anonymous(self, other)\n\n def __eq__(self, other):\n if other is self:\n return True\n if other is None:\n return False\n if len(self) != len(other):\n return False\n if self.lattice != other.lattice:\n return False\n for site in self:\n if site not in other:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n # For now, just use the composition hash code.\n return self.composition.__hash__()\n\n def __mul__(self, scaling_matrix):\n \"\"\"\n Makes a supercell. Allowing to have sites outside the unit cell\n\n Args:\n scaling_matrix: A scaling matrix for transforming the lattice\n vectors. Has to be all integers. Several options are possible:\n\n a. A full 3x3 scaling matrix defining the linear combination\n the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,\n 1]] generates a new structure with lattice vectors a' =\n 2a + b, b' = 3b, c' = c where a, b, and c are the lattice\n vectors of the original structure.\n b. An sequence of three scaling factors. E.g., [2, 1, 1]\n specifies that the supercell should have dimensions 2a x b x\n c.\n c. A number, which simply scales all lattice vectors by the\n same factor.\n\n Returns:\n Supercell structure. Note that a Structure is always returned,\n even if the input structure is a subclass of Structure. This is\n to avoid different arguments signatures from causing problems. 
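    # --- Editor's usage sketch (hedged addition; not part of the original module) ---
    # Spacegroup lookup, structure matching and supercell creation via the methods
    # documented above; `nacl` is the hypothetical structure from the previous sketch.
    #
    #     nacl.get_space_group_info()    # ("Fm-3m", 225)
    #     nacl.matches(nacl.copy())      # True under an affine transformation
    #     supercell = nacl * [2, 2, 2]   # __mul__ returns a 2x2x2 supercell
    #     len(supercell)                 # 64
    # -------------------------------------------------------------------------------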
If\n you prefer a subclass to return its own type, you need to override\n this method in the subclass.\n \"\"\"\n scale_matrix = np.array(scaling_matrix, np.int16)\n if scale_matrix.shape != (3, 3):\n scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)\n new_lattice = Lattice(np.dot(scale_matrix, self._lattice.matrix))\n\n f_lat = lattice_points_in_supercell(scale_matrix)\n c_lat = new_lattice.get_cartesian_coords(f_lat)\n\n new_sites = []\n for site in self:\n for v in c_lat:\n s = PeriodicSite(\n site.species, site.coords + v,\n new_lattice, properties=site.properties,\n coords_are_cartesian=True, to_unit_cell=False,\n skip_checks=True)\n new_sites.append(s)\n\n new_charge = self._charge * np.linalg.det(scale_matrix) if self._charge else None\n return Structure.from_sites(new_sites, charge=new_charge)\n\n def __rmul__(self, scaling_matrix):\n \"\"\"\n Similar to __mul__ to preserve commutativeness.\n \"\"\"\n return self.__mul__(scaling_matrix)\n\n @property\n def frac_coords(self):\n \"\"\"\n Fractional coordinates as a Nx3 numpy array.\n \"\"\"\n return np.array([site.frac_coords for site in self._sites])\n\n @property\n def volume(self):\n \"\"\"\n Returns the volume of the structure.\n \"\"\"\n return self._lattice.volume\n\n def get_distance(self, i, j, jimage=None):\n \"\"\"\n Get distance between site i and j assuming periodic boundary\n conditions. If the index jimage of two sites atom j is not specified it\n selects the jimage nearest to the i atom and returns the distance and\n jimage indices in terms of lattice vector translations if the index\n jimage of atom j is specified it returns the distance between the i\n atom and the specified jimage atom.\n\n Args:\n i (int): Index of first site\n j (int): Index of second site\n jimage: Number of lattice translations in each lattice direction.\n Default is None for nearest image.\n\n Returns:\n distance\n \"\"\"\n return self[i].distance(self[j], jimage)\n\n def get_sites_in_sphere(self, pt: np.array, r: float,\n include_index: bool = False,\n include_image: bool = False) \\\n -> List[Tuple[PeriodicSite, float, Optional[int], Optional[Tuple[int]]]]:\n \"\"\"\n Find all sites within a sphere from the point, including a site (if any)\n sitting on the point itself. This includes sites in other periodic\n images.\n\n Algorithm:\n\n 1. place sphere of radius r in crystal and determine minimum supercell\n (parallelpiped) which would contain a sphere of radius r. for this\n we need the projection of a_1 on a unit vector perpendicular\n to a_2 & a_3 (i.e. the unit vector in the direction b_1) to\n determine how many a_1\"s it will take to contain the sphere.\n\n Nxmax = r * length_of_b_1 / (2 Pi)\n\n 2. keep points falling within r.\n\n Args:\n pt (3x1 array): cartesian coordinates of center of sphere.\n r (float): Radius of sphere.\n include_index (bool): Whether the non-supercell site index\n is included in the returned data\n include_image (bool): Whether to include the supercell image\n is included in the returned data\n\n Returns:\n [(site, dist) ...] 
since most of the time, subsequent processing\n requires the distance.\n \"\"\"\n site_fcoords = np.mod(self.frac_coords, 1)\n neighbors = [] # type: List[Tuple[PeriodicSite, float, Optional[int], Optional[Tuple[int]]]]\n for fcoord, dist, i, img in self._lattice.get_points_in_sphere(\n site_fcoords, pt, r):\n nnsite = PeriodicSite(self[i].species,\n fcoord, self._lattice,\n properties=self[i].properties,\n skip_checks=True)\n\n # Get the neighbor data\n nn_data = (nnsite, dist) if not include_index else (nnsite, dist, i) # type: ignore\n if include_image:\n nn_data += (img, ) # type: ignore\n neighbors.append(nn_data) # type: ignore\n return neighbors\n\n def get_neighbors(self, site: PeriodicSite, r: float,\n include_index: bool = False, include_image: bool = False) \\\n -> List[PeriodicNeighbor]:\n \"\"\"\n Get all neighbors to a site within a sphere of radius r. Excludes the\n site itself.\n\n Args:\n site (Site): Which is the center of the sphere.\n r (float): Radius of sphere.\n include_index (bool): Deprecated. Now, the non-supercell site index\n is always included in the returned data.\n include_image (bool): Deprecated. Now the supercell image\n is always included in the returned data.\n\n Returns:\n [PeriodicNeighbor] where PeriodicNeighbor is a namedtuple containing\n (site, distance, index, image).\n \"\"\"\n return self.get_all_neighbors(r, include_index=include_index,\n include_image=include_image,\n sites=[site])[0]\n\n @deprecated(get_neighbors, \"This is retained purely for checking purposes.\")\n def get_neighbors_old(self, site, r, include_index=False, include_image=False):\n \"\"\"\n Get all neighbors to a site within a sphere of radius r. Excludes the\n site itself.\n\n Args:\n site (Site): Which is the center of the sphere.\n r (float): Radius of sphere.\n include_index (bool): Whether the non-supercell site index\n is included in the returned data\n include_image (bool): Whether to include the supercell image\n is included in the returned data\n\n Returns:\n [PeriodicNeighbor] where PeriodicNeighbor is a namedtuple containing\n (site, distance, index, image).\n \"\"\"\n nn = self.get_sites_in_sphere(site.coords, r,\n include_index=include_index,\n include_image=include_image)\n return [d for d in nn if site != d[0]]\n\n def _get_neighbor_list_py(self, r: float,\n sites: List[PeriodicSite] = None,\n numerical_tol: float = 1e-8,\n exclude_self: bool = True) -> Tuple[np.ndarray, ...]:\n \"\"\"\n A python version of getting neighbor_list. The returned values are a tuple of\n numpy arrays (center_indices, points_indices, offset_vectors, distances).\n Atom `center_indices[i]` has neighbor atom `points_indices[i]` that is\n translated by `offset_vectors[i]` lattice vectors, and the distance is\n `distances[i]`.\n\n Args:\n r (float): Radius of sphere\n sites (list of Sites or None): sites for getting all neighbors,\n default is None, which means neighbors will be obtained for all\n sites. This is useful in the situation where you are interested\n only in one subspecies type, and makes it a lot faster.\n numerical_tol (float): This is a numerical tolerance for distances.\n Sites which are < numerical_tol are determined to be conincident\n with the site. Sites which are r + numerical_tol away is deemed\n to be within r from the site. 
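    # --- Editor's usage sketch (hedged addition; not part of the original module) ---
    # Single-site neighbour queries per get_neighbors() and get_sites_in_sphere()
    # above, again on the hypothetical `nacl`; the cutoff radius is an assumption.
    #
    #     center = nacl[0]
    #     shell = nacl.get_neighbors(center, r=3.0)    # list of PeriodicNeighbor
    #     nearest = min(shell, key=lambda nn: nn.nn_distance)
    #     in_sphere = nacl.get_sites_in_sphere(center.coords, r=3.0)
    # -------------------------------------------------------------------------------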
The default of 1e-8 should be\n ok in most instances.\n exclude_self (bool): whether to exclude atom neighboring with itself within\n numerical tolerance distance, default to True\n Returns: (center_indices, points_indices, offset_vectors, distances)\n \"\"\"\n neighbors = self.get_all_neighbors_py(r=r, include_index=True, include_image=True,\n sites=sites, numerical_tol=1e-8)\n center_indices = []\n points_indices = []\n offsets = []\n distances = []\n for i, nns in enumerate(neighbors):\n if len(nns) > 0:\n for n in nns:\n if exclude_self and (i == n.index) and (n.nn_distance <= numerical_tol):\n continue\n center_indices.append(i)\n points_indices.append(n.index)\n offsets.append(n.image)\n distances.append(n.nn_distance)\n return tuple((np.array(center_indices), np.array(points_indices),\n np.array(offsets), np.array(distances)))\n\n def get_neighbor_list(self, r: float,\n sites: List[PeriodicSite] = None,\n numerical_tol: float = 1e-8,\n exclude_self: bool = True) -> Tuple[np.ndarray, ...]:\n \"\"\"\n Get neighbor lists using numpy array representations without constructing\n Neighbor objects. If the cython extension is installed, this method will\n be orders of magnitude faster than `get_all_neighbors`.\n The returned values are a tuple of numpy arrays\n (center_indices, points_indices, offset_vectors, distances).\n Atom `center_indices[i]` has neighbor atom `points_indices[i]` that is\n translated by `offset_vectors[i]` lattice vectors, and the distance is\n `distances[i]`.\n\n Args:\n r (float): Radius of sphere\n sites (list of Sites or None): sites for getting all neighbors,\n default is None, which means neighbors will be obtained for all\n sites. This is useful in the situation where you are interested\n only in one subspecies type, and makes it a lot faster.\n numerical_tol (float): This is a numerical tolerance for distances.\n Sites which are < numerical_tol are determined to be conincident\n with the site. Sites which are r + numerical_tol away is deemed\n to be within r from the site. 
The default of 1e-8 should be\n ok in most instances.\n exclude_self (bool): whether to exclude atom neighboring with itself within\n numerical tolerance distance, default to True\n Returns: (center_indices, points_indices, offset_vectors, distances)\n\n \"\"\"\n try:\n from pymatgen.optimization.neighbors import find_points_in_spheres # type: ignore\n except ImportError:\n return self._get_neighbor_list_py(r, sites, exclude_self=exclude_self)\n else:\n if sites is None:\n sites = self.sites\n site_coords = np.array([site.coords for site in sites], dtype=float)\n cart_coords = np.ascontiguousarray(np.array(self.cart_coords), dtype=float)\n lattice_matrix = np.ascontiguousarray(np.array(self.lattice.matrix), dtype=float)\n r = float(r)\n center_indices, points_indices, images, distances = \\\n find_points_in_spheres(cart_coords, site_coords, r=r,\n pbc=np.array([1, 1, 1], dtype=int),\n lattice=lattice_matrix, tol=numerical_tol)\n cond = np.array([True] * len(center_indices))\n if exclude_self:\n self_pair = (center_indices == points_indices) & (distances <= numerical_tol)\n cond = ~self_pair\n return tuple((center_indices[cond], points_indices[cond],\n images[cond], distances[cond]))\n\n def get_all_neighbors(self, r: float,\n include_index: bool = False,\n include_image: bool = False,\n sites: List[PeriodicSite] = None,\n numerical_tol: float = 1e-8) -> List[List[PeriodicNeighbor]]:\n\n \"\"\"\n Get neighbors for each atom in the unit cell, out to a distance r\n Returns a list of list of neighbors for each site in structure.\n Use this method if you are planning on looping over all sites in the\n crystal. If you only want neighbors for a particular site, use the\n method get_neighbors as it may not have to build such a large supercell\n However if you are looping over all sites in the crystal, this method\n is more efficient since it only performs one pass over a large enough\n supercell to contain all possible atoms out to a distance r.\n The return type is a [(site, dist) ...] since most of the time,\n subsequent processing requires the distance.\n\n A note about periodic images: Before computing the neighbors, this\n operation translates all atoms to within the unit cell (having\n fractional coordinates within [0,1)). This means that the \"image\" of a\n site does not correspond to how much it has been translates from its\n current position, but which image of the unit cell it resides.\n\n Args:\n r (float): Radius of sphere.\n include_index (bool): Deprecated. Now, the non-supercell site index\n is always included in the returned data.\n include_image (bool): Deprecated. Now the supercell image\n is always included in the returned data.\n sites (list of Sites or None): sites for getting all neighbors,\n default is None, which means neighbors will be obtained for all\n sites. This is useful in the situation where you are interested\n only in one subspecies type, and makes it a lot faster.\n numerical_tol (float): This is a numerical tolerance for distances.\n Sites which are < numerical_tol are determined to be conincident\n with the site. Sites which are r + numerical_tol away is deemed\n to be within r from the site. 
The default of 1e-8 should be\n ok in most instances.\n\n Returns:\n [PeriodicNeighbor] where PeriodicNeighbor is a namedtuple containing\n (site, distance, index, image).\n \"\"\"\n if sites is None:\n sites = self.sites\n center_indices, points_indices, images, distances = \\\n self.get_neighbor_list(r=r, sites=sites, numerical_tol=numerical_tol)\n if len(points_indices) < 1:\n return [[]] * len(sites)\n f_coords = self.frac_coords[points_indices] + images\n neighbor_dict: Dict[int, List] = collections.defaultdict(list)\n lattice = self.lattice\n atol = Site.position_atol\n all_sites = self.sites\n for cindex, pindex, image, f_coord, d in zip(center_indices, points_indices, images, f_coords, distances):\n psite = all_sites[pindex]\n csite = sites[cindex]\n if (d > numerical_tol or\n # This simply compares the psite and csite. The reason why manual comparison is done is\n # for speed. This does not check the lattice since they are always equal. Also, the or construct\n # returns True immediately once one of the conditions are satisfied.\n psite.species != csite.species or\n (not np.allclose(psite.coords, csite.coords, atol=atol)) or\n (not psite.properties == csite.properties)):\n neighbor_dict[cindex].append(PeriodicNeighbor(\n species=psite.species,\n coords=f_coord,\n lattice=lattice,\n properties=psite.properties,\n nn_distance=d,\n index=pindex,\n image=tuple(image)))\n\n neighbors: List[List[PeriodicNeighbor]] = []\n\n for i in range(len(sites)):\n neighbors.append(neighbor_dict[i])\n return neighbors\n\n def get_all_neighbors_py(self, r: float,\n include_index: bool = False,\n include_image: bool = False,\n sites: List[PeriodicSite] = None,\n numerical_tol: float = 1e-8) \\\n -> List[List[PeriodicNeighbor]]:\n\n \"\"\"\n Get neighbors for each atom in the unit cell, out to a distance r\n Returns a list of list of neighbors for each site in structure.\n Use this method if you are planning on looping over all sites in the\n crystal. If you only want neighbors for a particular site, use the\n method get_neighbors as it may not have to build such a large supercell\n However if you are looping over all sites in the crystal, this method\n is more efficient since it only performs one pass over a large enough\n supercell to contain all possible atoms out to a distance r.\n The return type is a [(site, dist) ...] since most of the time,\n subsequent processing requires the distance.\n\n A note about periodic images: Before computing the neighbors, this\n operation translates all atoms to within the unit cell (having\n fractional coordinates within [0,1)). This means that the \"image\" of a\n site does not correspond to how much it has been translates from its\n current position, but which image of the unit cell it resides.\n\n Args:\n r (float): Radius of sphere.\n include_index (bool): Deprecated. Now, the non-supercell site index\n is always included in the returned data.\n include_image (bool): Deprecated. Now the supercell image\n is always included in the returned data.\n sites (list of Sites or None): sites for getting all neighbors,\n default is None, which means neighbors will be obtained for all\n sites. This is useful in the situation where you are interested\n only in one subspecies type, and makes it a lot faster.\n numerical_tol (float): This is a numerical tolerance for distances.\n Sites which are < numerical_tol are determined to be conincident\n with the site. Sites which are r + numerical_tol away is deemed\n to be within r from the site. 
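    # --- Editor's usage sketch (hedged addition; not part of the original module) ---
    # Bulk neighbour extraction per get_neighbor_list() and get_all_neighbors()
    # above; the radius is an illustrative assumption.
    #
    #     centers, points, offsets, dists = nacl.get_neighbor_list(r=3.0)
    #     # centers[i] has neighbour points[i], translated by offsets[i] lattice
    #     # vectors, at distance dists[i].
    #     all_nn = nacl.get_all_neighbors(r=3.0)      # one neighbour list per site
    #     coordination = [len(nn) for nn in all_nn]   # 6 for rock salt at 3.0 A
    # -------------------------------------------------------------------------------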
The default of 1e-8 should be\n ok in most instances.\n\n Returns:\n [PeriodicNeighbor] where PeriodicNeighbor is a namedtuple containing\n (site, distance, index, image).\n \"\"\"\n\n if sites is None:\n sites = self.sites\n site_coords = np.array([site.coords for site in sites])\n point_neighbors = get_points_in_spheres(self.cart_coords, site_coords, r=r, pbc=True,\n numerical_tol=numerical_tol, lattice=self.lattice)\n neighbors: List[List[PeriodicNeighbor]] = []\n for point_neighbor, site in zip(point_neighbors, sites):\n nns: List[PeriodicNeighbor] = []\n if len(point_neighbor) < 1:\n neighbors.append([])\n continue\n for n in point_neighbor:\n coord, d, index, image = n\n if (d > numerical_tol) or (self[index] != site):\n neighbor = PeriodicNeighbor(\n species=self[index].species,\n coords=coord,\n lattice=self.lattice,\n properties=self[index].properties,\n nn_distance=d,\n index=index,\n image=tuple(image)\n )\n nns.append(neighbor)\n neighbors.append(nns)\n return neighbors\n\n @deprecated(get_all_neighbors, \"This is retained purely for checking purposes.\")\n def get_all_neighbors_old(self, r, include_index=False, include_image=False,\n include_site=True):\n \"\"\"\n Get neighbors for each atom in the unit cell, out to a distance r\n Returns a list of list of neighbors for each site in structure.\n Use this method if you are planning on looping over all sites in the\n crystal. If you only want neighbors for a particular site, use the\n method get_neighbors as it may not have to build such a large supercell\n However if you are looping over all sites in the crystal, this method\n is more efficient since it only performs one pass over a large enough\n supercell to contain all possible atoms out to a distance r.\n The return type is a [(site, dist) ...] since most of the time,\n subsequent processing requires the distance.\n\n A note about periodic images: Before computing the neighbors, this\n operation translates all atoms to within the unit cell (having\n fractional coordinates within [0,1)). This means that the \"image\" of a\n site does not correspond to how much it has been translates from its\n current position, but which image of the unit cell it resides.\n\n Args:\n r (float): Radius of sphere.\n include_index (bool): Whether to include the non-supercell site\n in the returned data\n include_image (bool): Whether to include the supercell image\n in the returned data\n include_site (bool): Whether to include the site in the returned\n data. 
Defaults to True.\n\n Returns:\n [Neighbor] where Neighbor is a namedtuple containing\n (site, distance, index, image).\n \"\"\"\n # Use same algorithm as get_sites_in_sphere to determine supercell but\n # loop over all atoms in crystal\n recp_len = np.array(self.lattice.reciprocal_lattice.abc)\n maxr = np.ceil((r + 0.15) * recp_len / (2 * math.pi))\n nmin = np.floor(np.min(self.frac_coords, axis=0)) - maxr\n nmax = np.ceil(np.max(self.frac_coords, axis=0)) + maxr\n\n all_ranges = [np.arange(x, y) for x, y in zip(nmin, nmax)]\n latt = self._lattice\n matrix = latt.matrix\n neighbors = [list() for _ in range(len(self._sites))]\n all_fcoords = np.mod(self.frac_coords, 1)\n coords_in_cell = np.dot(all_fcoords, matrix)\n site_coords = self.cart_coords\n\n indices = np.arange(len(self))\n\n for image in itertools.product(*all_ranges):\n coords = np.dot(image, matrix) + coords_in_cell\n all_dists = all_distances(coords, site_coords)\n all_within_r = np.bitwise_and(all_dists <= r, all_dists > 1e-8)\n\n for (j, d, within_r) in zip(indices, all_dists, all_within_r):\n if include_site:\n nnsite = PeriodicSite(self[j].species, coords[j],\n latt, properties=self[j].properties,\n coords_are_cartesian=True,\n skip_checks=True)\n\n for i in indices[within_r]:\n item = []\n if include_site:\n item.append(nnsite)\n item.append(d[i])\n if include_index:\n item.append(j)\n # Add the image, if requested\n if include_image:\n item.append(image)\n neighbors[i].append(item)\n return neighbors\n\n def get_neighbors_in_shell(self, origin, r, dr, include_index=False, include_image=False):\n \"\"\"\n Returns all sites in a shell centered on origin (coords) between radii\n r-dr and r+dr.\n\n Args:\n origin (3x1 array): Cartesian coordinates of center of sphere.\n r (float): Inner radius of shell.\n dr (float): Width of shell.\n include_index (bool): Deprecated. Now, the non-supercell site index\n is always included in the returned data.\n include_image (bool): Deprecated. Now the supercell image\n is always included in the returned data.\n\n Returns:\n [NearestNeighbor] where Nearest Neighbor is a named tuple containing\n (site, distance, index, image).\n \"\"\"\n outer = self.get_sites_in_sphere(origin, r + dr,\n include_index=include_index,\n include_image=include_image)\n inner = r - dr\n return [t for t in outer if t[1] > inner]\n\n def get_sorted_structure(self, key=None, reverse=False):\n \"\"\"\n Get a sorted copy of the structure. The parameters have the same\n meaning as in list.sort. By default, sites are sorted by the\n electronegativity of the species.\n\n Args:\n key: Specifies a function of one argument that is used to extract\n a comparison key from each list element: key=str.lower. 
The\n default value is None (compare the elements directly).\n reverse (bool): If set to True, then the list elements are sorted\n as if each comparison were reversed.\n \"\"\"\n sites = sorted(self, key=key, reverse=reverse)\n return self.__class__.from_sites(sites, charge=self._charge)\n\n def get_reduced_structure(self, reduction_algo: str = \"niggli\"):\n \"\"\"\n Get a reduced structure.\n\n Args:\n reduction_algo (str): The lattice reduction algorithm to use.\n Currently supported options are \"niggli\" or \"LLL\".\n \"\"\"\n if reduction_algo == \"niggli\":\n reduced_latt = self._lattice.get_niggli_reduced_lattice()\n elif reduction_algo == \"LLL\":\n reduced_latt = self._lattice.get_lll_reduced_lattice()\n else:\n raise ValueError(\"Invalid reduction algo : {}\"\n .format(reduction_algo))\n\n if reduced_latt != self.lattice:\n return self.__class__( # type: ignore\n reduced_latt,\n self.species_and_occu,\n self.cart_coords,\n coords_are_cartesian=True,\n to_unit_cell=True,\n site_properties=self.site_properties,\n charge=self._charge)\n return self.copy()\n\n def copy(self, site_properties=None, sanitize=False):\n \"\"\"\n Convenience method to get a copy of the structure, with options to add\n site properties.\n\n Args:\n site_properties (dict): Properties to add or override. The\n properties are specified in the same way as the constructor,\n i.e., as a dict of the form {property: [values]}. The\n properties should be in the order of the *original* structure\n if you are performing sanitization.\n sanitize (bool): If True, this method will return a sanitized\n structure. Sanitization performs a few things: (i) The sites are\n sorted by electronegativity, (ii) a LLL lattice reduction is\n carried out to obtain a relatively orthogonalized cell,\n (iii) all fractional coords for sites are mapped into the\n unit cell.\n\n Returns:\n A copy of the Structure, with optionally new site_properties and\n optionally sanitized.\n \"\"\"\n props = self.site_properties\n if site_properties:\n props.update(site_properties)\n if not sanitize:\n return self.__class__(self._lattice,\n self.species_and_occu,\n self.frac_coords,\n charge=self._charge,\n site_properties=props)\n reduced_latt = self._lattice.get_lll_reduced_lattice()\n new_sites = []\n for i, site in enumerate(self):\n frac_coords = reduced_latt.get_fractional_coords(site.coords)\n site_props = {}\n for p in props:\n site_props[p] = props[p][i]\n new_sites.append(PeriodicSite(site.species,\n frac_coords, reduced_latt,\n to_unit_cell=True,\n properties=site_props,\n skip_checks=True))\n new_sites = sorted(new_sites)\n return self.__class__.from_sites(new_sites, charge=self._charge)\n\n def interpolate(self, end_structure,\n nimages: Union[int, Iterable] = 10,\n interpolate_lattices: bool = False,\n pbc: bool = True,\n autosort_tol: float = 0):\n \"\"\"\n Interpolate between this structure and end_structure. Useful for\n construction of NEB inputs.\n\n Args:\n end_structure (Structure): structure to interpolate between this\n structure and end.\n nimages (int,list): No. of interpolation images or a list of\n interpolation images. 
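    # --- Editor's usage sketch (hedged addition; not part of the original module) ---
    # Sorted, reduced and sanitised copies per the methods above, on the same
    # hypothetical `nacl` structure.
    #
    #     sorted_s = nacl.get_sorted_structure()          # by electronegativity
    #     reduced = nacl.get_reduced_structure("niggli")  # Niggli-reduced lattice
    #     clone = nacl.copy(sanitize=True)                # sorted, LLL-reduced copy
    # -------------------------------------------------------------------------------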
Defaults to 10 images.\n interpolate_lattices (bool): Whether to interpolate the lattices.\n Interpolates the lengths and angles (rather than the matrix)\n so orientation may be affected.\n pbc (bool): Whether to use periodic boundary conditions to find\n the shortest path between endpoints.\n autosort_tol (float): A distance tolerance in angstrom in\n which to automatically sort end_structure to match to the\n closest points in this particular structure. This is usually\n what you want in a NEB calculation. 0 implies no sorting.\n Otherwise, a 0.5 value usually works pretty well.\n\n Returns:\n List of interpolated structures. The starting and ending\n structures included as the first and last structures respectively.\n A total of (nimages + 1) structures are returned.\n \"\"\"\n # Check length of structures\n if len(self) != len(end_structure):\n raise ValueError(\"Structures have different lengths!\")\n\n if not (interpolate_lattices or self.lattice == end_structure.lattice):\n raise ValueError(\"Structures with different lattices!\")\n\n if not isinstance(nimages, collections.abc.Iterable):\n images = np.arange(nimages + 1) / nimages\n else:\n images = nimages\n\n # Check that both structures have the same species\n for i, site in enumerate(self):\n if site.species != end_structure[i].species:\n raise ValueError(\"Different species!\\nStructure 1:\\n\" +\n str(self) + \"\\nStructure 2\\n\" +\n str(end_structure))\n\n start_coords = np.array(self.frac_coords)\n end_coords = np.array(end_structure.frac_coords)\n\n if autosort_tol:\n dist_matrix = self.lattice.get_all_distances(start_coords,\n end_coords)\n site_mappings = collections.defaultdict(list) # type: Dict[int, List[int]]\n unmapped_start_ind = []\n for i, row in enumerate(dist_matrix):\n ind = np.where(row < autosort_tol)[0]\n if len(ind) == 1:\n site_mappings[i].append(ind[0])\n else:\n unmapped_start_ind.append(i)\n\n if len(unmapped_start_ind) > 1:\n raise ValueError(\"Unable to reliably match structures \"\n \"with auto_sort_tol = %f. unmapped indices \"\n \"= %s\" % (autosort_tol, unmapped_start_ind))\n\n sorted_end_coords = np.zeros_like(end_coords)\n matched = []\n for i, j in site_mappings.items():\n if len(j) > 1:\n raise ValueError(\"Unable to reliably match structures \"\n \"with auto_sort_tol = %f. 
More than one \"\n \"site match!\" % autosort_tol)\n sorted_end_coords[i] = end_coords[j[0]]\n matched.append(j[0])\n\n if len(unmapped_start_ind) == 1:\n i = unmapped_start_ind[0]\n j = list(set(range(len(start_coords))).difference(matched))[0] # type: ignore\n sorted_end_coords[i] = end_coords[j]\n\n end_coords = sorted_end_coords\n\n vec = end_coords - start_coords\n if pbc:\n vec -= np.round(vec)\n sp = self.species_and_occu\n structs = []\n\n if interpolate_lattices:\n # interpolate lattice matrices using polar decomposition\n from scipy.linalg import polar\n # u is unitary (rotation), p is stretch\n u, p = polar(np.dot(end_structure.lattice.matrix.T,\n np.linalg.inv(self.lattice.matrix.T)))\n lvec = p - np.identity(3)\n lstart = self.lattice.matrix.T\n\n for x in images:\n if interpolate_lattices:\n l_a = np.dot(np.identity(3) + x * lvec, lstart).T\n lat = Lattice(l_a)\n else:\n lat = self.lattice\n fcoords = start_coords + x * vec\n structs.append(self.__class__(lat, sp, fcoords, site_properties=self.site_properties)) # type: ignore\n return structs\n\n def get_miller_index_from_site_indexes(self, site_ids, round_dp=4,\n verbose=True):\n \"\"\"\n Get the Miller index of a plane from a set of sites indexes.\n\n A minimum of 3 sites are required. If more than 3 sites are given\n the best plane that minimises the distance to all points will be\n calculated.\n\n Args:\n site_ids (list of int): A list of site indexes to consider. A\n minimum of three site indexes are required. If more than three\n sites are provided, the best plane that minimises the distance\n to all sites will be calculated.\n round_dp (int, optional): The number of decimal places to round the\n miller index to.\n verbose (bool, optional): Whether to print warnings.\n\n Returns:\n (tuple): The Miller index.\n \"\"\"\n return self.lattice.get_miller_index_from_coords(\n self.frac_coords[site_ids], coords_are_cartesian=False,\n round_dp=round_dp, verbose=verbose)\n\n def get_primitive_structure(self, tolerance=0.25, use_site_props=False,\n constrain_latt=None):\n \"\"\"\n This finds a smaller unit cell than the input. Sometimes it doesn\"t\n find the smallest possible one, so this method is recursively called\n until it is unable to find a smaller cell.\n\n NOTE: if the tolerance is greater than 1/2 the minimum inter-site\n distance in the primitive cell, the algorithm will reject this lattice.\n\n Args:\n tolerance (float), Angstroms: Tolerance for each coordinate of a\n particular site. For example, [0.1, 0, 0.1] in cartesian\n coordinates will be considered to be on the same coordinates\n as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.\n use_site_props (bool): Whether to account for site properties in\n differntiating sites.\n constrain_latt (list/dict): List of lattice parameters we want to\n preserve, e.g. [\"alpha\", \"c\"] or dict with the lattice\n parameter names as keys and values we want the parameters to\n be e.g. 
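    # --- Editor's usage sketch (hedged addition; not part of the original module) ---
    # Interpolation (e.g. for NEB image generation) and primitive-cell reduction per
    # the methods above; `start` and `end` are hypothetical endpoint structures with
    # the same lattice and matching site ordering.
    #
    #     images = start.interpolate(end, nimages=5, autosort_tol=0.5)
    #     len(images)                            # 6 - endpoints are included
    #     prim = nacl.get_primitive_structure()  # 2-atom primitive cell of NaCl
    # -------------------------------------------------------------------------------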
{\"alpha\": 90, \"c\": 2.5}.\n\n Returns:\n The most primitive structure found.\n \"\"\"\n if constrain_latt is None:\n constrain_latt = []\n\n def site_label(site):\n if not use_site_props:\n return site.species_string\n d = [site.species_string]\n for k in sorted(site.properties.keys()):\n d.append(k + \"=\" + str(site.properties[k]))\n return \", \".join(d)\n\n # group sites by species string\n sites = sorted(self._sites, key=site_label)\n\n grouped_sites = [\n list(a[1])\n for a in itertools.groupby(sites, key=site_label)]\n grouped_fcoords = [np.array([s.frac_coords for s in g])\n for g in grouped_sites]\n\n # min_vecs are approximate periodicities of the cell. The exact\n # periodicities from the supercell matrices are checked against these\n # first\n min_fcoords = min(grouped_fcoords, key=lambda x: len(x))\n min_vecs = min_fcoords - min_fcoords[0]\n\n # fractional tolerance in the supercell\n super_ftol = np.divide(tolerance, self.lattice.abc)\n super_ftol_2 = super_ftol * 2\n\n def pbc_coord_intersection(fc1, fc2, tol):\n \"\"\"\n Returns the fractional coords in fc1 that have coordinates\n within tolerance to some coordinate in fc2\n \"\"\"\n d = fc1[:, None, :] - fc2[None, :, :]\n d -= np.round(d)\n np.abs(d, d)\n return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]\n\n # here we reduce the number of min_vecs by enforcing that every\n # vector in min_vecs approximately maps each site onto a similar site.\n # The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no\n # reduction.\n # This reduction is O(n^3) so usually is an improvement. Using double\n # the tolerance because both vectors are approximate\n for g in sorted(grouped_fcoords, key=lambda x: len(x)):\n for f in g:\n min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2)\n\n def get_hnf(fu):\n \"\"\"\n Returns all possible distinct supercell matrices given a\n number of formula units in the supercell. 
Batches the matrices\n by the values in the diagonal (for less numpy overhead).\n Computational complexity is O(n^3), and difficult to improve.\n Might be able to do something smart with checking combinations of a\n and b first, though unlikely to reduce to O(n^2).\n \"\"\"\n\n def factors(n):\n for i in range(1, n + 1):\n if n % i == 0:\n yield i\n\n for det in factors(fu):\n if det == 1:\n continue\n for a in factors(det):\n for e in factors(det // a):\n g = det // a // e\n yield det, np.array(\n [[[a, b, c], [0, e, f], [0, 0, g]]\n for b, c, f in\n itertools.product(range(a), range(a),\n range(e))])\n\n # we cant let sites match to their neighbors in the supercell\n grouped_non_nbrs = []\n for gfcoords in grouped_fcoords:\n fdist = gfcoords[None, :, :] - gfcoords[:, None, :]\n fdist -= np.round(fdist)\n np.abs(fdist, fdist)\n non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1)\n # since we want sites to match to themselves\n np.fill_diagonal(non_nbrs, True)\n grouped_non_nbrs.append(non_nbrs)\n\n num_fu = functools.reduce(math.gcd, map(len, grouped_sites))\n for size, ms in get_hnf(num_fu):\n inv_ms = np.linalg.inv(ms)\n\n # find sets of lattice vectors that are are present in min_vecs\n dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :]\n dist -= np.round(dist)\n np.abs(dist, dist)\n is_close = np.all(dist < super_ftol, axis=-1)\n any_close = np.any(is_close, axis=-1)\n inds = np.all(any_close, axis=-1)\n\n for inv_m, m in zip(inv_ms[inds], ms[inds]):\n new_m = np.dot(inv_m, self.lattice.matrix)\n ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1)))\n\n valid = True\n new_coords = []\n new_sp = []\n new_props = collections.defaultdict(list)\n for gsites, gfcoords, non_nbrs in zip(grouped_sites,\n grouped_fcoords,\n grouped_non_nbrs):\n all_frac = np.dot(gfcoords, m)\n\n # calculate grouping of equivalent sites, represented by\n # adjacency matrix\n fdist = all_frac[None, :, :] - all_frac[:, None, :]\n fdist = np.abs(fdist - np.round(fdist))\n close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1)\n groups = np.logical_and(close_in_prim, non_nbrs)\n\n # check that groups are correct\n if not np.all(np.sum(groups, axis=0) == size):\n valid = False\n break\n\n # check that groups are all cliques\n for g in groups:\n if not np.all(groups[g][:, g]):\n valid = False\n break\n if not valid:\n break\n\n # add the new sites, averaging positions\n added = np.zeros(len(gsites))\n new_fcoords = all_frac % 1\n for i, group in enumerate(groups):\n if not added[i]:\n added[group] = True\n inds = np.where(group)[0]\n coords = new_fcoords[inds[0]]\n for n, j in enumerate(inds[1:]):\n offset = new_fcoords[j] - coords\n coords += (offset - np.round(offset)) / (n + 2)\n new_sp.append(gsites[inds[0]].species)\n for k in gsites[inds[0]].properties:\n new_props[k].append(gsites[inds[0]].properties[k])\n new_coords.append(coords)\n\n if valid:\n inv_m = np.linalg.inv(m)\n new_l = Lattice(np.dot(inv_m, self.lattice.matrix))\n s = Structure(new_l, new_sp, new_coords,\n site_properties=new_props,\n coords_are_cartesian=False)\n\n # Default behavior\n p = s.get_primitive_structure(\n tolerance=tolerance, use_site_props=use_site_props,\n constrain_latt=constrain_latt\n ).get_reduced_structure()\n if not constrain_latt:\n return p\n\n # Only return primitive structures that\n # satisfy the restriction condition\n p_latt, s_latt = p.lattice, self.lattice\n if type(constrain_latt).__name__ == \"list\":\n if all([getattr(p_latt, p) == getattr(s_latt, p) for p in 
constrain_latt]):\n return p\n elif type(constrain_latt).__name__ == \"dict\":\n if all([getattr(p_latt, p) == constrain_latt[p] for p in constrain_latt.keys()]):\n return p\n\n return self.copy()\n\n def __repr__(self):\n outs = [\"Structure Summary\", repr(self.lattice)]\n if self._charge:\n if self._charge >= 0:\n outs.append(\"Overall Charge: +{}\".format(self._charge))\n else:\n outs.append(\"Overall Charge: -{}\".format(self._charge))\n for s in self:\n outs.append(repr(s))\n return \"\\n\".join(outs)\n\n def __str__(self):\n outs = [\"Full Formula ({s})\".format(s=self.composition.formula),\n \"Reduced Formula: {}\".format(self.composition.reduced_formula)]\n\n def to_s(x):\n return \"%0.6f\" % x\n outs.append(\"abc : \" + \" \".join([to_s(i).rjust(10)\n for i in self.lattice.abc]))\n outs.append(\"angles: \" + \" \".join([to_s(i).rjust(10)\n for i in self.lattice.angles]))\n if self._charge:\n if self._charge >= 0:\n outs.append(\"Overall Charge: +{}\".format(self._charge))\n else:\n outs.append(\"Overall Charge: -{}\".format(self._charge))\n outs.append(\"Sites ({i})\".format(i=len(self)))\n data = []\n props = self.site_properties\n keys = sorted(props.keys())\n for i, site in enumerate(self):\n row = [str(i), site.species_string]\n row.extend([to_s(j) for j in site.frac_coords])\n for k in keys:\n row.append(props[k][i])\n data.append(row)\n outs.append(tabulate(data, headers=[\"#\", \"SP\", \"a\", \"b\", \"c\"] + keys,\n ))\n return \"\\n\".join(outs)\n\n def get_orderings(self, mode: str = \"enum\", **kwargs):\n r\"\"\"\n Returns list of orderings for a disordered structure. If structure\n does not contain disorder, the default structure is returned.\n\n Args:\n mode (str): Either \"enum\" or \"sqs\". If enum,\n the enumlib will be used to return all distinct\n orderings. If sqs, mcsqs will be used to return\n an sqs structure.\n kwargs: kwargs passed to either\n pymatgen.command_line..enumlib_caller.EnumlibAdaptor\n or pymatgen.command_line.mcsqs_caller.run_mcsqs.\n For run_mcsqs, a default cluster search of 2 cluster interactions\n with 1NN distance and 3 cluster interactions with 2NN distance\n is set.\n\n Returns:\n List[Structure]\n \"\"\"\n if self.is_ordered:\n return [self]\n if mode.startswith(\"enum\"):\n from pymatgen.command_line.enumlib_caller import EnumlibAdaptor\n adaptor = EnumlibAdaptor(self, **kwargs)\n adaptor.run()\n return adaptor.structures\n if mode == \"sqs\":\n from pymatgen.command_line.mcsqs_caller import run_mcsqs\n if \"clusters\" not in kwargs:\n disordered_sites = [site for site in self if not site.is_ordered]\n subset_structure = Structure.from_sites(disordered_sites)\n dist_matrix = subset_structure.distance_matrix\n dists = sorted(set(dist_matrix.ravel()))\n unique_dists = []\n for i in range(1, len(dists)):\n if dists[i] - dists[i-1] > 0.1:\n unique_dists.append(dists[i])\n clusters = {(i+2): d + 0.01 for i, d in enumerate(unique_dists) if i < 2}\n kwargs[\"clusters\"] = clusters\n print(kwargs[\"clusters\"])\n return [run_mcsqs(self, **kwargs).bestsqs]\n raise ValueError()\n\n def as_dict(self, verbosity=1, fmt=None, **kwargs):\n \"\"\"\n Dict representation of Structure.\n\n Args:\n verbosity (int): Verbosity level. Default of 1 includes both\n direct and cartesian coordinates for all sites, lattice\n parameters, etc. Useful for reading and for insertion into a\n database. Set to 0 for an extremely lightweight version\n that only includes sufficient information to reconstruct the\n object.\n fmt (str): Specifies a format for the dict. 
Defaults to None,\n which is the default format used in pymatgen. Other options\n include \"abivars\".\n **kwargs: Allow passing of other kwargs needed for certain\n formats, e.g., \"abivars\".\n\n Returns:\n JSON serializable dict representation.\n \"\"\"\n if fmt == \"abivars\":\n \"\"\"Returns a dictionary with the ABINIT variables.\"\"\"\n from pymatgen.io.abinit.abiobjects import structure_to_abivars\n return structure_to_abivars(self, **kwargs)\n\n latt_dict = self._lattice.as_dict(verbosity=verbosity)\n del latt_dict[\"@module\"]\n del latt_dict[\"@class\"]\n\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"charge\": self._charge,\n \"lattice\": latt_dict, \"sites\": []}\n for site in self:\n site_dict = site.as_dict(verbosity=verbosity)\n del site_dict[\"lattice\"]\n del site_dict[\"@module\"]\n del site_dict[\"@class\"]\n d[\"sites\"].append(site_dict)\n return d\n\n def as_dataframe(self):\n \"\"\"\n Returns a Pandas dataframe of the sites. Structure level attributes are stored in DataFrame.attrs. Example:\n\n Species a b c x y z magmom\n 0 (Si) 0.0 0.0 0.000000e+00 0.0 0.000000e+00 0.000000e+00 5\n 1 (Si) 0.0 0.0 1.000000e-07 0.0 -2.217138e-07 3.135509e-07 -5\n \"\"\"\n data = []\n site_properties = self.site_properties\n prop_keys = list(site_properties.keys())\n for site in self:\n row = [site.species] + list(site.frac_coords) + list(site.coords)\n for k in prop_keys:\n row.append(site.properties.get(k))\n data.append(row)\n import pandas as pd\n df = pd.DataFrame(data, columns=[\"Species\", \"a\", \"b\", \"c\", \"x\", \"y\", \"z\"] + prop_keys)\n df.attrs[\"Reduced Formula\"] = self.composition.reduced_formula\n df.attrs[\"Lattice\"] = self.lattice\n return df\n\n @classmethod\n def from_dict(cls, d, fmt=None):\n \"\"\"\n Reconstitute a Structure object from a dict representation of Structure\n created using as_dict().\n\n Args:\n d (dict): Dict representation of structure.\n\n Returns:\n Structure object\n \"\"\"\n if fmt == \"abivars\":\n from pymatgen.io.abinit.abiobjects import structure_from_abivars\n return structure_from_abivars(cls=cls, **d)\n\n lattice = Lattice.from_dict(d[\"lattice\"])\n sites = [PeriodicSite.from_dict(sd, lattice) for sd in d[\"sites\"]]\n charge = d.get(\"charge\", None)\n return cls.from_sites(sites, charge=charge)\n\n def to(self, fmt=None, filename=None, **kwargs):\n r\"\"\"\n Outputs the structure to a file or string.\n\n Args:\n fmt (str): Format to output to. Defaults to JSON unless filename\n is provided. If fmt is specifies, it overrides whatever the\n filename is. Options include \"cif\", \"poscar\", \"cssr\", \"json\".\n Non-case sensitive.\n filename (str): If provided, output will be written to a file. If\n fmt is not specified, the format is determined from the\n filename. Defaults is None, i.e. string output.\n **kwargs: Kwargs passthru to relevant methods. E.g., This allows\n the passing of parameters like symprec to the\n CifWriter.__init__ method for generation of symmetric cifs.\n\n Returns:\n (str) if filename is None. 
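# --- Illustrative usage sketch (not part of the original module) -----------
# Round-tripping a Structure through as_dict()/from_dict() and inspecting it
# as a DataFrame; assumes pymatgen (and pandas) are installed, and the
# lattice constant is arbitrary.
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

s = Structure(Lattice.cubic(2.8), ["Fe", "Fe"],
              [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
d = s.as_dict()                   # JSON-serializable dict
s2 = Structure.from_dict(d)       # reconstructed, compares equal to s
assert s2 == s

df = s.as_dataframe()             # one row per site: Species, a, b, c, x, y, z
print(df.attrs["Reduced Formula"])
# ----------------------------------------------------------------------------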
None otherwise.\n \"\"\"\n filename = filename or \"\"\n fmt = \"\" if fmt is None else fmt.lower()\n fname = os.path.basename(filename)\n\n if fmt == \"cif\" or fnmatch(fname.lower(), \"*.cif*\"):\n from pymatgen.io.cif import CifWriter\n writer = CifWriter(self, **kwargs)\n elif fmt == \"mcif\" or fnmatch(fname.lower(), \"*.mcif*\"):\n from pymatgen.io.cif import CifWriter\n writer = CifWriter(self, write_magmoms=True, **kwargs)\n elif fmt == \"poscar\" or fnmatch(fname, \"*POSCAR*\"):\n from pymatgen.io.vasp import Poscar\n writer = Poscar(self, **kwargs)\n elif fmt == \"cssr\" or fnmatch(fname.lower(), \"*.cssr*\"):\n from pymatgen.io.cssr import Cssr\n writer = Cssr(self, **kwargs)\n elif fmt == \"json\" or fnmatch(fname.lower(), \"*.json\"):\n s = json.dumps(self.as_dict())\n if filename:\n with zopen(filename, \"wt\") as f:\n f.write(\"%s\" % s)\n return s\n elif fmt == \"xsf\" or fnmatch(fname.lower(), \"*.xsf*\"):\n from pymatgen.io.xcrysden import XSF\n s = XSF(self).to_string()\n if filename:\n with zopen(fname, \"wt\", encoding='utf8') as f:\n f.write(s)\n return s\n elif fmt == 'mcsqs' or fnmatch(fname, \"*rndstr.in*\") \\\n or fnmatch(fname, \"*lat.in*\") \\\n or fnmatch(fname, \"*bestsqs*\"):\n from pymatgen.io.atat import Mcsqs\n s = Mcsqs(self).to_string()\n if filename:\n with zopen(fname, \"wt\", encoding='ascii') as f:\n f.write(s)\n return s\n elif fmt == 'prismatic' or fnmatch(fname, \"*prismatic*\"):\n from pymatgen.io.prismatic import Prismatic\n s = Prismatic(self).to_string()\n return s\n elif fmt == \"yaml\" or fnmatch(fname, \"*.yaml*\") or fnmatch(fname, \"*.yml*\"):\n import ruamel.yaml as yaml\n if filename:\n with zopen(filename, \"wt\") as f:\n yaml.safe_dump(self.as_dict(), f)\n return None\n return yaml.safe_dump(self.as_dict())\n else:\n raise ValueError(\"Invalid format: `%s`\" % str(fmt))\n\n if filename:\n writer.write_file(filename)\n return None\n return writer.__str__()\n\n @classmethod\n def from_str(cls, input_string, fmt, primitive=False, sort=False,\n merge_tol=0.0):\n \"\"\"\n Reads a structure from a string.\n\n Args:\n input_string (str): String to parse.\n fmt (str): A format specification.\n primitive (bool): Whether to find a primitive cell. Defaults to\n False.\n sort (bool): Whether to sort the sites in accordance to the default\n ordering criteria, i.e., electronegativity.\n merge_tol (float): If this is some positive number, sites that\n are within merge_tol from each other will be merged. 
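# --- Illustrative usage sketch (not part of the original module) -----------
# Writing the same structure in several formats via to(); the file names
# below are placeholders.
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

s = Structure(Lattice.cubic(4.2), ["Cs", "Cl"],
              [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
cif_string = s.to(fmt="cif")              # returns the CIF text
s.to(filename="CsCl.cif")                 # format inferred from the extension
s.to(fmt="poscar", filename="POSCAR")     # explicit fmt overrides the filename
json_string = s.to(fmt="json")
# ----------------------------------------------------------------------------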
Usually\n 0.01 should be enough to deal with common numerical issues.\n\n Returns:\n IStructure / Structure\n \"\"\"\n from pymatgen.io.cif import CifParser\n from pymatgen.io.vasp import Poscar\n from pymatgen.io.cssr import Cssr\n from pymatgen.io.xcrysden import XSF\n from pymatgen.io.atat import Mcsqs\n fmt = fmt.lower()\n if fmt == \"cif\":\n parser = CifParser.from_string(input_string)\n s = parser.get_structures(primitive=primitive)[0]\n elif fmt == \"poscar\":\n s = Poscar.from_string(input_string, False,\n read_velocities=False).structure\n elif fmt == \"cssr\":\n cssr = Cssr.from_string(input_string)\n s = cssr.structure\n elif fmt == \"json\":\n d = json.loads(input_string)\n s = Structure.from_dict(d)\n elif fmt == \"yaml\":\n import ruamel.yaml as yaml\n d = yaml.safe_load(input_string)\n s = Structure.from_dict(d)\n elif fmt == \"xsf\":\n s = XSF.from_string(input_string).structure\n elif fmt == \"mcsqs\":\n s = Mcsqs.structure_from_string(input_string)\n else:\n raise ValueError(\"Unrecognized format `%s`!\" % fmt)\n\n if sort:\n s = s.get_sorted_structure()\n if merge_tol:\n s.merge_sites(merge_tol)\n return cls.from_sites(s)\n\n @classmethod\n def from_file(cls, filename, primitive=False, sort=False, merge_tol=0.0):\n \"\"\"\n Reads a structure from a file. For example, anything ending in\n a \"cif\" is assumed to be a Crystallographic Information Format file.\n Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,\n vasprun.xml, CSSR, Netcdf and pymatgen's JSON serialized structures.\n\n Args:\n filename (str): The filename to read from.\n primitive (bool): Whether to convert to a primitive cell\n Only available for cifs. Defaults to False.\n sort (bool): Whether to sort sites. Default to False.\n merge_tol (float): If this is some positive number, sites that\n are within merge_tol from each other will be merged. 
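# --- Illustrative usage sketch (not part of the original module) -----------
# Parsing a structure directly from a string with from_str(); the POSCAR
# text below is a minimal hand-written example.
from pymatgen.core.structure import Structure

poscar_text = """Po (simple cubic)
1.0
3.35 0.00 0.00
0.00 3.35 0.00
0.00 0.00 3.35
Po
1
direct
0.0 0.0 0.0
"""
s = Structure.from_str(poscar_text, fmt="poscar")
print(s.composition.reduced_formula)      # -> Po
# ----------------------------------------------------------------------------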
Usually\n 0.01 should be enough to deal with common numerical issues.\n\n Returns:\n Structure.\n \"\"\"\n filename = str(filename)\n if filename.endswith(\".nc\"):\n # Read Structure from a netcdf file.\n from pymatgen.io.abinit.netcdf import structure_from_ncdata\n s = structure_from_ncdata(filename, cls=cls)\n if sort:\n s = s.get_sorted_structure()\n return s\n\n from pymatgen.io.lmto import LMTOCtrl\n from pymatgen.io.vasp import Vasprun, Chgcar\n from pymatgen.io.exciting import ExcitingInput\n fname = os.path.basename(filename)\n with zopen(filename, \"rt\") as f:\n contents = f.read()\n if fnmatch(fname.lower(), \"*.cif*\") or fnmatch(fname.lower(), \"*.mcif*\"):\n return cls.from_str(contents, fmt=\"cif\",\n primitive=primitive, sort=sort,\n merge_tol=merge_tol)\n if fnmatch(fname, \"*POSCAR*\") or fnmatch(fname, \"*CONTCAR*\") or fnmatch(fname, \"*.vasp\"):\n s = cls.from_str(contents, fmt=\"poscar\",\n primitive=primitive, sort=sort,\n merge_tol=merge_tol)\n\n elif fnmatch(fname, \"CHGCAR*\") or fnmatch(fname, \"LOCPOT*\"):\n s = Chgcar.from_file(filename).structure\n elif fnmatch(fname, \"vasprun*.xml*\"):\n s = Vasprun(filename).final_structure\n elif fnmatch(fname.lower(), \"*.cssr*\"):\n return cls.from_str(contents, fmt=\"cssr\",\n primitive=primitive, sort=sort,\n merge_tol=merge_tol)\n elif fnmatch(fname, \"*.json*\") or fnmatch(fname, \"*.mson*\"):\n return cls.from_str(contents, fmt=\"json\",\n primitive=primitive, sort=sort,\n merge_tol=merge_tol)\n elif fnmatch(fname, \"*.yaml*\"):\n return cls.from_str(contents, fmt=\"yaml\",\n primitive=primitive, sort=sort,\n merge_tol=merge_tol)\n elif fnmatch(fname, \"*.xsf\"):\n return cls.from_str(contents, fmt=\"xsf\",\n primitive=primitive, sort=sort,\n merge_tol=merge_tol)\n elif fnmatch(fname, \"input*.xml\"):\n return ExcitingInput.from_file(fname).structure\n elif fnmatch(fname, \"*rndstr.in*\") or fnmatch(fname, \"*lat.in*\") or fnmatch(fname, \"*bestsqs*\"):\n return cls.from_str(contents, fmt=\"mcsqs\",\n primitive=primitive, sort=sort,\n merge_tol=merge_tol)\n elif fnmatch(fname, \"CTRL*\"):\n return LMTOCtrl.from_file(filename=filename).structure\n else:\n raise ValueError(\"Unrecognized file extension!\")\n if sort:\n s = s.get_sorted_structure()\n if merge_tol:\n s.merge_sites(merge_tol)\n\n s.__class__ = cls\n return s\n\n\nclass IMolecule(SiteCollection, MSONable):\n \"\"\"\n Basic immutable Molecule object without periodicity. Essentially a\n sequence of sites. IMolecule is made to be immutable so that they can\n function as keys in a dict. For a mutable molecule,\n use the :class:Molecule.\n\n Molecule extends Sequence and Hashable, which means that in many cases,\n it can be used like any Python sequence. Iterating through a molecule is\n equivalent to going through the sites in sequence.\n \"\"\"\n\n def __init__(self,\n species: Sequence[Union[str, Element, Species, DummySpecies, Composition]],\n coords: Sequence[Sequence[float]],\n charge: float = 0.0,\n spin_multiplicity: float = None,\n validate_proximity: bool = False,\n site_properties: dict = None):\n \"\"\"\n Creates a Molecule.\n\n Args:\n species: list of atomic species. Possible kinds of input include a\n list of dict of elements/species and occupancies, a List of\n elements/specie specified as actual Element/Species, Strings\n (\"Fe\", \"Fe2+\") or atomic numbers (1,56).\n coords (3x1 array): list of cartesian coordinates of each species.\n charge (float): Charge for the molecule. 
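# --- Illustrative usage sketch (not part of the original module) -----------
# Reading structures from files; the file names are placeholders and the
# format is inferred from the name, as implemented in from_file above.
from pymatgen.core.structure import Structure

s1 = Structure.from_file("POSCAR")                       # VASP POSCAR/CONTCAR
s2 = Structure.from_file("my_structure.cif", primitive=True)
s3 = Structure.from_file("structure.json")               # pymatgen JSON dump
# ----------------------------------------------------------------------------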
Defaults to 0.\n spin_multiplicity (int): Spin multiplicity for molecule.\n Defaults to None, which means that the spin multiplicity is\n set to 1 if the molecule has no unpaired electrons and to 2\n if there are unpaired electrons.\n validate_proximity (bool): Whether to check if there are sites\n that are less than 1 Ang apart. Defaults to False.\n site_properties (dict): Properties associated with the sites as\n a dict of sequences, e.g., {\"magmom\":[5,5,5,5]}. The\n sequences have to be the same length as the atomic species\n and fractional_coords. Defaults to None for no properties.\n \"\"\"\n if len(species) != len(coords):\n raise StructureError((\"The list of atomic species must be of the\",\n \" same length as the list of fractional \",\n \"coordinates.\"))\n\n sites = []\n for i, _ in enumerate(species):\n prop = None\n if site_properties:\n prop = {k: v[i] for k, v in site_properties.items()}\n sites.append(Site(species[i], coords[i], properties=prop))\n\n self._sites = tuple(sites)\n if validate_proximity and not self.is_valid():\n raise StructureError((\"Molecule contains sites that are \",\n \"less than 0.01 Angstrom apart!\"))\n\n self._charge = charge\n nelectrons = 0.0\n for site in sites:\n for sp, amt in site.species.items():\n if not isinstance(sp, DummySpecies):\n nelectrons += sp.Z * amt # type: ignore\n nelectrons -= charge\n self._nelectrons = nelectrons\n if spin_multiplicity:\n if (nelectrons + spin_multiplicity) % 2 != 1:\n raise ValueError(\n \"Charge of %d and spin multiplicity of %d is\"\n \" not possible for this molecule\" %\n (self._charge, spin_multiplicity))\n self._spin_multiplicity = spin_multiplicity\n else:\n self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2\n\n @property\n def charge(self):\n \"\"\"\n Charge of molecule\n \"\"\"\n return self._charge\n\n @property\n def spin_multiplicity(self):\n \"\"\"\n Spin multiplicity of molecule.\n \"\"\"\n return self._spin_multiplicity\n\n @property\n def nelectrons(self):\n \"\"\"\n Number of electrons in the molecule.\n \"\"\"\n return self._nelectrons\n\n @property\n def center_of_mass(self):\n \"\"\"\n Center of mass of molecule.\n \"\"\"\n center = np.zeros(3)\n total_weight = 0\n for site in self:\n wt = site.species.weight\n center += site.coords * wt\n total_weight += wt\n return center / total_weight\n\n @property\n def sites(self):\n \"\"\"\n Returns a tuple of sites in the Molecule.\n \"\"\"\n return self._sites\n\n @classmethod\n def from_sites(cls, sites, charge=0, spin_multiplicity=None,\n validate_proximity=False):\n \"\"\"\n Convenience constructor to make a Molecule from a list of sites.\n\n Args:\n sites ([Site]): Sequence of Sites.\n charge (int): Charge of molecule. Defaults to 0.\n spin_multiplicity (int): Spin multicipity. Defaults to None,\n in which it is determined automatically.\n validate_proximity (bool): Whether to check that atoms are too\n close.\n \"\"\"\n props = collections.defaultdict(list)\n for site in sites:\n for k, v in site.properties.items():\n props[k].append(v)\n return cls([site.species for site in sites],\n [site.coords for site in sites],\n charge=charge, spin_multiplicity=spin_multiplicity,\n validate_proximity=validate_proximity,\n site_properties=props)\n\n def break_bond(self, ind1, ind2, tol=0.2):\n \"\"\"\n Returns two molecules based on breaking the bond between atoms at index\n ind1 and ind2.\n\n Args:\n ind1 (int): Index of first site.\n ind2 (int): Index of second site.\n tol (float): Relative tolerance to test. 
Basically, the code\n checks if the distance between the sites is less than (1 +\n tol) * typical bond distances. Defaults to 0.2, i.e.,\n 20% longer.\n\n Returns:\n Two Molecule objects representing the two clusters formed from\n breaking the bond.\n \"\"\"\n sites = self._sites\n clusters = [[sites[ind1]], [sites[ind2]]]\n\n sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]\n\n def belongs_to_cluster(site, cluster):\n for test_site in cluster:\n if CovalentBond.is_bonded(site, test_site, tol=tol):\n return True\n return False\n\n while len(sites) > 0:\n unmatched = []\n for site in sites:\n for cluster in clusters:\n if belongs_to_cluster(site, cluster):\n cluster.append(site)\n break\n else:\n unmatched.append(site)\n\n if len(unmatched) == len(sites):\n raise ValueError(\"Not all sites are matched!\")\n sites = unmatched\n\n return (self.__class__.from_sites(cluster)\n for cluster in clusters)\n\n def get_covalent_bonds(self, tol=0.2):\n \"\"\"\n Determines the covalent bonds in a molecule.\n\n Args:\n tol (float): The tol to determine bonds in a structure. See\n CovalentBond.is_bonded.\n\n Returns:\n List of bonds\n \"\"\"\n bonds = []\n for site1, site2 in itertools.combinations(self._sites, 2):\n if CovalentBond.is_bonded(site1, site2, tol):\n bonds.append(CovalentBond(site1, site2))\n return bonds\n\n def __eq__(self, other):\n if other is None:\n return False\n if len(self) != len(other):\n return False\n if self.charge != other.charge:\n return False\n if self.spin_multiplicity != other.spin_multiplicity:\n return False\n for site in self:\n if site not in other:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n # For now, just use the composition hash code.\n return self.composition.__hash__()\n\n def __repr__(self):\n outs = [\"Molecule Summary\"]\n for s in self:\n outs.append(s.__repr__())\n return \"\\n\".join(outs)\n\n def __str__(self):\n outs = [\"Full Formula (%s)\" % self.composition.formula,\n \"Reduced Formula: \" + self.composition.reduced_formula,\n \"Charge = %s, Spin Mult = %s\" % (\n self._charge, self._spin_multiplicity),\n \"Sites (%d)\" % len(self)]\n for i, site in enumerate(self):\n outs.append(\" \".join([str(i), site.species_string,\n \" \".join([(\"%0.6f\" % j).rjust(12)\n for j in site.coords])]))\n return \"\\n\".join(outs)\n\n def as_dict(self):\n \"\"\"\n Json-serializable dict representation of Molecule\n \"\"\"\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"charge\": self._charge,\n \"spin_multiplicity\": self._spin_multiplicity,\n \"sites\": []}\n for site in self:\n site_dict = site.as_dict()\n del site_dict[\"@module\"]\n del site_dict[\"@class\"]\n d[\"sites\"].append(site_dict)\n return d\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Reconstitute a Molecule object from a dict representation created using\n as_dict().\n\n Args:\n d (dict): dict representation of Molecule.\n\n Returns:\n Molecule object\n \"\"\"\n sites = [Site.from_dict(sd) for sd in d[\"sites\"]]\n charge = d.get(\"charge\", 0)\n spin_multiplicity = d.get(\"spin_multiplicity\")\n return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity)\n\n def get_distance(self, i, j):\n \"\"\"\n Get distance between site i and j.\n\n Args:\n i (int): Index of first site\n j (int): Index of second site\n\n Returns:\n Distance between the two sites.\n \"\"\"\n return self[i].distance(self[j])\n\n def get_sites_in_sphere(self, pt, r):\n 
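# --- Illustrative usage sketch (not part of the original module) -----------
# Covalent-bond analysis on a small molecule; the C-O-H coordinates are
# rough, illustrative values in Angstrom.
from pymatgen.core.structure import Molecule

mol = Molecule(["C", "O", "H"], [[0.00, 0.00, 0.00],
                                 [1.43, 0.00, 0.00],
                                 [1.76, 0.90, 0.00]])
print(len(mol.get_covalent_bonds()))      # bonds found with the default tol
print(mol.get_distance(0, 1))             # C-O distance in Angstrom
frag1, frag2 = mol.break_bond(0, 1)       # split the cluster across C-O
print(frag1.formula, frag2.formula)
# ----------------------------------------------------------------------------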
\"\"\"\n Find all sites within a sphere from a point.\n\n Args:\n pt (3x1 array): Cartesian coordinates of center of sphere\n r (float): Radius of sphere.\n\n Returns:\n [Neighbor] since most of the time, subsequent processing\n requires the distance.\n \"\"\"\n neighbors = []\n for i, site in enumerate(self._sites):\n dist = site.distance_from_point(pt)\n if dist <= r:\n neighbors.append(Neighbor(site.species, site.coords,\n site.properties, dist, i))\n return neighbors\n\n def get_neighbors(self, site, r):\n \"\"\"\n Get all neighbors to a site within a sphere of radius r. Excludes the\n site itself.\n\n Args:\n site (Site): Site at the center of the sphere.\n r (float): Radius of sphere.\n\n Returns:\n [(site, dist) ...] since most of the time, subsequent processing\n requires the distance.\n \"\"\"\n nns = self.get_sites_in_sphere(site.coords, r)\n return [nn for nn in nns if nn != site]\n\n def get_neighbors_in_shell(self, origin, r, dr):\n \"\"\"\n Returns all sites in a shell centered on origin (coords) between radii\n r-dr and r+dr.\n\n Args:\n origin (3x1 array): Cartesian coordinates of center of sphere.\n r (float): Inner radius of shell.\n dr (float): Width of shell.\n\n Returns:\n [(site, dist) ...] since most of the time, subsequent processing\n requires the distance.\n \"\"\"\n outer = self.get_sites_in_sphere(origin, r + dr)\n inner = r - dr\n return [nn for nn in outer if nn.nn_distance > inner]\n\n def get_boxed_structure(self, a, b, c, images=(1, 1, 1),\n random_rotation=False, min_dist=1, cls=None,\n offset=None, no_cross=False, reorder=True):\n \"\"\"\n Creates a Structure from a Molecule by putting the Molecule in the\n center of a orthorhombic box. Useful for creating Structure for\n calculating molecules using periodic codes.\n\n Args:\n a (float): a-lattice parameter.\n b (float): b-lattice parameter.\n c (float): c-lattice parameter.\n images: No. of boxed images in each direction. Defaults to\n (1, 1, 1), meaning single molecule with 1 lattice parameter\n in each direction.\n random_rotation (bool): Whether to apply a random rotation to\n each molecule. This jumbles all the molecules so that they\n are not exact images of each other.\n min_dist (float): The minimum distance that atoms should be from\n each other. 
This is only used if random_rotation is True.\n The randomized rotations are searched such that no two atoms\n are less than min_dist from each other.\n cls: The Structure class to instantiate (defaults to pymatgen\n structure)\n offset: Translation to offset molecule from center of mass coords\n no_cross: Whether to forbid molecule coords from extending beyond\n boundary of box.\n reorder: Whether to reorder the sites to be in electronegativity\n order.\n\n Returns:\n Structure containing molecule in a box.\n \"\"\"\n if offset is None:\n offset = np.array([0, 0, 0])\n\n coords = np.array(self.cart_coords)\n x_range = max(coords[:, 0]) - min(coords[:, 0])\n y_range = max(coords[:, 1]) - min(coords[:, 1])\n z_range = max(coords[:, 2]) - min(coords[:, 2])\n\n if a <= x_range or b <= y_range or c <= z_range:\n raise ValueError(\"Box is not big enough to contain Molecule.\")\n lattice = Lattice.from_parameters(a * images[0], b * images[1],\n c * images[2],\n 90, 90, 90)\n nimages = images[0] * images[1] * images[2]\n coords = []\n\n centered_coords = self.cart_coords - self.center_of_mass + offset\n\n for i, j, k in itertools.product(list(range(images[0])),\n list(range(images[1])),\n list(range(images[2]))):\n box_center = [(i + 0.5) * a, (j + 0.5) * b, (k + 0.5) * c]\n if random_rotation:\n while True:\n op = SymmOp.from_origin_axis_angle(\n (0, 0, 0), axis=np.random.rand(3),\n angle=random.uniform(-180, 180))\n m = op.rotation_matrix\n new_coords = np.dot(m, centered_coords.T).T + box_center\n if no_cross:\n x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])\n y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])\n z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])\n if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:\n raise ValueError(\"Molecule crosses boundary of box.\")\n if len(coords) == 0:\n break\n distances = lattice.get_all_distances(\n lattice.get_fractional_coords(new_coords),\n lattice.get_fractional_coords(coords))\n if np.amin(distances) > min_dist:\n break\n else:\n new_coords = centered_coords + box_center\n if no_cross:\n x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])\n y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])\n z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])\n if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:\n raise ValueError(\"Molecule crosses boundary of box.\")\n coords.extend(new_coords)\n sprops = {k: v * nimages for k, v in self.site_properties.items()}\n\n if cls is None:\n cls = Structure\n\n if reorder:\n return cls(lattice, self.species * nimages, coords,\n coords_are_cartesian=True,\n site_properties=sprops).get_sorted_structure()\n\n return cls(lattice, self.species * nimages, coords,\n coords_are_cartesian=True,\n site_properties=sprops)\n\n def get_centered_molecule(self):\n \"\"\"\n Returns a Molecule centered at the center of mass.\n\n Returns:\n Molecule centered with center of mass at origin.\n \"\"\"\n center = self.center_of_mass\n new_coords = np.array(self.cart_coords) - center\n return self.__class__(self.species_and_occu, new_coords,\n charge=self._charge,\n spin_multiplicity=self._spin_multiplicity,\n site_properties=self.site_properties)\n\n def to(self, fmt=None, filename=None):\n \"\"\"\n Outputs the molecule to a file or string.\n\n Args:\n fmt (str): Format to output to. Defaults to JSON unless filename\n is provided. If fmt is specifies, it overrides whatever the\n filename is. 
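# --- Illustrative usage sketch (not part of the original module) -----------
# Placing a molecule in a periodic box, e.g. for a plane-wave calculation;
# the 10 Angstrom box size is an arbitrary choice.
from pymatgen.core.structure import Molecule

co = Molecule(["C", "O"], [[0.0, 0.0, 0.0], [0.0, 0.0, 1.13]])
boxed = co.get_boxed_structure(10, 10, 10)    # Structure with a single image
centered = co.get_centered_molecule()         # center of mass moved to origin
print(boxed.lattice.abc, len(boxed))
# ----------------------------------------------------------------------------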
Options include \"xyz\", \"gjf\", \"g03\", \"json\". If\n you have OpenBabel installed, any of the formats supported by\n OpenBabel. Non-case sensitive.\n filename (str): If provided, output will be written to a file. If\n fmt is not specified, the format is determined from the\n filename. Defaults is None, i.e. string output.\n\n Returns:\n (str) if filename is None. None otherwise.\n \"\"\"\n from pymatgen.io.xyz import XYZ\n from pymatgen.io.gaussian import GaussianInput\n from pymatgen.io.babel import BabelMolAdaptor\n\n fmt = \"\" if fmt is None else fmt.lower()\n fname = os.path.basename(filename or \"\")\n if fmt == \"xyz\" or fnmatch(fname.lower(), \"*.xyz*\"):\n writer = XYZ(self)\n elif any([fmt == r or fnmatch(fname.lower(), \"*.{}*\".format(r))\n for r in [\"gjf\", \"g03\", \"g09\", \"com\", \"inp\"]]):\n writer = GaussianInput(self)\n elif fmt == \"json\" or fnmatch(fname, \"*.json*\") or fnmatch(fname,\n \"*.mson*\"):\n if filename:\n with zopen(filename, \"wt\", encoding='utf8') as f:\n return json.dump(self.as_dict(), f)\n else:\n return json.dumps(self.as_dict())\n elif fmt == \"yaml\" or fnmatch(fname, \"*.yaml*\"):\n import ruamel.yaml as yaml\n\n if filename:\n with zopen(fname, \"wt\", encoding='utf8') as f:\n return yaml.safe_dump(self.as_dict(), f)\n else:\n return yaml.safe_dump(self.as_dict())\n\n else:\n m = re.search(r\"\\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)\",\n fname.lower())\n if (not fmt) and m:\n fmt = m.group(1)\n writer = BabelMolAdaptor(self)\n return writer.write_file(filename, file_format=fmt)\n\n if filename:\n writer.write_file(filename)\n return str(writer)\n\n @classmethod\n def from_str(cls, input_string: str, fmt: str):\n \"\"\"\n Reads the molecule from a string.\n\n Args:\n input_string (str): String to parse.\n fmt (str): Format to output to. Defaults to JSON unless filename\n is provided. If fmt is specifies, it overrides whatever the\n filename is. Options include \"xyz\", \"gjf\", \"g03\", \"json\". If\n you have OpenBabel installed, any of the formats supported by\n OpenBabel. Non-case sensitive.\n\n Returns:\n IMolecule or Molecule.\n \"\"\"\n from pymatgen.io.xyz import XYZ\n from pymatgen.io.gaussian import GaussianInput\n if fmt.lower() == \"xyz\":\n m = XYZ.from_string(input_string).molecule\n elif fmt in [\"gjf\", \"g03\", \"g09\", \"com\", \"inp\"]:\n m = GaussianInput.from_string(input_string).molecule\n elif fmt == \"json\":\n d = json.loads(input_string)\n return cls.from_dict(d)\n elif fmt == \"yaml\":\n import ruamel.yaml as yaml\n d = yaml.safe_load(input_string)\n return cls.from_dict(d)\n else:\n from pymatgen.io.babel import BabelMolAdaptor\n m = BabelMolAdaptor.from_string(input_string,\n file_format=fmt).pymatgen_mol\n return cls.from_sites(m)\n\n @classmethod\n def from_file(cls, filename):\n \"\"\"\n Reads a molecule from a file. Supported formats include xyz,\n gaussian input (gjf|g03|g09|com|inp), Gaussian output (.out|and\n pymatgen's JSON serialized molecules. 
Using openbabel,\n many more extensions are supported but requires openbabel to be\n installed.\n\n Args:\n filename (str): The filename to read from.\n\n Returns:\n Molecule\n \"\"\"\n filename = str(filename)\n from pymatgen.io.gaussian import GaussianOutput\n with zopen(filename) as f:\n contents = f.read()\n fname = filename.lower()\n if fnmatch(fname, \"*.xyz*\"):\n return cls.from_str(contents, fmt=\"xyz\")\n if any([fnmatch(fname.lower(), \"*.{}*\".format(r))\n for r in [\"gjf\", \"g03\", \"g09\", \"com\", \"inp\"]]):\n return cls.from_str(contents, fmt=\"g09\")\n if any([fnmatch(fname.lower(), \"*.{}*\".format(r))\n for r in [\"out\", \"lis\", \"log\"]]):\n return GaussianOutput(filename).final_structure\n if fnmatch(fname, \"*.json*\") or fnmatch(fname, \"*.mson*\"):\n return cls.from_str(contents, fmt=\"json\")\n if fnmatch(fname, \"*.yaml*\"):\n return cls.from_str(contents, fmt=\"yaml\")\n from pymatgen.io.babel import BabelMolAdaptor\n m = re.search(r\"\\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)\",\n filename.lower())\n if m:\n new = BabelMolAdaptor.from_file(filename,\n m.group(1)).pymatgen_mol\n new.__class__ = cls\n return new\n raise ValueError(\"Cannot determine file type.\")\n\n\nclass Structure(IStructure, collections.abc.MutableSequence):\n \"\"\"\n Mutable version of structure.\n \"\"\"\n __hash__ = None # type: ignore\n\n def __init__(self,\n lattice: Union[List, np.ndarray, Lattice],\n species: Sequence[Union[str, Element, Species, DummySpecies, Composition]],\n coords: Sequence[Sequence[float]],\n charge: float = None,\n validate_proximity: bool = False,\n to_unit_cell: bool = False,\n coords_are_cartesian: bool = False,\n site_properties: dict = None):\n \"\"\"\n Create a periodic structure.\n\n Args:\n lattice: The lattice, either as a pymatgen.core.lattice.Lattice or\n simply as any 2D array. Each row should correspond to a lattice\n vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a\n lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].\n species: List of species on each site. Can take in flexible input,\n including:\n\n i. A sequence of element / species specified either as string\n symbols, e.g. [\"Li\", \"Fe2+\", \"P\", ...] or atomic numbers,\n e.g., (3, 56, ...) or actual Element or Species objects.\n\n ii. List of dict of elements/species and occupancies, e.g.,\n [{\"Fe\" : 0.5, \"Mn\":0.5}, ...]. This allows the setup of\n disordered structures.\n coords (Nx3 array): list of fractional/cartesian coordinates of\n each species.\n charge (int): overall charge of the structure. Defaults to behavior\n in SiteCollection where total charge is the sum of the oxidation\n states.\n validate_proximity (bool): Whether to check if there are sites\n that are less than 0.01 Ang apart. Defaults to False.\n to_unit_cell (bool): Whether to map all sites into the unit cell,\n i.e., fractional coords between 0 and 1. Defaults to False.\n coords_are_cartesian (bool): Set to True if you are providing\n coordinates in cartesian coordinates. Defaults to False.\n site_properties (dict): Properties associated with the sites as a\n dict of sequences, e.g., {\"magmom\":[5,5,5,5]}. The sequences\n have to be the same length as the atomic species and\n fractional_coords. 
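# --- Illustrative usage sketch (not part of the original module) -----------
# Building a mutable Structure, including a partially occupied site and a
# per-site property; all numbers are arbitrary examples.
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

s = Structure(Lattice.cubic(3.0),
              ["Fe", {"Mn": 0.5, "Ni": 0.5}],       # ordered + disordered site
              [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
              site_properties={"magmom": [5, 0]})
print(s.is_ordered)                                 # False: mixed Mn/Ni site
# ----------------------------------------------------------------------------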
Defaults to None for no properties.\n \"\"\"\n super().__init__(\n lattice, species, coords, charge=charge,\n validate_proximity=validate_proximity, to_unit_cell=to_unit_cell,\n coords_are_cartesian=coords_are_cartesian,\n site_properties=site_properties)\n\n self._sites = list(self._sites) # type: ignore\n\n def __setitem__(self, i, site):\n \"\"\"\n Modify a site in the structure.\n\n Args:\n i (int, [int], slice, Species-like): Indices to change. You can\n specify these as an int, a list of int, or a species-like\n string.\n site (PeriodicSite/Species/Sequence): Three options exist. You\n can provide a PeriodicSite directly (lattice will be\n checked). Or more conveniently, you can provide a\n specie-like object or a tuple of up to length 3.\n\n Examples:\n s[0] = \"Fe\"\n s[0] = Element(\"Fe\")\n both replaces the species only.\n s[0] = \"Fe\", [0.5, 0.5, 0.5]\n Replaces site and *fractional* coordinates. Any properties\n are inherited from current site.\n s[0] = \"Fe\", [0.5, 0.5, 0.5], {\"spin\": 2}\n Replaces site and *fractional* coordinates and properties.\n\n s[(0, 2, 3)] = \"Fe\"\n Replaces sites 0, 2 and 3 with Fe.\n\n s[0::2] = \"Fe\"\n Replaces all even index sites with Fe.\n\n s[\"Mn\"] = \"Fe\"\n Replaces all Mn in the structure with Fe. This is\n a short form for the more complex replace_species.\n\n s[\"Mn\"] = \"Fe0.5Co0.5\"\n Replaces all Mn in the structure with Fe: 0.5, Co: 0.5, i.e.,\n creates a disordered structure!\n \"\"\"\n\n if isinstance(i, int):\n indices = [i]\n elif isinstance(i, (str, Element, Species)):\n self.replace_species({i: site})\n return\n elif isinstance(i, slice):\n to_mod = self[i]\n indices = [ii for ii, s in enumerate(self._sites)\n if s in to_mod]\n else:\n indices = list(i)\n\n for ii in indices:\n if isinstance(site, PeriodicSite):\n if site.lattice != self._lattice:\n raise ValueError(\"PeriodicSite added must have same lattice \"\n \"as Structure!\")\n if len(indices) != 1:\n raise ValueError(\"Site assignments makes sense only for \"\n \"single int indices!\")\n self._sites[ii] = site\n else:\n if isinstance(site, str) or (\n not isinstance(site, collections.abc.Sequence)):\n self._sites[ii].species = site\n else:\n self._sites[ii].species = site[0]\n if len(site) > 1:\n self._sites[ii].frac_coords = site[1]\n if len(site) > 2:\n self._sites[ii].properties = site[2]\n\n def __delitem__(self, i):\n \"\"\"\n Deletes a site from the Structure.\n \"\"\"\n self._sites.__delitem__(i)\n\n @property\n def lattice(self):\n \"\"\"\n :return: Lattice assciated with structure.\n \"\"\"\n return self._lattice\n\n @lattice.setter\n def lattice(self, lattice):\n self._lattice = lattice\n for site in self._sites:\n site.lattice = lattice\n\n def append(self, species, coords, coords_are_cartesian=False,\n validate_proximity=False, properties=None):\n \"\"\"\n Append a site to the structure.\n\n Args:\n species: Species of inserted site\n coords (3x1 array): Coordinates of inserted site\n coords_are_cartesian (bool): Whether coordinates are cartesian.\n Defaults to False.\n validate_proximity (bool): Whether to check if inserted site is\n too close to an existing site. 
Defaults to False.\n properties (dict): Properties of the site.\n\n Returns:\n New structure with inserted site.\n \"\"\"\n return self.insert(len(self), species, coords,\n coords_are_cartesian=coords_are_cartesian,\n validate_proximity=validate_proximity,\n properties=properties)\n\n def insert(self, i, species, coords, coords_are_cartesian=False,\n validate_proximity=False, properties=None):\n \"\"\"\n Insert a site to the structure.\n\n Args:\n i (int): Index to insert site\n species (species-like): Species of inserted site\n coords (3x1 array): Coordinates of inserted site\n coords_are_cartesian (bool): Whether coordinates are cartesian.\n Defaults to False.\n validate_proximity (bool): Whether to check if inserted site is\n too close to an existing site. Defaults to False.\n properties (dict): Properties associated with the site.\n\n Returns:\n New structure with inserted site.\n \"\"\"\n if not coords_are_cartesian:\n new_site = PeriodicSite(species, coords, self._lattice,\n properties=properties)\n else:\n frac_coords = self._lattice.get_fractional_coords(coords)\n new_site = PeriodicSite(species, frac_coords, self._lattice,\n properties=properties)\n\n if validate_proximity:\n for site in self:\n if site.distance(new_site) < self.DISTANCE_TOLERANCE:\n raise ValueError(\"New site is too close to an existing \"\n \"site!\")\n\n self._sites.insert(i, new_site)\n\n def replace(self, i, species, coords=None, coords_are_cartesian=False,\n properties=None):\n \"\"\"\n Replace a single site. Takes either a species or a dict of species and\n occupations.\n\n Args:\n i (int): Index of the site in the _sites list.\n species (species-like): Species of replacement site\n coords (3x1 array): Coordinates of replacement site. If None,\n the current coordinates are assumed.\n coords_are_cartesian (bool): Whether coordinates are cartesian.\n Defaults to False.\n properties (dict): Properties associated with the site.\n \"\"\"\n if coords is None:\n frac_coords = self[i].frac_coords\n elif coords_are_cartesian:\n frac_coords = self._lattice.get_fractional_coords(coords)\n else:\n frac_coords = coords\n\n new_site = PeriodicSite(species, frac_coords, self._lattice,\n properties=properties)\n self._sites[i] = new_site\n\n def substitute(self, index, func_grp, bond_order=1):\n \"\"\"\n Substitute atom at index with a functional group.\n\n Args:\n index (int): Index of atom to substitute.\n func_grp: Substituent molecule. There are two options:\n\n 1. Providing an actual Molecule as the input. The first atom\n must be a DummySpecies X, indicating the position of\n nearest neighbor. The second atom must be the next\n nearest atom. For example, for a methyl group\n substitution, func_grp should be X-CH3, where X is the\n first site and C is the second site. What the code will\n do is to remove the index site, and connect the nearest\n neighbor to the C atom in CH3. The X-C bond indicates the\n directionality to connect the atoms.\n 2. A string name. The molecule will be obtained from the\n relevant template in func_groups.json.\n bond_order (int): A specified bond order to calculate the bond\n length between the attached functional group and the nearest\n neighbor site. 
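# --- Illustrative usage sketch (not part of the original module) -----------
# In-place editing of a mutable Structure with append/insert/replace;
# the coordinates are arbitrary.
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

s = Structure(Lattice.cubic(4.0), ["Na"], [[0.0, 0.0, 0.0]])
s.append("Cl", [0.5, 0.5, 0.5])                  # fractional coords by default
s.insert(1, "K", [0.25, 0.25, 0.25])
s.replace(0, "Li")                               # keep coords, swap the species
print([site.species_string for site in s])       # ['Li', 'K', 'Cl']
# ----------------------------------------------------------------------------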
Defaults to 1.\n \"\"\"\n\n # Find the nearest neighbor that is not a terminal atom.\n all_non_terminal_nn = []\n for nn, dist, _, _ in self.get_neighbors(self[index], 3):\n # Check that the nn has neighbors within a sensible distance but\n # is not the site being substituted.\n for inn, dist2, _, _ in self.get_neighbors(nn, 3):\n if inn != self[index] and \\\n dist2 < 1.2 * get_bond_length(nn.specie, inn.specie):\n all_non_terminal_nn.append((nn, dist))\n break\n\n if len(all_non_terminal_nn) == 0:\n raise RuntimeError(\"Can't find a non-terminal neighbor to attach\"\n \" functional group to.\")\n\n non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]\n\n # Set the origin point to be the coordinates of the nearest\n # non-terminal neighbor.\n origin = non_terminal_nn.coords\n\n # Pass value of functional group--either from user-defined or from\n # functional.json\n if isinstance(func_grp, Molecule):\n func_grp = func_grp\n else:\n # Check to see whether the functional group is in database.\n if func_grp not in FunctionalGroups:\n raise RuntimeError(\"Can't find functional group in list. \"\n \"Provide explicit coordinate instead\")\n func_grp = FunctionalGroups[func_grp]\n\n # If a bond length can be found, modify func_grp so that the X-group\n # bond length is equal to the bond length.\n try:\n bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie,\n bond_order=bond_order)\n # Catches for case of incompatibility between Element(s) and Species(s)\n except TypeError:\n bl = None\n\n if bl is not None:\n func_grp = func_grp.copy()\n vec = func_grp[0].coords - func_grp[1].coords\n vec /= np.linalg.norm(vec)\n func_grp[0] = \"X\", func_grp[1].coords + float(bl) * vec\n\n # Align X to the origin.\n x = func_grp[0]\n func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)\n\n # Find angle between the attaching bond and the bond to be replaced.\n v1 = func_grp[1].coords - origin\n v2 = self[index].coords - origin\n angle = get_angle(v1, v2)\n\n if 1 < abs(angle % 180) < 179:\n # For angles which are not 0 or 180, we perform a rotation about\n # the origin along an axis perpendicular to both bonds to align\n # bonds.\n axis = np.cross(v1, v2)\n op = SymmOp.from_origin_axis_angle(origin, axis, angle)\n func_grp.apply_operation(op)\n elif abs(abs(angle) - 180) < 1:\n # We have a 180 degree angle. 
Simply do an inversion about the\n # origin\n for i, fg in enumerate(func_grp):\n func_grp[i] = (fg.species, origin - (fg.coords - origin))\n\n # Remove the atom to be replaced, and add the rest of the functional\n # group.\n del self[index]\n for site in func_grp[1:]:\n s_new = PeriodicSite(site.species, site.coords,\n self.lattice, coords_are_cartesian=True)\n self._sites.append(s_new)\n\n def remove_species(self, species):\n \"\"\"\n Remove all occurrences of several species from a structure.\n\n Args:\n species: Sequence of species to remove, e.g., [\"Li\", \"Na\"].\n \"\"\"\n new_sites = []\n species = [get_el_sp(s) for s in species]\n\n for site in self._sites:\n new_sp_occu = {sp: amt for sp, amt in site.species.items()\n if sp not in species}\n if len(new_sp_occu) > 0:\n new_sites.append(PeriodicSite(\n new_sp_occu, site.frac_coords, self._lattice,\n properties=site.properties))\n self._sites = new_sites\n\n def remove_sites(self, indices):\n \"\"\"\n Delete sites with at indices.\n\n Args:\n indices: Sequence of indices of sites to delete.\n \"\"\"\n self._sites = [s for i, s in enumerate(self._sites)\n if i not in indices]\n\n def apply_operation(self, symmop, fractional=False):\n \"\"\"\n Apply a symmetry operation to the structure and return the new\n structure. The lattice is operated by the rotation matrix only.\n Coords are operated in full and then transformed to the new lattice.\n\n Args:\n symmop (SymmOp): Symmetry operation to apply.\n fractional (bool): Whether the symmetry operation is applied in\n fractional space. Defaults to False, i.e., symmetry operation\n is applied in cartesian coordinates.\n \"\"\"\n if not fractional:\n self._lattice = Lattice([symmop.apply_rotation_only(row)\n for row in self._lattice.matrix])\n\n def operate_site(site):\n new_cart = symmop.operate(site.coords)\n new_frac = self._lattice.get_fractional_coords(new_cart)\n return PeriodicSite(site.species, new_frac,\n self._lattice,\n properties=site.properties,\n skip_checks=True)\n\n else:\n new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)\n self._lattice = Lattice(new_latt)\n\n def operate_site(site):\n return PeriodicSite(site.species,\n symmop.operate(site.frac_coords),\n self._lattice,\n properties=site.properties,\n skip_checks=True)\n\n self._sites = [operate_site(s) for s in self._sites]\n\n @deprecated(message=\"Simply set using Structure.lattice = lattice. This will be removed in pymatgen v2020.\")\n def modify_lattice(self, new_lattice):\n \"\"\"\n Modify the lattice of the structure. Mainly used for changing the\n basis.\n\n Args:\n new_lattice (Lattice): New lattice\n \"\"\"\n self._lattice = new_lattice\n for site in self._sites:\n site.lattice = new_lattice\n\n def apply_strain(self, strain):\n \"\"\"\n Apply a strain to the lattice.\n\n Args:\n strain (float or list): Amount of strain to apply. Can be a float,\n or a sequence of 3 numbers. E.g., 0.01 means all lattice\n vectors are increased by 1%. This is equivalent to calling\n modify_lattice with a lattice with lattice parameters that\n are 1% larger.\n \"\"\"\n s = (1 + np.array(strain)) * np.eye(3)\n self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)\n\n def sort(self, key=None, reverse=False):\n \"\"\"\n Sort a structure in place. The parameters have the same meaning as in\n list.sort. By default, sites are sorted by the electronegativity of\n the species. 
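# --- Illustrative usage sketch (not part of the original module) -----------
# A few of the in-place editing operations described above; the structure,
# indices and strain value are arbitrary.
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

s = Structure(Lattice.cubic(5.0), ["Li", "Fe", "O", "O"],
              [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5],
               [0.25, 0.25, 0.25], [0.75, 0.75, 0.75]])
s.remove_species(["Li"])          # drop every Li site
s.remove_sites([0])               # drop the first remaining site (Fe)
s.apply_strain(0.01)              # stretch every lattice vector by 1%
print(len(s), s.lattice.a)        # 2 sites, a ~ 5.05
# ----------------------------------------------------------------------------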
The difference between this method and\n get_sorted_structure (which also works in IStructure) is that the\n latter returns a new Structure, while this just sorts the Structure\n in place.\n\n Args:\n key: Specifies a function of one argument that is used to extract\n a comparison key from each list element: key=str.lower. The\n default value is None (compare the elements directly).\n reverse (bool): If set to True, then the list elements are sorted\n as if each comparison were reversed.\n \"\"\"\n self._sites.sort(key=key, reverse=reverse)\n\n def translate_sites(self, indices, vector, frac_coords=True,\n to_unit_cell=True):\n \"\"\"\n Translate specific sites by some vector, keeping the sites within the\n unit cell.\n\n Args:\n indices: Integer or List of site indices on which to perform the\n translation.\n vector: Translation vector for sites.\n frac_coords (bool): Whether the vector corresponds to fractional or\n cartesian coordinates.\n to_unit_cell (bool): Whether new sites are transformed to unit\n cell\n \"\"\"\n if not isinstance(indices, collections.abc.Iterable):\n indices = [indices]\n\n for i in indices:\n site = self._sites[i]\n if frac_coords:\n fcoords = site.frac_coords + vector\n else:\n fcoords = self._lattice.get_fractional_coords(\n site.coords + vector)\n if to_unit_cell:\n fcoords = np.mod(fcoords, 1)\n self._sites[i].frac_coords = fcoords\n\n def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None,\n to_unit_cell=True):\n \"\"\"\n Rotate specific sites by some angle around vector at anchor.\n\n Args:\n indices (list): List of site indices on which to perform the\n translation.\n theta (float): Angle in radians\n axis (3x1 array): Rotation axis vector.\n anchor (3x1 array): Point of rotation.\n to_unit_cell (bool): Whether new sites are transformed to unit\n cell\n \"\"\"\n\n from numpy.linalg import norm\n from numpy import cross, eye\n from scipy.linalg import expm\n\n if indices is None:\n indices = range(len(self))\n\n if axis is None:\n axis = [0, 0, 1]\n\n if anchor is None:\n anchor = [0, 0, 0]\n\n anchor = np.array(anchor)\n axis = np.array(axis)\n\n theta %= 2 * np.pi\n\n rm = expm(cross(eye(3), axis / norm(axis)) * theta)\n for i in indices:\n site = self._sites[i]\n coords = ((np.dot(rm, np.array(site.coords - anchor).T)).T + anchor).ravel()\n new_site = PeriodicSite(\n site.species, coords, self._lattice,\n to_unit_cell=to_unit_cell, coords_are_cartesian=True,\n properties=site.properties,\n skip_checks=True)\n self._sites[i] = new_site\n\n def perturb(self, distance, min_distance=None):\n \"\"\"\n Performs a random perturbation of the sites in a structure to break\n symmetries.\n\n Args:\n distance (float): Distance in angstroms by which to perturb each\n site.\n min_distance (None, int, or float): if None, all displacements will\n be equal amplitude. 
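# --- Illustrative usage sketch (not part of the original module) -----------
# Moving sites around: translate_sites works in fractional coordinates by
# default and rotate_sites takes an angle in radians; values are arbitrary.
import math
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

s = Structure(Lattice.cubic(4.0), ["Si", "Si"],
              [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]])
s.translate_sites([1], [0.1, 0.0, 0.0])                    # fractional shift
s.rotate_sites([1], theta=math.pi / 2, axis=[0, 0, 1],
               anchor=[0, 0, 0])                           # 90 deg about z
s.sort()                                                   # electronegativity order
print(s[1].frac_coords)
# ----------------------------------------------------------------------------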
If int or float, perturb each site a\n distance drawn from the uniform distribution between\n 'min_distance' and 'distance'.\n\n \"\"\"\n\n def get_rand_vec():\n # deals with zero vectors.\n vector = np.random.randn(3)\n vnorm = np.linalg.norm(vector)\n dist = distance\n if isinstance(min_distance, (float, int)):\n dist = np.random.uniform(min_distance, dist)\n return vector / vnorm * dist if vnorm != 0 else get_rand_vec()\n\n for i in range(len(self._sites)):\n self.translate_sites([i], get_rand_vec(), frac_coords=False)\n\n def make_supercell(self, scaling_matrix, to_unit_cell=True):\n \"\"\"\n Create a supercell.\n\n Args:\n scaling_matrix: A scaling matrix for transforming the lattice\n vectors. Has to be all integers. Several options are possible:\n\n a. A full 3x3 scaling matrix defining the linear combination\n the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,\n 1]] generates a new structure with lattice vectors a' =\n 2a + b, b' = 3b, c' = c where a, b, and c are the lattice\n vectors of the original structure.\n b. An sequence of three scaling factors. E.g., [2, 1, 1]\n specifies that the supercell should have dimensions 2a x b x\n c.\n c. A number, which simply scales all lattice vectors by the\n same factor.\n to_unit_cell: Whether or not to fall back sites into the unit cell\n \"\"\"\n s = self * scaling_matrix\n if to_unit_cell:\n for site in s:\n site.to_unit_cell(in_place=True)\n self._sites = s.sites\n self._lattice = s.lattice\n\n def scale_lattice(self, volume):\n \"\"\"\n Performs a scaling of the lattice vectors so that length proportions\n and angles are preserved.\n\n Args:\n volume (float): New volume of the unit cell in A^3.\n \"\"\"\n self.lattice = self._lattice.scale(volume)\n\n def merge_sites(self, tol=0.01, mode=\"sum\"):\n \"\"\"\n Merges sites (adding occupancies) within tol of each other.\n Removes site properties.\n\n Args:\n tol (float): Tolerance for distance to merge sites.\n mode (str): Three modes supported. \"delete\" means duplicate sites are\n deleted. \"sum\" means the occupancies are summed for the sites.\n \"average\" means that the site is deleted but the properties are averaged\n Only first letter is considered.\n\n \"\"\"\n mode = mode.lower()[0]\n from scipy.spatial.distance import squareform\n from scipy.cluster.hierarchy import fcluster, linkage\n\n d = self.distance_matrix\n np.fill_diagonal(d, 0)\n clusters = fcluster(linkage(squareform((d + d.T) / 2)),\n tol, 'distance')\n sites = []\n for c in np.unique(clusters):\n inds = np.where(clusters == c)[0]\n species = self[inds[0]].species\n coords = self[inds[0]].frac_coords\n props = self[inds[0]].properties\n for n, i in enumerate(inds[1:]):\n sp = self[i].species\n if mode == \"s\":\n species += sp\n offset = self[i].frac_coords - coords\n coords = coords + ((offset - np.round(offset)) / (n + 2)).astype(\n coords.dtype)\n for key in props.keys():\n if props[key] is not None and self[i].properties[key] != props[key]:\n if mode == 'a' and isinstance(props[key], float):\n # update a running total\n props[key] = props[key] * (n + 1) / (n + 2) + self[i].properties[key] / (n + 2)\n else:\n props[key] = None\n warnings.warn(\"Sites with different site property %s are merged. 
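# --- Illustrative usage sketch (not part of the original module) -----------
# Supercell creation, random perturbation and volume scaling; the scaling
# matrix, displacement amplitude and volume factor are arbitrary.
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

s = Structure(Lattice.cubic(3.0), ["Cu"], [[0.0, 0.0, 0.0]])
s.make_supercell([2, 2, 2])          # 8 sites, 2a x 2b x 2c cell
s.perturb(0.05)                      # displace every site by 0.05 Angstrom
s.scale_lattice(s.volume * 1.10)     # isotropic 10% volume increase
print(len(s), s.volume)
# ----------------------------------------------------------------------------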
\"\n \"So property is set to none\" % key)\n sites.append(PeriodicSite(species, coords, self.lattice, properties=props))\n\n self._sites = sites\n\n def set_charge(self, new_charge: float = 0.):\n \"\"\"\n Sets the overall structure charge\n\n Args:\n new_charge (float): new charge to set\n \"\"\"\n self._charge = new_charge\n\n\nclass Molecule(IMolecule, collections.abc.MutableSequence):\n \"\"\"\n Mutable Molecule. It has all the methods in IMolecule, but in addition,\n it allows a user to perform edits on the molecule.\n \"\"\"\n __hash__ = None # type: ignore\n\n def __init__(self,\n species: Sequence[Union[str, Element, Species, DummySpecies, Composition]],\n coords: Sequence[Sequence[float]],\n charge: float = 0.0,\n spin_multiplicity: float = None,\n validate_proximity: bool = False,\n site_properties: dict = None):\n \"\"\"\n Creates a MutableMolecule.\n\n Args:\n species: list of atomic species. Possible kinds of input include a\n list of dict of elements/species and occupancies, a List of\n elements/specie specified as actual Element/Species, Strings\n (\"Fe\", \"Fe2+\") or atomic numbers (1,56).\n coords (3x1 array): list of cartesian coordinates of each species.\n charge (float): Charge for the molecule. Defaults to 0.\n spin_multiplicity (int): Spin multiplicity for molecule.\n Defaults to None, which means that the spin multiplicity is\n set to 1 if the molecule has no unpaired electrons and to 2\n if there are unpaired electrons.\n validate_proximity (bool): Whether to check if there are sites\n that are less than 1 Ang apart. Defaults to False.\n site_properties (dict): Properties associated with the sites as\n a dict of sequences, e.g., {\"magmom\":[5,5,5,5]}. The\n sequences have to be the same length as the atomic species\n and fractional_coords. Defaults to None for no properties.\n \"\"\"\n super().__init__(species, coords, charge=charge,\n spin_multiplicity=spin_multiplicity,\n validate_proximity=validate_proximity,\n site_properties=site_properties)\n self._sites = list(self._sites) # type: ignore\n\n def __setitem__(self, i, site):\n \"\"\"\n Modify a site in the molecule.\n\n Args:\n i (int, [int], slice, Species-like): Indices to change. You can\n specify these as an int, a list of int, or a species-like\n string.\n site (PeriodicSite/Species/Sequence): Three options exist. 
You can\n provide a Site directly, or for convenience, you can provide\n simply a Species-like string/object, or finally a (Species,\n coords) sequence, e.g., (\"Fe\", [0.5, 0.5, 0.5]).\n \"\"\"\n\n if isinstance(i, int):\n indices = [i]\n elif isinstance(i, (str, Element, Species)):\n self.replace_species({i: site})\n return\n elif isinstance(i, slice):\n to_mod = self[i]\n indices = [ii for ii, s in enumerate(self._sites)\n if s in to_mod]\n else:\n indices = list(i)\n\n for ii in indices:\n if isinstance(site, Site):\n self._sites[ii] = site\n else:\n if isinstance(site, str) or (\n not isinstance(site, collections.abc.Sequence)):\n self._sites[ii].species = site\n else:\n self._sites[ii].species = site[0]\n if len(site) > 1:\n self._sites[ii].coords = site[1]\n if len(site) > 2:\n self._sites[ii].properties = site[2]\n\n def __delitem__(self, i):\n \"\"\"\n Deletes a site from the Structure.\n \"\"\"\n self._sites.__delitem__(i)\n\n def append(self, species, coords, validate_proximity=True, properties=None):\n \"\"\"\n Appends a site to the molecule.\n\n Args:\n species: Species of inserted site\n coords: Coordinates of inserted site\n validate_proximity (bool): Whether to check if inserted site is\n too close to an existing site. Defaults to True.\n properties (dict): A dict of properties for the Site.\n\n Returns:\n New molecule with inserted site.\n \"\"\"\n return self.insert(len(self), species, coords,\n validate_proximity=validate_proximity,\n properties=properties)\n\n def set_charge_and_spin(self, charge: float, spin_multiplicity: Optional[float] = None):\n \"\"\"\n Set the charge and spin multiplicity.\n\n Args:\n charge (int): Charge for the molecule. Defaults to 0.\n spin_multiplicity (int): Spin multiplicity for molecule.\n Defaults to None, which means that the spin multiplicity is\n set to 1 if the molecule has no unpaired electrons and to 2\n if there are unpaired electrons.\n \"\"\"\n self._charge = charge\n nelectrons = 0.0\n for site in self._sites:\n for sp, amt in site.species.items():\n if not isinstance(sp, DummySpecies):\n nelectrons += sp.Z * amt\n nelectrons -= charge\n self._nelectrons = nelectrons\n if spin_multiplicity:\n if (nelectrons + spin_multiplicity) % 2 != 1:\n raise ValueError(\n \"Charge of {} and spin multiplicity of {} is\"\n \" not possible for this molecule\".format(\n self._charge, spin_multiplicity))\n self._spin_multiplicity = spin_multiplicity\n else:\n self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2\n\n def insert(self, i, species, coords, validate_proximity=False,\n properties=None):\n \"\"\"\n Insert a site to the molecule.\n\n Args:\n i (int): Index to insert site\n species: species of inserted site\n coords (3x1 array): coordinates of inserted site\n validate_proximity (bool): Whether to check if inserted site is\n too close to an existing site. 
Defaults to True.\n properties (dict): Dict of properties for the Site.\n\n Returns:\n New molecule with inserted site.\n \"\"\"\n new_site = Site(species, coords, properties=properties)\n if validate_proximity:\n for site in self:\n if site.distance(new_site) < self.DISTANCE_TOLERANCE:\n raise ValueError(\"New site is too close to an existing \"\n \"site!\")\n self._sites.insert(i, new_site)\n\n def remove_species(self, species):\n \"\"\"\n Remove all occurrences of a species from a molecule.\n\n Args:\n species: Species to remove.\n \"\"\"\n new_sites = []\n species = [get_el_sp(sp) for sp in species]\n for site in self._sites:\n new_sp_occu = {sp: amt for sp, amt in site.species.items()\n if sp not in species}\n if len(new_sp_occu) > 0:\n new_sites.append(Site(new_sp_occu, site.coords,\n properties=site.properties))\n self._sites = new_sites\n\n def remove_sites(self, indices):\n \"\"\"\n Delete sites with at indices.\n\n Args:\n indices: Sequence of indices of sites to delete.\n \"\"\"\n self._sites = [self._sites[i] for i in range(len(self._sites))\n if i not in indices]\n\n def translate_sites(self, indices=None, vector=None):\n \"\"\"\n Translate specific sites by some vector, keeping the sites within the\n unit cell.\n\n Args:\n indices (list): List of site indices on which to perform the\n translation.\n vector (3x1 array): Translation vector for sites.\n \"\"\"\n if indices is None:\n indices = range(len(self))\n if vector is None:\n vector == [0, 0, 0]\n for i in indices:\n site = self._sites[i]\n new_site = Site(site.species, site.coords + vector,\n properties=site.properties)\n self._sites[i] = new_site\n\n def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None):\n \"\"\"\n Rotate specific sites by some angle around vector at anchor.\n\n Args:\n indices (list): List of site indices on which to perform the\n translation.\n theta (float): Angle in radians\n axis (3x1 array): Rotation axis vector.\n anchor (3x1 array): Point of rotation.\n \"\"\"\n\n from numpy.linalg import norm\n from numpy import cross, eye\n from scipy.linalg import expm\n\n if indices is None:\n indices = range(len(self))\n\n if axis is None:\n axis = [0, 0, 1]\n\n if anchor is None:\n anchor = [0, 0, 0]\n\n anchor = np.array(anchor)\n axis = np.array(axis)\n\n theta %= 2 * np.pi\n\n rm = expm(cross(eye(3), axis / norm(axis)) * theta)\n\n for i in indices:\n site = self._sites[i]\n s = ((np.dot(rm, (site.coords - anchor).T)).T + anchor).ravel()\n new_site = Site(site.species, s,\n properties=site.properties)\n self._sites[i] = new_site\n\n def perturb(self, distance):\n \"\"\"\n Performs a random perturbation of the sites in a structure to break\n symmetries.\n\n Args:\n distance (float): Distance in angstroms by which to perturb each\n site.\n \"\"\"\n\n def get_rand_vec():\n # deals with zero vectors.\n vector = np.random.randn(3)\n vnorm = np.linalg.norm(vector)\n return vector / vnorm * distance if vnorm != 0 else get_rand_vec()\n\n for i in range(len(self._sites)):\n self.translate_sites([i], get_rand_vec())\n\n def apply_operation(self, symmop):\n \"\"\"\n Apply a symmetry operation to the molecule.\n\n Args:\n symmop (SymmOp): Symmetry operation to apply.\n \"\"\"\n\n def operate_site(site):\n new_cart = symmop.operate(site.coords)\n return Site(site.species, new_cart,\n properties=site.properties)\n\n self._sites = [operate_site(s) for s in self._sites]\n\n def copy(self):\n \"\"\"\n Convenience method to get a copy of the molecule.\n\n Returns:\n A copy of the Molecule.\n \"\"\"\n 
return self.__class__.from_sites(self)\n\n def substitute(self, index, func_grp, bond_order=1):\n \"\"\"\n Substitute atom at index with a functional group.\n\n Args:\n index (int): Index of atom to substitute.\n func_grp: Substituent molecule. There are two options:\n\n 1. Providing an actual molecule as the input. The first atom\n must be a DummySpecies X, indicating the position of\n nearest neighbor. The second atom must be the next\n nearest atom. For example, for a methyl group\n substitution, func_grp should be X-CH3, where X is the\n first site and C is the second site. What the code will\n do is to remove the index site, and connect the nearest\n neighbor to the C atom in CH3. The X-C bond indicates the\n directionality to connect the atoms.\n 2. A string name. The molecule will be obtained from the\n relevant template in func_groups.json.\n bond_order (int): A specified bond order to calculate the bond\n length between the attached functional group and the nearest\n neighbor site. Defaults to 1.\n \"\"\"\n\n # Find the nearest neighbor that is not a terminal atom.\n all_non_terminal_nn = []\n for nn in self.get_neighbors(self[index], 3):\n # Check that the nn has neighbors within a sensible distance but\n # is not the site being substituted.\n for nn2 in self.get_neighbors(nn, 3):\n if nn2 != self[index] and nn2.nn_distance < 1.2 * get_bond_length(nn.specie, nn2.specie):\n all_non_terminal_nn.append(nn)\n break\n\n if len(all_non_terminal_nn) == 0:\n raise RuntimeError(\"Can't find a non-terminal neighbor to attach\"\n \" functional group to.\")\n\n non_terminal_nn = min(all_non_terminal_nn, key=lambda nn: nn.nn_distance)\n\n # Set the origin point to be the coordinates of the nearest\n # non-terminal neighbor.\n origin = non_terminal_nn.coords\n\n # Pass value of functional group--either from user-defined or from\n # functional.json\n if isinstance(func_grp, Molecule):\n func_grp = func_grp\n else:\n # Check to see whether the functional group is in database.\n if func_grp not in FunctionalGroups:\n raise RuntimeError(\"Can't find functional group in list. \"\n \"Provide explicit coordinate instead\")\n func_grp = FunctionalGroups[func_grp]\n\n # If a bond length can be found, modify func_grp so that the X-group\n # bond length is equal to the bond length.\n bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie,\n bond_order=bond_order)\n if bl is not None:\n func_grp = func_grp.copy()\n vec = func_grp[0].coords - func_grp[1].coords\n vec /= np.linalg.norm(vec)\n func_grp[0] = \"X\", func_grp[1].coords + float(bl) * vec\n\n # Align X to the origin.\n x = func_grp[0]\n func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)\n\n # Find angle between the attaching bond and the bond to be replaced.\n v1 = func_grp[1].coords - origin\n v2 = self[index].coords - origin\n angle = get_angle(v1, v2)\n\n if 1 < abs(angle % 180) < 179:\n # For angles which are not 0 or 180, we perform a rotation about\n # the origin along an axis perpendicular to both bonds to align\n # bonds.\n axis = np.cross(v1, v2)\n op = SymmOp.from_origin_axis_angle(origin, axis, angle)\n func_grp.apply_operation(op)\n elif abs(abs(angle) - 180) < 1:\n # We have a 180 degree angle. 
Simply do an inversion about the\n # origin\n for i, fg in enumerate(func_grp):\n func_grp[i] = (fg.species, origin - (fg.coords - origin))\n\n # Remove the atom to be replaced, and add the rest of the functional\n # group.\n del self[index]\n for site in func_grp[1:]:\n self._sites.append(site)\n\n\nclass StructureError(Exception):\n \"\"\"\n Exception class for Structure.\n Raised when the structure has problems, e.g., atoms that are too close.\n \"\"\"\n pass\n\n\nwith open(os.path.join(os.path.dirname(__file__),\n \"func_groups.json\"), \"rt\") as f:\n FunctionalGroups = {k: Molecule(v[\"species\"], v[\"coords\"])\n for k, v in json.load(f).items()}\n"
] | [
[
"numpy.dot",
"pandas.DataFrame",
"numpy.round",
"numpy.all",
"numpy.max",
"numpy.zeros_like",
"numpy.fill_diagonal",
"numpy.any",
"numpy.cross",
"numpy.random.randn",
"scipy.spatial.distance.squareform",
"numpy.where",
"numpy.divide",
"numpy.allclose",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"numpy.linalg.det",
"numpy.ceil",
"numpy.zeros",
"numpy.min",
"numpy.linalg.inv",
"numpy.amin",
"numpy.identity",
"numpy.random.rand",
"numpy.array",
"numpy.logical_and",
"numpy.sum",
"numpy.abs",
"numpy.linalg.norm",
"numpy.bitwise_and",
"numpy.mod",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
tareeqav/frustum-pointnets | [
"07a0385172067db4c3a792141f7171ab62143d4d"
] | [
"sunrgbd/sunrgbd_data/utils.py"
] | [
"''' Provides Python helper function to read My SUNRGBD dataset.\n\nAuthor: Charles R. Qi\nDate: October, 2017\n'''\nimport numpy as np\nimport cv2\nimport os\n\nclass SUNObject3d(object):\n def __init__(self, line):\n data = line.split(' ')\n data[1:] = [float(x) for x in data[1:]]\n self.classname = data[0]\n self.xmin = data[1] \n self.ymin = data[2]\n self.xmax = data[1]+data[3]\n self.ymax = data[2]+data[4]\n self.box2d = np.array([self.xmin,self.ymin,self.xmax,self.ymax])\n self.centroid = np.array([data[5],data[6],data[7]])\n self.unused_dimension = np.array([data[8],data[9],data[10]])\n self.w = data[8]\n self.l = data[9]\n self.h = data[10]\n self.unused_basis = np.zeros((3,3))\n self.unused_basis[0,0] = data[11]\n self.unused_basis[0,1] = data[12]\n self.unused_basis[1,0] = data[13]\n self.unused_basis[1,1] = data[14]\n self.unused_basis[2,2] = 1\n self.orientation = np.zeros((3,))\n self.orientation[0] = data[15]\n self.orientation[1] = data[16]\n self.heading_angle = -1 * np.arctan2(self.orientation[1], self.orientation[0])\n\nclass SUNRGBD_Calibration(object):\n ''' Calibration matrices and utils\n We define five coordinate system in SUN RGBD dataset\n\n camera coodinate:\n Z is forward, Y is downward, X is rightward\n\n depth coordinate:\n Just change axis order and flip up-down axis from camera coord\n\n upright depth coordinate: tilted depth coordinate by Rtilt such that Z is gravity direction,\n Z is up-axis, Y is forward, X is right-ward\n\n upright camera coordinate:\n Just change axis order and flip up-down axis from upright depth coordinate\n\n image coordinate:\n ----> x-axis (u)\n |\n v\n y-axis (v) \n\n depth points are stored in upright depth coordinate.\n labels for 3d box (basis, centroid, size) are in upright depth coordinate.\n 2d boxes are in image coordinate\n\n We generate frustum point cloud and 3d box in upright camera coordinate\n '''\n\n def __init__(self, calib_filepath):\n lines = [line.rstrip() for line in open(calib_filepath)]\n Rtilt = np.array([float(x) for x in lines[0].split(' ')])\n self.Rtilt = np.reshape(Rtilt, (3,3), order='F')\n K = np.array([float(x) for x in lines[1].split(' ')])\n self.K = np.reshape(K, (3,3), order='F')\n self.f_u = self.K[0,0]\n self.f_v = self.K[1,1]\n self.c_u = self.K[0,2]\n self.c_v = self.K[1,2]\n \n def flip_axis_to_camera(self, pc):\n ''' Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward\n Input and output are both (N,3) array\n '''\n pc2 = np.copy(pc)\n pc2[:,[0,1,2]] = pc2[:,[0,2,1]] # cam X,Y,Z = depth X,-Z,Y\n pc2[:,1] *= -1\n return pc2\n\n def flip_axis_to_depth(self, pc):\n pc2 = np.copy(pc)\n pc2[:,[0,1,2]] = pc2[:,[0,2,1]] # depth X,Y,Z = cam X,Z,-Y\n pc2[:,2] *= -1\n return pc2\n\n def project_upright_depth_to_camera(self, pc):\n ''' project point cloud from depth coord to camera coordinate\n Input: (N,3) Output: (N,3)\n '''\n # Project upright depth to depth coordinate\n pc2 = np.dot(np.transpose(self.Rtilt), np.transpose(pc[:,0:3])) # (3,n)\n return self.flip_axis_to_camera(np.transpose(pc2))\n\n def project_upright_depth_to_image(self, pc):\n ''' Input: (N,3) Output: (N,2) UV and (N,) depth '''\n pc2 = self.project_upright_depth_to_camera(pc)\n uv = np.dot(pc2, np.transpose(self.K)) # (n,3)\n uv[:,0] /= uv[:,2]\n uv[:,1] /= uv[:,2]\n return uv[:,0:2], pc2[:,2]\n\n def project_upright_depth_to_upright_camera(self, pc):\n return self.flip_axis_to_camera(pc)\n\n def project_upright_camera_to_upright_depth(self, pc):\n return self.flip_axis_to_depth(pc)\n\n def project_image_to_camera(self, 
uv_depth):\n n = uv_depth.shape[0]\n x = ((uv_depth[:,0]-self.c_u)*uv_depth[:,2])/self.f_u\n y = ((uv_depth[:,1]-self.c_v)*uv_depth[:,2])/self.f_v\n pts_3d_camera = np.zeros((n,3))\n pts_3d_camera[:,0] = x\n pts_3d_camera[:,1] = y\n pts_3d_camera[:,2] = uv_depth[:,2]\n return pts_3d_camera\n\n def project_image_to_upright_camerea(self, uv_depth):\n pts_3d_camera = self.project_image_to_camera(uv_depth)\n pts_3d_depth = self.flip_axis_to_depth(pts_3d_camera)\n pts_3d_upright_depth = np.transpose(np.dot(self.Rtilt, np.transpose(pts_3d_depth)))\n return self.project_upright_depth_to_upright_camera(pts_3d_upright_depth)\n\n \n \ndef rotx(t):\n \"\"\"Rotation about the x-axis.\"\"\"\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[1, 0, 0],\n [0, c, -s],\n [0, s, c]])\n\n\ndef roty(t):\n \"\"\"Rotation about the y-axis.\"\"\"\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, 0, s],\n [0, 1, 0],\n [-s, 0, c]])\n\n\ndef rotz(t):\n \"\"\"Rotation about the z-axis.\"\"\"\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])\n\n\ndef transform_from_rot_trans(R, t):\n \"\"\"Transforation matrix from rotation matrix and translation vector.\"\"\"\n R = R.reshape(3, 3)\n t = t.reshape(3, 1)\n return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))\n\n\ndef inverse_rigid_trans(Tr):\n \"\"\"Inverse a rigid body transform matrix (3x4 as [R|t])\n [R'|-R't; 0|1]\n \"\"\" \n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])\n inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])\n return inv_Tr\n\ndef read_sunrgbd_label(label_filename):\n lines = [line.rstrip() for line in open(label_filename)]\n objects = [SUNObject3d(line) for line in lines]\n return objects\n\ndef load_image(img_filename):\n return cv2.imread(img_filename)\n\ndef load_depth_points(depth_filename):\n depth = np.loadtxt(depth_filename)\n return depth\n\n\ndef random_shift_box2d(box2d, shift_ratio=0.1):\n ''' Randomly shift box center, randomly scale width and height \n '''\n r = shift_ratio\n xmin,ymin,xmax,ymax = box2d\n h = ymax-ymin\n w = xmax-xmin\n cx = (xmin+xmax)/2.0\n cy = (ymin+ymax)/2.0\n cx2 = cx + w*r*(np.random.random()*2-1)\n cy2 = cy + h*r*(np.random.random()*2-1)\n h2 = h*(1+np.random.random()*2*r-r) # 0.9 to 1.1\n w2 = w*(1+np.random.random()*2*r-r) # 0.9 to 1.1\n return np.array([cx2-w2/2.0, cy2-h2/2.0, cx2+w2/2.0, cy2+h2/2.0])\n \ndef extract_pc_in_box3d(pc, box3d):\n ''' pc: (N,3), box3d: (8,3) '''\n box3d_roi_inds = in_hull(pc[:,0:3], box3d)\n return pc[box3d_roi_inds,:], box3d_roi_inds\n\ndef compute_box_3d(obj, calib):\n ''' Takes an object and a projection matrix (P) and projects the 3d\n bounding box into the image plane.\n Returns:\n corners_2d: (8,2) array in image coord.\n corners_3d: (8,3) array in in upright depth coord.\n '''\n center = obj.centroid\n\n # compute rotational matrix around yaw axis\n R = rotz(-1*obj.heading_angle)\n #b,a,c = dimension\n #print R, a,b,c\n \n # 3d bounding box dimensions\n l = obj.l # along heading arrow\n w = obj.w # perpendicular to heading arrow\n h = obj.h\n\n # rotate and translate 3d bounding box\n x_corners = [-l,l,l,-l,-l,l,l,-l]\n y_corners = [w,w,-w,-w,w,w,-w,-w]\n z_corners = [h,h,h,h,-h,-h,-h,-h]\n corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))\n corners_3d[0,:] += center[0]\n corners_3d[1,:] += center[1]\n corners_3d[2,:] += center[2]\n\n # project the 3d bounding box into the image plane\n corners_2d,_ = calib.project_upright_depth_to_image(np.transpose(corners_3d))\n 
#print 'corners_2d: ', corners_2d\n return corners_2d, np.transpose(corners_3d)\n\ndef compute_orientation_3d(obj, calib):\n ''' Takes an object and a projection matrix (P) and projects the 3d\n object orientation vector into the image plane.\n Returns:\n orientation_2d: (2,2) array in image coord.\n orientation_3d: (2,3) array in depth coord.\n '''\n \n # orientation in object coordinate system\n ori = obj.orientation\n orientation_3d = np.array([[0, ori[0]],[0, ori[1]],[0,0]])\n center = obj.centroid\n orientation_3d[0,:] = orientation_3d[0,:] + center[0]\n orientation_3d[1,:] = orientation_3d[1,:] + center[1]\n orientation_3d[2,:] = orientation_3d[2,:] + center[2]\n \n # project orientation into the image plane\n orientation_2d,_ = calib.project_upright_depth_to_image(np.transpose(orientation_3d))\n return orientation_2d, np.transpose(orientation_3d)\n\ndef draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):\n ''' Draw 3d bounding box in image\n qs: (8,2) array of vertices for the 3d box in following order:\n 1 -------- 0\n /| /|\n 2 -------- 3 .\n | | | |\n . 5 -------- 4\n |/ |/\n 6 -------- 7\n '''\n qs = qs.astype(np.int32)\n for k in range(0,4):\n #http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html\n i,j=k,(k+1)%4\n cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.LINE_AA) # use LINE_AA for opencv3\n\n i,j=k+4,(k+1)%4 + 4\n cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.LINE_AA)\n\n i,j=k,k+4\n cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.LINE_AA)\n return image\n\n\nimport cPickle\nimport gzip\n\ndef save_zipped_pickle(obj, filename, protocol=-1):\n with gzip.open(filename, 'wb') as f:\n cPickle.dump(obj, f, protocol)\n\ndef load_zipped_pickle(filename):\n with gzip.open(filename, 'rb') as f:\n loaded_object = cPickle.load(f)\n return loaded_object\n"
] | [
[
"numpy.hstack",
"numpy.random.random",
"numpy.reshape",
"numpy.vstack",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"numpy.copy",
"numpy.zeros_like",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Christoph9402/CVND_Image_Captioning | [
"47b24fee7e30e3ab216b5c42ae7ffb7f449f19e8"
] | [
"model.py"
] | [
"import torch\nimport torch.nn as nn\nimport torchvision.models as models\n\n#----------------------------------------------------------------------------------------------------------\n# ENCODER\nclass EncoderCNN(nn.Module):\n def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet50(pretrained=True)\n for param in resnet.parameters():\n param.requires_grad_(False)\n \n modules = list(resnet.children())[:-1]\n self.resnet = nn.Sequential(*modules)\n self.embed = nn.Linear(resnet.fc.in_features, embed_size)\n\n def forward(self, images):\n features = self.resnet(images)\n features = features.view(features.size(0), -1)\n features = self.embed(features)\n return features\n \n#----------------------------------------------------------------------------------------------------------\n# DECODER\nclass DecoderRNN(nn.Module):\n def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):\n super(DecoderRNN, self).__init__()\n \n #Implementing structure shown in the video from lesson 7.9\n #Embedding module. using embed_size as the dimension of the embedding_vector\n #and setting the number of embeddings to the size of the dictionary\n self.embedded=nn.Embedding(embedding_dim=embed_size,num_embeddings=vocab_size)\n #Long Shrt term memory cell\n self.lstm=nn.LSTM(input_size=embed_size, hidden_size=hidden_size, num_layers=num_layers,batch_first = True)\n #Implementing a dense layer (linear in pytorch). Use hidden_size as an input and vocab_size as an output\n self.linear=nn.Linear(in_features=hidden_size,out_features=vocab_size,bias=True)\n \n def forward(self, features, captions):\n #call the embedding layer from above and usethe captions as an input\n #make sure to remove the end token of the caption\n embedded=self.embedded(captions[:,:-1])\n #concatenate image features using torch.cat\n inputs = torch.cat((features.unsqueeze(1), embedded), 1)\n #call lstm, whicht will output the prediction and hidden state\n pred, hidden = self.lstm(inputs)\n #call linear with prediction as input\n linear_out=self.linear(pred)\n return linear_out\n\n def sample(self, inputs, states=None, max_len=20):\n \" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) \"\n \n #create empty list, where integer values are appended\n list = []\n for i in range(max_len):\n #call lstm to get prediction and hidden state\n pred, states = self.lstm(inputs, states)\n #print(pred)\n #call linear\n linear_out = self.linear(pred)\n #print(linear_out)\n #get the highest value in linear_out\n prob, max_out = linear_out.max(2)\n #append item to list\n list.append(max_out.item())\n #print(list)\n #check if th end token is detected and if so, dont continue the loop\n if max_out.item()==1:\n break\n #define input the following iteration (use prediction)\n inputs = self.embedded(max_out)\n return list\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n "
] | [
[
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.Embedding",
"torch.nn.LSTM"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nimisis/DeDOL | [
"d7d8b1f0ce8c548c05cf993717ed5368d9340a23"
] | [
"DeDOL.py"
] | [
"import numpy as np\r\nimport argparse\r\nimport sys\r\nimport tensorflow as tf\r\nimport os\r\nfrom threading import Thread\r\nimport time\r\n\r\nfrom patroller_cnn import Patroller_CNN\r\nfrom patroller_rule import Patroller_Rule as Patroller_h\r\nfrom poacher_cnn import Poacher\r\nfrom poacher_rule import Poacher as Poacher_h\r\nfrom env import Env\r\nfrom replay_buffer import ReplayBuffer\r\nfrom DeDOL_util import simulate_payoff\r\nfrom DeDOL_util import calc_pa_best_response_PER as calc_pa_best_response\r\nfrom DeDOL_util import extend_payoff\r\nfrom DeDOL_util import calc_NE,calc_NE_zero\r\nfrom DeDOL_util import calc_po_best_response_PER as calc_po_best_response\r\nfrom DeDOL_util import tf_copy\r\nfrom DeDOL_util import PRDsolver\r\nfrom patroller_randomsweeping import RandomSweepingPatroller\r\nfrom maps import Mountainmap, generate_map\r\nfrom GUI_util import test_gui\r\n\r\n\r\nglobal eps_pa\r\nglobal eps_po\r\nglobal pa_start_num\r\nglobal po_strat_num \r\neps_pa, eps_po = [], []\r\npa_strat_num, po_strat_num = 0, 0\r\n\r\n\r\n\r\nargparser = argparse.ArgumentParser()\r\n########################################################################################\r\n### Environment\r\nargparser.add_argument('--row_num', type=int, default=3)\r\nargparser.add_argument('--column_num', type=int, default=3)\r\nargparser.add_argument('--ani_den_seed', type=int, default=66)\r\n\r\n### Patroller\r\nargparser.add_argument('--pa_state_size', type=int, default=20)\r\nargparser.add_argument('--pa_num_actions', type=int, default=5)\r\n\r\n### Poacher CNN\r\nargparser.add_argument('--snare_num', type=int, default=6)\r\nargparser.add_argument('--po_state_size', type=int, default=22) # add self footprint to poacher\r\nargparser.add_argument('--po_num_actions', type=int, default=10)\r\n\r\n### Poacher Rule Base, parameters set following advice from domain experts\r\nargparser.add_argument('--po_act_den_w', type=float, default=3.)\r\nargparser.add_argument('--po_act_enter_w', type=float, default=0.3)\r\nargparser.add_argument('--po_act_leave_w', type=float, default=-1.0)\r\nargparser.add_argument('--po_act_temp', type=float, default=5.0)\r\nargparser.add_argument('--po_home_dir_w', type=float, default=3.0)\r\n\r\n### Training \r\nargparser.add_argument('--Delta', type = float, default = 0.0, help = 'the exploration rate in the meta-strategy') \r\nargparser.add_argument('--naive', type = bool, default = False, help = 'whehter using naive PSRO') \r\nargparser.add_argument('--advanced_training', type = bool, default = True, \r\n help = 'whether using dueling double DQN with graident clipping') \r\nargparser.add_argument('--map_type', type = str, default = 'random')\r\nargparser.add_argument('--po_location', type = int, default = None, help = '0, 1, 2, 3 for local modes; None for global mode')\r\nargparser.add_argument('--save_path', type=str, default='./Results_33_random/')\r\n\r\nargparser.add_argument('--pa_episode_num', type=int, default=300000)\r\nargparser.add_argument('--po_episode_num', type=int, default=300000)\r\nargparser.add_argument('--epi_num_incr', type=int, default=0) # no usage now\r\nargparser.add_argument('--final_incr_iter', type = int, default = 10) # no usage now\r\nargparser.add_argument('--pa_replay_buffer_size', type=int, default=200000)\r\nargparser.add_argument('--po_replay_buffer_size', type=int, default=100000)\r\nargparser.add_argument('--test_episode_num', type=int, default=5000)\r\nargparser.add_argument('--iter_num', type=int, default=20, help = 'DO iteraion num') 
\r\nargparser.add_argument('--load_path', type=str, default='./Results5x5/')\r\nargparser.add_argument('--load_num', type=int, default=0)\r\nargparser.add_argument('--pa_initial_lr', type=float, default=1e-4)\r\nargparser.add_argument('--po_initial_lr', type=float, default=5e-5)\r\n\r\nargparser.add_argument('--br_po_DQN_episode_num', type=int, default=500)\r\nargparser.add_argument('--print_every', type=int, default=50)\r\nargparser.add_argument('--zero_sum', type=int, default=1, help = 'whether to set the game zero-sum')\r\nargparser.add_argument('--batch_size', type=int, default=32)\r\nargparser.add_argument('--target_update_every', type=int, default=2000)\r\nargparser.add_argument('--reward_gamma', type=float, default=0.95)\r\nargparser.add_argument('--save_every_episode', type=int, default= 200) #10000)\r\nargparser.add_argument('--test_every_episode', type=int, default= 10000)\r\nargparser.add_argument('--gui_every_episode', type=int, default=500)\r\nargparser.add_argument('--gui_test_num', type = int, default = 20)\r\nargparser.add_argument('--gui', type = int, default = 0)\r\nargparser.add_argument('--mix_every_episode', type=int, default=250) \r\nargparser.add_argument('--epsilon_decrease', type=float, default=0.05, help = 'decrease of the epsilon exploration rate in DQN') \r\nargparser.add_argument('--PER', type = bool, default = False, help = 'wheter to use prioterized experience replay')\r\nargparser.add_argument('--reward_shaping', type = bool, default = False, help = 'whether to use reward shaping in training')\r\n#########################################################################################\r\nargs = argparser.parse_args()\r\n\r\nif args.row_num == 7:\r\n args.column_num = 7\r\n args.max_time = 75\r\n args.pa_initial_lr = 1e-4\r\n args.po_initial_lr = 5e-5\r\n args.pa_replay_buffer_size = 200000\r\n args.po_replay_buffer_size = 100000\r\n if args.po_location is not None:\r\n args.pa_episode_num = 200000\r\n args.po_episode_num = 200000\r\n\r\nelif args.row_num == 5:\r\n args.column_num = 5\r\n args.max_time = 25\r\n args.pa_episode_num = 300000\r\n args.po_episode_num = 300000\r\n args.pa_initial_lr = 1e-4\r\n args.po_initial_lr = 5e-5\r\n args.pa_replay_buffer_size = 50000\r\n args.po_replay_buffer_size = 40000\r\n if args.po_location is not None:\r\n args.pa_episode_num = 200000\r\n\r\n\r\nelif args.row_num == 3:\r\n args.column_num = 3\r\n args.max_time = 4\r\n args.snare_num = 3\r\n args.pa_episode_num = 500 #100000\r\n args.po_episode_num = 500 #100000\r\n args.pa_initial_lr = 5e-5\r\n args.po_initial_lr = 5e-5\r\n args.pa_replay_buffer_size = 200 #10000\r\n args.po_replay_buffer_size = 200 #8000\r\n if args.po_location is not None:\r\n args.pa_episode_num = 200 # 80000\r\n args.po_episode_num = 200 # 80000\r\n\r\nif args.naive:\r\n args.Delta = 0.0\r\n args.po_location = None\r\nelse:\r\n pass\r\n\r\nif args.po_location is not None:\r\n args.save_path = './Results_' + str(args.row_num) + str(args.column_num) + '_' \\\r\n + args.map_type + '_mode' + str(args.po_location) + '/'\r\n\r\nif args.save_path and (not os.path.exists(args.save_path)):\r\n os.makedirs(args.save_path)\r\n\r\nparalog = open(args.save_path + 'paralog.txt', 'w')\r\nparalog.write('row_num {0} \\n'.format(args.row_num))\r\nparalog.write('snare_num {0} \\n'.format(args.snare_num))\r\nparalog.write('max_time {0} \\n'.format(args.max_time))\r\nparalog.write('animal density seed {0} \\n'.format(args.ani_den_seed))\r\nparalog.write('pa_initial_episode_num {0} 
\\n'.format(args.pa_episode_num))\r\nparalog.write('po_initial_episode_num {0} \\n'.format(args.po_episode_num))\r\nparalog.write('epi_num_incr {0} \\n'.format(args.epi_num_incr))\r\nparalog.write('final_incr_iter {0} \\n'.format(args.final_incr_iter))\r\nparalog.write('pa_replay_buffer_size {0} \\n'.format(args.pa_replay_buffer_size))\r\nparalog.write('po_replay_buffer_size {0} \\n'.format(args.po_replay_buffer_size))\r\nparalog.write('pa_initial_lr {0} \\n'.format(args.pa_initial_lr))\r\nparalog.write('po_initial_lr {0} \\n'.format(args.po_initial_lr))\r\nparalog.write('test_episode_num {0} \\n'.format(args.test_episode_num))\r\nparalog.write('Delta {0} \\n'.format(args.Delta))\r\nparalog.write('po_location {0} \\n'.format(str(args.po_location)))\r\nparalog.write('map_type {0} \\n'.format(str(args.map_type)))\r\nparalog.write('naive {0} \\n'.format(str(args.naive)))\r\nparalog.flush()\r\nparalog.close()\r\n\r\n################## for initialization ###########################\r\nglobal log_file\r\n\r\nlog_file = open(args.save_path + 'log.txt', 'w')\r\n\r\nanimal_density = generate_map(args)\r\nenv_pa = Env(args, animal_density, cell_length=None, canvas=None, gui=False)\r\nenv_po = Env(args, animal_density, cell_length=None, canvas=None, gui=False)\r\n\r\npatrollers = [Patroller_CNN(args, 'pa_model' + str(i)) for i in range(args.iter_num + 1)]\r\npoachers = [Poacher(args, 'po_model' + str(i)) for i in range(args.iter_num + 1)]\r\npa_type = ['DQN']\r\npo_type = ['DQN']\r\n\r\n### initialize poachers needed for training a separate best-response poacher DQN \r\nbr_poacher = Poacher(args, 'br_poacher')\r\nbr_target_poacher = Poacher(args, 'br_target_poacher')\r\nbr_good_poacher = Poacher(args, 'br_good_poacher')\r\nbr_utility = np.zeros(2)\r\n\r\nif not args.naive:\r\n patrollers[0] = RandomSweepingPatroller(args, mode = args.po_location)\r\n pa_type[0] = 'RS'\r\nif not args.naive:\r\n poachers[0] = Poacher_h(args, animal_density)\r\n po_type[0] = 'PARAM'\r\n\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nsess = tf.Session(config=config)\r\nsess.run(tf.global_variables_initializer())\r\n\r\n### copy ops needed for training a separate best-response poacher DQN \r\nbr_po_copy_ops = tf_copy(br_target_poacher, br_poacher, sess)\r\nbr_po_good_copy_ops = tf_copy(br_good_poacher, br_poacher, sess)\r\n\r\npa_payoff = np.zeros((1,1))\r\npo_payoff = np.zeros((1,1))\r\nlength= np.zeros((1,1))\r\n \r\npa_payoff[0, 0], po_payoff[0, 0], _ = simulate_payoff(patrollers, poachers, 0, 0, env_pa, sess, \r\nargs, pa_type = pa_type[0], po_type = po_type[0])\r\n\r\npa_strategy, po_strategy = np.array([1]), np.array([1])\r\n\r\nnp.save(file = args.save_path + 'pa_strategy_iter_0', arr = pa_strategy)\r\nnp.save(file = args.save_path + 'po_strategy_iter_0', arr = po_strategy)\r\n\r\nnp.save(file = args.save_path + 'pa_payoff_iter_0', arr = pa_payoff)\r\nnp.save(file = args.save_path + 'po_payoff_iter_0', arr = po_payoff)\r\n\r\nlog_file.write('pa_payoff:\\n' + str(pa_payoff) + '\\n')\r\nlog_file.write('po_payoff:\\n' + str(po_payoff) + '\\n')\r\n\r\nlog_file.write('pa_strat:\\n' + str(pa_strategy) + '\\n')\r\nlog_file.write('po_strat:\\n' + str(po_strategy) + '\\n')\r\n\r\n\r\n############## starting DO ####################\r\niteration = 1\r\npa_pointer, po_pointer = 1, 1 # the pointer counting the number of strategies for pa and po.\r\n\r\nwhile(1):\r\n time_begin = time.time() \r\n\r\n pa_payoff, po_payoff, length = extend_payoff(pa_payoff, po_payoff, length, \r\n pa_pointer + 1, 
po_pointer + 1)\r\n po_type.append('DQN')\r\n pa_type.append('DQN')\r\n\r\n log_file.flush()\r\n\r\n print('\\n' + 'NEW_ITERATION: ' + str(iteration) + '\\n')\r\n log_file.write('\\n' + 'NEW_ITERATION: ' + str(iteration) + '\\n')\r\n\r\n ### compute the NE utility for both sides\r\n po_ne_utility = 0\r\n pa_ne_utility = 0\r\n for pa_strat in range(pa_pointer):\r\n for po_strat in range(po_pointer):\r\n po_ne_utility += pa_strategy[pa_strat] * po_strategy[po_strat] * po_payoff[pa_strat, po_strat] \r\n pa_ne_utility += pa_strategy[pa_strat] * po_strategy[po_strat] * pa_payoff[pa_strat, po_strat]\r\n\r\n log_file.write('last_pa_ne_utility:' + str(pa_ne_utility) + '\\n')\r\n log_file.write('last_po_ne_utility:' + str(po_ne_utility) + '\\n')\r\n pre_pa_strategy = pa_strategy\r\n pre_po_strategy = po_strategy\r\n\r\n ### compute the best response poacher utility\r\n ### 1. train a best response poacher DQN against the current pa strategy\r\n calc_po_best_response(br_poacher, br_target_poacher, br_po_copy_ops, br_po_good_copy_ops, patrollers, \r\n pa_strategy, pa_type, iteration, sess, env_pa, args, br_utility, 0, train_episode_num=args.br_po_DQN_episode_num)\r\n br_DQN_utility = br_utility[1]\r\n \r\n ### 2. test against the heuristic poacher stored in poachers[0]\r\n br_heuristic_utility = 0.\r\n for i in range(pa_pointer):\r\n _, po_utility, _ = simulate_payoff(patrollers, poachers, i, 0, env_pa, sess, args,\r\n pa_type=pa_type[i], po_type = po_type[0])\r\n br_heuristic_utility += po_utility * pa_strategy[i]\r\n\r\n ### choose the better one\r\n better = 'DQN' if br_DQN_utility >= br_heuristic_utility else 'heuristic'\r\n br_poacher_utility = max(br_DQN_utility, br_heuristic_utility)\r\n log_file.write('Iteration {0} poacher best response utility {1} poacher best response type {2} \\n'.format(\r\n iteration, br_poacher_utility, better))\r\n print('Iteration {0} poacher best response utility {1} poacher best response type {2}'.format(\r\n iteration, br_poacher_utility, better))\r\n\r\n\r\n ### train the best response agent\r\n ### using threading to accelerate the training\r\n good_patrollers = []\r\n good_poachers = []\r\n final_utility = [0.0, 0.0]\r\n target_patroller = Patroller_CNN(args, 'target_patroller' + str(iteration))\r\n good_patroller = Patroller_CNN(args, 'good_patroller' + str(iteration))\r\n pa_copy_ops = tf_copy(target_patroller, patrollers[pa_pointer], sess)\r\n pa_good_copy_ops = tf_copy(good_patroller, patrollers[pa_pointer], sess)\r\n pa_inverse_ops = tf_copy(patrollers[pa_pointer], good_patroller, sess)\r\n \r\n target_poacher = Poacher(args, 'target_poacher' + str(iteration))\r\n good_poacher = Poacher(args, 'good_poacher' + str(iteration))\r\n po_copy_ops = tf_copy(target_poacher, poachers[po_pointer], sess)\r\n po_good_copy_ops = tf_copy(good_poacher, poachers[po_pointer], sess)\r\n po_inverse_ops = tf_copy(poachers[po_pointer], good_poacher, sess)\r\n\r\n funcs = [calc_pa_best_response, calc_po_best_response]\r\n params = [[patrollers[pa_pointer], target_patroller, pa_copy_ops, pa_good_copy_ops, poachers, \r\n po_strategy, po_type, iteration, sess, env_pa, args, final_utility,0], \r\n [poachers[po_pointer], target_poacher, po_copy_ops, po_good_copy_ops, patrollers, \r\n pa_strategy, pa_type, iteration, sess, env_po, args, final_utility,0]]\r\n\r\n ### if the maximum iteration number is achieved\r\n if args.iter_num == iteration:\r\n log_file.write('\\n DO reaches terminating iteration {0}'.format(iteration) + '\\n')\r\n log_file.write('Final Pa-payoff: \\n' + 
str(pa_payoff) + '\\n')\r\n log_file.write('Final Po-payoff: \\n'+ str(po_payoff) + '\\n')\r\n log_file.write('Final pa_strat:\\n' + str(pa_strategy) + '\\n')\r\n log_file.write('Final po_strat:\\n'+ str(po_strategy) + '\\n')\r\n log_file.write('Final pa_ne_utility:' + str(pa_ne_utility) + '\\n')\r\n log_file.write('Final po_ne_utility:' + str(po_ne_utility) + '\\n')\r\n log_file.flush()\r\n\r\n threads = []\r\n for i in range(2):\r\n process = Thread(target=funcs[i], args=params[i])\r\n process.start()\r\n threads.append(process)\r\n ### We now pause execution on the main thread by 'joining' all of our started threads.\r\n for process in threads:\r\n process.join()\r\n\r\n pa_exploit = final_utility[0] - pa_ne_utility\r\n po_exploit = final_utility[1] - po_ne_utility\r\n log_file.write('Final pa_best_response_utility:' + str(final_utility[0]) + '\\n')\r\n log_file.write('Final po_best_response_utility:' + str(final_utility[1]) + '\\n')\r\n log_file.write('Final pa exploitibility:' + str(pa_exploit) + '\\n')\r\n log_file.write('Final po exploitibility:' + str(po_exploit) + '\\n')\r\n break\r\n \r\n ### not the final iteration\r\n threads = []\r\n\r\n for i in range(2):\r\n process = Thread(target=funcs[i], args=params[i])\r\n process.start()\r\n threads.append(process)\r\n for process in threads:\r\n process.join()\r\n\r\n # calc_pa_best_response(patrollers[pa_pointer], target_patroller, pa_copy_ops, pa_good_copy_ops, poachers, \r\n # po_strategy, iteration, sess, env_pa, args, final_utility,0)\r\n\r\n sess.run(pa_inverse_ops)\r\n sess.run(po_inverse_ops)\r\n\r\n for pa_strat in range(pa_pointer):\r\n pa_payoff[pa_strat, po_pointer ],po_payoff[pa_strat, po_pointer], _ = \\\r\n simulate_payoff(patrollers, poachers, pa_strat, po_pointer, env_pa, sess, args,\r\n pa_type=pa_type[pa_strat], po_type=po_type[po_pointer]) \r\n\r\n for po_strat in range(po_pointer):\r\n pa_payoff[pa_pointer, po_strat],po_payoff[pa_pointer, po_strat],_ = \\\r\n simulate_payoff(patrollers, poachers, pa_pointer, po_strat, env_pa, sess, args, \r\n pa_type=pa_type[pa_pointer], po_type = po_type[po_strat])\r\n\r\n pa_payoff[pa_pointer, po_pointer],po_payoff[pa_pointer, po_pointer],_ = \\\r\n simulate_payoff(patrollers, poachers, pa_pointer, po_pointer, env_pa, sess, args,\r\n pa_type=pa_type[pa_pointer], po_type = po_type[po_pointer])\r\n \r\n pa_strategy, po_strategy = calc_NE_zero(pa_payoff, po_payoff, args.Delta)\r\n # pa_strategy, po_strategy = np.ones(iteration + 1) / (iteration + 1), np.ones(iteration + 1) / (iteration + 1)\r\n\r\n params[0][5] = po_strategy\r\n params[1][5] = pa_strategy \r\n\r\n po_best_response = final_utility[1]\r\n pa_best_response = final_utility[0]\r\n # for pa_strat in range(pa_pointer):\r\n # po_best_response += pre_pa_strategy[pa_strat] * po_payoff[pa_strat, po_pointer] \r\n # for po_strat in range(po_pointer):\r\n # pa_best_response += pre_po_strategy[po_strat] * pa_payoff[pa_pointer, po_strat]\r\n\r\n # eps_po.append(po_best_response - po_ne_utility)\r\n # eps_pa.append(pa_best_response - pa_ne_utility)\r\n\r\n log_file.write('In DO pa_best_utility:' + str(pa_best_response) + '\\n')\r\n log_file.write('In DO po_best_utility:' + str(po_best_response) + '\\n')\r\n # log_file.write('eps_pa: ' + str(eps_pa) + '\\n')\r\n # log_file.write('eps_po: ' + str(eps_po) + '\\n')\r\n\r\n ######### save models for this iteration #############\r\n save_name = args.save_path + 'iteration_' + str(iteration) + '_pa_model.ckpt'\r\n patrollers[pa_pointer].save(sess =sess, filename = save_name)\r\n save_name 
= args.save_path + 'iteration_' + str(iteration) + '_po_model.ckpt'\r\n poachers[po_pointer].save(sess =sess, filename = save_name)\r\n\r\n ### save payoff matrix and ne strategies\r\n np.save(file = args.save_path + 'pa_payoff_iter_' + str(iteration), arr = pa_payoff)\r\n np.save(file = args.save_path + 'po_payoff_iter_' + str(iteration), arr = po_payoff)\r\n np.save(file = args.save_path + 'pa_strategy_iter_' + str(iteration), arr = pa_strategy)\r\n np.save(file = args.save_path + 'po_strategy_iter_' + str(iteration), arr = po_strategy)\r\n\r\n log_file.write('pa_payoff:\\n' + str(pa_payoff) + '\\n')\r\n log_file.write('po_payoff:\\n' + str(po_payoff) + '\\n')\r\n log_file.write('pa_strategy:\\n' + str(pa_strategy) + '\\n')\r\n log_file.write('po_strategy:\\n' + str(po_strategy) + '\\n')\r\n\r\n iteration += 1\r\n pa_pointer += 1\r\n po_pointer += 1\r\n\r\n time_end = time.time()\r\n\r\n log_file.write('Using time: \\n' + str(time_end - time_begin) + '\\n')\r\n log_file.flush()\r\n\r\nlog_file.close()\r\n"
] | [
[
"numpy.save",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
nmmp1234/desitarget | [
"71051d9a8816101dc69ddfc28a52d8c50803a0a0"
] | [
"py/desitarget/tychomatch.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\"\"\"\n=====================\ndesitarget.tychomatch\n=====================\n\nUseful Tycho catalog matching and manipulation routines.\n\"\"\"\nimport os\nimport numpy as np\nimport fitsio\nimport requests\nimport pickle\nfrom datetime import datetime\n\nfrom pkg_resources import resource_filename\nfrom time import time\nfrom astropy.io import ascii\nfrom glob import glob\nimport healpy as hp\n\nfrom desitarget import io\nfrom desitarget.internal import sharedmem\nfrom desimodel.footprint import radec2pix\nfrom desitarget.geomask import add_hp_neighbors, radec_match_to, nside2nside\n\n# ADM set up the DESI default logger\nfrom desiutil.log import get_logger\nlog = get_logger()\n\n# ADM start the clock\nstart = time()\n\n# ADM columns contained in our version of the Tycho fits files.\ntychodatamodel = np.array([], dtype=[\n ('TYC1', '>i2'), ('TYC2', '>i2'), ('TYC3', '|u1'),\n ('RA', '>f8'), ('DEC', '>f8'),\n ('MEAN_RA', '>f8'), ('MEAN_DEC', '>f8'),\n ('SIGMA_RA', '>f4'), ('SIGMA_DEC', '>f4'),\n # ADM these are converted to be in mas/yr for consistency with Gaia.\n ('PM_RA', '>f4'), ('PM_DEC', '>f4'),\n ('SIGMA_PM_RA', '>f4'), ('SIGMA_PM_DEC', '>f4'),\n ('EPOCH_RA', '>f4'), ('EPOCH_DEC', '>f4'),\n ('MAG_BT', '>f4'), ('MAG_VT', '>f4'), ('MAG_HP', '>f4'), ('ISGALAXY', '|u1'),\n ('JMAG', '>f4'), ('HMAG', '>f4'), ('KMAG', '>f4'), ('ZGUESS', '>f4')\n])\n\n\ndef get_tycho_dir():\n \"\"\"Convenience function to grab the Tycho environment variable.\n\n Returns\n -------\n :class:`str`\n The directory stored in the $TYCHO_DIR environment variable.\n \"\"\"\n # ADM check that the $TYCHO_DIR environment variable is set.\n tychodir = os.environ.get('TYCHO_DIR')\n if tychodir is None:\n msg = \"Set $TYCHO_DIR environment variable!\"\n log.critical(msg)\n raise ValueError(msg)\n\n return tychodir\n\n\ndef get_tycho_nside():\n \"\"\"Grab the HEALPixel nside to be used throughout this module.\n\n Returns\n -------\n :class:`int`\n The HEALPixel nside number for Tycho file creation and retrieval.\n \"\"\"\n nside = 4\n\n return nside\n\n\ndef grab_tycho(cosmodir=\"/global/cfs/cdirs/cosmo/staging/tycho2/\"):\n \"\"\"Retrieve the cosmo versions of the Tycho files at NERSC.\n\n Parameters\n ----------\n cosmodir : :class:`str`\n The NERSC directory that hosts the Tycho files.\n\n Returns\n -------\n Nothing\n But the Tycho fits file, README are written to $TYCHO_DIR/fits.\n\n Notes\n -----\n - The environment variable $TYCHO_DIR must be set.\n - The fits file is \"cleaned up\" to conform to DESI Data Systems\n standards (e.g. 
all columns are converted to upper-case).\n \"\"\"\n # ADM check that the TYCHO_DIR is set and retrieve it.\n tychodir = get_tycho_dir()\n\n # ADM construct the directory to which to write files.\n fitsdir = os.path.join(tychodir, 'fits')\n # ADM the directory better be empty for the copy!\n if os.path.exists(fitsdir):\n if len(os.listdir(fitsdir)) > 0:\n msg = \"{} should be empty to get TYCHO FITS file!\".format(fitsdir)\n log.critical(msg)\n raise ValueError(msg)\n # ADM make the directory, if needed.\n else:\n log.info('Making TYCHO directory for storing FITS files')\n os.makedirs(fitsdir)\n\n # ADM the actual name of the Tycho file and the associated README.\n tychofn = \"tycho2.kd.fits\"\n cosmofile = os.path.join(cosmodir, tychofn)\n rfile = os.path.join(cosmodir, \"README\")\n\n # ADM the associated output files.\n outfile = os.path.join(fitsdir, tychofn)\n routfile = os.path.join(fitsdir, \"README\")\n\n # ADM read in the Tycho file and header in upper-case.\n objs, hdr = fitsio.read(cosmofile, header=True, upper=True)\n nobjs = len(objs)\n done = np.zeros(nobjs, dtype=tychodatamodel.dtype)\n for col in tychodatamodel.dtype.names:\n # ADM proper motions need converted to mas/yr.\n if \"PM\" in col:\n done[col] = objs[col]*1000\n else:\n done[col] = objs[col]\n\n # ADM add some information to the header\n copydate = datetime.utcnow().isoformat(timespec='seconds')\n hdr[\"COSMODIR\"] = cosmodir\n hdr[\"COPYDATE\"] = copydate\n\n # ADM write the data.\n fitsio.write(outfile, done, extname='TYCHOFITS', header=hdr)\n\n # ADM also update the README.\n msg = \"\\nCopied from: {}\\non: {}\\nthe specific file being: {}\\n\".format(\n cosmodir, copydate, cosmofile)\n with open(rfile) as f:\n readme = f.read()\n with open(routfile, 'w') as f:\n f.write(readme+msg)\n\n log.info('Wrote Tycho FITS file...t={:.1f}s'.format(time()-start))\n\n return\n\n\ndef tycho_fits_to_healpix():\n \"\"\"Convert files in $TYCHO_DIR/fits to files in $TYCHO_DIR/healpix.\n\n Returns\n -------\n Nothing\n But the archived Tycho FITS files in $TYCHO_DIR/fits are\n rearranged by HEALPixel in the directory $TYCHO_DIR/healpix.\n The HEALPixel sense is nested with nside=get_tycho_nside(), and\n each file in $TYCHO_DIR/healpix is called healpix-xxxxx.fits,\n where xxxxx corresponds to the HEALPixel number.\n\n Notes\n -----\n - The environment variable $TYCHO_DIR must be set.\n \"\"\"\n # ADM the resolution at which the Tycho HEALPix files are stored.\n nside = get_tycho_nside()\n npix = hp.nside2npix(nside)\n\n # ADM check that the TYCHO_DIR is set.\n tychodir = get_tycho_dir()\n\n # ADM construct the directories for reading/writing files.\n fitsdir = os.path.join(tychodir, \"fits\")\n tychofn = os.path.join(fitsdir, \"tycho2.kd.fits\")\n hpxdir = os.path.join(tychodir, \"healpix\")\n\n # ADM make sure the output directory is empty.\n if os.path.exists(hpxdir):\n if len(os.listdir(hpxdir)) > 0:\n msg = \"{} must be empty to make Tycho HEALPix files!\".format(hpxdir)\n log.critical(msg)\n raise ValueError(msg)\n # ADM make the output directory, if needed.\n else:\n log.info(\"Making Tycho directory for storing HEALPix files\")\n os.makedirs(hpxdir)\n\n # ADM read in the Tycho file and assing Tycho objects to HEALPixels.\n objs, allhdr = fitsio.read(tychofn, header=True, upper=True)\n pix = radec2pix(nside, objs[\"RA\"], objs[\"DEC\"])\n\n # ADM loop through the pixels and write out the files.\n for pixnum in range(npix):\n # ADM construct the name of the output file.\n outfilename = io.hpx_filename(pixnum)\n outfile = 
os.path.join(hpxdir, outfilename)\n # ADM update the header with new information.\n hdr = dict(allhdr).copy()\n hdr[\"HPXNSIDE\"] = nside\n hdr[\"HPXNEST\"] = True\n hdr[\"HPXDATE\"] = datetime.utcnow().isoformat(timespec='seconds')\n\n # ADM determine which objects are in this pixel and write out.\n done = objs[pix == pixnum]\n\n fitsio.write(outfile, done, extname=\"TYCHOHPX\", header=hdr)\n\n log.info('Wrote Tycho HEALPix files...t={:.1f}s'.format(time()-start))\n\n return\n\n\ndef make_tycho_files():\n \"\"\"Make the HEALPix-split Tycho files in one fell swoop.\n\n Returns\n -------\n Nothing\n But produces:\n - A FITS file with appropriate header and columns from\n `tychodatamodel`, and a README in $TYCHO_DIR/fits.\n - FITS files reorganized by HEALPixel in $TYCHO_DIR/healpix.\n\n The HEALPixel sense is nested with nside=get_tycho_nside(), and\n each file in $TYCHO_DIR/healpix is called healpix-xxxxx.fits,\n where xxxxx corresponds to the HEALPixel number.\n\n Notes\n -----\n - The environment variable $TYCHO_DIR must be set.\n \"\"\"\n t0 = time()\n log.info('Begin making Tycho files...t={:.1f}s'.format(time()-t0))\n\n # ADM check that the TYCHO_DIR is set.\n tychodir = get_tycho_dir()\n\n # ADM a quick check that the fits and healpix directories are empty\n # ADM before embarking on the slower parts of the code.\n fitsdir = os.path.join(tychodir, 'fits')\n hpxdir = os.path.join(tychodir, 'healpix')\n for direc in [fitsdir, hpxdir]:\n if os.path.exists(direc):\n if len(os.listdir(direc)) > 0:\n msg = \"{} should be empty to make Tycho files!\".format(direc)\n log.critical(msg)\n raise ValueError(msg)\n\n grab_tycho()\n log.info('Copied Tycho FITS file from cosmo...t={:.1f}s'.format(time()-t0))\n\n tycho_fits_to_healpix()\n log.info('Rearranged FITS files by HEALPixel...t={:.1f}s'.format(time()-t0))\n\n return\n\n\ndef find_tycho_files(objs, neighbors=True, radec=False):\n \"\"\"Find full paths to Tycho healpix files for objects by RA/Dec.\n\n Parameters\n ----------\n objs : :class:`~numpy.ndarray`\n Array of objects. Must contain the columns \"RA\" and \"DEC\".\n neighbors : :class:`bool`, optional, defaults to ``True``\n Also return all pixels that touch the files of interest\n to prevent edge effects (e.g. if a Tycho source is 1 arcsec\n away from a primary source and so in an adjacent pixel).\n radec : :class:`bool`, optional, defaults to ``False``\n If ``True`` then the passed `objs` is an [RA, Dec] list\n instead of a rec array that contains \"RA\" and \"DEC\".\n\n Returns\n -------\n :class:`list`\n A list of all Tycho files to read to account for objects at\n the passed locations.\n\n Notes\n -----\n - The environment variable $TYCHO_DIR must be set.\n \"\"\"\n # ADM the resolution at which the Tycho HEALPix files are stored.\n nside = get_tycho_nside()\n\n # ADM check that the TYCHO_DIR is set and retrieve it.\n tychodir = get_tycho_dir()\n hpxdir = os.path.join(tychodir, 'healpix')\n\n return io.find_star_files(objs, hpxdir, nside,\n neighbors=neighbors, radec=radec)\n\n\ndef find_tycho_files_hp(nside, pixlist, neighbors=True):\n \"\"\"Find full paths to Tycho healpix files in a set of HEALPixels.\n\n Parameters\n ----------\n nside : :class:`int`\n (NESTED) HEALPixel nside.\n pixlist : :class:`list` or `int`\n A set of HEALPixels at `nside`.\n neighbors : :class:`bool`, optional, defaults to ``True``\n Also return files corresponding to all neighbors that touch the\n pixels in `pixlist` to prevent edge effects (e.g. 
a Tycho source\n is 1 arcsec outside of `pixlist` and so in an adjacent pixel).\n\n Returns\n -------\n :class:`list`\n A list of all Tycho files that need to be read in to account for\n objects in the passed list of pixels.\n\n Notes\n -----\n - The environment variable $TYCHO_DIR must be set.\n \"\"\"\n # ADM the resolution at which the healpix files are stored.\n filenside = get_tycho_nside()\n\n # ADM check that the TYCHO_DIR is set and retrieve it.\n tychodir = get_tycho_dir()\n hpxdir = os.path.join(tychodir, 'healpix')\n\n # ADM work with pixlist as an array.\n pixlist = np.atleast_1d(pixlist)\n\n # ADM determine the pixels that touch the passed pixlist.\n pixnum = nside2nside(nside, filenside, pixlist)\n\n # ADM if neighbors was sent, then retrieve all pixels that touch each\n # ADM pixel covered by the provided locations, to prevent edge effects...\n if neighbors:\n pixnum = add_hp_neighbors(filenside, pixnum)\n\n # ADM reformat in the healpix format used by desitarget.\n tychofiles = [os.path.join(hpxdir, io.hpx_filename(pn)) for pn in pixnum]\n\n return tychofiles\n\n\ndef match_to_tycho(objs, matchrad=1., radec=False):\n \"\"\"Match objects to Tycho healpixel files.\n\n Parameters\n ----------\n objs : :class:`~numpy.ndarray`\n Must contain at least \"RA\" and \"DEC\".\n matchrad : :class:`float`, optional, defaults to 1 arcsec\n The radius at which to match in arcseconds.\n radec : :class:`bool`, optional, defaults to ``False``\n If ``True`` then the passed `objs` is an [RA, Dec] list instead of\n a rec array.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n The matching Tycho information for each object. The returned\n format is as for desitarget.tychomatch.tychodatamodel with\n an extra column \"TYCHO_SEP\" which is the matching distance\n in ARCSECONDS.\n\n Notes\n -----\n - For objects with NO match in Tycho, the \"TYC1\", \"TYC2\" and\n \"TYCHO_SEP\" columns are -1, and other columns are zero.\n - Retrieves the CLOSEST match to Tycho for each passed object.\n - Because this reads in HEALPixel split files, it's (far) faster\n for objects that are clumped rather than widely distributed.\n \"\"\"\n # ADM parse whether a structure or coordinate list was passed.\n if radec:\n ra, dec = objs\n else:\n ra, dec = objs[\"RA\"], objs[\"DEC\"]\n\n # ADM set up an array of Tycho information for the output.\n nobjs = len(ra)\n done = np.zeros(nobjs, dtype=tychodatamodel.dtype)\n\n # ADM objects without matches should have TYC1/2/3, TYCHO_SEP of -1.\n for col in \"TYC1\", \"TYC2\":\n done[col] = -1\n tycho_sep = np.zeros(nobjs) - 1\n\n # ADM determine which Tycho files need to be scraped.\n tychofiles = find_tycho_files([ra, dec], radec=True)\n nfiles = len(tychofiles)\n\n # ADM catch the case of no matches to Tycho.\n if nfiles > 0:\n # ADM loop through the Tycho files and find matches.\n for ifn, fn in enumerate(tychofiles):\n if ifn % 500 == 0 and ifn > 0:\n log.info('{}/{} files; {:.1f} total mins elapsed'\n .format(ifn, nfiles, (time()-start)/60.))\n tycho = fitsio.read(fn)\n idtycho, idobjs, dist = radec_match_to(\n [tycho[\"RA\"], tycho[\"DEC\"]], [ra, dec],\n sep=matchrad, radec=True, return_sep=True)\n\n # ADM update matches whenever we have a CLOSER match.\n ii = (tycho_sep[idobjs] == -1) | (tycho_sep[idobjs] > dist)\n done[idobjs[ii]] = tycho[idtycho[ii]]\n tycho_sep[idobjs[ii]] = dist[ii]\n\n # ADM add the separation distances to the output array.\n dt = tychodatamodel.dtype.descr + [(\"TYCHO_SEP\", \">f4\")]\n output = np.zeros(nobjs, dtype=dt)\n for col in 
tychodatamodel.dtype.names:\n output[col] = done[col]\n output[\"TYCHO_SEP\"] = tycho_sep\n\n return output\n"
] | [
[
"numpy.atleast_1d",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shaunwbell/EcoFOCI_AcrobatProcessing | [
"4a8c2514d09c98779b473bfd25067e73dbf94956"
] | [
"gridtime_resample.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\n Background:\n --------\n gridtime_resample.py\n \n \n Purpose:\n --------\n Resample and fill timegaps\n\n History:\n --------\n\n\"\"\"\n\nimport argparse\nimport pandas as pd\n\n\n\"\"\"---\"\"\"\n\nparser = argparse.ArgumentParser(description='CTD plots')\nparser.add_argument('DataPath', metavar='DataPath', type=str,\n\thelp='full path to directory of processed nc files')\nparser.add_argument('resolution', metavar='resolution', type=str,\n\thelp='choose: 1S, 60S, 3600S')\n\nargs = parser.parse_args()\n\ndf = pd.read_csv(args.DataPath,parse_dates=['DateTime'])\ndf.set_index(pd.DatetimeIndex(df['DateTime']),inplace=True,drop=True)\n\ndf.resample(args.resolution).to_csv(args.DataPath.replace('.csv','.resample.csv'))"
] | [
[
"pandas.read_csv",
"pandas.DatetimeIndex"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
mrcabo/Grasping_Detection_System | [
"18cf23258a1c9342bc9ce57137ac399768bb1fed"
] | [
"code/similarities.py"
] | [
"from scipy.spatial.distance import jaccard\nimport numpy as np\nimport pandas as pd\n\n\n# Computing Jaccard Distance of two 5D-Rectangles\n\n# Issues to deal with:\n# Normalizing values?\n# Input format correct?\n# Weighting of the different dimensions?\n\n\ndef jaccard_distance(datFr, name, pred):\n \"\"\"\n Should return the \"closest\" jaccard distance of the rectangles in the label dat\n and the prediction distance.\n\n Input:\n datFr: 5 Dim. DataFrame including all labels, assuming that column 0\n includes the names of the respective files the rectangles belong to.\n name: Name as string of the correct file.\n pred: Prediction rectangle\n\n Return:\n Closest Distance (should be a float)\n\n \"\"\"\n # Indexing the correct rectangles based on the name, retrieving all\n # columns, minus the \"name\"-one\n corr_rect = datFr.loc[datFr[0].str.match(name), 1:]\n # Computing all Jaccard Distances\n jacc_distances = corr_rect.apply(jaccard, axis=1, args=[pred])\n # Returning closest distance\n return jacc_distances.min()\n\n\n\"\"\"\nReturns closest Ruzicka Distance, related to Jaccard Distance, of rectangles\nin the label dat and the prediction distance.\n\nInput:\n datFr: 5 Dim. DataFrame including all labels, assuming that column 0 \n includes the names of the respective files the rectangles belong to.\n name: Name as string of the correct file.\n pred: Prediction rectangle\n \nReturn:\n Closest Distance (should be a float)\n\"\"\"\n\n\ndef ruzicka_distance(datFr, name, pred):\n \"\"\"\n Chooses max and min per point, ultimately returning 1 minus the sum of the \n vector of minimal values by the sum of the vector of maximal values.\n (Ruzicka Similarity and Soergel Distance). So, if they are the same it\n returns 0, else it returns a higher value.\n \"\"\"\n\n def ruz_similarity(x, y):\n min_vec = np.minimum(x, y)\n max_vec = np.maximum(x, y)\n # Return Soergel Distance\n return 1 - min_vec.sum() / max_vec.sum()\n\n # Indexing the correct rectangles based on the name, retrieving all\n # columns, minus the \"name\"-one\n corr_rect = datFr.loc[datFr[0].str.match(name), 1:]\n # Getting Ruzicka for all correct Rectangles\n ruz_distances = corr_rect.apply(ruz_similarity, axis=1, args=[pred])\n return ruz_distances.min()\n\n\n\"\"\"\nFunction to incorporate both the positive and negative rectangles. Computes\nboth the Ruzicka distance to the closest positive and negative rectangle and \nreturns the positive plus the inverted negative Soergel Distance divided by two.\n\nInput:\n pos_df: 5 Dim. DataFrame including all labels for pos. rectangles \n (see ruzicka_distance)\n neg_df: 5 DIm. DataFrame, but for negative rectangles\n name: Name as string of correct image\n pred: Prediction Rectangle\n\"\"\"\n\n\ndef ruz_posneg(pos_df, neg_df, name, pred):\n ruz_pos = ruzicka_distance(pos_df, name, pred)\n ruz_neg = 1 - ruzicka_distance(neg_df, name, pred)\n return (ruz_pos + ruz_neg) / 2\n"
] | [
[
"numpy.maximum",
"numpy.minimum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
esimionato/sgg | [
"037e8732a7a77496636ed2f49ea859afa424c5a4"
] | [
"dataloaders/blob.py"
] | [
"\"\"\"\nData blob, hopefully to make collating less painful and MGPU training possible\n\"\"\"\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport os\nfrom config import TORCH12\n\nif TORCH12:\n cuda_args = {'non_blocking': True}\nelse:\n # pytorch 0.3\n cuda_args = {'async': True}\n\n\nclass Blob(object):\n def __init__(self, mode='det', is_train=False, num_gpus=1, primary_gpu=0, batch_size_per_gpu=3, mrcnn=False, is_cuda=True):\n \"\"\"\n Initializes an empty Blob object.\n :param mode: 'det' for detection and 'rel' for det+relationship\n :param is_train: True if it's training\n \"\"\"\n assert mode in ('det', 'rel')\n assert num_gpus >= 1\n self.mode = mode\n self.is_train = is_train\n self.num_gpus = num_gpus\n self.batch_size_per_gpu = batch_size_per_gpu\n self.primary_gpu = primary_gpu\n self.mrcnn = mrcnn\n self.is_cuda = is_cuda\n\n self.fns = [] # image file ids\n self.imgs = [] # [num_images, 3, IM_SCALE, IM_SCALE] array\n self.im_sizes = [] # [num_images, 4] array of (h, w, scale, num_valid_anchors)\n self.all_anchor_inds = [] # [all_anchors, 2] array of (img_ind, anchor_idx). Only has valid\n # boxes (meaning some are gonna get cut out)\n self.all_anchors = [] # [num_im, IM_SCALE/4, IM_SCALE/4, num_anchors, 4] shapes. Anchors outside get squashed\n # to 0\n self.gt_boxes = [] # [num_gt, 4] boxes\n self.gt_classes = [] # [num_gt,2] array of img_ind, class\n self.gt_rels = [] # [num_rels, 3]. Each row is (gtbox0, gtbox1, rel).\n\n self.gt_sents = []\n self.gt_nodes = []\n self.sent_lengths = []\n\n self.train_anchor_labels = [] # [train_anchors, 5] array of (img_ind, h, w, A, labels)\n self.train_anchors = [] # [train_anchors, 8] shapes with anchor, target\n\n self.train_anchor_inds = None # This will be split into GPUs, just (img_ind, h, w, A).\n\n self.batch_size = None\n self.gt_box_chunks = None\n self.anchor_chunks = None\n self.train_chunks = None\n self.proposal_chunks = None\n self.proposals = []\n\n @property\n def is_flickr(self):\n return self.mode == 'flickr'\n\n @property\n def is_rel(self):\n return self.mode == 'rel'\n\n @property\n def volatile(self):\n return not self.is_train\n\n def append(self, d):\n \"\"\"\n Adds a single image to the blob\n :param datom:\n :return:\n \"\"\"\n self.fns.append(os.path.basename(d['fn']))\n\n i = len(self.imgs)\n self.imgs.append(d['img'])\n\n h, w, scale = d['img_size']\n\n # all anchors\n self.im_sizes.append((h, w, scale))\n\n gt_boxes_ = d['gt_boxes'].astype(np.float32) * d['scale']\n self.gt_boxes.append(gt_boxes_)\n\n # print('blob', w, h, np.max(gt_boxes_, axis=0))\n\n self.gt_classes.append(np.column_stack((\n i * np.ones(d['gt_classes'].shape[0], dtype=np.int64),\n d['gt_classes'],\n )))\n\n # Add relationship info\n if self.is_rel:\n self.gt_rels.append(np.column_stack((\n i * np.ones(d['gt_relations'].shape[0], dtype=np.int64),\n d['gt_relations'])))\n\n # Augment with anchor targets\n # if self.is_train:\n # train_anchors_, train_anchor_inds_, train_anchor_targets_, train_anchor_labels_ = \\\n # anchor_target_layer(gt_boxes_, (h, w), mrcnn=self.mrcnn)\n #\n # self.train_anchors.append(np.hstack((train_anchors_, train_anchor_targets_)))\n #\n # self.train_anchor_labels.append(np.column_stack((\n # i * np.ones(train_anchor_inds_.shape[0], dtype=np.int64),\n # train_anchor_inds_,\n # train_anchor_labels_,\n # )))\n\n if 'proposals' in d:\n self.proposals.append(np.column_stack((i * np.ones(d['proposals'].shape[0], dtype=np.float32),\n d['scale'] * d['proposals'].astype(np.float32))))\n\n\n\n 
def _chunkize(self, datom, tensor=torch.LongTensor):\n \"\"\"\n Turn data list into chunks, one per GPU\n :param datom: List of lists of numpy arrays that will be concatenated.\n :return:\n \"\"\"\n chunk_sizes = [0] * self.num_gpus\n for i in range(self.num_gpus):\n for j in range(self.batch_size_per_gpu):\n chunk_sizes[i] += datom[i * self.batch_size_per_gpu + j].shape[0]\n t = np.concatenate(datom, 0)\n if len(t) == 0:\n return 0, chunk_sizes # Variable(tensor([]), volatile=self.volatile)\n return Variable(tensor(t)), chunk_sizes\n\n # @profile\n def reduce(self):\n \"\"\" Merges all the detections into flat lists + numbers of how many are in each\"\"\"\n if len(self.imgs) != self.batch_size_per_gpu * self.num_gpus:\n raise ValueError(\"Wrong batch size? imgs len {} bsize/gpu {} numgpus {}\".format(\n len(self.imgs), self.batch_size_per_gpu, self.num_gpus\n ))\n\n if not self.mrcnn:\n self.imgs = Variable(torch.stack(self.imgs, 0))\n\n self.im_sizes = np.stack(self.im_sizes).reshape(\n (self.num_gpus, self.batch_size_per_gpu, 3))\n\n if self.is_rel:\n self.gt_rels, self.gt_rel_chunks = self._chunkize(self.gt_rels)\n\n self.gt_boxes, self.gt_box_chunks = self._chunkize(self.gt_boxes, tensor=torch.FloatTensor)\n self.gt_classes, _ = self._chunkize(self.gt_classes)\n # if self.is_train:\n # self.train_anchor_labels, self.train_chunks = self._chunkize(self.train_anchor_labels)\n # self.train_anchors, _ = self._chunkize(self.train_anchors, tensor=torch.FloatTensor)\n # self.train_anchor_inds = self.train_anchor_labels[:, :-1].contiguous()\n\n if len(self.proposals) != 0:\n self.proposals, self.proposal_chunks = self._chunkize(self.proposals, tensor=torch.FloatTensor)\n\n\n\n def _scatter(self, x, chunk_sizes, dim=0):\n \"\"\" Helper function\"\"\"\n if self.num_gpus == 1:\n return x.cuda(self.primary_gpu, **cuda_args) if self.is_cuda else x\n return torch.nn.parallel.scatter_gather.Scatter.apply(\n list(range(self.num_gpus)), chunk_sizes, dim, x)\n\n # @profile\n def scatter(self):\n \"\"\" Assigns everything to the GPUs\"\"\"\n if not self.mrcnn:\n self.imgs = self._scatter(self.imgs, [self.batch_size_per_gpu] * self.num_gpus)\n\n self.gt_classes_primary = self.gt_classes.cuda(self.primary_gpu, **cuda_args) if self.is_cuda else self.gt_classes\n self.gt_boxes_primary = self.gt_boxes.cuda(self.primary_gpu, **cuda_args) if self.is_cuda else self.gt_boxes\n\n # Predcls might need these\n self.gt_classes = self._scatter(self.gt_classes, self.gt_box_chunks)\n self.gt_boxes = self._scatter(self.gt_boxes, self.gt_box_chunks)\n\n if self.is_train:\n\n # self.train_anchor_inds = self._scatter(self.train_anchor_inds,\n # self.train_chunks)\n # self.train_anchor_labels = self.train_anchor_labels.cuda(self.primary_gpu, **cuda_args)\n # self.train_anchors = self.train_anchors.cuda(self.primary_gpu, **cuda_args)\n\n if self.is_rel:\n self.gt_rels = self._scatter(self.gt_rels, self.gt_rel_chunks)\n else:\n if self.is_rel:\n self.gt_rels = self.gt_rels.cuda(self.primary_gpu, **cuda_args) if self.is_cuda else self.gt_rels\n\n if self.proposal_chunks is not None:\n self.proposals = self._scatter(self.proposals, self.proposal_chunks)\n\n # @profile\n def __getitem__(self, index):\n \"\"\"\n Returns a tuple containing data\n :param index: Which GPU we're on, or 0 if no GPUs\n :return: If training:\n (image, im_size, img_start_ind, anchor_inds, anchors, gt_boxes, gt_classes, \n train_anchor_inds)\n test:\n (image, im_size, img_start_ind, anchor_inds, anchors)\n \"\"\"\n if index not in 
list(range(self.num_gpus)):\n raise ValueError(\"Out of bounds with index {} and {} gpus\".format(index, self.num_gpus))\n\n if self.is_rel:\n rels = self.gt_rels\n if index > 0 or self.num_gpus != 1:\n rels_i = rels[index] if self.is_rel else None\n elif self.is_flickr:\n rels = (self.gt_sents, self.gt_nodes)\n if index > 0 or self.num_gpus != 1:\n rels_i = (self.gt_sents[index], self.gt_nodes[index])\n else:\n rels = None\n rels_i = None\n\n if self.proposal_chunks is None:\n proposals = None\n else:\n proposals = self.proposals\n\n if index == 0 and self.num_gpus == 1:\n image_offset = 0\n if self.is_train:\n return (self.imgs, self.im_sizes[0], image_offset,\n self.gt_boxes, self.gt_classes, rels, proposals, None, self.fns)\n return self.imgs, self.im_sizes[0], image_offset, self.gt_boxes, self.gt_classes, rels, proposals, self.fns\n\n # Otherwise proposals is None\n assert proposals is None\n\n image_offset = self.batch_size_per_gpu * index\n # TODO: Return a namedtuple\n if self.is_train:\n return (\n self.imgs[index], self.im_sizes[index], image_offset,\n self.gt_boxes[index], self.gt_classes[index], rels_i, None, None, self.fns[index])\n return (self.imgs[index], self.im_sizes[index], image_offset,\n self.gt_boxes[index], self.gt_classes[index], rels_i, None, self.fns[index])\n"
] | [
[
"numpy.concatenate",
"numpy.ones",
"numpy.stack",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tarunkathuria1/project121 | [
"ee2e5dd40f790c7608cc5a22747be041b9441a33"
] | [
"blackScreen.py"
] | [
"import cv2\nfrom matplotlib import image\nimport numpy as np\n\nvideo = cv2.VideoCapture(0)\nimage = cv2.imread(\"me.jpeg\")\n\nwhile True:\n\n ret,frame = video.read()\n print(frame)\n frame = cv2.resize(frame,(640,480))\n image = cv2.resize(frame,(640,480))\n\n u_black = np.array([104,153,70])\n l_black = np.array([30,30,0])\n\n mask = cv2.inRange(frame,l_black,u_black)\n res = cv2.bitwise_and(frame,frame,mask=mask)\n\n f=frame-res\n f= np.where(f==0,image,f)\n\n cv2.imshow(\"video\",frame)\n cv2.imshow(\"mask\",f)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nvideo.release()\ncv2.destroyAllWindows()"
] | [
[
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Dartas-F/SCNN_Pytorch | [
"cbcf35e96e4e4d8de7d884c3debcfd5ff441e86a"
] | [
"new_train.py"
] | [
"\"\"\"\nsource: https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel\n\n\"\"\"\nimport os\nimport time\nimport cv2\nimport numpy as np\nimport shutil\nimport sys\nfrom numpy.core.fromnumeric import argmax\n\nimport torch\nimport torch.optim as optim\nimport torchvision\nfrom dataset.BDD100k import BDDDataset\nfrom model import SCNN\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader\nfrom utils.lr_scheduler import PolyLR\n#from utils.transforms import *\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm #progress bar\n#import pdb pdb.set_trace() for debugging\n\n# Directory settings\nworking_dir = \"C:/Users/ynfuc/Documents/Masterarbeit/.vscode/BDD100k_implements/SCNN_Pytorch\"\nbdd100k_train_img_path = working_dir + \"/dataset/images/train/\"\nbdd100k_train_dl_path = working_dir + \"/dataset/drivable_area/labels_dl/train/\"\nbdd100k_val_img_path = working_dir + \"/dataset/images/val/\"\nbdd100k_val_dl_path = working_dir + \"/dataset/drivable_area/labels_dl/val/\"\nexp_dir = working_dir + \"/experiments/exp2/drivable/\"\nexp_name = \"t001\"\n\n# CUDA for PyTorch\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint(\"Use device: \", device)\ntorch.backends.cudnn.benchmark = True\n\n\n#Data loader parameters\nparams = {\"batch_size\": 20, \"shuffle\": True, \"num_workers\": 4, \"pin_memory\": True}\nmax_epoch = 100\nresize_shape = tuple([512, 288])\noptim_set = {\"lr\": 0.01, \"momentum\": 0.9, \"weight_decay\": 1e-4, \"nesterov\": True}\nlr_set = {\"warmup\": 200, \"max_iter\": 15000, \"min_lrs\": 1e-05}\n\n# Define training dataset and data loader\ntrain_bdd100k = BDDDataset(image_path=bdd100k_train_img_path, drivable_path = bdd100k_train_dl_path)\ntrain_bdd100k_dataset_loader = DataLoader(dataset=train_bdd100k, **params)\n\n# Define validation dataset and data loader\nval_bdd100k = BDDDataset(image_path=bdd100k_val_img_path, drivable_path = bdd100k_val_dl_path)\nval_bdd100k_dataset_loader = DataLoader(dataset=val_bdd100k, **params)\n\n#Declare model & optimizers\nnet = SCNN(resize_shape, pretrained=True)\nnet = net.to(device)\n#torch.distributed.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n#torch.cuda.set_device()\n#net = torch.nn.parallel.DistributedDataParallel(net)\n#net = torch.nn.DataParallel(net)\n#\n#net.eval()\ntensorboard = SummaryWriter(exp_dir + \"tb/\")\n\n\noptimizer = optim.SGD(net.parameters(), **optim_set)\nlr_scheduler = PolyLR(optimizer, 0.9, **lr_set)\nbest_val_loss = 1000\n\n#@profile\ndef train(epoch):\n print(\"Train Epoch: {}\".format(epoch))\n net.train()\n train_loss = 0\n train_loss_seg = 0\n ##train_loss_exist = 0\n epoch_accuracy = 0\n\n progressbar = tqdm(range(len(train_bdd100k_dataset_loader)))\n #Training loop\n for batch_idx, sample in enumerate(train_bdd100k_dataset_loader):\n # move to GPU\n img = sample['image'].to(device)\n segLabel = sample['label'].to(device)\n\n #null gradient, get model output\n optimizer.zero_grad()\n seg_pred, exist_pred, loss_seg, loss_exist = net(img, segLabel) # loss\n\n \n loss_seg = loss_seg.sum()\n loss_seg.requres_grad = True\n #loss_exist = loss_exist.sum()\n #loss = loss.sum()\n #loss.requres_grad = True\n\n #backprop, grad, learning rate update\n loss_seg.backward()\n optimizer.step()\n lr_scheduler.step()\n\n iter_idx = epoch * len(train_bdd100k_dataset_loader) + batch_idx\n #train_loss = loss.item()\n train_loss_seg = loss_seg.item()\n #train_loss_exist = loss_exist.item()\n\n 
#Calculate accuracy\n predicted = torch.argmax(seg_pred.data, dim=1) #returns sec arg of torch.max \n correct_train = predicted.eq(segLabel.data).sum().item()\n accuracy = 100 * correct_train / segLabel.numel()\n \n #Save epoch accuracy in tensorboard\n epoch_accuracy +=accuracy\n if batch_idx >= (len(train_bdd100k_dataset_loader)-1):\n tensorboard.add_scalar(\"accuracy\", epoch_accuracy, iter_idx)\n\n progressbar.set_description(\"batch loss: {:.3f}\".format(loss_seg.item()))\n progressbar.update(1)\n\n lr = optimizer.param_groups[0]['lr']\n tensorboard.add_scalar(\"train_loss\", train_loss, iter_idx)\n tensorboard.add_scalar(\"learning_rate\", lr, iter_idx)\n \"\"\"\n print(\"img size: \", img.size(0), \"label size: \", segLabel.size(0))\n print(\"img size: \", type(img.size(0)), \"label size: \", type(segLabel.size(0)))\n print(\"same: \", img.size(0)==segLabel.size(0), \"diff: \", img.size(0)!=segLabel.size(0))\n \"\"\"\n #tensorboard.add_graph(net, input_to_model=img, verbose=False)\n\n\n progressbar.close()\n tensorboard.flush()\n\n #Save model & settings in exp_name.pth\n if epoch % 1 == 0:\n save_dict = {\n \"epoch\": epoch,\n \"net\": net.module.state_dict() if isinstance(net, torch.nn.DataParallel) else net.state_dict(),\n \"optim\": optimizer.state_dict(),\n \"lr_scheduler\": lr_scheduler.state_dict(),\n \"best_val_loss\": best_val_loss\n }\n save_name = os.path.join(exp_dir, exp_name + '.pth')\n torch.save(save_dict, save_name)\n print(\"model is saved: {}\".format(save_name))\n\n print(\"------------------------\\n\")\n\n\"\"\"\n #average trainloss calc + print every 100 batches\n train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) \n if batch_idx % 100 == 0:\n print('Epoch %d, Batch %d loss: %.6f' %(epoch, batch_idx + 1, train_loss))\n \"\"\"\n\n\ndef val(epoch):\n global best_val_loss\n net.eval()\n print(\"Val Epoch: {}\".format(epoch))\n\n net.eval()\n val_loss = 0\n val_loss_seg = 0\n #val_loss_exist = 0 #CBE_loss not available for BDD100k\n progressbar = tqdm(range(len(val_bdd100k_dataset_loader)))\n\n #Validation\n with torch.set_grad_enabled(False):\n total_train = 0\n correct_train = 0\n epoch_accuracy = 0\n\n for batch_idx, sample in enumerate(val_bdd100k_dataset_loader):\n #Transfer to GPU\n img = sample['image'].to(device)\n segLabel = sample['label'].to(device)\n #exist = sample['exist'].cuda()\n #local_batch, local_labels = local_batch.to(device), local_labels.to(device)\n\n seg_pred, exist_pred, loss_seg, loss_exist, loss = net(img, segLabel)\n\n loss_seg = loss_seg.sum()\n #loss_exist = loss_exist.sum()\n loss = loss.sum()\n\n predicted = torch.argmax(seg_pred.data, dim=1) #returns sec arg of torch.max\n #print(total_train, predicted.shape, segLabel.shape) \n correct_train = predicted.eq(segLabel.data).sum().item()\n accuracy = 100 * correct_train / segLabel.numel()\n predict = predicted.eq(segLabel) #True/False übereinstimmung\n np.set_printoptions(threshold=sys.maxsize)\n #print(\"Variante1: {:.3f}\".format(accuracy))\n epoch_accuracy +=accuracy\n if batch_idx >= (len(train_bdd100k_dataset_loader)-1):\n tensorboard.add_scalar(\"val_accuracy\", epoch_accuracy, epoch)\n\n \"\"\"\n https://www.kaggle.com/devilsknight/malaria-detection-with-pytorch\n # convert output probabilities to predicted class\n pred = output.data.max(1, keepdim=True)[1]\n # compare predictions to true label\n correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())\n total += data.size(0)\n \"\"\"\n \n \"\"\"\n #prediction plot\n 
#print(\"Label: \",segLabel[0,110:190,210:290].numpy())\n #print(\"predic: \",predicted[0,110:190,210:290].numpy())\n #print(\"Compare: \",predict[0,110:190,210:290].numpy())\n print (seg_pred.shape)\n f = plt.figure()\n f.add_subplot(2,2,1)\n plt.imshow(img[0].permute(1,2,0))\n f.add_subplot(2,2,2)\n plt.imshow(segLabel[0])\n f.add_subplot(2,2,3)\n plt.imshow(predicted[0])\n f.add_subplot(2,2,4)\n plt.imshow(predict[0].detach().cpu())\n plt.show(block=True)\n #plt.pause(5)\n \"\"\"\n if batch_idx ==0:\n \n #val_images = [img[0].permute(1,2,0), segLabel[0], predicted[0], predict[0].detach().cpu()]\n tensorboard.add_image(\"Image: \", img[0], global_step=epoch, dataformats='CHW')\n \"\"\"\n tensorboard.add_image(\"Image_gt: \", segLabel[0], global_step=epoch, dataformats='HW')\n tensorboard.add_image(\"Image_predicted: \", predicted[0], global_step=epoch, dataformats='HW')\n tensorboard.add_image(\"Image_compare: \", predict[0].detach().cpu(), global_step=epoch, dataformats='HW')\n \"\"\"\n img_grid = torchvision.utils.make_grid([segLabel[0], predicted[0], predict[0].detach().cpu()])\n tensorboard.add_image(\"Val_Images: \", img_grid, global_step=epoch, dataformats='CHW')\n\n # visualize validation every 5 frame, 50 frames in all\n #gap_num = 25\n #if batch_idx%gap_num == 0 and batch_idx < 50 * gap_num:\n origin_imgs = []\n #seg_pred = seg_pred.detach().cpu().numpy()\n if batch_idx ==0:\n #np.set_printoptions(threshold=sys.maxsize)\n #print(net)\n #print(segLabel[0,180:230,450:512].numpy())\n #print(seg_pred[0,0,180:230,450:512].numpy())\n #plt.imshow(predict[0].detach().cpu())\n #plt.pause(5)\n \"\"\"\n #plt.subplot(1, 6, 1)\n plt.imshow(segLabel[0])\n #plt.pause(5)\n\n for i in range (0, 3):\n plt.figure()\n print(seg_pred.shape)\n plt.imshow(torch.exp(seg_pred[0, i, :, :]).detach().numpy())\n plt.pause(5)\n \n plt.close()\n \"\"\"\n #exist_pred = exist_pred.detach().cpu().numpy()\n \n val_loss += loss.item()\n val_loss_seg += loss_seg.item()\n #val_loss_exist += loss_exist.item()\n\n progressbar.set_description(\"batch loss: {:.3f}\".format(loss.item()))\n progressbar.update(1)\n\n progressbar.close()\n iter_idx = (epoch + 1) * len(train_bdd100k_dataset_loader) # keep align with training process iter_idx\n tensorboard.add_scalar(\"val_loss\", val_loss, iter_idx)\n tensorboard.add_scalar(\"val_loss_seg\", val_loss_seg, iter_idx)\n #tensorboard.scalar_summary(\"val_loss_exist\", val_loss_exist, iter_idx)\n tensorboard.flush()\n \n\n print(\"------------------------\\n\")\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n save_name = os.path.join(exp_dir, exp_name + '.pth')\n copy_name = os.path.join(exp_dir, exp_name + '_best.pth')\n shutil.copyfile(save_name, copy_name)\n\n\n #Model computions\n\ndef main():\n global best_val_loss\n resume = False\n if resume:\n save_dict = torch.load(os.path.join(exp_dir, exp_name + '.pth'))\n if isinstance(net, torch.nn.DataParallel):\n net.module.load_state_dict(save_dict['net'])\n else:\n net.load_state_dict(save_dict['net'])\n optimizer.load_state_dict(save_dict['optim'])\n lr_scheduler.load_state_dict(save_dict['lr_scheduler'])\n start_epoch = save_dict['epoch'] + 1\n best_val_loss = save_dict.get(\"best_val_loss\", 1e6)\n else:\n start_epoch = 0\n\n for epoch in range (start_epoch, max_epoch):\n train(epoch)\n #val(epoch)\n if epoch % 1 == 0:\n print(\"\\nValidation For Experiment: \", exp_dir)\n print(time.strftime('%H:%M:%S', time.localtime()))\n val(epoch)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\"\"\"\n probs = 
torch.log_softmax(seg_pred, dim = 1)\n _, tags = torch.max(probs, dim = 1)\n corrects = torch.eq(tags,segLabel).int()\n acc = corrects.sum()/corrects.numel()\n acc = acc * 100\n print(\"Variante2: \",float(acc))\n \"\"\"\n\n#for images, labels in train_bdd100k_dataset_loader:\n #Feed the data to the model"
] | [
[
"numpy.set_printoptions",
"torch.utils.data.DataLoader",
"torch.set_grad_enabled",
"torch.save",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"torch.device",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sergeant-wizard/pytorch-pfn-extras | [
"221c07aedb9d88e4b96b55da49f6c104f631e01a"
] | [
"tests/pytorch_pfn_extras_tests/onnx/test_export_testcase.py"
] | [
"import io\nimport os\nimport json\n\nimport numpy as np\nimport onnx\nimport onnx.numpy_helper\nimport pytest\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.onnx.symbolic_helper import _default_onnx_opset_version\n\nfrom pytorch_pfn_extras.onnx import export\nfrom pytorch_pfn_extras.onnx import export_testcase\nfrom pytorch_pfn_extras.onnx import is_large_tensor\nfrom pytorch_pfn_extras.onnx import LARGE_TENSOR_DATA_THRESHOLD\n\n\noutput_dir = 'out'\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4*4*50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4*4*50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef _get_output_dir(d, **kwargs):\n output_dir_base = 'out'\n opset_ver = kwargs.get('opset_version', _default_onnx_opset_version)\n\n output_dir = os.path.join(\n output_dir_base, 'opset{}'.format(opset_ver), d)\n os.makedirs(output_dir, exist_ok=True)\n return output_dir\n\n\ndef _helper(model, args, d, **kwargs):\n output_dir = _get_output_dir(d)\n if 'training' not in kwargs:\n kwargs['training'] = model.training\n export_testcase(model, args, output_dir, **kwargs)\n return output_dir\n\n\ndef test_export_testcase():\n model = Net().to('cpu')\n x = torch.zeros((1, 1, 28, 28))\n\n output_dir = _helper(model, x, 'mnist', output_grad=True)\n\n assert os.path.isdir(output_dir)\n assert os.path.isfile(os.path.join(output_dir, 'meta.json'))\n assert os.path.isfile(os.path.join(output_dir, 'model.onnx'))\n test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')\n assert os.path.isfile(os.path.join(test_data_set_dir, 'input_0.pb'))\n assert os.path.isfile(os.path.join(test_data_set_dir, 'output_0.pb'))\n assert os.path.isfile(os.path.join(\n test_data_set_dir, 'gradient_input_0.pb'))\n\n for i in range(8):\n assert os.path.isfile(os.path.join(\n test_data_set_dir, 'gradient_{}.pb'.format(i)))\n assert not os.path.isfile(os.path.join(test_data_set_dir, 'gradient_8.pb'))\n\n\ndef test_export_filename():\n model = nn.Sequential(nn.Linear(5, 10, bias=False))\n x = torch.zeros((2, 5))\n\n output_dir = _get_output_dir('export_filename')\n model_path = os.path.join(output_dir, 'model.onnx')\n\n with pytest.warns(UserWarning):\n out = export(model, x, model_path, return_output=True)\n\n assert os.path.isfile(model_path)\n expected_out = torch.zeros((2, 10)) # check only shape size\n np.testing.assert_allclose(\n out.detach().cpu().numpy(), expected_out.detach().cpu().numpy())\n\n\[email protected](\"ignore::UserWarning\")\ndef test_export_stream():\n model = nn.Sequential(nn.Linear(5, 10, bias=False))\n x = torch.zeros((2, 5))\n\n bytesio = io.BytesIO()\n assert len(bytesio.getvalue()) == 0\n out = export(model, x, bytesio, return_output=True)\n\n assert len(bytesio.getvalue()) > 0\n expected_out = torch.zeros((2, 10)) # check only shape size\n np.testing.assert_allclose(\n out.detach().cpu().numpy(), expected_out.detach().cpu().numpy())\n\n\ndef test_cuda_tensor():\n if not torch.cuda.is_available():\n pytest.skip('CUDA is not available')\n\n device = 'cuda'\n model = Net().to(device)\n x = torch.zeros((1, 1, 28, 28), device=device)\n\n _helper(model, x, 'mnist_cuda', output_grad=True)\n\n\ndef test_model_not_overwrite():\n model = Net().to('cpu')\n x 
= torch.zeros((1, 1, 28, 28))\n\n dir_name = 'multiple_test_dataset'\n output_dir = _helper(model, x, dir_name)\n assert os.path.isdir(output_dir)\n\n output_dir = _helper(model, x + 0.5, dir_name, model_overwrite=False)\n\n test_data_set_dir = os.path.join(output_dir, 'test_data_set_1')\n assert os.path.isfile(os.path.join(test_data_set_dir, 'input_0.pb'))\n assert os.path.isfile(os.path.join(test_data_set_dir, 'output_0.pb'))\n\n\ndef _to_array(f, name=None):\n assert os.path.isfile(f)\n onnx_tensor = onnx.TensorProto()\n with open(f, 'rb') as fp:\n onnx_tensor.ParseFromString(fp.read())\n if name is not None:\n assert onnx_tensor.name == name\n return onnx.numpy_helper.to_array(onnx_tensor)\n\n\ndef test_backward():\n model = nn.Sequential(nn.Linear(5, 10, bias=False))\n x = torch.ones((2, 5))\n\n output_dir = _helper(model, x, 'backword_default', output_grad=True)\n\n assert os.path.isdir(output_dir)\n test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')\n assert os.path.isdir(test_data_set_dir)\n\n grad = _to_array(os.path.join(test_data_set_dir, 'gradient_0.pb'))\n expected_grad = np.full((10, 5), 2.0, dtype=np.float32)\n np.testing.assert_allclose(grad, expected_grad)\n\n\ndef test_backward_custom_input():\n model = nn.Sequential(nn.Linear(5, 10, bias=False))\n x = torch.ones((2, 5))\n grad_in = torch.ones((2, 10)) * 0.5\n\n output_dir = _helper(\n model, x, 'backword_custom_input', output_grad=grad_in,\n output_names=['output0'])\n\n assert os.path.isdir(output_dir)\n test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')\n assert os.path.isdir(test_data_set_dir)\n\n output_grad_in = _to_array(\n os.path.join(test_data_set_dir, 'gradient_input_0.pb'), 'output0')\n np.testing.assert_allclose(output_grad_in, grad_in)\n\n grad = _to_array(os.path.join(test_data_set_dir, 'gradient_0.pb'))\n expected_grad = np.full((10, 5), 1.0, dtype=np.float32)\n np.testing.assert_allclose(grad, expected_grad)\n\n\[email protected](\n \"ignore::torch.jit.TracerWarning\", \"ignore::UserWarning\")\ndef test_backward_multiple_input():\n model = nn.GRU(input_size=10, hidden_size=3, num_layers=1)\n input = torch.ones((4, 5, 10), requires_grad=True)\n h = torch.ones((1, 5, 3), requires_grad=True)\n\n grads = [torch.ones((4, 5, 3)) / 2, torch.ones((1, 5, 3)) / 3]\n output_dir = _helper(model, (input, h), 'backward_multiple_input',\n output_grad=grads,\n output_names=['output0', 'output1'])\n assert os.path.isdir(output_dir)\n test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')\n assert os.path.isdir(test_data_set_dir)\n\n model.zero_grad()\n exp_out1, exp_out2 = model.forward(input, h)\n torch.autograd.backward(\n tensors=[exp_out1, exp_out2],\n grad_tensors=grads)\n\n output1_grad_in = _to_array(\n os.path.join(test_data_set_dir, 'gradient_input_0.pb'), 'output0')\n np.testing.assert_allclose(grads[0], output1_grad_in)\n output2_grad_in = _to_array(\n os.path.join(test_data_set_dir, 'gradient_input_1.pb'), 'output1')\n np.testing.assert_allclose(grads[1], output2_grad_in)\n\n for i, (name, param) in enumerate(model.named_parameters()):\n actual_grad = _to_array(\n os.path.join(test_data_set_dir, 'gradient_{}.pb'.format(i)), name)\n np.testing.assert_allclose(param.grad, actual_grad)\n\n\ndef test_export_testcase_strip_large_tensor_data():\n model = Net().to('cpu')\n x = torch.zeros((1, 1, 28, 28))\n\n output_dir = _helper(\n model, x, 'mnist_stripped_tensor_data',\n output_grad=True, strip_large_tensor_data=True)\n\n assert os.path.isdir(output_dir)\n assert 
os.path.isfile(os.path.join(output_dir, 'meta.json'))\n assert os.path.isfile(os.path.join(output_dir, 'model.onnx'))\n test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')\n assert os.path.isfile(os.path.join(test_data_set_dir, 'input_0.pb'))\n assert os.path.isfile(os.path.join(test_data_set_dir, 'output_0.pb'))\n\n for i in range(8):\n assert os.path.isfile(os.path.join(\n test_data_set_dir, 'gradient_{}.pb'.format(i)))\n assert not os.path.isfile(os.path.join(test_data_set_dir, 'gradient_8.pb'))\n\n with open(os.path.join(output_dir, 'meta.json')) as metaf:\n metaj = json.load(metaf)\n assert metaj['strip_large_tensor_data']\n\n def check_tensor(tensor):\n if is_large_tensor(tensor, LARGE_TENSOR_DATA_THRESHOLD):\n assert tensor.data_location == onnx.TensorProto.EXTERNAL\n assert tensor.external_data[0].key == 'location'\n meta = json.loads(tensor.external_data[0].value)\n assert meta['type'] == 'stripped'\n assert type(meta['average']) == float\n assert type(meta['variance']) == float\n else:\n assert len(tensor.external_data) == 0\n\n onnx_model = onnx.load(os.path.join(\n output_dir, 'model.onnx'), load_external_data=False)\n for init in onnx_model.graph.initializer:\n check_tensor(init)\n\n for pb_filepath in ('input_0.pb', 'output_0.pb'):\n with open(os.path.join(test_data_set_dir, pb_filepath), 'rb') as f:\n tensor = onnx.TensorProto()\n tensor.ParseFromString(f.read())\n check_tensor(tensor)\n\n\ndef test_export_testcase_options():\n model = Net().to('cpu')\n x = torch.zeros((1, 1, 28, 28))\n\n output_dir = _helper(\n model, x, 'mnist_stripped_tensor_data',\n opset_version=11, strip_doc_string=False)\n\n onnx_model = onnx.load(os.path.join(\n output_dir, 'model.onnx'), load_external_data=False)\n assert onnx_model.opset_import[0].version == 11\n assert onnx_model.graph.node[0].doc_string != ''\n"
] | [
[
"torch.ones",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.autograd.backward",
"torch.nn.GRU",
"torch.nn.Conv2d",
"numpy.full",
"torch.nn.Linear",
"torch.cuda.is_available",
"numpy.testing.assert_allclose",
"torch.nn.functional.max_pool2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |