{
  "source": "JinYAnGHe/openvino_training_extensions",
  "score": 2
}
#### File: pytorch_toolkit/face_recognition/train_16landmarks.py
```python
import argparse
import datetime
import os.path as osp
import numpy as np
import glog as log
from tensorboardX import SummaryWriter
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from datasets import IBUG
from model.common import models_landmarks
# from utils import landmarks_augmentation
from utils import landmarks_augmentation16
from utils.utils import save_model_cpu, load_model_state
from losses.alignment import AlignmentLoss
from evaluate_landmarks import evaluate
def train(args):
"""Launches training of landmark regression model"""
drops_schedule = [380]
dataset = IBUG(args.train, args.t_land)
val_dataset = IBUG(args.train, args.t_land, test=True)
log.info('Using augmentations for the training data')
# dataset.transform = transforms.Compose([
# landmarks_augmentation16.Rescale((70, 70)),
# landmarks_augmentation16.Blur(k=3, p=.2),
# landmarks_augmentation16.HorizontalFlip(p=.5),
# landmarks_augmentation16.RandomRotate(30),
# landmarks_augmentation16.RandomScale(.8, .9, p=.4),
# landmarks_augmentation16.RandomCrop(64),
# landmarks_augmentation16.ToTensor(switch_rb=True)])
dataset.transform = transforms.Compose([landmarks_augmentation16.Rescale((112, 112)),
landmarks_augmentation16.RandomErasing(),
landmarks_augmentation16.ToTensor(switch_rb=True)])
val_dataset.transform = transforms.Compose([landmarks_augmentation16.Rescale((112, 112)),
landmarks_augmentation16.ToTensor(switch_rb=True)])
train_loader = DataLoader(dataset, batch_size=args.train_batch_size, num_workers=4, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=24, num_workers=4, shuffle=False)
writer = SummaryWriter('./logs_landm/{:%Y_%m_%d_%H_%M}_'.format(datetime.datetime.now()) + args.snap_prefix)
model = models_landmarks['mobilelandnet']()
# print(model)
if args.snap_to_resume is not None:
log.info('Resuming snapshot ' + args.snap_to_resume + ' ...')
model = load_model_state(model, args.snap_to_resume, args.device, eval_state=False)
model = torch.nn.DataParallel(model, device_ids=[0, 1])
cudnn.enabled = True
cudnn.benchmark = True
# else:
# model = torch.nn.DataParallel(model, device_ids=[0])
# model.cuda()
# model.train()
# cudnn.enabled = True
# cudnn.benchmark = True
else:
model = torch.nn.DataParallel(model, device_ids=[0, 1])
model.cuda()
model.train()
cudnn.enabled = True
cudnn.benchmark = True
log.info('Face landmarks model:')
log.info(model)
criterion = AlignmentLoss('l1')
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, drops_schedule)
# scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, 100)
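# With drops_schedule = [380] and MultiStepLR's default gamma of 0.1, the Adam
# learning rate is reduced by a factor of 10 once, at epoch 380.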
for epoch_num in range(args.epoch_total_num):
scheduler.step()
# print("*****************************************")
# if epoch_num > 300:
# model.module.set_dropout_ratio(0.)
for i, data in enumerate(train_loader, 0):
iteration = epoch_num * len(train_loader) + i
data, gt_landmarks = data['img'].cuda(), data['landmarks'].cuda()
# print(gt_landmarks)
predicted_landmarks = model(data)
# print(predicted_landmarks)
optimizer.zero_grad()
loss = criterion(predicted_landmarks, gt_landmarks)
loss.backward()
optimizer.step()
if i % 10 == 0:
log.info('Iteration %d, Loss: %.4f' % (iteration, loss.item()))
log.info('Learning rate: %f' % scheduler.get_lr()[0])
writer.add_scalar('Loss/train_loss', loss.item(), iteration)
writer.add_scalar('Learning_rate', scheduler.get_lr()[0], iteration)
if iteration % args.val_step == 0:
snapshot_name = osp.join(args.snap_folder, args.snap_prefix + '_{0}.pt'.format(iteration))
log.info('Saving Snapshot: ' + snapshot_name)
save_model_cpu(model, optimizer, snapshot_name, epoch_num)
model.eval()
log.info('Evaluating Snapshot: ' + snapshot_name)
avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)
# weights = per_point_avg_err / np.sum(per_point_avg_err)
# criterion.set_weights(weights)
# log.info(str(weights))
log.info('Avg train error: {}'.format(avg_err))
log.info('Train failure rate: {}'.format(failures_rate))
writer.add_scalar('Quality/Avg_error', avg_err, iteration)
writer.add_scalar('Quality/Failure_rate', failures_rate, iteration)
model.train()
def main():
"""Creates a command line parser"""
parser = argparse.ArgumentParser(description='Training Landmarks detector in PyTorch')
parser.add_argument('--train_data_root', dest='train', required=True, type=str, help='Path to train data.')
parser.add_argument('--train_list', dest='t_list', required=False, type=str, help='Path to train data image list.')
parser.add_argument('--train_landmarks', default='', dest='t_land', required=False, type=str,
help='Path to landmarks for the train images.')
parser.add_argument('--train_batch_size', type=int, default=256, help='Train batch size.')
parser.add_argument('--epoch_total_num', type=int, default=400, help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.0001, help='Learning rate.')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--val_step', type=int, default=200, help='Evaluate and snapshot the model every val_step iterations.')
parser.add_argument('--weight_decay', type=float, default=0.000001, help='Weight decay.')
parser.add_argument('--device', '-d', default=0, type=int)
parser.add_argument('--snap_folder', type=str, default='./snapshots/', help='Folder to save snapshots.')
parser.add_argument('--snap_prefix', type=str, default='LandNet-68single', help='Prefix for snapshots.')
parser.add_argument('--snap_to_resume', type=str, default=None, help='Snapshot to resume.')
parser.add_argument('--dataset', choices=['vgg', 'celeb', 'ngd', 'ibug'], type=str, default='ibug', help='Dataset.')
arguments = parser.parse_args()
with torch.cuda.device(arguments.device):
train(arguments)
if __name__ == '__main__':
main()
```
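A minimal sketch of driving this training entry point programmatically, assuming the file is importable as the module `train_16landmarks`; the `argparse.Namespace` fields mirror the flags defined in `main()`, and the dataset paths are placeholders:
```python
import argparse
import torch

from train_16landmarks import train  # hypothetical import of the file above

# Placeholder paths; the field names match the parser dests in main().
args = argparse.Namespace(
    train='/data/ibug',              # --train_data_root
    t_list=None,                     # --train_list
    t_land='/data/ibug/landmarks',   # --train_landmarks
    train_batch_size=256,
    epoch_total_num=400,
    lr=1e-4,
    momentum=0.9,
    val_step=200,
    weight_decay=1e-6,
    device=0,
    snap_folder='./snapshots/',
    snap_prefix='LandNet-68single',
    snap_to_resume=None,
    dataset='ibug',
)

with torch.cuda.device(args.device):
    train(args)
```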
{
  "source": "JinyangLi01/mlinspect",
  "score": 3
}
#### File: demo/feature_overview/missing_embeddings.py
```python
import dataclasses
from typing import Any, Iterable, List
from mlinspect import FunctionInfo
from mlinspect.inspections import Inspection, InspectionInputUnaryOperator
@dataclasses.dataclass(frozen=True, eq=True)
class MissingEmbeddingsInfo:
"""
Info about potentially missing embeddings
"""
missing_embedding_count: int
missing_embeddings_examples: List[str]
class MissingEmbeddings(Inspection):
"""
A simple example inspection
"""
def __init__(self, example_threshold=10):
self._is_embedding_operator = False
self._missing_embedding_count = 0
self._missing_embeddings_examples = []
self.example_threshold = example_threshold
def visit_operator(self, inspection_input) -> Iterable[Any]:
"""
Visit an operator
"""
# pylint: disable=too-many-branches, too-many-statements
if isinstance(inspection_input, InspectionInputUnaryOperator) and \
inspection_input.operator_context.function_info == \
FunctionInfo('example_pipelines.healthcare.healthcare_utils', 'MyW2VTransformer'):
# TODO: Are there existing word embedding transformers for sklearn we can use this for?
self._is_embedding_operator = True
for row in inspection_input.row_iterator:
# Count missing embeddings
embedding_array = row.output[0]
is_zero_vector = not embedding_array.any()
if is_zero_vector:
self._missing_embedding_count += 1
if len(self._missing_embeddings_examples) < self.example_threshold:
self._missing_embeddings_examples.append(row.input[0])
yield None
else:
for _ in inspection_input.row_iterator:
yield None
def get_operator_annotation_after_visit(self) -> Any:
if self._is_embedding_operator:
assert self._missing_embedding_count is not None # May only be called after the operator visit is finished
result = MissingEmbeddingsInfo(self._missing_embedding_count, self._missing_embeddings_examples)
self._missing_embedding_count = 0
self._is_embedding_operator = False
self._missing_embeddings_examples = []
return result
return None
@property
def inspection_id(self):
return self.example_threshold
```
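The inspection treats an all-zero output vector as a missing embedding (`not embedding_array.any()`). A self-contained illustration of that check on made-up vectors:
```python
import numpy as np

# Two fake embedding rows: the second is a zero vector, i.e. a "missing" embedding.
embeddings = np.array([[0.12, -0.40, 0.07],
                       [0.00, 0.00, 0.00]])

missing_flags = [not row.any() for row in embeddings]
print(missing_flags)  # [False, True]
```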
#### File: mlinspect/monkeypatching/_patch_sklearn.py
```python
import gorilla
import numpy
import pandas
from sklearn import preprocessing, compose, tree, impute, linear_model, model_selection
from sklearn.feature_extraction import text
from sklearn.linear_model._stochastic_gradient import DEFAULT_EPSILON
from sklearn.metrics import accuracy_score
from tensorflow.keras.wrappers import scikit_learn as keras_sklearn_external # pylint: disable=no-name-in-module
from tensorflow.python.keras.wrappers import scikit_learn as keras_sklearn_internal # pylint: disable=no-name-in-module
from mlinspect.backends._backend import BackendResult
from mlinspect.backends._sklearn_backend import SklearnBackend
from mlinspect.inspections._inspection_input import OperatorContext, FunctionInfo, OperatorType
from mlinspect.instrumentation._dag_node import DagNode, BasicCodeLocation, DagNodeDetails, CodeReference
from mlinspect.instrumentation._pipeline_executor import singleton
from mlinspect.monkeypatching._mlinspect_ndarray import MlinspectNdarray
from mlinspect.monkeypatching._monkey_patching_utils import execute_patched_func, add_dag_node, \
execute_patched_func_indirect_allowed, get_input_info, execute_patched_func_no_op_id, \
get_optional_code_info_or_none, get_dag_node_for_id, add_train_data_node, \
add_train_label_node, add_test_label_node, add_test_data_dag_node
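# The patches below share one pattern: gorilla intercepts an sklearn call,
# SklearnBackend.before_call/after_call run the registered inspections on the
# inputs and outputs, and add_dag_node records a DagNode (operator context plus
# code location) for the extracted DAG. The patched __init__ methods only capture
# the caller's filename/lineno; the patched fit/transform/fit_transform/score
# methods do the actual instrumentation.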
@gorilla.patches(preprocessing)
class SklearnPreprocessingPatching:
""" Patches for sklearn """
# pylint: disable=too-few-public-methods
@gorilla.name('label_binarize')
@gorilla.settings(allow_hit=True)
def patched_label_binarize(*args, **kwargs):
""" Patch for ('sklearn.preprocessing._label', 'label_binarize') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(preprocessing, 'label_binarize')
def execute_inspections(op_id, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
# pylint: disable=too-many-locals
function_info = FunctionInfo('sklearn.preprocessing._label', 'label_binarize')
input_info = get_input_info(args[0], caller_filename, lineno, function_info, optional_code_reference,
optional_source_code)
operator_context = OperatorContext(OperatorType.PROJECTION_MODIFY, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result)
new_return_value = backend_result.annotated_dfobject.result_data
classes = kwargs['classes']
description = "label_binarize, classes: {}".format(classes)
dag_node = DagNode(op_id,
BasicCodeLocation(caller_filename, lineno),
operator_context,
DagNodeDetails(description, ["array"]),
get_optional_code_info_or_none(optional_code_reference, optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
return new_return_value
return execute_patched_func(original, execute_inspections, *args, **kwargs)
@gorilla.patches(model_selection)
class SklearnModelSelectionPatching:
""" Patches for sklearn """
# pylint: disable=too-few-public-methods
@gorilla.name('train_test_split')
@gorilla.settings(allow_hit=True)
def patched_train_test_split(*args, **kwargs):
""" Patch for ('sklearn.model_selection._split', 'train_test_split') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(model_selection, 'train_test_split')
def execute_inspections(op_id, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
# pylint: disable=too-many-locals
function_info = FunctionInfo('sklearn.model_selection._split', 'train_test_split')
input_info = get_input_info(args[0], caller_filename, lineno, function_info, optional_code_reference,
optional_source_code)
operator_context = OperatorContext(OperatorType.TRAIN_TEST_SPLIT, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result) # We ignore the test set for now
train_backend_result = BackendResult(backend_result.annotated_dfobject,
backend_result.dag_node_annotation)
test_backend_result = BackendResult(backend_result.optional_second_annotated_dfobject,
backend_result.optional_second_dag_node_annotation)
description = "(Train Data)"
columns = list(result[0].columns)
dag_node = DagNode(op_id,
BasicCodeLocation(caller_filename, lineno),
operator_context,
DagNodeDetails(description, columns),
get_optional_code_info_or_none(optional_code_reference, optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], train_backend_result)
description = "(Test Data)"
columns = list(result[1].columns)
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(caller_filename, lineno),
operator_context,
DagNodeDetails(description, columns),
get_optional_code_info_or_none(optional_code_reference, optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], test_backend_result)
new_return_value = (train_backend_result.annotated_dfobject.result_data,
test_backend_result.annotated_dfobject.result_data)
return new_return_value
return execute_patched_func(original, execute_inspections, *args, **kwargs)
class SklearnCallInfo:
""" Contains info like lineno from the current Transformer so indirect utility function calls can access it """
# pylint: disable=too-few-public-methods
transformer_filename: str or None = None
transformer_lineno: int or None = None
transformer_function_info: FunctionInfo or None = None
transformer_optional_code_reference: CodeReference or None = None
transformer_optional_source_code: str or None = None
column_transformer_active: bool = False
call_info_singleton = SklearnCallInfo()
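# The patched ColumnTransformer methods below toggle column_transformer_active so
# that the _hstack patch only records a CONCATENATION DAG node for calls that
# originate from an instrumented ColumnTransformer.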
@gorilla.patches(compose.ColumnTransformer)
class SklearnComposePatching:
""" Patches for sklearn ColumnTransformer"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self,
transformers, *,
remainder='drop',
sparse_threshold=0.3,
n_jobs=None,
transformer_weights=None,
verbose=False):
""" Patch for ('sklearn.compose._column_transformer', 'ColumnTransformer') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(compose.ColumnTransformer, '__init__')
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
# pylint: disable=attribute-defined-outside-init
original(self, transformers, remainder=remainder, sparse_threshold=sparse_threshold, n_jobs=n_jobs,
transformer_weights=transformer_weights, verbose=verbose)
self.mlinspect_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_indirect_allowed(execute_inspections)
@gorilla.name('fit_transform')
@gorilla.settings(allow_hit=True)
def patched_fit_transform(self, *args, **kwargs):
""" Patch for ('sklearn.compose._column_transformer', 'ColumnTransformer') """
# pylint: disable=no-method-argument
call_info_singleton.transformer_filename = self.mlinspect_filename
call_info_singleton.transformer_lineno = self.mlinspect_lineno
call_info_singleton.transformer_function_info = FunctionInfo('sklearn.compose._column_transformer',
'ColumnTransformer')
call_info_singleton.transformer_optional_code_reference = self.mlinspect_optional_code_reference
call_info_singleton.transformer_optional_source_code = self.mlinspect_optional_source_code
call_info_singleton.column_transformer_active = True
original = gorilla.get_original_attribute(compose.ColumnTransformer, 'fit_transform')
result = original(self, *args, **kwargs)
call_info_singleton.column_transformer_active = False
return result
@gorilla.name('transform')
@gorilla.settings(allow_hit=True)
def patched_transform(self, *args, **kwargs):
""" Patch for ('sklearn.compose._column_transformer', 'ColumnTransformer') """
# pylint: disable=no-method-argument
call_info_singleton.transformer_filename = self.mlinspect_filename
call_info_singleton.transformer_lineno = self.mlinspect_lineno
call_info_singleton.transformer_function_info = FunctionInfo('sklearn.compose._column_transformer',
'ColumnTransformer')
call_info_singleton.transformer_optional_code_reference = self.mlinspect_optional_code_reference
call_info_singleton.transformer_optional_source_code = self.mlinspect_optional_source_code
call_info_singleton.column_transformer_active = True
original = gorilla.get_original_attribute(compose.ColumnTransformer, 'transform')
result = original(self, *args, **kwargs)
call_info_singleton.column_transformer_active = False
return result
@gorilla.name('_hstack')
@gorilla.settings(allow_hit=True)
def patched_hstack(self, *args, **kwargs):
""" Patch for ('sklearn.compose._column_transformer', 'ColumnTransformer') """
# pylint: disable=no-method-argument, unused-argument, too-many-locals
original = gorilla.get_original_attribute(compose.ColumnTransformer, '_hstack')
if not call_info_singleton.column_transformer_active:
return original(self, *args, **kwargs)
input_tuple = args[0]
function_info = FunctionInfo('sklearn.compose._column_transformer', 'ColumnTransformer')
input_infos = []
for input_df_obj in input_tuple:
input_info = get_input_info(input_df_obj, self.mlinspect_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
input_infos.append(input_info)
operator_context = OperatorContext(OperatorType.CONCATENATION, function_info)
input_annotated_dfs = [input_info.annotated_dfobject for input_info in input_infos]
backend_input_infos = SklearnBackend.before_call(operator_context, input_annotated_dfs)
# No input_infos copy needed because it's only a selection and the rows not being removed don't change
result = original(self, *args, **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
backend_input_infos,
result)
result = backend_result.annotated_dfobject.result_data
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails(None, ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
input_dag_nodes = [input_info.dag_node for input_info in input_infos]
add_dag_node(dag_node, input_dag_nodes, backend_result)
return result
@gorilla.patches(preprocessing.StandardScaler)
class SklearnStandardScalerPatching:
""" Patches for sklearn StandardScaler"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, *, copy=True, with_mean=True, with_std=True,
mlinspect_caller_filename=None, mlinspect_lineno=None,
mlinspect_optional_code_reference=None, mlinspect_optional_source_code=None,
mlinspect_fit_transform_active=False):
""" Patch for ('sklearn.preprocessing._data', 'StandardScaler') """
# pylint: disable=no-method-argument, attribute-defined-outside-init
original = gorilla.get_original_attribute(preprocessing.StandardScaler, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_fit_transform_active = mlinspect_fit_transform_active
self.mlinspect_non_data_func_args = {'copy': copy, 'with_mean': with_mean, 'with_std': with_std}
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, copy=copy, with_mean=with_mean, with_std=with_std)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_no_op_id(original, execute_inspections, self, **self.mlinspect_non_data_func_args)
@gorilla.name('fit_transform')
@gorilla.settings(allow_hit=True)
def patched_fit_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing._data.StandardScaler', 'fit_transform') """
# pylint: disable=no-method-argument
self.mlinspect_fit_transform_active = True # pylint: disable=attribute-defined-outside-init
original = gorilla.get_original_attribute(preprocessing.StandardScaler, 'fit_transform')
function_info = FunctionInfo('sklearn.preprocessing._data', 'StandardScaler')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
assert isinstance(new_return_value, MlinspectNdarray)
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Standard Scaler: fit_transform", ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
self.mlinspect_fit_transform_active = False # pylint: disable=attribute-defined-outside-init
return new_return_value
@gorilla.name('transform')
@gorilla.settings(allow_hit=True)
def patched_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing._data.StandardScaler', 'transform') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(preprocessing.StandardScaler, 'transform')
if not self.mlinspect_fit_transform_active:
function_info = FunctionInfo('sklearn.preprocessing._data', 'StandardScaler')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
assert isinstance(new_return_value, MlinspectNdarray)
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Standard Scaler: transform", ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
else:
new_return_value = original(self, *args, **kwargs)
return new_return_value
@gorilla.patches(text.HashingVectorizer)
class SklearnHashingVectorizerPatching:
""" Patches for sklearn HashingVectorizer"""
# pylint: disable=too-few-public-methods, redefined-builtin, too-many-locals
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, *, input='content', encoding='utf-8', decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None, stop_words=None,
token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', alternate_sign=True, dtype=numpy.float64,
mlinspect_caller_filename=None, mlinspect_lineno=None,
mlinspect_optional_code_reference=None, mlinspect_optional_source_code=None,
mlinspect_fit_transform_active=False):
""" Patch for ('sklearn.feature_extraction.text', 'HashingVectorizer') """
# pylint: disable=no-method-argument, attribute-defined-outside-init
original = gorilla.get_original_attribute(text.HashingVectorizer, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_fit_transform_active = mlinspect_fit_transform_active
self.mlinspect_non_data_func_args = {'input': input, 'encoding': encoding, 'decode_error': decode_error,
'strip_accents': strip_accents, 'lowercase': lowercase,
'preprocessor': preprocessor, 'tokenizer': tokenizer,
'stop_words': stop_words, 'token_pattern': token_pattern,
'ngram_range': ngram_range, 'analyzer': analyzer, 'n_features': n_features,
'binary': binary, 'norm': norm, 'alternate_sign': alternate_sign,
'dtype': dtype}
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, **self.mlinspect_non_data_func_args)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_no_op_id(original, execute_inspections, self, **self.mlinspect_non_data_func_args)
@gorilla.name('fit_transform')
@gorilla.settings(allow_hit=True)
def patched_fit_transform(self, *args, **kwargs):
""" Patch for ('sklearn.feature_extraction.text.HashingVectorizer', 'fit_transform') """
# pylint: disable=no-method-argument
self.mlinspect_fit_transform_active = True # pylint: disable=attribute-defined-outside-init
original = gorilla.get_original_attribute(text.HashingVectorizer, 'fit_transform')
function_info = FunctionInfo('sklearn.feature_extraction.text', 'HashingVectorizer')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Hashing Vectorizer: fit_transform", ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
self.mlinspect_fit_transform_active = False # pylint: disable=attribute-defined-outside-init
return new_return_value
@gorilla.name('transform')
@gorilla.settings(allow_hit=True)
def patched_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing._data.StandardScaler', 'transform') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(text.HashingVectorizer, 'transform')
if not self.mlinspect_fit_transform_active:
function_info = FunctionInfo('sklearn.feature_extraction.text', 'HashingVectorizer')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Hashing Vectorizer: transform", ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
else:
new_return_value = original(self, *args, **kwargs)
return new_return_value
@gorilla.patches(preprocessing.KBinsDiscretizer)
class SklearnKBinsDiscretizerPatching:
""" Patches for sklearn KBinsDiscretizer"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, n_bins=5, *, encode='onehot', strategy='quantile',
mlinspect_caller_filename=None, mlinspect_lineno=None,
mlinspect_optional_code_reference=None, mlinspect_optional_source_code=None,
mlinspect_fit_transform_active=False):
""" Patch for ('sklearn.preprocessing._discretization', 'KBinsDiscretizer') """
# pylint: disable=no-method-argument, attribute-defined-outside-init
original = gorilla.get_original_attribute(preprocessing.KBinsDiscretizer, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_fit_transform_active = mlinspect_fit_transform_active
self.mlinspect_non_data_func_args = {'n_bins': n_bins, 'encode': encode, 'strategy': strategy}
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, **self.mlinspect_non_data_func_args)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_no_op_id(original, execute_inspections, self, **self.mlinspect_non_data_func_args)
@gorilla.name('fit_transform')
@gorilla.settings(allow_hit=True)
def patched_fit_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing._discretization.KBinsDiscretizer', 'fit_transform') """
# pylint: disable=no-method-argument
self.mlinspect_fit_transform_active = True # pylint: disable=attribute-defined-outside-init
original = gorilla.get_original_attribute(preprocessing.KBinsDiscretizer, 'fit_transform')
function_info = FunctionInfo('sklearn.preprocessing._discretization', 'KBinsDiscretizer')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
assert isinstance(new_return_value, MlinspectNdarray)
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("K-Bins Discretizer: fit_transform", ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
self.mlinspect_fit_transform_active = False # pylint: disable=attribute-defined-outside-init
return new_return_value
@gorilla.name('transform')
@gorilla.settings(allow_hit=True)
def patched_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing._discretization.KBinsDiscretizer', 'transform') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(preprocessing.KBinsDiscretizer, 'transform')
if not self.mlinspect_fit_transform_active:
function_info = FunctionInfo('sklearn.preprocessing._discretization', 'KBinsDiscretizer')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
assert isinstance(new_return_value, MlinspectNdarray)
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("K-Bins Discretizer: transform", ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
else:
new_return_value = original(self, *args, **kwargs)
return new_return_value
@gorilla.patches(preprocessing.OneHotEncoder)
class SklearnOneHotEncoderPatching:
""" Patches for sklearn OneHotEncoder"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, *, categories='auto', drop=None, sparse=True,
dtype=numpy.float64, handle_unknown='error',
mlinspect_caller_filename=None, mlinspect_lineno=None,
mlinspect_optional_code_reference=None, mlinspect_optional_source_code=None,
mlinspect_fit_transform_active=False):
""" Patch for ('sklearn.preprocessing._encoders', 'OneHotEncoder') """
# pylint: disable=no-method-argument, attribute-defined-outside-init
original = gorilla.get_original_attribute(preprocessing.OneHotEncoder, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_fit_transform_active = mlinspect_fit_transform_active
self.mlinspect_non_data_func_args = {'categories': categories, 'drop': drop, 'sparse': sparse, 'dtype': dtype,
'handle_unknown': handle_unknown}
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, **self.mlinspect_non_data_func_args)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_no_op_id(original, execute_inspections, self, **self.mlinspect_non_data_func_args)
@gorilla.name('fit_transform')
@gorilla.settings(allow_hit=True)
def patched_fit_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing._encoders.OneHotEncoder', 'fit_transform') """
# pylint: disable=no-method-argument
self.mlinspect_fit_transform_active = True # pylint: disable=attribute-defined-outside-init
original = gorilla.get_original_attribute(preprocessing.OneHotEncoder, 'fit_transform')
function_info = FunctionInfo('sklearn.preprocessing._encoders', 'OneHotEncoder')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("One-Hot Encoder: fit_transform", ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
self.mlinspect_fit_transform_active = False # pylint: disable=attribute-defined-outside-init
return new_return_value
@gorilla.name('transform')
@gorilla.settings(allow_hit=True)
def patched_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing._encoders.OneHotEncoder', 'transform') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(preprocessing.OneHotEncoder, 'transform')
if not self.mlinspect_fit_transform_active:
function_info = FunctionInfo('sklearn.preprocessing._encoders', 'OneHotEncoder')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("One-Hot Encoder: transform", ['array']),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
else:
new_return_value = original(self, *args, **kwargs)
return new_return_value
@gorilla.patches(impute.SimpleImputer)
class SklearnSimpleImputerPatching:
""" Patches for sklearn SimpleImputer"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, *, missing_values=numpy.nan, strategy="mean",
fill_value=None, verbose=0, copy=True, add_indicator=False,
mlinspect_caller_filename=None, mlinspect_lineno=None,
mlinspect_optional_code_reference=None, mlinspect_optional_source_code=None,
mlinspect_fit_transform_active=False):
""" Patch for ('sklearn.impute._base', 'SimpleImputer') """
# pylint: disable=no-method-argument, attribute-defined-outside-init
original = gorilla.get_original_attribute(impute.SimpleImputer, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_fit_transform_active = mlinspect_fit_transform_active
self.mlinspect_non_data_func_args = {'missing_values': missing_values, 'strategy': strategy,
'fill_value': fill_value, 'verbose': verbose, 'copy': copy,
'add_indicator': add_indicator}
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, **self.mlinspect_non_data_func_args)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_no_op_id(original, execute_inspections, self, **self.mlinspect_non_data_func_args)
@gorilla.name('fit_transform')
@gorilla.settings(allow_hit=True)
def patched_fit_transform(self, *args, **kwargs):
""" Patch for ('sklearn.impute._base.SimpleImputer', 'fit_transform') """
# pylint: disable=no-method-argument
self.mlinspect_fit_transform_active = True # pylint: disable=attribute-defined-outside-init
original = gorilla.get_original_attribute(impute.SimpleImputer, 'fit_transform')
function_info = FunctionInfo('sklearn.impute._base', 'SimpleImputer')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
if isinstance(input_infos[0].result_data, pandas.DataFrame):
columns = list(input_infos[0].result_data.columns)
else:
columns = ['array']
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Simple Imputer: fit_transform", columns),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
self.mlinspect_fit_transform_active = False # pylint: disable=attribute-defined-outside-init
return new_return_value
@gorilla.name('transform')
@gorilla.settings(allow_hit=True)
def patched_transform(self, *args, **kwargs):
""" Patch for ('sklearn.impute._base.SimpleImputer', 'transform') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(impute.SimpleImputer, 'transform')
if not self.mlinspect_fit_transform_active:
function_info = FunctionInfo('sklearn.impute._base', 'SimpleImputer')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
if isinstance(input_infos[0].result_data, pandas.DataFrame):
columns = list(input_infos[0].result_data.columns)
else:
columns = ['array']
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Simple Imputer: transform", columns),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
else:
new_return_value = original(self, *args, **kwargs)
return new_return_value
@gorilla.patches(preprocessing.FunctionTransformer)
class SklearnFunctionTransformerPatching:
""" Patches for sklearn FunctionTransformer"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, func=None, inverse_func=None, *, validate=False, accept_sparse=False, check_inverse=True,
kw_args=None, inv_kw_args=None, mlinspect_caller_filename=None, mlinspect_lineno=None,
mlinspect_optional_code_reference=None, mlinspect_optional_source_code=None,
mlinspect_fit_transform_active=False):
""" Patch for ('sklearn.preprocessing_function_transformer', 'FunctionTransformer') """
# pylint: disable=no-method-argument, attribute-defined-outside-init
original = gorilla.get_original_attribute(preprocessing.FunctionTransformer, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_fit_transform_active = mlinspect_fit_transform_active
self.mlinspect_non_data_func_args = {'validate': validate, 'accept_sparse': accept_sparse,
'check_inverse': check_inverse, 'kw_args': kw_args,
'inv_kw_args': inv_kw_args}
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, func=func, inverse_func=inverse_func, **self.mlinspect_non_data_func_args)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_no_op_id(original, execute_inspections, self, func=func, inverse_func=inverse_func,
**self.mlinspect_non_data_func_args)
@gorilla.name('fit_transform')
@gorilla.settings(allow_hit=True)
def patched_fit_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing_function_transformer.FunctionTransformer', 'fit_transform') """
# pylint: disable=no-method-argument
self.mlinspect_fit_transform_active = True # pylint: disable=attribute-defined-outside-init
original = gorilla.get_original_attribute(preprocessing.FunctionTransformer, 'fit_transform')
function_info = FunctionInfo('sklearn.preprocessing_function_transformer', 'FunctionTransformer')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
if isinstance(input_infos[0].result_data, pandas.DataFrame):
columns = list(input_infos[0].result_data.columns)
else:
columns = ['array']
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Function Transformer: fit_transform", columns),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
self.mlinspect_fit_transform_active = False # pylint: disable=attribute-defined-outside-init
return new_return_value
@gorilla.name('transform')
@gorilla.settings(allow_hit=True)
def patched_transform(self, *args, **kwargs):
""" Patch for ('sklearn.preprocessing_function_transformer.FunctionTransformer', 'transform') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(preprocessing.FunctionTransformer, 'transform')
if not self.mlinspect_fit_transform_active:
function_info = FunctionInfo('sklearn.preprocessing_function_transformer', 'FunctionTransformer')
input_info = get_input_info(args[0], self.mlinspect_caller_filename, self.mlinspect_lineno, function_info,
self.mlinspect_optional_code_reference, self.mlinspect_optional_source_code)
operator_context = OperatorContext(OperatorType.TRANSFORMER, function_info)
input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])
result = original(self, input_infos[0].result_data, *args[1:], **kwargs)
backend_result = SklearnBackend.after_call(operator_context,
input_infos,
result,
self.mlinspect_non_data_func_args)
new_return_value = backend_result.annotated_dfobject.result_data
if isinstance(input_infos[0].result_data, pandas.DataFrame):
columns = list(input_infos[0].result_data.columns)
else:
columns = ['array']
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Function Transformer: transform", columns),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [input_info.dag_node], backend_result)
else:
new_return_value = original(self, *args, **kwargs)
return new_return_value
@gorilla.patches(tree.DecisionTreeClassifier)
class SklearnDecisionTreePatching:
""" Patches for sklearn DecisionTree"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, *, criterion="gini", splitter="best", max_depth=None, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0., max_features=None, random_state=None,
max_leaf_nodes=None, min_impurity_decrease=0., min_impurity_split=None, class_weight=None,
presort='deprecated', ccp_alpha=0.0, mlinspect_caller_filename=None,
mlinspect_lineno=None, mlinspect_optional_code_reference=None,
mlinspect_optional_source_code=None, mlinspect_estimator_node_id=None):
""" Patch for ('sklearn.tree._classes', 'DecisionTreeClassifier') """
# pylint: disable=no-method-argument, attribute-defined-outside-init, too-many-locals
original = gorilla.get_original_attribute(tree.DecisionTreeClassifier, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_estimator_node_id = mlinspect_estimator_node_id
self.mlinspect_non_data_func_args = {'criterion': criterion, 'splitter': splitter, 'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'min_weight_fraction_leaf': min_weight_fraction_leaf,
'max_features': max_features, 'random_state': random_state,
'max_leaf_nodes': max_leaf_nodes,
'min_impurity_decrease': min_impurity_decrease,
'min_impurity_split': min_impurity_split, 'class_weight': class_weight,
'presort': presort, 'ccp_alpha': ccp_alpha}
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, **self.mlinspect_non_data_func_args)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
self.mlinspect_estimator_node_id = None
return execute_patched_func_no_op_id(original, execute_inspections, self,
**self.mlinspect_non_data_func_args)
@gorilla.name('fit')
@gorilla.settings(allow_hit=True)
def patched_fit(self, *args, **kwargs):
""" Patch for ('sklearn.tree._classes.DecisionTreeClassifier', 'fit') """
# pylint: disable=no-method-argument, too-many-locals
original = gorilla.get_original_attribute(tree.DecisionTreeClassifier, 'fit')
function_info = FunctionInfo('sklearn.tree._classes', 'DecisionTreeClassifier')
data_backend_result, train_data_node, train_data_result = add_train_data_node(self, args[0], function_info)
label_backend_result, train_labels_node, train_labels_result = add_train_label_node(self, args[1],
function_info)
# Estimator
operator_context = OperatorContext(OperatorType.ESTIMATOR, function_info)
input_dfs = [data_backend_result.annotated_dfobject, label_backend_result.annotated_dfobject]
input_infos = SklearnBackend.before_call(operator_context, input_dfs)
original(self, train_data_result, train_labels_result, *args[2:], **kwargs)
estimator_backend_result = SklearnBackend.after_call(operator_context,
input_infos,
None,
self.mlinspect_non_data_func_args)
self.mlinspect_estimator_node_id = singleton.get_next_op_id() # pylint: disable=attribute-defined-outside-init
dag_node = DagNode(self.mlinspect_estimator_node_id,
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Decision Tree", []),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [train_data_node, train_labels_node], estimator_backend_result)
return self
@gorilla.name('score')
@gorilla.settings(allow_hit=True)
def patched_score(self, *args, **kwargs):
""" Patch for ('sklearn.tree._classes.DecisionTreeClassifier', 'score') """
# pylint: disable=no-method-argument
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
# pylint: disable=too-many-locals
function_info = FunctionInfo('sklearn.tree._classes.DecisionTreeClassifier', 'score')
data_backend_result, test_data_node, test_data_result = add_test_data_dag_node(args[0],
function_info,
lineno,
optional_code_reference,
optional_source_code,
caller_filename)
label_backend_result, test_labels_node, test_labels_result = add_test_label_node(args[1],
caller_filename,
function_info,
lineno,
optional_code_reference,
optional_source_code)
operator_context = OperatorContext(OperatorType.SCORE, function_info)
input_dfs = [data_backend_result.annotated_dfobject, label_backend_result.annotated_dfobject]
input_infos = SklearnBackend.before_call(operator_context, input_dfs)
# Same as original, but captures the test set predictions
predictions = self.predict(test_data_result) # pylint: disable=no-member
result = accuracy_score(test_labels_result, predictions, **kwargs)
estimator_backend_result = SklearnBackend.after_call(operator_context,
input_infos,
predictions,
self.mlinspect_non_data_func_args)
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(caller_filename, lineno),
operator_context,
DagNodeDetails("Decision Tree", []),
get_optional_code_info_or_none(optional_code_reference, optional_source_code))
estimator_dag_node = get_dag_node_for_id(self.mlinspect_estimator_node_id)
add_dag_node(dag_node, [estimator_dag_node, test_data_node, test_labels_node],
estimator_backend_result)
return result
return execute_patched_func_indirect_allowed(execute_inspections)
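# Note: patched_score recomputes accuracy_score from self.predict(...) instead of
# delegating to the original score method, so the per-row test predictions can be
# handed to SklearnBackend.after_call and attached to the SCORE DAG node.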
@gorilla.patches(linear_model.SGDClassifier)
class SklearnSGDClassifierPatching:
""" Patches for sklearn SGDClassifier"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, loss="hinge", *, penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
n_jobs=None, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5,
early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None,
warm_start=False, average=False, mlinspect_caller_filename=None, mlinspect_lineno=None,
mlinspect_optional_code_reference=None, mlinspect_optional_source_code=None,
mlinspect_estimator_node_id=None):
""" Patch for ('sklearn.linear_model._stochastic_gradient', 'SGDClassifier') """
# pylint: disable=no-method-argument, attribute-defined-outside-init, too-many-locals
original = gorilla.get_original_attribute(linear_model.SGDClassifier, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_estimator_node_id = mlinspect_estimator_node_id
self.mlinspect_non_data_func_args = {'loss': loss, 'penalty': penalty, 'alpha': alpha, 'l1_ratio': l1_ratio,
'fit_intercept': fit_intercept, 'max_iter': max_iter, 'tol': tol,
'shuffle': shuffle, 'verbose': verbose, 'epsilon': epsilon,
'n_jobs': n_jobs, 'random_state': random_state,
'learning_rate': learning_rate, 'eta0': eta0, 'power_t': power_t,
'early_stopping': early_stopping,
'validation_fraction': validation_fraction,
'n_iter_no_change': n_iter_no_change,
'class_weight': class_weight, 'warm_start': warm_start, 'average': average}
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, **self.mlinspect_non_data_func_args)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
self.mlinspect_estimator_node_id = None
return execute_patched_func_no_op_id(original, execute_inspections, self,
**self.mlinspect_non_data_func_args)
@gorilla.name('fit')
@gorilla.settings(allow_hit=True)
def patched_fit(self, *args, **kwargs):
""" Patch for ('sklearn.linear_model._stochastic_gradient', 'fit') """
# pylint: disable=no-method-argument, too-many-locals
original = gorilla.get_original_attribute(linear_model.SGDClassifier, 'fit')
function_info = FunctionInfo('sklearn.linear_model._stochastic_gradient', 'SGDClassifier')
data_backend_result, train_data_node, train_data_result = add_train_data_node(self, args[0], function_info)
label_backend_result, train_labels_node, train_labels_result = add_train_label_node(self, args[1],
function_info)
# Estimator
operator_context = OperatorContext(OperatorType.ESTIMATOR, function_info)
input_dfs = [data_backend_result.annotated_dfobject, label_backend_result.annotated_dfobject]
input_infos = SklearnBackend.before_call(operator_context, input_dfs)
original(self, train_data_result, train_labels_result, *args[2:], **kwargs)
estimator_backend_result = SklearnBackend.after_call(operator_context,
input_infos,
None,
self.mlinspect_non_data_func_args)
self.mlinspect_estimator_node_id = singleton.get_next_op_id() # pylint: disable=attribute-defined-outside-init
dag_node = DagNode(self.mlinspect_estimator_node_id,
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("SGD Classifier", []),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [train_data_node, train_labels_node], estimator_backend_result)
return self
@gorilla.name('score')
@gorilla.settings(allow_hit=True)
def patched_score(self, *args, **kwargs):
""" Patch for ('sklearn.linear_model._stochastic_gradient.SGDClassifier', 'score') """
# pylint: disable=no-method-argument
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
# pylint: disable=too-many-locals
function_info = FunctionInfo('sklearn.linear_model._stochastic_gradient.SGDClassifier', 'score')
# Test data
data_backend_result, test_data_node, test_data_result = add_test_data_dag_node(args[0],
function_info,
lineno,
optional_code_reference,
optional_source_code,
caller_filename)
# Test labels
label_backend_result, test_labels_node, test_labels_result = add_test_label_node(args[1],
caller_filename,
function_info,
lineno,
optional_code_reference,
optional_source_code)
# Score
operator_context = OperatorContext(OperatorType.SCORE, function_info)
input_dfs = [data_backend_result.annotated_dfobject, label_backend_result.annotated_dfobject]
input_infos = SklearnBackend.before_call(operator_context, input_dfs)
# Same as original, but captures the test set predictions
predictions = self.predict(test_data_result) # pylint: disable=no-member
result = accuracy_score(test_labels_result, predictions, **kwargs)
estimator_backend_result = SklearnBackend.after_call(operator_context,
input_infos,
predictions,
self.mlinspect_non_data_func_args)
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(caller_filename, lineno),
operator_context,
DagNodeDetails("SGD Classifier", []),
get_optional_code_info_or_none(optional_code_reference, optional_source_code))
estimator_dag_node = get_dag_node_for_id(self.mlinspect_estimator_node_id)
add_dag_node(dag_node, [estimator_dag_node, test_data_node, test_labels_node],
estimator_backend_result)
return result
return execute_patched_func_indirect_allowed(execute_inspections)
@gorilla.patches(linear_model.LogisticRegression)
class SklearnLogisticRegressionPatching:
""" Patches for sklearn LogisticRegression"""
# pylint: disable=too-few-public-methods
@gorilla.name('__init__')
@gorilla.settings(allow_hit=True)
def patched__init__(self, penalty='l2', *, dual=False, tol=1e-4, C=1.0, # pylint: disable=invalid-name
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='lbfgs', max_iter=100,
multi_class='auto', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None, mlinspect_caller_filename=None,
mlinspect_lineno=None, mlinspect_optional_code_reference=None,
mlinspect_optional_source_code=None, mlinspect_estimator_node_id=None):
""" Patch for ('sklearn.linear_model._logistic', 'LogisticRegression') """
# pylint: disable=no-method-argument, attribute-defined-outside-init, too-many-locals
original = gorilla.get_original_attribute(linear_model.LogisticRegression, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_estimator_node_id = mlinspect_estimator_node_id
self.mlinspect_non_data_func_args = {'penalty': penalty, 'dual': dual, 'tol': tol, 'C': C,
'fit_intercept': fit_intercept, 'intercept_scaling': intercept_scaling,
'class_weight': class_weight, 'random_state': random_state,
'solver': solver, 'max_iter': max_iter, 'multi_class': multi_class,
'verbose': verbose, 'warm_start': warm_start, 'n_jobs': n_jobs,
'l1_ratio': l1_ratio}
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, **self.mlinspect_non_data_func_args)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_no_op_id(original, execute_inspections, self, **self.mlinspect_non_data_func_args)
@gorilla.name('fit')
@gorilla.settings(allow_hit=True)
def patched_fit(self, *args, **kwargs):
""" Patch for ('sklearn.linear_model._logistic.LogisticRegression', 'fit') """
# pylint: disable=no-method-argument, too-many-locals
original = gorilla.get_original_attribute(linear_model.LogisticRegression, 'fit')
function_info = FunctionInfo('sklearn.linear_model._logistic', 'LogisticRegression')
data_backend_result, train_data_node, train_data_result = add_train_data_node(self, args[0], function_info)
label_backend_result, train_labels_node, train_labels_result = add_train_label_node(self, args[1],
function_info)
# Estimator
operator_context = OperatorContext(OperatorType.ESTIMATOR, function_info)
input_dfs = [data_backend_result.annotated_dfobject, label_backend_result.annotated_dfobject]
input_infos = SklearnBackend.before_call(operator_context, input_dfs)
original(self, train_data_result, train_labels_result, *args[2:], **kwargs)
estimator_backend_result = SklearnBackend.after_call(operator_context,
input_infos,
None,
self.mlinspect_non_data_func_args)
self.mlinspect_estimator_node_id = singleton.get_next_op_id() # pylint: disable=attribute-defined-outside-init
dag_node = DagNode(self.mlinspect_estimator_node_id,
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Logistic Regression", []),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [train_data_node, train_labels_node], estimator_backend_result)
return self
@gorilla.name('score')
@gorilla.settings(allow_hit=True)
def patched_score(self, *args, **kwargs):
""" Patch for ('sklearn.linear_model._logistic.LogisticRegression', 'score') """
# pylint: disable=no-method-argument
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
# pylint: disable=too-many-locals
function_info = FunctionInfo('sklearn.linear_model._logistic.LogisticRegression', 'score')
# Test data
data_backend_result, test_data_node, test_data_result = add_test_data_dag_node(args[0],
function_info,
lineno,
optional_code_reference,
optional_source_code,
caller_filename)
# Test labels
label_backend_result, test_labels_node, test_labels_result = add_test_label_node(args[1],
caller_filename,
function_info,
lineno,
optional_code_reference,
optional_source_code)
# Score
operator_context = OperatorContext(OperatorType.SCORE, function_info)
input_dfs = [data_backend_result.annotated_dfobject, label_backend_result.annotated_dfobject]
input_infos = SklearnBackend.before_call(operator_context, input_dfs)
# Same as original, but captures the test set predictions
predictions = self.predict(test_data_result) # pylint: disable=no-member
result = accuracy_score(test_labels_result, predictions, **kwargs)
estimator_backend_result = SklearnBackend.after_call(operator_context,
input_infos,
predictions,
self.mlinspect_non_data_func_args)
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(caller_filename, lineno),
operator_context,
DagNodeDetails("Logistic Regression", []),
get_optional_code_info_or_none(optional_code_reference, optional_source_code))
estimator_dag_node = get_dag_node_for_id(self.mlinspect_estimator_node_id)
add_dag_node(dag_node, [estimator_dag_node, test_data_node, test_labels_node],
estimator_backend_result)
return result
return execute_patched_func_indirect_allowed(execute_inspections)
class SklearnKerasClassifierPatching:
""" Patches for tensorflow KerasClassifier"""
# pylint: disable=too-few-public-methods
@gorilla.patch(keras_sklearn_internal.BaseWrapper, name='__init__', settings=gorilla.Settings(allow_hit=True))
def patched__init__(self, build_fn=None, mlinspect_caller_filename=None, mlinspect_lineno=None,
mlinspect_optional_code_reference=None, mlinspect_optional_source_code=None,
mlinspect_estimator_node_id=None, **sk_params):
""" Patch for ('tensorflow.python.keras.wrappers.scikit_learn', 'KerasClassifier') """
# pylint: disable=no-method-argument, attribute-defined-outside-init, too-many-locals, too-many-arguments
original = gorilla.get_original_attribute(keras_sklearn_internal.BaseWrapper, '__init__')
self.mlinspect_caller_filename = mlinspect_caller_filename
self.mlinspect_lineno = mlinspect_lineno
self.mlinspect_optional_code_reference = mlinspect_optional_code_reference
self.mlinspect_optional_source_code = mlinspect_optional_source_code
self.mlinspect_estimator_node_id = mlinspect_estimator_node_id
self.mlinspect_non_data_func_args = sk_params
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
original(self, build_fn=build_fn, **sk_params)
self.mlinspect_caller_filename = caller_filename
self.mlinspect_lineno = lineno
self.mlinspect_optional_code_reference = optional_code_reference
self.mlinspect_optional_source_code = optional_source_code
return execute_patched_func_no_op_id(original, execute_inspections, self, build_fn=build_fn, **sk_params)
@gorilla.patch(keras_sklearn_external.KerasClassifier, name='fit', settings=gorilla.Settings(allow_hit=True))
def patched_fit(self, *args, **kwargs):
""" Patch for ('tensorflow.python.keras.wrappers.scikit_learn.KerasClassifier', 'fit') """
# pylint: disable=no-method-argument, too-many-locals
original = gorilla.get_original_attribute(keras_sklearn_external.KerasClassifier, 'fit')
function_info = FunctionInfo('tensorflow.python.keras.wrappers.scikit_learn', 'KerasClassifier')
data_backend_result, train_data_dag_node, train_data_result = add_train_data_node(self, args[0], function_info)
label_backend_result, train_labels_dag_node, train_labels_result = add_train_label_node(self, args[1],
function_info)
# Estimator
operator_context = OperatorContext(OperatorType.ESTIMATOR, function_info)
input_dfs = [data_backend_result.annotated_dfobject, label_backend_result.annotated_dfobject]
input_infos = SklearnBackend.before_call(operator_context, input_dfs)
original(self, train_data_result, train_labels_result, *args[2:], **kwargs)
estimator_backend_result = SklearnBackend.after_call(operator_context,
input_infos,
None,
self.mlinspect_non_data_func_args)
self.mlinspect_estimator_node_id = singleton.get_next_op_id() # pylint: disable=attribute-defined-outside-init
dag_node = DagNode(self.mlinspect_estimator_node_id,
BasicCodeLocation(self.mlinspect_caller_filename, self.mlinspect_lineno),
operator_context,
DagNodeDetails("Neural Network", []),
get_optional_code_info_or_none(self.mlinspect_optional_code_reference,
self.mlinspect_optional_source_code))
add_dag_node(dag_node, [train_data_dag_node, train_labels_dag_node], estimator_backend_result)
return self
@gorilla.patch(keras_sklearn_external.KerasClassifier, name='score', settings=gorilla.Settings(allow_hit=True))
def patched_score(self, *args, **kwargs):
""" Patch for ('tensorflow.python.keras.wrappers.scikit_learn.KerasClassifier', 'score') """
# pylint: disable=no-method-argument
original = gorilla.get_original_attribute(keras_sklearn_external.KerasClassifier, 'score')
def execute_inspections(_, caller_filename, lineno, optional_code_reference, optional_source_code):
""" Execute inspections, add DAG node """
# pylint: disable=too-many-locals
function_info = FunctionInfo('tensorflow.python.keras.wrappers.scikit_learn.KerasClassifier', 'score')
# Test data
data_backend_result, test_data_node, test_data_result = add_test_data_dag_node(args[0],
function_info,
lineno,
optional_code_reference,
optional_source_code,
caller_filename)
# Test labels
label_backend_result, test_labels_node, test_labels_result = add_test_label_node(args[1],
caller_filename,
function_info,
lineno,
optional_code_reference,
optional_source_code)
# Score
operator_context = OperatorContext(OperatorType.SCORE, function_info)
input_dfs = [data_backend_result.annotated_dfobject, label_backend_result.annotated_dfobject]
input_infos = SklearnBackend.before_call(operator_context, input_dfs)
# This currently calls predict twice, but patching here is complex. Maybe revisit this in future work
predictions = self.predict(test_data_result) # pylint: disable=no-member
result = original(self, test_data_result, test_labels_result, *args[2:], **kwargs)
estimator_backend_result = SklearnBackend.after_call(operator_context,
input_infos,
predictions,
self.mlinspect_non_data_func_args)
dag_node = DagNode(singleton.get_next_op_id(),
BasicCodeLocation(caller_filename, lineno),
operator_context,
DagNodeDetails("Neural Network", []),
get_optional_code_info_or_none(optional_code_reference, optional_source_code))
estimator_dag_node = get_dag_node_for_id(self.mlinspect_estimator_node_id)
add_dag_node(dag_node, [estimator_dag_node, test_data_node, test_labels_node],
estimator_backend_result)
return result
return execute_patched_func_indirect_allowed(execute_inspections)
```
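All of the patch classes above follow the same gorilla recipe: a shadow class decorated with `@gorilla.patches`, replacement methods marked with `@gorilla.name` and `@gorilla.settings(allow_hit=True)`, and the unpatched implementation recovered via `gorilla.get_original_attribute`. The following is a minimal, self-contained sketch of that recipe with a toy `Greeter` class (an illustration only, not part of mlinspect); it assumes the `gorilla` package and uses `gorilla.find_patches`/`gorilla.apply` to activate the patches:
```python
import sys
import gorilla

class Greeter:
    def greet(self):
        return "hello"

@gorilla.patches(Greeter)
class GreeterPatching:
    """ Toy patch class mirroring the structure of the patch classes above """
    @gorilla.name('greet')
    @gorilla.settings(allow_hit=True)
    def patched_greet(self):
        # recover and delegate to the unpatched implementation, as the patches above do
        original = gorilla.get_original_attribute(Greeter, 'greet')
        return original(self).upper()

# discover and activate the patches defined in this module
for patch in gorilla.find_patches([sys.modules[__name__]]):
    gorilla.apply(patch)
assert Greeter().greet() == "HELLO"
```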
#### File: test/checks/test_no_bias_introduced_for.py
```python
import math
from inspect import cleandoc
from pandas import DataFrame
from testfixtures import compare
from mlinspect import DagNode, BasicCodeLocation, OperatorContext, OperatorType, FunctionInfo, DagNodeDetails, \
OptionalCodeInfo
from mlinspect._pipeline_inspector import PipelineInspector
from mlinspect.checks import CheckStatus, NoBiasIntroducedFor, \
NoBiasIntroducedForResult
from mlinspect.checks._no_bias_introduced_for import BiasDistributionChange
from mlinspect.instrumentation._dag_node import CodeReference
def test_no_bias_introduced_for_merge():
"""
    Tests whether NoBiasIntroducedFor works for joins
"""
test_code = cleandoc("""
import pandas as pd
df_a = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c', 'cat_b'], 'B': [1, 2, 4, 5, 7]})
df_b = pd.DataFrame({'B': [1, 2, 3, 4, 5], 'C': [1, 5, 4, 11, None]})
df_merged = df_a.merge(df_b, on='B')
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_check(NoBiasIntroducedFor(['A'])) \
.execute()
check_result = inspector_result.check_to_check_results[NoBiasIntroducedFor(['A'])]
expected_result = get_expected_check_result_merge()
compare(check_result, expected_result)
def test_no_bias_introduced_simple_imputer():
"""
    Tests whether NoBiasIntroducedFor works for the SimpleImputer
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.impute import SimpleImputer
import numpy as np
df = pd.DataFrame({'A': ['cat_a', np.nan, 'cat_a', 'cat_c']})
imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imputed_data = imputer.fit_transform(df)
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_check(NoBiasIntroducedFor(['A'])) \
.execute()
check_result = inspector_result.check_to_check_results[NoBiasIntroducedFor(['A'])]
expected_result = get_expected_check_result_simple_imputer()
compare(check_result, expected_result)
def get_expected_check_result_merge():
""" Expected result for the code snippet in test_no_bias_introduced_for_merge"""
failing_dag_node = DagNode(2,
BasicCodeLocation('<string-source>', 5),
OperatorContext(OperatorType.JOIN, FunctionInfo('pandas.core.frame', 'merge')),
DagNodeDetails("on 'B'", ['A', 'B', 'C']),
OptionalCodeInfo(CodeReference(5, 12, 5, 36), "df_a.merge(df_b, on='B')"))
change_df = DataFrame({'sensitive_column_value': ['cat_a', 'cat_b', 'cat_c'],
'count_before': [2, 2, 1],
'count_after': [2, 1, 1],
'ratio_before': [0.4, 0.4, 0.2],
'ratio_after': [0.5, 0.25, 0.25],
'relative_ratio_change': [(0.5 - 0.4) / 0.4, (.25 - 0.4) / 0.4, (0.25 - 0.2) / 0.2]})
expected_distribution_change = BiasDistributionChange(failing_dag_node, False, (.25 - 0.4) / 0.4, change_df)
expected_dag_node_to_change = {failing_dag_node: {'A': expected_distribution_change}}
failure_message = 'A Join causes a min_relative_ratio_change of \'A\' by -0.37500000000000006, a value below the ' \
'configured minimum threshold -0.3!'
expected_result = NoBiasIntroducedForResult(NoBiasIntroducedFor(['A']), CheckStatus.FAILURE, failure_message,
expected_dag_node_to_change)
return expected_result
def get_expected_check_result_simple_imputer():
""" Expected result for the code snippet in test_no_bias_introduced_for_simple_imputer"""
imputer_dag_node = DagNode(1,
BasicCodeLocation('<string-source>', 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.impute._base', 'SimpleImputer')),
DagNodeDetails('Simple Imputer: fit_transform', ['A']),
OptionalCodeInfo(CodeReference(6, 10, 6, 72),
"SimpleImputer(missing_values=np.nan, strategy='most_frequent')"))
change_df = DataFrame({'sensitive_column_value': ['cat_a', 'cat_c', math.nan],
'count_before': [2, 1, 1],
'count_after': [3, 1, 0],
'ratio_before': [0.5, 0.25, 0.25],
'ratio_after': [0.75, 0.25, 0.],
'relative_ratio_change': [0.5, 0., -1.]})
expected_distribution_change = BiasDistributionChange(imputer_dag_node, True, 0., change_df)
expected_dag_node_to_change = {imputer_dag_node: {'A': expected_distribution_change}}
expected_result = NoBiasIntroducedForResult(NoBiasIntroducedFor(['A']), CheckStatus.SUCCESS, None,
expected_dag_node_to_change)
return expected_result
```
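The expected ratios in `get_expected_check_result_merge` can be reproduced with plain pandas; the standalone sketch below (an illustration, not part of the test module) shows where the `-0.37500000000000006` in the failure message comes from:
```python
import pandas as pd

df_a = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c', 'cat_b'], 'B': [1, 2, 4, 5, 7]})
df_b = pd.DataFrame({'B': [1, 2, 3, 4, 5], 'C': [1, 5, 4, 11, None]})
df_merged = df_a.merge(df_b, on='B')
ratio_before = df_a['A'].value_counts(normalize=True)      # cat_a: 0.4, cat_b: 0.4, cat_c: 0.2
ratio_after = df_merged['A'].value_counts(normalize=True)  # cat_a: 0.5, cat_b: 0.25, cat_c: 0.25
relative_change = (ratio_after - ratio_before) / ratio_before
# about -0.375 for cat_b, below the configured -0.3 threshold, hence CheckStatus.FAILURE
print(relative_change.min())
```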
#### File: test/example_pipelines/test_compas.py
```python
import ast
from mlinspect.testing._testing_helper_utils import run_and_assert_all_op_outputs_inspected
from example_pipelines import COMPAS_PY, COMPAS_PNG
def test_py_pipeline_runs():
"""
Tests whether the .py version of the pipeline works
"""
with open(COMPAS_PY) as file:
text = file.read()
parsed_ast = ast.parse(text)
exec(compile(parsed_ast, filename="<ast>", mode="exec"))
def test_instrumented_py_pipeline_runs():
"""
Tests whether the pipeline works with instrumentation
"""
dag = run_and_assert_all_op_outputs_inspected(COMPAS_PY, ['sex', 'race'], COMPAS_PNG)
assert len(dag) == 39
```
#### File: test/inspections/test_arg_capturing.py
```python
from inspect import cleandoc
import numpy
from testfixtures import compare
from mlinspect import OperatorType, DagNode, BasicCodeLocation, OperatorContext, FunctionInfo, DagNodeDetails, \
OptionalCodeInfo, CodeReference
from mlinspect.inspections import ArgumentCapturing
from mlinspect.instrumentation import _pipeline_executor
def test_arg_capturing_sklearn_decision_tree():
"""
Tests whether ArgumentCapturing works for the sklearn DecisionTreeClassifier
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.preprocessing import label_binarize, StandardScaler
from sklearn.tree import DecisionTreeClassifier
import numpy as np
df = pd.DataFrame({'A': [0, 1, 2, 3], 'B': [0, 1, 2, 3], 'target': ['no', 'no', 'yes', 'yes']})
train = StandardScaler().fit_transform(df[['A', 'B']])
target = label_binarize(df['target'], classes=['no', 'yes'])
clf = DecisionTreeClassifier()
clf = clf.fit(train, target)
test_df = pd.DataFrame({'A': [0., 0.6], 'B': [0., 0.6], 'target': ['no', 'yes']})
test_labels = label_binarize(test_df['target'], classes=['no', 'yes'])
test_score = clf.score(test_df[['A', 'B']], test_labels)
assert test_score == 1.0
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[ArgumentCapturing()])
classifier_node = list(inspector_result.dag.nodes)[7]
score_node = list(inspector_result.dag.nodes)[14]
expected_classifier = DagNode(7,
BasicCodeLocation("<string-source>", 11),
OperatorContext(OperatorType.ESTIMATOR,
FunctionInfo('sklearn.tree._classes', 'DecisionTreeClassifier')),
DagNodeDetails('Decision Tree', []),
OptionalCodeInfo(CodeReference(11, 6, 11, 30),
'DecisionTreeClassifier()'))
expected_score = DagNode(14,
BasicCodeLocation("<string-source>", 16),
OperatorContext(OperatorType.SCORE,
FunctionInfo('sklearn.tree._classes.DecisionTreeClassifier', 'score')),
DagNodeDetails('Decision Tree', []),
OptionalCodeInfo(CodeReference(16, 13, 16, 56),
"clf.score(test_df[['A', 'B']], test_labels)"))
compare(classifier_node, expected_classifier)
compare(score_node, expected_score)
expected_args = {'criterion': 'gini', 'splitter': 'best', 'max_depth': None, 'min_samples_split': 2,
'min_samples_leaf': 1, 'min_weight_fraction_leaf': 0.0, 'max_features': None, 'random_state': None,
'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None,
'class_weight': None, 'presort': 'deprecated', 'ccp_alpha': 0.0}
inspection_results_tree = inspector_result.dag_node_to_inspection_results[classifier_node]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
inspection_results_tree = inspector_result.dag_node_to_inspection_results[score_node]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
def test_arg_capturing_sklearn_sgd_classifier():
"""
Tests whether ArgumentCapturing works for the sklearn SGDClassifier
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.preprocessing import label_binarize, StandardScaler
from sklearn.linear_model import SGDClassifier
import numpy as np
df = pd.DataFrame({'A': [0, 1, 2, 3], 'B': [0, 1, 2, 3], 'target': ['no', 'no', 'yes', 'yes']})
train = StandardScaler().fit_transform(df[['A', 'B']])
target = label_binarize(df['target'], classes=['no', 'yes'])
clf = SGDClassifier(loss='log', random_state=42)
clf = clf.fit(train, target)
test_df = pd.DataFrame({'A': [0., 0.6], 'B': [0., 0.6], 'target': ['no', 'yes']})
test_labels = label_binarize(test_df['target'], classes=['no', 'yes'])
test_score = clf.score(test_df[['A', 'B']], test_labels)
assert test_score == 1.0
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[ArgumentCapturing()])
classifier_node = list(inspector_result.dag.nodes)[7]
score_node = list(inspector_result.dag.nodes)[14]
expected_classifier = DagNode(7,
BasicCodeLocation("<string-source>", 11),
OperatorContext(OperatorType.ESTIMATOR,
FunctionInfo('sklearn.linear_model._stochastic_gradient',
'SGDClassifier')),
DagNodeDetails('SGD Classifier', []),
OptionalCodeInfo(CodeReference(11, 6, 11, 48),
"SGDClassifier(loss='log', random_state=42)"))
expected_score = DagNode(14,
BasicCodeLocation("<string-source>", 16),
OperatorContext(OperatorType.SCORE,
FunctionInfo('sklearn.linear_model._stochastic_gradient.SGDClassifier',
'score')),
DagNodeDetails('SGD Classifier', []),
OptionalCodeInfo(CodeReference(16, 13, 16, 56),
"clf.score(test_df[['A', 'B']], test_labels)"))
compare(classifier_node, expected_classifier)
compare(score_node, expected_score)
expected_args = {'loss': 'log', 'penalty': 'l2', 'alpha': 0.0001, 'l1_ratio': 0.15, 'fit_intercept': True,
'max_iter': 1000, 'tol': 0.001, 'shuffle': True, 'verbose': 0, 'epsilon': 0.1, 'n_jobs': None,
'random_state': 42, 'learning_rate': 'optimal', 'eta0': 0.0, 'power_t': 0.5,
'early_stopping': False, 'validation_fraction': 0.1, 'n_iter_no_change': 5, 'class_weight': None,
'warm_start': False, 'average': False}
inspection_results_tree = inspector_result.dag_node_to_inspection_results[classifier_node]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
inspection_results_tree = inspector_result.dag_node_to_inspection_results[score_node]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
def test_arg_capturing_sklearn_keras_classifier():
"""
Tests whether ArgumentCapturing works for the sklearn KerasClassifier
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.python.keras.optimizer_v2.gradient_descent import SGD
import tensorflow as tf
import numpy as np
df = pd.DataFrame({'A': [0, 1, 2, 3], 'B': [0, 1, 2, 3], 'target': ['no', 'no', 'yes', 'yes']})
train = StandardScaler().fit_transform(df[['A', 'B']])
target = OneHotEncoder(sparse=False).fit_transform(df[['target']])
def create_model(input_dim):
clf = Sequential()
clf.add(Dense(2, activation='relu', input_dim=input_dim))
clf.add(Dense(2, activation='relu'))
clf.add(Dense(2, activation='softmax'))
clf.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=["accuracy"])
return clf
np.random.seed(42)
tf.random.set_seed(42)
clf = KerasClassifier(build_fn=create_model, epochs=15, batch_size=1, verbose=0, input_dim=2)
clf = clf.fit(train, target)
test_df = pd.DataFrame({'A': [0., 0.8], 'B': [0., 0.8], 'target': ['no', 'yes']})
test_labels = OneHotEncoder(sparse=False).fit_transform(test_df[['target']])
test_score = clf.score(test_df[['A', 'B']], test_labels)
assert test_score == 1.0
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[ArgumentCapturing()])
classifier_node = list(inspector_result.dag.nodes)[7]
score_node = list(inspector_result.dag.nodes)[14]
expected_classifier = DagNode(7,
BasicCodeLocation("<string-source>", 25),
OperatorContext(OperatorType.ESTIMATOR,
FunctionInfo('tensorflow.python.keras.wrappers.scikit_learn',
'KerasClassifier')),
DagNodeDetails('Neural Network', []),
OptionalCodeInfo(CodeReference(25, 6, 25, 93),
'KerasClassifier(build_fn=create_model, epochs=15, batch_size=1, '
'verbose=0, input_dim=2)'))
expected_score = DagNode(14,
BasicCodeLocation("<string-source>", 30),
OperatorContext(OperatorType.SCORE,
FunctionInfo('tensorflow.python.keras.wrappers.scikit_learn.'
'KerasClassifier', 'score')),
DagNodeDetails('Neural Network', []),
OptionalCodeInfo(CodeReference(30, 13, 30, 56),
"clf.score(test_df[['A', 'B']], test_labels)"))
compare(classifier_node, expected_classifier)
compare(score_node, expected_score)
expected_args = {'epochs': 15, 'batch_size': 1, 'verbose': 0, 'input_dim': 2}
inspection_results_tree = inspector_result.dag_node_to_inspection_results[classifier_node]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
inspection_results_tree = inspector_result.dag_node_to_inspection_results[score_node]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
def test_arg_capturing_standard_scaler():
"""
Tests whether ArgumentCapturing works for the sklearn StandardScaler
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.preprocessing import StandardScaler
import numpy as np
df = pd.DataFrame({'A': [1, 2, 10, 5]})
standard_scaler = StandardScaler()
encoded_data = standard_scaler.fit_transform(df)
test_df = pd.DataFrame({'A': [1, 2, 10, 5]})
encoded_data = standard_scaler.transform(test_df)
expected = np.array([[-1.], [-0.71428571], [1.57142857], [0.14285714]])
assert np.allclose(encoded_data, expected)
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[ArgumentCapturing()])
fit_transform_node = list(inspector_result.dag.nodes)[1]
transform_node = list(inspector_result.dag.nodes)[3]
expected_fit_transform = DagNode(1,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing._data', 'StandardScaler')),
DagNodeDetails('Standard Scaler: fit_transform', ['array']),
OptionalCodeInfo(CodeReference(6, 18, 6, 34), 'StandardScaler()'))
expected_transform = DagNode(3,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing._data', 'StandardScaler')),
DagNodeDetails('Standard Scaler: transform', ['array']),
OptionalCodeInfo(CodeReference(6, 18, 6, 34), 'StandardScaler()'))
compare(fit_transform_node, expected_fit_transform)
compare(transform_node, expected_transform)
expected_args = {'copy': True, 'with_mean': True, 'with_std': True}
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_fit_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
def test_arg_capturing_hashing_vectorizer():
"""
    Tests whether ArgumentCapturing works for the sklearn HashingVectorizer
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.feature_extraction.text import HashingVectorizer
from scipy.sparse import csr_matrix
import numpy as np
df = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c']})
vectorizer = HashingVectorizer(ngram_range=(1, 3), n_features=2**2)
encoded_data = vectorizer.fit_transform(df['A'])
expected = csr_matrix([[-0., 0., 0., -1.], [0., -1., -0., 0.], [0., 0., 0., -1.], [0., 0., 0., -1.]])
assert np.allclose(encoded_data.A, expected.A)
test_df = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c']})
encoded_data = vectorizer.transform(test_df['A'])
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[ArgumentCapturing()])
fit_transform_node = list(inspector_result.dag.nodes)[2]
transform_node = list(inspector_result.dag.nodes)[5]
expected_fit_transform = DagNode(2,
BasicCodeLocation("<string-source>", 7),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.feature_extraction.text',
'HashingVectorizer')),
DagNodeDetails('Hashing Vectorizer: fit_transform', ['array']),
OptionalCodeInfo(CodeReference(7, 13, 7, 67),
'HashingVectorizer(ngram_range=(1, 3), n_features=2**2)'))
expected_transform = DagNode(5,
BasicCodeLocation("<string-source>", 7),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.feature_extraction.text',
'HashingVectorizer')),
DagNodeDetails('Hashing Vectorizer: transform', ['array']),
OptionalCodeInfo(CodeReference(7, 13, 7, 67),
'HashingVectorizer(ngram_range=(1, 3), n_features=2**2)'))
compare(fit_transform_node, expected_fit_transform)
compare(transform_node, expected_transform)
expected_args = {'input': 'content', 'encoding': 'utf-8', 'decode_error': 'strict', 'strip_accents': None,
'lowercase': True, 'preprocessor': None, 'tokenizer': None, 'stop_words': None,
                     'token_pattern': '(?u)\\b\\w\\w+\\b', 'ngram_range': (1, 3), 'analyzer': 'word', 'n_features': 4,
'binary': False, 'norm': 'l2', 'alternate_sign': True, 'dtype': numpy.float64}
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_fit_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
def test_arg_capturing_kbins_discretizer():
"""
Tests whether ArgumentCapturing works for the sklearn KBinsDiscretizer
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.preprocessing import KBinsDiscretizer
import numpy as np
df = pd.DataFrame({'A': [1, 2, 10, 5]})
discretizer = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
encoded_data = discretizer.fit_transform(df)
test_df = pd.DataFrame({'A': [1, 2, 10, 5]})
encoded_data = discretizer.transform(test_df)
expected = np.array([[0.], [0.], [2.], [1.]])
assert np.allclose(encoded_data, expected)
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[ArgumentCapturing()])
fit_transform_node = list(inspector_result.dag.nodes)[1]
transform_node = list(inspector_result.dag.nodes)[3]
expected_fit_transform = DagNode(1,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing._discretization',
'KBinsDiscretizer')),
DagNodeDetails('K-Bins Discretizer: fit_transform', ['array']),
OptionalCodeInfo(CodeReference(6, 14, 6, 78),
"KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')"))
expected_transform = DagNode(3,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing._discretization',
'KBinsDiscretizer')),
DagNodeDetails('K-Bins Discretizer: transform', ['array']),
OptionalCodeInfo(CodeReference(6, 14, 6, 78),
"KBinsDiscretizer(n_bins=3, encode='ordinal', "
"strategy='uniform')"))
compare(fit_transform_node, expected_fit_transform)
compare(transform_node, expected_transform)
expected_args = {'n_bins': 3, 'encode': 'ordinal', 'strategy': 'uniform'}
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_fit_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
def test_arg_capturing_one_hot_encoder():
"""
Tests whether ArgumentCapturing works for the sklearn OneHotEncoder
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.preprocessing import label_binarize, OneHotEncoder
import numpy as np
df = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c']})
one_hot_encoder = OneHotEncoder(sparse=False)
encoded_data = one_hot_encoder.fit_transform(df)
expected = np.array([[1., 0., 0.], [0., 1., 0.], [1., 0., 0.], [0., 0., 1.]])
print(encoded_data)
assert np.allclose(encoded_data, expected)
test_df = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c']})
encoded_data = one_hot_encoder.transform(test_df)
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[ArgumentCapturing()])
fit_transform_node = list(inspector_result.dag.nodes)[1]
transform_node = list(inspector_result.dag.nodes)[3]
expected_fit_transform = DagNode(1,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing._encoders', 'OneHotEncoder')),
DagNodeDetails('One-Hot Encoder: fit_transform', ['array']),
OptionalCodeInfo(CodeReference(6, 18, 6, 45), 'OneHotEncoder(sparse=False)'))
expected_transform = DagNode(3,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing._encoders',
'OneHotEncoder')),
DagNodeDetails('One-Hot Encoder: transform', ['array']),
OptionalCodeInfo(CodeReference(6, 18, 6, 45), 'OneHotEncoder(sparse=False)'))
compare(fit_transform_node, expected_fit_transform)
compare(transform_node, expected_transform)
expected_args = {'categories': 'auto', 'drop': None, 'sparse': False, 'dtype': numpy.float64,
'handle_unknown': 'error'}
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_fit_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
def test_arg_capturing_simple_imputer():
"""
Tests whether ArgumentCapturing works for the sklearn SimpleImputer
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.impute import SimpleImputer
import numpy as np
df = pd.DataFrame({'A': ['cat_a', np.nan, 'cat_a', 'cat_c']})
imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imputed_data = imputer.fit_transform(df)
test_df = pd.DataFrame({'A': ['cat_a', np.nan, 'cat_a', 'cat_c']})
imputed_data = imputer.transform(test_df)
expected = np.array([['cat_a'], ['cat_a'], ['cat_a'], ['cat_c']])
assert np.array_equal(imputed_data, expected)
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[ArgumentCapturing()])
fit_transform_node = list(inspector_result.dag.nodes)[1]
transform_node = list(inspector_result.dag.nodes)[3]
expected_fit_transform = DagNode(1,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.impute._base', 'SimpleImputer')),
DagNodeDetails('Simple Imputer: fit_transform', ['A']),
OptionalCodeInfo(CodeReference(6, 10, 6, 72),
"SimpleImputer(missing_values=np.nan, strategy='most_frequent')"))
expected_transform = DagNode(3,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.impute._base', 'SimpleImputer')),
DagNodeDetails('Simple Imputer: transform', ['A']),
OptionalCodeInfo(CodeReference(6, 10, 6, 72),
"SimpleImputer(missing_values=np.nan, strategy='most_frequent')"))
compare(fit_transform_node, expected_fit_transform)
compare(transform_node, expected_transform)
    expected_args = {'missing_values': numpy.nan, 'strategy': 'most_frequent', 'fill_value': None, 'verbose': 0,
                     'copy': True, 'add_indicator': False}
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_fit_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
def test_arg_capturing_function_transformer():
"""
Tests whether ArgumentCapturing works for the sklearn FunctionTransformer
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.preprocessing import FunctionTransformer
import numpy as np
def safe_log(x):
return np.log(x, out=np.zeros_like(x), where=(x!=0))
df = pd.DataFrame({'A': [1, 2, 10, 5]})
function_transformer = FunctionTransformer(lambda x: safe_log(x))
encoded_data = function_transformer.fit_transform(df)
test_df = pd.DataFrame({'A': [1, 2, 10, 5]})
encoded_data = function_transformer.transform(test_df)
expected = np.array([[0.000000], [0.693147], [2.302585], [1.609438]])
assert np.allclose(encoded_data, expected)
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[ArgumentCapturing()])
fit_transform_node = list(inspector_result.dag.nodes)[1]
transform_node = list(inspector_result.dag.nodes)[3]
expected_fit_transform = DagNode(1,
BasicCodeLocation("<string-source>", 9),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing_function_transformer',
'FunctionTransformer')),
DagNodeDetails('Function Transformer: fit_transform', ['A']),
OptionalCodeInfo(CodeReference(9, 23, 9, 65),
'FunctionTransformer(lambda x: safe_log(x))'))
expected_transform = DagNode(3,
BasicCodeLocation("<string-source>", 9),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing_function_transformer',
'FunctionTransformer')),
DagNodeDetails('Function Transformer: transform', ['A']),
OptionalCodeInfo(CodeReference(9, 23, 9, 65),
'FunctionTransformer(lambda x: safe_log(x))'))
compare(fit_transform_node, expected_fit_transform)
compare(transform_node, expected_transform)
expected_args = {'validate': False, 'accept_sparse': False, 'check_inverse': True, 'kw_args': None,
'inv_kw_args': None}
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_fit_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
inspection_results_tree = inspector_result.dag_node_to_inspection_results[expected_transform]
captured_args = inspection_results_tree[ArgumentCapturing()]
compare(captured_args, expected_args)
```
#### File: test/inspections/test_completeness_of_columns.py
```python
from inspect import cleandoc
from testfixtures import compare
from mlinspect._pipeline_inspector import PipelineInspector
from mlinspect.inspections import CompletenessOfColumns
def test_completeness_merge():
"""
Tests whether CompletenessOfColumns works for joins
"""
test_code = cleandoc("""
import numpy as np
import pandas as pd
df_a = pd.DataFrame({'A': ['cat_a', None, 'cat_a', 'cat_c', None], 'B': [1, 2, 4, 5, 7]})
df_b = pd.DataFrame({'B': [1, 2, 3, 4, np.nan], 'C': [1, 5, 4, 11, None]})
df_merged = df_a.merge(df_b, on='B')
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_required_inspection(CompletenessOfColumns(['A', 'B'])) \
.execute()
inspection_results = list(inspector_result.dag_node_to_inspection_results.values())
completeness_output = inspection_results[0][CompletenessOfColumns(['A', 'B'])]
expected_completeness = {'A': 0.6, 'B': 1.0}
compare(completeness_output, expected_completeness)
completeness_output = inspection_results[1][CompletenessOfColumns(['A', 'B'])]
expected_completeness = {'B': 0.8}
compare(completeness_output, expected_completeness)
completeness_output = inspection_results[2][CompletenessOfColumns(['A', 'B'])]
expected_completeness = {'A': 2/3, 'B': 1.0}
compare(completeness_output, expected_completeness)
def test_completeness_projection():
"""
Tests whether CompletenessOfColumns works for projections
"""
test_code = cleandoc("""
import pandas as pd
import numpy as np
pandas_df = pd.DataFrame({'A': ['cat_a', 'cat_b', None, 'cat_c', 'cat_b'],
'B': [1, None, np.nan, None, 7], 'C': [2, 2, 10, 5, 7]})
pandas_df = pandas_df[['B', 'C']]
pandas_df = pandas_df[['C']]
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_required_inspection(CompletenessOfColumns(['A', 'B'])) \
.execute()
inspection_results = list(inspector_result.dag_node_to_inspection_results.values())
completeness_output = inspection_results[0][CompletenessOfColumns(['A', 'B'])]
expected_completeness = {'A': 0.8, 'B': 0.4}
compare(completeness_output, expected_completeness)
completeness_output = inspection_results[1][CompletenessOfColumns(['A', 'B'])]
expected_completeness = {'B': 0.4}
compare(completeness_output, expected_completeness)
completeness_output = inspection_results[2][CompletenessOfColumns(['A', 'B'])]
expected_completeness = {}
compare(completeness_output, expected_completeness)
```
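Consistent with the expected values above, the completeness of a column is the fraction of non-null entries; a standalone pandas sketch (an illustration, not part of the test module) for the first DataFrame:
```python
import pandas as pd

df_a = pd.DataFrame({'A': ['cat_a', None, 'cat_a', 'cat_c', None], 'B': [1, 2, 4, 5, 7]})
print(df_a['A'].notna().mean())  # 0.6, the expected completeness of 'A'
print(df_a['B'].notna().mean())  # 1.0
```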
#### File: test/inspections/test_count_distinct_of_columns.py
```python
from inspect import cleandoc
from testfixtures import compare
from mlinspect._pipeline_inspector import PipelineInspector
from mlinspect.inspections import CountDistinctOfColumns
def test_count_distinct_merge():
"""
Tests whether CountDistinctOfColumns works for joins
"""
test_code = cleandoc("""
import numpy as np
import pandas as pd
df_a = pd.DataFrame({'A': ['cat_a', None, 'cat_a', 'cat_c', None], 'B': [1, 2, 4, 5, 7]})
df_b = pd.DataFrame({'B': [1, 2, 3, 4, np.nan], 'C': [1, 5, 4, 11, None]})
df_merged = df_a.merge(df_b, on='B')
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_required_inspection(CountDistinctOfColumns(['A', 'B'])) \
.execute()
inspection_results = list(inspector_result.dag_node_to_inspection_results.values())
count_distinct_output = inspection_results[0][CountDistinctOfColumns(['A', 'B'])]
expected_count_distinct = {'A': 3, 'B': 5}
compare(count_distinct_output, expected_count_distinct)
count_distinct_output = inspection_results[1][CountDistinctOfColumns(['A', 'B'])]
expected_count_distinct = {'B': 5}
compare(count_distinct_output, expected_count_distinct)
count_distinct_output = inspection_results[2][CountDistinctOfColumns(['A', 'B'])]
expected_count_distinct = {'A': 2, 'B': 3}
compare(count_distinct_output, expected_count_distinct)
def test_count_distinct_projection():
"""
Tests whether CountDistinctOfColumns works for projections
"""
test_code = cleandoc("""
import pandas as pd
import numpy as np
pandas_df = pd.DataFrame({'A': ['cat_a', 'cat_b', None, 'cat_c', 'cat_b'],
'B': [1, None, np.nan, None, 7], 'C': [2, 2, 10, 5, 7]})
pandas_df = pandas_df[['B', 'C']]
pandas_df = pandas_df[['C']]
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_required_inspection(CountDistinctOfColumns(['A', 'B'])) \
.execute()
inspection_results = list(inspector_result.dag_node_to_inspection_results.values())
count_distinct_output = inspection_results[0][CountDistinctOfColumns(['A', 'B'])]
expected_count_distinct = {'A': 4, 'B': 5}
compare(count_distinct_output, expected_count_distinct)
count_distinct_output = inspection_results[1][CountDistinctOfColumns(['A', 'B'])]
expected_count_distinct = {'B': 5}
compare(count_distinct_output, expected_count_distinct)
count_distinct_output = inspection_results[2][CountDistinctOfColumns(['A', 'B'])]
expected_count_distinct = {}
compare(count_distinct_output, expected_count_distinct)
``` |
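Judging from the expected values above, missing values appear to be counted as a distinct value of their own; the standalone pandas sketch below (an assumption about the semantics, not part of the test module) reproduces the numbers for `df_a`:
```python
import pandas as pd

df_a = pd.DataFrame({'A': ['cat_a', None, 'cat_a', 'cat_c', None], 'B': [1, 2, 4, 5, 7]})
print(df_a['A'].nunique(dropna=False))  # 3: {'cat_a', 'cat_c', None}
print(df_a['B'].nunique(dropna=False))  # 5
```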
{
"source": "jinyangpeter/nucs349_final",
"score": 3
} |
#### File: nucs349_final/src/implement_cf.py
```python
from src.collaborative_filtering import collaborative_filtering
from utils import data
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
def load_global():
BASE_PATH = './COVID-19/csse_covid_19_data/'
confirmed = os.path.join(
BASE_PATH,
'csse_covid_19_time_series',
'time_series_covid19_confirmed_global.csv')
return data.load_csv_data(confirmed)
def ind_replace_by_number(confirmed_cases, number):
threshold_ind = []
for i in range(confirmed_cases.shape[0]):
temp = np.argmax(confirmed_cases[i] > number)
threshold_ind.append([True]*temp + [False]*(len(confirmed_cases[i]) - temp))
return np.array(threshold_ind)
def ind_replace_by_percent(confirmed_cases, percent):
threshold = confirmed_cases[:, -1] * percent
ind = []
for i in range(confirmed_cases.shape[0]):
ind.append(confirmed_cases[i] <= threshold[i])
return np.array(ind)
# # Get the date on which a region first reported any case
# first_case = np.argmax((confirmed_cases != 0), axis=1)
# threshold_ind = []
# # Get the corresponding threshold number of cases; thresholds
# # will be determined by the quantile used
# for i in range(len(first_case)):
# temp = confirmed_cases[i][first_case[i]:]
# temp = np.quantile(temp, quantile)
# # Get the first date above threshold, and then create vector of 0, 1
# temp = np.argmax(confirmed_cases[i] > temp)
# threshold_ind.append([True]*temp + [False]*(len(confirmed_cases[i]) - temp))
# return np.array(threshold_ind)
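# Symmetric growth rate between consecutive days:
# r_t = (x_{t+1} - x_t) / (0.5 * (x_t + x_{t+1})), with zero denominators replaced by 1;
# a jump from 0 to a nonzero count therefore yields a rate of exactly 2.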
def dhs_growth_rate(data):
t_0 = data[:, :-1]
t_1 = data[:, 1:]
denominator = 0.5*(t_0 + t_1)
numerator = t_1 - t_0
denominator[denominator == 0] = 1
return numerator/denominator
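# rate_to_number inverts the growth-rate formula to back-fill counts on the days flagged by `ind`:
# solving r = (x1 - x0) / (0.5 * (x0 + x1)) for x0 gives x0 = x1 * (1 - r/2) / (1 + r/2),
# applied right-to-left so each masked day is reconstructed from the following day.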
def rate_to_number(rate_, raw, ind):
rate = np.copy(rate_)
matrix = (raw * (1 - ind)).astype(float)
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1] - 2, 0, -1):
if ind[i, j - 1] == 1:
matrix[i, j - 1] = matrix[i, j] * (1 - rate[i, j - 1]/2)/(1 + rate[i, j - 1]/2)
return matrix
# rate = np.copy(rate_)
# temp = raw[:, 1:]
# num = np.zeros(temp.shape)
# # If rate is 2, i.e. it jumps from 0 to nonzero, just use the original raw data
# num[rate == 2] = temp[rate == 2]
# rate[rate == 2] = -2
# temp = raw[:, :-1]
# num = num + temp * (1 + rate/2)/(1 - rate/2)
# num = np.concatenate([raw[:, 0].reshape(-1, 1), num], axis=1)
# return num
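# random_drop simulates missing reports: for `rows_drop` randomly chosen regions, the first
# `days_drop` reported days are marked as unobserved in the index and zeroed in the data copy,
# so imputed values can later be compared against the held-out ground truth.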
def random_drop(data, index, rows_drop, days_drop):
ind = np.copy(index)
data_dropped = np.copy(data)
np.random.seed(0)
rows = np.random.choice(data.shape[0], rows_drop, replace=False)
# ind_drop = np.random.randint(days_drop, size=rows_drop)
for i in range(rows_drop):
temp = ind[rows[i]][ind[rows[i]] == 0]
temp[0:days_drop] = 1
        # prefix of False for the days before the first case, then the updated mask for the reported days
        temp2 = np.concatenate([np.zeros(int(np.sum(ind[rows[i]])), dtype=bool), temp])
ind[rows[i]][ind[rows[i]] == 0] = temp
data_dropped[rows[i]][temp2] = 0
return ind, data_dropped
confirmed = load_global()
confirmed_cases = confirmed.drop(confirmed.columns[0:4], axis=1)
confirmed_cases = confirmed_cases.to_numpy()
quantile_ind = ind_replace_by_percent(confirmed_cases, 0.05)
confirmed_rate = dhs_growth_rate(confirmed_cases)
number_ind = ind_replace_by_number(confirmed_cases, 0)
number_ind_dropped, confirmed_cases_dropped = random_drop(confirmed_cases, number_ind,
int(confirmed_cases.shape[0]/10), 10)
confirmed_rate_dropped = dhs_growth_rate(confirmed_cases_dropped)
np.random.seed(0)
mse = []
for dis in ['euclidean', 'cosine', 'manhattan']:
imputed_rate = collaborative_filtering(confirmed_rate_dropped, 3, number_ind_dropped[:, :-1],
distance_measure=dis, aggregator="median")
imputed_cases = rate_to_number(imputed_rate, confirmed_cases_dropped, number_ind_dropped)
test_ind = number_ind_dropped.astype(int) - number_ind.astype(int)
test_ind = test_ind.astype(bool)
m = (confirmed_cases[test_ind] - imputed_cases[test_ind])**2
mse.append(np.mean(m))
mse = np.array(mse)
print(mse)
print("Best is", ['euclidean', 'cosine', 'manhattan'][np.argmin(mse)])
np.random.seed(0)
mse = []
for agg in ['mean', 'mode', 'median']:
imputed_rate = collaborative_filtering(confirmed_rate_dropped, 3, number_ind_dropped[:, :-1],
distance_measure="manhattan", aggregator=agg)
imputed_cases = rate_to_number(imputed_rate, confirmed_cases_dropped, number_ind_dropped)
test_ind = number_ind_dropped.astype(int) - number_ind.astype(int)
test_ind = test_ind.astype(bool)
m = (confirmed_cases[test_ind] - imputed_cases[test_ind]) ** 2
mse.append(np.mean(m))
mse = np.array(mse)
print(mse)
print("Best is", ['mean', 'mode', 'median'][np.argmin(mse)])
np.random.seed(0)
mse = []
for k in range(1, 31):
imputed_rate = collaborative_filtering(confirmed_rate_dropped, k, number_ind_dropped[:, :-1],
distance_measure="manhattan", aggregator="mean")
imputed_cases = rate_to_number(imputed_rate, confirmed_cases_dropped, number_ind_dropped)
test_ind = number_ind_dropped.astype(int) - number_ind.astype(int)
test_ind = test_ind.astype(bool)
m = (confirmed_cases[test_ind] - imputed_cases[test_ind]) ** 2
mse.append(np.mean(m))
mse = np.array(mse)
print("Best is", range(1, 31)[np.argmin(mse)])
plt.title("MSE for K in range(1, 31)")
plt.ylabel("MSE")
plt.plot(range(1, 31), mse)
plt.savefig("final_mse.png")
###############################
number_ind_0 = ind_replace_by_number(confirmed_cases, 10)
imputed_rate = collaborative_filtering(confirmed_rate, 2, number_ind_0[:, :-1],
distance_measure="manhattan", aggregator="mean")
imputed_cases = rate_to_number(imputed_rate, confirmed_cases, number_ind_0)
imputed_cases = np.rint(imputed_cases).astype(int)
temp = (imputed_rate != confirmed_rate)
print(np.sum(temp))
temp = (imputed_cases != confirmed_cases)
print(np.sum(imputed_cases - confirmed_cases))
print(np.sum(temp))
temp = np.argwhere(imputed_cases != confirmed_cases)
temp = imputed_cases- confirmed_cases
temp = np.sum(temp, axis=1)
print(np.sum(temp != 0))
top_name = list(confirmed[confirmed.columns[1]][np.argsort(-temp)[0:4]])
top = np.argsort(-temp)[0:4]
date = list(confirmed.columns[4:])
# date = pd.to_datetime(date, format="%m/%d/%y")
date = [d[:-3] for d in date]
cut = np.argmin(number_ind_0, axis=1)[top]
days_more = 10
for i, l in enumerate(top_name):
fig, ax = plt.subplots()
ax.set_title("Confirmed vs Imputed (Until 2%): " + l, fontsize=12)
ax.set_ylabel("Total Reported Cases")
ax.set_xlabel("Date")
ax.plot(date[:cut[i]+days_more], confirmed_cases[top[i]][:cut[i]+days_more], color="black", label="Confirmed")
ax.plot(date[:cut[i]+days_more], imputed_cases[top[i]][:cut[i]+days_more], "--", color="red", label="Imputed")
tick = (len(date[:cut[i]+days_more]) // 20)
if tick == 0:
tick = range(0, len(date[:cut[i] + days_more]))
else:
tick = range(0, len(date[:cut[i]+days_more]), tick)
plt.xticks(list(tick), [date[:cut[i]+days_more][k] for k in tick], rotation=45)
ax.legend(loc='upper left')
ax.set_yscale('log')
fig.savefig(f"{l}_2%.png", bbox_inches='tight')
total_cases = np.sum(confirmed_cases, axis=0)
total_imputed_cases = np.sum(imputed_cases, axis=0)
fig, ax = plt.subplots()
ax.plot(date, total_cases, color="black", label="Confirmed")
ax.plot(date, total_imputed_cases, "--", color="red", label="Imputed")
ax.set_title("Confirmed vs Imputed: Global Total", fontsize=16)
ax.set_ylabel('Total Reported Cases')
ax.set_xlabel("Time (days since Jan 22, 2020)")
ax.set_yscale('log')
tick = range(0, len(date), max(1, len(date) // 20))
plt.xticks(list(tick), list(tick), rotation=45)
ax.legend(loc='upper left')
plt.tight_layout()
plt.savefig('results/cases_by_country.png')
``` |
{
"source": "jin-yc10/ImageTagging",
"score": 3
} |
#### File: ImageTagging/public/extract_item.py
```python
import os,sys
import csv
import functools
import locale
def sort_tuple_by_pinyin(datalist):
    # sort by the Chinese name using locale-aware (pinyin) collation
    return sorted(datalist, key=functools.cmp_to_key(tuple_cmp))
def tuple_cmp(x,y):
    locale.setlocale(locale.LC_COLLATE, 'zh_CN.UTF-8')  # assumed locale for pinyin collation
return locale.strcoll(x[1],y[1])
def usage():
    print('-Usage:')
    print('\tpython extract_item.py itemDir [outputfilepath(.csv)]')
if len(sys.argv) < 2:
usage()
else:
itemDir = sys.argv[1]
if len(sys.argv) < 3:
if itemDir.endswith('/'):
itemDir = itemDir[0:-1]
outputFilePath = itemDir + '.csv'
else:
outputFilePath = sys.argv[2]
dirs = os.listdir(itemDir)
flists = []
for dir in dirs:
if dir.startswith('.'):
continue
dirpath = os.path.join(itemDir,dir)
if os.path.isdir(dirpath):
splitdir = os.listdir(dirpath)
for splitd in splitdir:
if splitd.startswith('.'):
continue
obj_path = os.path.join(dirpath,splitd)
files = os.listdir(obj_path)
for file in files:
if file.startswith('.'):
continue
path = os.path.join(obj_path,file)
slug = splitd
flists.append((slug,path))
with open(outputFilePath, 'w') as csvfile:
fieldnames = ['slug','path']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
flists = sort_tuple_by_pinyin(flists)
for slug,path in flists:
            print(slug, path)
writer.writerow({'slug': slug, 'path':path})
``` |
{
"source": "jinyeom/detectron2-tensorrt",
"score": 2
} |
#### File: jinyeom/detectron2-tensorrt/retinanet.py
```python
from pathlib import Path
from typing import Union, List, Tuple
import torch
from torch import nn
from detectron2.config import CfgNode
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.layers import batched_nms, cat
from detectron2.structures import Boxes, Instances, pairwise_iou
from utils.tensorrt import TensorRTModule
class RetinaNetRT(nn.Module):
def __init__(self, cfg: CfgNode):
super().__init__()
# fmt: off
self.num_classes = cfg.MODEL.RETINANET.NUM_CLASSES
self.score_threshold = cfg.MODEL.RETINANET.SCORE_THRESH_TEST
self.topk_candidates = cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST
self.nms_threshold = cfg.MODEL.RETINANET.NMS_THRESH_TEST
self.max_detections_per_image = cfg.TEST.DETECTIONS_PER_IMAGE
# fmt: on
self.model = TensorRTModule(
cfg.MODEL.DEPLOY.ONNX_PATH,
cfg.MODEL.DEPLOY.ENGINE_PATH,
max_batch_size=cfg.MODEL.DEPLOY.MAX_BATCH_SIZE,
max_workspace_size=cfg.MODEL.DEPLOY.MAX_WORKSPACE_SIZE,
fp16_mode=cfg.MODEL.DEPLOY.FP16_MODE,
force_rebuild=cfg.MODEL.DEPLOY.FORCE_REBUILD,
)
self.anchors = torch.load(cfg.MODEL.DEPLOY.ANCHORS_PATH)
self.anchors = [anchor.cuda() for anchor in self.anchors]
self.box2box_transform = Box2BoxTransform(
weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS
)
self.image_size = cfg.MODEL.DEPLOY.INPUT_SHAPE[-2:]
def forward(self, inputs: torch.Tensor):
outputs = self.model([inputs])
return self.postprocess(outputs)
    def postprocess(self, outputs: List[torch.Tensor]) -> List[Instances]:
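        # Each output tensor is assumed to be laid out as (batch, anchors, 4 + num_classes):
        # the first 4 channels are box deltas, the remaining ones are per-class scores.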
results = []
for img_idx in range(len(outputs[0])):
box_cls_per_image = []
box_reg_per_image = []
for o in outputs:
box_cls_per_image.append(o[img_idx, ..., 4:])
box_reg_per_image.append(o[img_idx, ..., :4])
results_per_image = self.postprocess_single_image(
box_cls_per_image, box_reg_per_image,
)
results.append(results_per_image)
return results
def postprocess_single_image(self, box_cls, box_delta):
boxes_all = []
scores_all = []
class_idxs_all = []
# Iterate over every feature level
for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta, self.anchors):
box_cls_i = box_cls_i.flatten() # (HxWxAxK,)
# Keep top k top scoring indices only.
num_topk = min(self.topk_candidates, box_reg_i.size(0))
# torch.sort is actually faster than .topk (at least on GPUs)
predicted_prob, topk_idxs = box_cls_i.sort(descending=True)
predicted_prob = predicted_prob[:num_topk]
topk_idxs = topk_idxs[:num_topk]
# filter out the proposals with low confidence score
keep_idxs = predicted_prob > self.score_threshold
predicted_prob = predicted_prob[keep_idxs]
topk_idxs = topk_idxs[keep_idxs]
anchor_idxs = topk_idxs // self.num_classes
classes_idxs = topk_idxs % self.num_classes
box_reg_i = box_reg_i[anchor_idxs]
anchors_i = anchors_i[anchor_idxs]
predicted_boxes = self.box2box_transform.apply_deltas(box_reg_i, anchors_i)
boxes_all.append(predicted_boxes)
scores_all.append(predicted_prob)
class_idxs_all.append(classes_idxs)
boxes_all, scores_all, class_idxs_all = [
cat(x) for x in [boxes_all, scores_all, class_idxs_all]
]
keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.nms_threshold)
keep = keep[: self.max_detections_per_image]
result = Instances(self.image_size)
result.pred_boxes = Boxes(boxes_all[keep])
result.scores = scores_all[keep]
result.pred_classes = class_idxs_all[keep]
return result
``` |
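The per-level decoding in `postprocess_single_image` (top-k sort, score threshold, anchor/class index recovery, then class-aware NMS) can be exercised on its own. Below is a minimal sketch of that filtering flow using random stand-in scores and boxes, with `torchvision.ops.batched_nms` substituted for detectron2's `batched_nms`; the `Box2BoxTransform` delta decoding is omitted, so the boxes are used as-is.

```python
# Sketch of the per-level top-k -> score-threshold -> class-aware NMS flow used in
# postprocess_single_image(). Stand-ins: random scores and boxes, torchvision's batched_nms
# instead of detectron2's, and no Box2BoxTransform decoding (boxes are used directly).
import torch
from torchvision.ops import batched_nms

num_anchors, num_classes = 1000, 80
topk, score_thresh, nms_thresh = 100, 0.3, 0.5

scores = torch.rand(num_anchors * num_classes)   # flattened (HxWxA x K) classification scores
boxes = torch.rand(num_anchors, 4) * 100.0       # one xyxy box per anchor
boxes[:, 2:] += boxes[:, :2]                     # make x2 >= x1 and y2 >= y1

prob, idxs = scores.sort(descending=True)        # same trick as in the model: sort, then slice
prob, idxs = prob[:topk], idxs[:topk]
keep = prob > score_thresh                       # drop low-confidence candidates
prob, idxs = prob[keep], idxs[keep]

anchor_idxs = idxs // num_classes                # recover which anchor ...
class_idxs = idxs % num_classes                  # ... and which class each score belongs to
cand_boxes = boxes[anchor_idxs]

keep = batched_nms(cand_boxes, prob, class_idxs, nms_thresh)
print(cand_boxes[keep].shape, prob[keep].shape, class_idxs[keep].shape)
```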
{
"source": "jinyeom/dreamer",
"score": 3
} |
#### File: dreamer/dreamer/modules.py
```python
from typing import Optional, Tuple, Sequence
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from torch.distributions.one_hot_categorical import OneHotCategoricalStraightThrough
class MLP(nn.Sequential):
"""
Multilayer perceptron with exponential linear unit activations.
Parameters:
in_dim (int): Number of input features
out_dim (int): Number of outputs
        hid_dims (Sequence[int], optional): Sizes of the hidden layers
"""
def __init__(
self, in_dim: int, out_dim: int, hid_dims: Optional[Sequence[int]] = None
) -> None:
if hid_dims is None or len(hid_dims) == 0:
net = [nn.Linear(in_dim, out_dim)]
else:
net = [nn.Linear(in_dim, hid_dims[0]), nn.ELU(inplace=True)]
for in_features, out_features in zip(hid_dims[:-1], hid_dims[1:]):
net.append(nn.Linear(in_features, out_features))
net.append(nn.ELU(inplace=True))
net.append(nn.Linear(hid_dims[-1], out_dim))
super().__init__(*net)
class Encoder(nn.Module):
"""
Convolutional encoder for visual observation data. Note that the encoder expects the
input shape of (B, 3, 64, 64).
Parameters:
x_dim (int): Size of the observation embedding
"""
def __init__(self, x_dim: int) -> None:
super().__init__()
self.x_dim = x_dim
self.conv1 = nn.Conv2d(3, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.embed = nn.Linear(1024, self.x_dim)
def forward(self, x: Tensor) -> Tensor:
x = F.elu_(self.conv1(x))
x = F.elu_(self.conv2(x))
x = F.elu_(self.conv3(x))
x = F.elu_(self.conv4(x))
x = torch.flatten(x, start_dim=1)
x = self.embed(x)
return x
class RSSM(nn.Module):
"""
Recurrent State-Space Model (RSSM) is composed of three models: 1) a recurrent
model, which updates a deterministic recurrent state, 2) a representation model,
which computes a posterior stochastic state, and 3) a transition model, which
computes a prior stochastic state that tries to predict the posterior without
access to the current observation.
Parameters:
x_dim (int): Size of the observation embedding
h_dim (int): Size of the deterministic model state
z_num_var (int): Number of categorical variables for the stochastic state
z_var_dim (int): Size of each categorical variable for the stochastic state
a_dim (int): Size of the action vector
"""
def __init__(
self, x_dim: int, h_dim: int, z_num_var: int, z_var_dim: int, a_dim: int
) -> None:
super().__init__()
self.x_dim = x_dim
self.h_dim = h_dim
self.z_num_var = z_num_var
self.z_var_dim = z_var_dim
self.z_dim = z_num_var * z_var_dim
self.a_dim = a_dim
self.recurrent = nn.GRUCell(self.z_dim + self.a_dim, self.h_dim)
self.representation = MLP(self.x_dim + self.h_dim, self.z_dim, (1024, 1024))
self.transition = MLP(self.h_dim, self.z_dim, (1024, 1024))
def compute_posterior(self, x: Tensor, h: Tensor) -> Tensor:
z_logits = self.representation(torch.cat([x, h], dim=1))
z_logits = z_logits.reshape(-1, self.z_num_var, self.z_var_dim)
z_dist = OneHotCategoricalStraightThrough(logits=z_logits)
z = z_dist.rsample()
return z
def compute_prior(self, h: Tensor) -> Tensor:
z_hat_logits = self.transition(h)
z_hat_logits = z_hat_logits.reshape(-1, self.z_num_var, self.z_var_dim)
z_hat_dist = OneHotCategoricalStraightThrough(logits=z_hat_logits)
z_hat = z_hat_dist.rsample()
return z_hat
def step(
self, action: Tensor, states: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[Tensor, Tensor]:
if states is None:
states = (
torch.zeros(action.size(0), self.h_dim).to(action.device),
torch.zeros(action.size(0), self.z_dim).to(action.device),
)
h, z = states
h = self.recurrent(torch.cat([z, action], dim=1), h)
z = self.compute_prior(h)
return h, z
def forward(
self, x: Tensor, action: Tensor, states: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[Tensor, Tensor, Tensor]:
if states is None:
states = (
torch.zeros(x.size(0), self.h_dim).to(x.device),
torch.zeros(x.size(0), self.z_dim).to(x.device),
)
h, z = states
h = self.recurrent(torch.cat([z, action], dim=1), h)
z = self.compute_posterior(x, h)
z_hat = self.compute_prior(h)
return h, z, z_hat
class Decoder(nn.Module):
"""
Deconvolutional decoder for reconstructing visual observations from model states.
Parameters:
h_dim (int): Size of the deterministic model state
        z_dim (int): Size of the stochastic model state
"""
def __init__(self, h_dim: int, z_dim: int) -> None:
super().__init__()
self.h_dim = h_dim
self.z_dim = z_dim
self.fc = nn.Linear(h_dim + z_dim, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2)
def forward(self, h: Tensor, z: Tensor) -> Tensor:
x = torch.cat([h, z], dim=1)
        x = F.elu_(self.fc(x))
        x = x.view(x.size(0), x.size(1), 1, 1)  # reshape to (B, 1024, 1, 1) before the deconvolutions
x = F.elu_(self.deconv1(x))
x = F.elu_(self.deconv2(x))
x = F.elu_(self.deconv3(x))
x = torch.sigmoid(self.deconv4(x))
return x
``` |
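A quick smoke test of how these modules compose into a single observe/reconstruct step. The sizes (x_dim=256, h_dim=200, a 32x32 categorical latent, a_dim=4) are placeholders, `dreamer.modules` is assumed importable per the file path above, and the categorical state is flattened before it reaches the decoder, which is an assumption about how the surrounding training code uses these pieces.

```python
# Smoke test: one observe/reconstruct step through Encoder -> RSSM -> Decoder with dummy data.
# Sizes (256 / 200 / 32x32 latent / 4 actions) are placeholders, and the categorical state is
# flattened before decoding, which is an assumption about how the training loop uses it.
import torch
from dreamer.modules import Encoder, RSSM, Decoder  # assumes the file above is dreamer/modules.py

x_dim, h_dim, z_num_var, z_var_dim, a_dim = 256, 200, 32, 32, 4
enc = Encoder(x_dim)
rssm = RSSM(x_dim, h_dim, z_num_var, z_var_dim, a_dim)
dec = Decoder(h_dim, rssm.z_dim)

obs = torch.rand(8, 3, 64, 64)               # batch of 64x64 RGB observations
act = torch.zeros(8, a_dim)                  # dummy actions

x = enc(obs)                                 # (8, 256) observation embedding
h, z, z_hat = rssm(x, act)                   # posterior z and prior z_hat share the same h
recon = dec(h, z.flatten(start_dim=1))       # (8, 3, 64, 64) reconstruction
print(x.shape, h.shape, z.shape, recon.shape)
```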
{
"source": "jinyeom/evomini",
"score": 2
} |
#### File: examples/cartpole_swingup/es_main.py
```python
import argparse
import numpy as np
from evomini.es import OpenaiES
from evomini.nn import Module, Linear, LSTM
from evomini.eval import Evaluator
from cartpole_swingup import CartPoleSwingUpEnv
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--num-workers", type=int, default=16)
parser.add_argument("--models-per-worker", type=int, default=16)
parser.add_argument("--num-gen", type=int, default=1000)
parser.add_argument("--num-evals", type=int, default=1)
parser.add_argument("--precision", type=int, default=4)
parser.add_argument("--sigma", type=float, default=0.1)
parser.add_argument("--stepsize", type=float, default=0.03)
args = parser.parse_args()
np.random.seed(args.seed)
class Model(Module):
# small world model agent
def __init__(self, obs_size, action_size, hidden_size):
super().__init__()
self.obs_size = obs_size
self.action_size = action_size
self.hidden_size = hidden_size
self.register_module("C", Linear(obs_size + hidden_size, action_size))
self.register_module("M", LSTM(obs_size + action_size, hidden_size))
def __call__(self, *args, module="C"):
if module == "C":
obs, h = args
x = np.concatenate([obs, h])
action = self.C(x)
return action
if module == "M":
obs, action = args
x = np.concatenate([obs, action])
h = self.M(x)
return h
class CartPoleSwingUpEvaluator(Evaluator):
def _build_env(self):
return CartPoleSwingUpEnv()
def _build_model(self):
return Model(5, 1, 16)
def _evaluate_once(self, env, model):
obs = env.reset()
h = model.M.reset()
rewards = 0
done = False
while not done:
action = model(obs, h, module="C")
obs, reward, done, _ = env.step(action)
h = model(obs, action, module="M")
rewards += reward
return rewards
env = CartPoleSwingUpEnv()
es = OpenaiES(len(Model(5, 1, 16)), sigma=args.sigma, stepsize=args.stepsize)
global_best_fitness = -np.inf
with CartPoleSwingUpEvaluator(args.num_workers,
args.models_per_worker,
args.precision) as evaluator:
popsize = len(evaluator)
for gen in range(args.num_gen):
seeds, solutions = es.sample(popsize)
fitness, success = evaluator.evaluate(seeds, solutions, args.num_evals)
assert success, f"evaluation failed at generation {gen}"
es.step(fitness)
best_fitness = np.max(fitness)
if best_fitness > global_best_fitness:
print(f"improvement detected: {global_best_fitness} -> {best_fitness}")
best = solutions[np.argmax(fitness)]
np.save("model_final.npy", best)
global_best_fitness = best_fitness
stats = {
"gen": gen,
"fitness_mean": np.mean(fitness),
"fitness_std": np.std(fitness),
"fitness_max": np.max(fitness),
"fitness_min": np.min(fitness),
}
print(stats)
``` |
{
"source": "jinyeom/general-bipedal-walker",
"score": 3
} |
#### File: general-bipedal-walker/general_bipedal_walker/color.py
```python
import numpy as np
class Color:
BLACK = np.array((0.000, 0.000, 0.000))
DARK_BLUE = np.array((0.114, 0.169, 0.325))
DARK_PURPLE = np.array((0.494, 0.145, 0.325))
DARK_GREEN = np.array((0.000, 0.529, 0.318))
BROWN = np.array((0.671, 0.322, 0.212))
DARK_GRAY = np.array((0.373, 0.341, 0.310))
LIGHT_GRAY = np.array((0.761, 0.765, 0.780))
WHITE = np.array((1.000, 0.945, 0.910))
RED = np.array((1.000, 0.000, 0.302))
ORANGE = np.array((1.000, 0.639, 0.000))
YELLOW = np.array((1.000, 0.925, 0.153))
GREEN = np.array((0.000, 0.894, 0.212))
BLUE = np.array((0.161, 0.678, 1.000))
INDIGO = np.array((0.514, 0.463, 0.612))
PINK = np.array((1.000, 0.467, 0.659))
PEACH = np.array((1.000, 0.800, 0.667))
@staticmethod
def darker(color, scale=1):
return np.clip(color - scale * 0.1, 0, 1)
@staticmethod
def lighter(color, scale=1):
return np.clip(color + scale * 0.1, 0, 1)
@staticmethod
def rand(rng=None, include_grays=False):
colors = [
'DARK_BLUE',
'DARK_PURPLE',
'DARK_GREEN',
'BROWN',
'RED',
'ORANGE',
'YELLOW',
'GREEN',
'BLUE',
'INDIGO',
'PINK',
'PEACH',
]
if include_grays:
colors += [
'BLACK',
'DARK_GRAY',
'LIGHT_GRAY',
'WHITE',
]
if rng:
return Color.__dict__[rng.choice(colors)]
return Color.__dict__[np.random.choice(colors)]
```
#### File: general-bipedal-walker/general_bipedal_walker/simulation.py
```python
import math
import numpy as np
import Box2D
from Box2D.b2 import (
edgeShape,
circleShape,
fixtureDef,
polygonShape,
revoluteJointDef,
contactListener
)
from .color import Color
class Terrain:
GRASS = 0
STUMP = 1
STAIRS = 2
PIT = 3
@staticmethod
def rand(np_random=None, include_grass=False):
if np_random is None:
np_random = np.random
options = [Terrain.STUMP, Terrain.STAIRS, Terrain.PIT]
if include_grass:
options.append(Terrain.GRASS)
return np_random.choice(options)
class Simulation:
_FPS = 50
_SCALE = 30.0
_VIEWPORT_WIDTH = 600
_VIEWPORT_HEIGHT = 400
_TERRAIN_STEP = 14 / _SCALE
_TERRAIN_LENGTH = 200
_TERRAIN_HEIGHT = _VIEWPORT_HEIGHT / _SCALE / 4
_TERRAIN_GRASS = 10
_TERRAIN_STARTPAD = 20
_FRICTION = 2.5
_BIPED_LIMIT = 1600
_BIPED_HARDCORE_LIMIT = 2000
def __init__(self, np_random, hardcore):
self.np_random = np_random
self.hardcore = hardcore
self.world = Box2D.b2World()
self.terrain = None
self.fd_polygon = fixtureDef(
shape=polygonShape(vertices=[(0, 0), (1, 0), (1, -1), (0, -1)]),
friction=self._FRICTION
)
self.fd_edge = fixtureDef(
shape=edgeShape(vertices=[(0, 0), (1, 1)]),
friction=self._FRICTION,
categoryBits=0x0001
)
@property
def fps(self):
return self._FPS
@property
def scale(self):
return self._SCALE
@property
def viewport_width(self):
return self._VIEWPORT_WIDTH
@property
def viewport_height(self):
return self._VIEWPORT_HEIGHT
@property
def scaled_width(self):
return self.viewport_width / self.scale
@property
def scaled_height(self):
return self.viewport_height / self.scale
@property
def terrain_step(self):
return self._TERRAIN_STEP
@property
def terrain_length(self):
return self._TERRAIN_LENGTH
@property
def terrain_height(self):
return self._TERRAIN_HEIGHT
@property
def terrain_grass(self):
return self._TERRAIN_GRASS
@property
def terrain_startpad(self):
return self._TERRAIN_STARTPAD
@property
def limit(self):
return self._BIPED_HARDCORE_LIMIT if self.hardcore else self._BIPED_LIMIT
def generate_terrain(self):
state = Terrain.GRASS
velocity = 0.0
y = self.terrain_height
counter = self.terrain_startpad
oneshot = False
self.terrain = []
self.terrain_x = []
self.terrain_y = []
for i in range(self.terrain_length):
x = i * self.terrain_step
self.terrain_x.append(x)
if state == Terrain.GRASS and not oneshot:
sign = np.sign(self.terrain_height - y)
velocity = 0.8 * velocity + sign * 0.01
if i > self.terrain_startpad:
noise = self.np_random.uniform(-1, 1)
velocity += noise / self.scale
y += velocity
elif state == Terrain.PIT and oneshot:
color1 = Color.rand()
color2 = Color.BLACK
counter = self.np_random.randint(3, 5)
poly = [
(x, y),
(x + self.terrain_step, y),
(x + self.terrain_step, y - 4 * self.terrain_step),
(x, y - 4 * self.terrain_step)
]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
t.color1 = color1
t.color2 = color2
self.terrain.append(t)
self.fd_polygon.shape.vertices = [(x + self.terrain_step * counter, y) for x, y in poly]
t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
t.color1 = color1
t.color2 = color2
self.terrain.append(t)
counter += 2
original_y = y
elif state == Terrain.PIT and not oneshot:
y = original_y
if counter > 1:
y -= 4 * self.terrain_step
elif state == Terrain.STUMP and oneshot:
counter = self.np_random.randint(1, 3)
poly = [
(x, y),
(x + counter * self.terrain_step, y),
(x + counter * self.terrain_step, y + counter * self.terrain_step),
(x, y + counter * self.terrain_step),
]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
t.color1 = Color.rand()
t.color2 = Color.BLACK
self.terrain.append(t)
elif state == Terrain.STAIRS and oneshot:
color1 = Color.rand()
color2 = Color.BLACK
stair_steps = self.np_random.randint(3, 5)
stair_width = self.np_random.randint(4, 5)
stair_height = 1 if self.np_random.rand() > 0.5 else -1
original_y = y
for s in range(stair_steps):
self.fd_polygon.shape.vertices = [
(x + (s * stair_width) * self.terrain_step, y + (s * stair_height) * self.terrain_step),
(x + ((1 + s) * stair_width) * self.terrain_step, y + (s * stair_height) * self.terrain_step),
(x + ((1 + s) * stair_width) * self.terrain_step, y + (s * stair_height - 1) * self.terrain_step),
(x + (s * stair_width) * self.terrain_step, y + (s * stair_height - 1) * self.terrain_step)
]
t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
t.color1 = color1
t.color2 = color2
self.terrain.append(t)
counter = stair_steps * stair_width
elif state == Terrain.STAIRS and not oneshot:
s = stair_steps * stair_width - counter - stair_height
n = s / stair_width
y = original_y + (n * stair_height) * self.terrain_step
self.terrain_y.append(y)
oneshot = False
counter -= 1
if counter == 0:
counter = self.np_random.randint(self.terrain_grass / 2, self.terrain_grass)
if state == Terrain.GRASS and self.hardcore:
state = Terrain.rand(np_random=self.np_random)
else:
state = Terrain.GRASS
oneshot = True
self.terrain_poly = []
for i in range(self.terrain_length - 1):
poly = [
(self.terrain_x[i], self.terrain_y[i]),
(self.terrain_x[i+1], self.terrain_y[i+1])
]
self.fd_edge.shape.vertices = poly
t = self.world.CreateStaticBody(fixtures=self.fd_edge)
t.color1 = Color.WHITE if i % 2 == 0 else Color.BLACK
t.color2 = Color.WHITE if i % 2 == 0 else Color.BLACK
self.terrain.append(t)
poly += [(poly[1][0], 0), (poly[0][0], 0)]
self.terrain_poly.append((poly, Color.DARK_GREEN))
self.terrain.reverse()
def destroy(self):
if not self.terrain:
return
self.world.contactListener = None
for t in self.terrain:
self.world.DestroyBody(t)
self.terrain = []
def step(self):
self.world.Step(1.0 / self.fps, 6 * 30, 2 * 30)
``` |
{
"source": "jinyeom/npne",
"score": 3
} |
#### File: jinyeom/npne/npes.py
```python
import multiprocessing as mp
import numpy as np
#############
# Utilities #
#############
class Seeder:
def __init__(self, seed=0):
self.seed = seed
self._rs = np.random.RandomState(seed=seed)
def __call__(self, size):
seeds = self._rs.randint(2 ** 31 - 1, size=size, dtype=int)
return seeds.tolist()
class Evaluator:
def __init__(self, num_workers=None):
if num_workers is None:
num_workers = mp.cpu_count()
self.num_workers = num_workers
def evaluate(self, solution, seed):
raise NotImplementedError
def __call__(self, solutions, seeds):
results = []
with mp.Pool(self.num_workers) as pool:
for solution, seed in zip(solutions, seeds):
func = self.evaluate
args = (solution, seed)
results.append(pool.apply_async(func, args=args))
fitness = [r.get() for r in results]
return np.array(fitness)
##############
# Optimizers #
##############
class Optimizer:
def __init__(self, theta):
self.theta = theta
self.t = 0
def update(self, grad):
self.t += 1
self.theta += self._step(grad)
return np.array(self.theta)
def _step(self, grad):
raise NotImplementedError
class SGD(Optimizer):
def __init__(self, theta, alpha, beta=0.9):
super().__init__(theta)
self.alpha = alpha
self.beta = beta
self.v = np.zeros_like(theta)
def _step(self, grad):
self.v = self.beta * self.v + (1 - self.beta) * grad
return -self.alpha * self.v
class Adam(Optimizer):
def __init__(self, theta, alpha, beta1=0.9, beta2=0.999):
super().__init__(theta)
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.m = np.zeros_like(theta)
self.v = np.zeros_like(theta)
def _step(self, grad):
self.m = self.beta1 * self.m + (1 - self.beta1) * grad
self.v = self.beta2 * self.v + (1 - self.beta2) * grad ** 2
m_corr = 1 - self.beta1 ** self.t
v_corr = np.sqrt(1 - self.beta2 ** self.t)
alpha = self.alpha * v_corr / m_corr
return -alpha * self.m / (np.sqrt(self.v) + 1e-8)
########################
# Evolution strategies #
########################
class ES:
def __init__(self, optim, sigma):
self.optim = optim
self.mu = np.array(optim.theta)
self.sigma = sigma
self.epsilon = None
def sample(self, popsize):
assert popsize % 2 == 0
eps_split = np.random.randn(popsize // 2, len(self.mu))
self.epsilon = np.concatenate([eps_split, -eps_split], axis=0)
return self.mu + self.sigma * self.epsilon
def update(self, fitness):
rank = np.empty_like(fitness, dtype=np.long)
rank[np.argsort(fitness)] = np.arange(len(fitness))
fitness = rank.astype(fitness.dtype) / (len(fitness) - 1) - 0.5
fitness = (fitness - np.mean(fitness)) / (np.std(fitness) + 1e-8)
grad = 1 / (len(fitness) * self.sigma) * (self.epsilon.T @ fitness)
self.mu = self.optim.update(-grad)
``` |
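A toy end-to-end run of the pieces above: antithetic sampling with `ES`, centered-rank fitness shaping, and an `Adam` inner optimizer, applied to maximizing a negative sphere function. The module name `npes`, the population size, and the hyperparameters are placeholders; note that `ES.update` uses the old `np.long` alias, so a NumPy release that still provides it is assumed.

```python
# Toy run of the classes above: maximize -||theta||^2 with antithetic ES samples and Adam.
# The module name `npes`, the population size and the hyperparameters are placeholders, and
# ES.update uses the old np.long alias, so a NumPy release that still provides it is assumed.
import numpy as np
from npes import ES, Adam

np.random.seed(0)
theta = np.random.randn(10)                       # initial parameters
es = ES(Adam(theta, alpha=0.05), sigma=0.1)

for gen in range(200):
    solutions = es.sample(popsize=64)             # antithetic samples around the current mean
    fitness = -np.sum(solutions ** 2, axis=1)     # higher is better
    es.update(fitness)                            # centered-rank shaping + Adam step

print("final squared norm:", float(np.sum(es.mu ** 2)))
```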
{
"source": "jinyeom/ppo",
"score": 3
} |
#### File: jinyeom/ppo/env.py
```python
import numpy as np
import torch as pt
import gym
class NormalizeObservation(gym.ObservationWrapper):
def observation(self, observation):
mean = np.mean(observation)
std = np.std(observation)
return (observation - mean) / (std + 1e-8)
class TorchObservation(gym.ObservationWrapper):
def observation(self, observation):
observation = observation.astype(np.float32)
return pt.from_numpy(observation)
class TorchAction(gym.ActionWrapper):
def action(self, action):
action = action.squeeze()
return action.cpu().numpy()
class TorchReward(gym.RewardWrapper):
def reward(self, reward):
reward = reward.astype(np.float32)
reward = reward[:, np.newaxis]
return pt.from_numpy(reward)
class TorchDone(gym.Wrapper):
def step(self, action):
observation, reward, done, info = self.env.step(action)
return observation, reward, self.done(done), info
def done(self, done):
done = done.astype(np.float32)
done = done[:, np.newaxis]
return pt.from_numpy(done)
def make_env(env_id, *wrappers, num_envs=1):
if num_envs == 1:
env = gym.make(env_id)
for wrapper in wrappers:
env = wrapper(env)
return env
env = gym.vector.make(
env_id,
num_envs=num_envs,
wrappers=wrappers
)
env = TorchObservation(env)
env = TorchAction(env)
env = TorchReward(env)
env = TorchDone(env)
return env
```
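For `num_envs=1` the helper simply stacks the given wrapper classes onto `gym.make`, so it can be tried with any installed Gym task under the classic 4-tuple `step` API; the environment id below is only a placeholder.

```python
# Single-environment usage (num_envs=1): the wrappers are stacked directly on gym.make.
# 'Pendulum-v1' is only a placeholder id; adjust it to whatever the installed Gym provides,
# and note this assumes the classic 4-tuple step API.
from env import make_env, NormalizeObservation

env = make_env("Pendulum-v1", NormalizeObservation, num_envs=1)
obs = env.reset()                                   # per-observation normalized numpy array
obs, reward, done, info = env.step(env.action_space.sample())
env.close()
```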
#### File: jinyeom/ppo/main.py
```python
import os
import gym
import torch as pt
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from tqdm import tqdm
from utils import pretty_args, export_args, mkdir_exp, TensorBook
from env import make_env, NormalizeObservation
from agent import ContinuousPolicyAgent
from ppo import ProximalPolicyOptimization
from play import play
def main(args):
exp_path = mkdir_exp(f'{args.env_id}_PPO')
export_args(args, os.path.join(exp_path, 'config.json'))
np.random.seed(args.seed)
pt.random.manual_seed(args.seed)
print("== Creating a training environment...")
env = make_env(args.env_id, NormalizeObservation, num_envs=args.num_envs)
print("== Creating a evaluation environment...")
eval_env = make_env(args.env_id, NormalizeObservation, num_envs=1)
obs_dim = eval_env.observation_space.shape[0]
act_dim = eval_env.action_space.shape[0]
print("== Creating an agent....")
device = pt.device('cuda' if pt.cuda.is_available() else 'cpu')
agent = ContinuousPolicyAgent(obs_dim, act_dim, args.hid_dim).to(device)
print("== Creating a data storage...")
data = TensorBook(args.env_id, args.rollout_steps)
print("== Creating a PPO optimizer...")
optimizer = ProximalPolicyOptimization(
agent,
device,
num_epochs=args.num_epochs,
batch_size=args.batch_size,
lr_max=args.lr_max,
lr_min=args.lr_min,
eps=args.eps,
gamma=args.gamma,
lam=args.lam,
alpha=args.alpha,
value_coef=args.value_coef,
entropy_coef=args.entropy_coef,
max_grad_norm=args.max_grad_norm,
target_kldiv=args.target_kldiv
)
print("== Creating a TensorBoard summary writer...")
writer = SummaryWriter(log_dir=exp_path)
print("IT'S DANGEROUS TO GO ALONE! TAKE THIS.")
obs = env.reset().to(device)
best_perf = -np.inf
num_updates = args.num_steps // args.rollout_steps // args.num_envs
for i in tqdm(range(num_updates)):
obs = agent.rollout(obs, env, data)
info = optimizer.update(data)
lr = optimizer.update_lr(i, num_updates)
# Compute mean total reward during the rollout.
reward = data.reward.sum(dim=0).mean(dim=0).item()
# Evaluate the agent.
perf = play(eval_env, agent, device, repeat=args.num_eval)
if perf > best_perf:
model_path = os.path.join(exp_path, f'{agent.__class__.__name__}.pt')
pt.save(agent.state_dict(), model_path)
best_perf = perf
# Log training progress.
step = i * args.rollout_steps * args.num_envs
writer.add_scalar('Train/lr', lr, step)
writer.add_scalar('Train/epochs', info['num_epochs'], step)
writer.add_scalar('Train/loss/policy', info['policy_loss'], step)
writer.add_scalar('Train/loss/value', info['value_loss'], step)
writer.add_scalar('Train/loss/entropy', info['entropy'], step)
writer.add_scalar('Train/loss/total', info['total_loss'], step)
writer.add_scalar('Train/reward/mean', reward, step)
writer.add_scalar('Eval/reward/mean', perf, step)
writer.add_scalar('Eval/reward/best', best_perf, step)
env.close()
eval_env.close()
writer.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env-id', type=str, default='BipedalWalker-v2')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--num-envs', type=int, default=8)
parser.add_argument('--hid-dim', type=int, default=64)
parser.add_argument('--num-steps', type=int, default=4000000)
parser.add_argument('--rollout-steps', type=int, default=2048)
parser.add_argument('--num-epochs', type=int, default=10)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--lr_max', type=float, default=3e-4)
parser.add_argument('--lr_min', type=float, default=1e-4)
parser.add_argument('--eps', type=float, default=0.2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--lam', type=float, default=0.95)
parser.add_argument('--alpha', type=float, default=0.0)
parser.add_argument('--value-coef', type=float, default=0.5)
parser.add_argument('--entropy-coef', type=float, default=0.0)
parser.add_argument('--max-grad-norm', type=float, default=0.5)
parser.add_argument('--target-kldiv', type=float, default=0.02)
parser.add_argument('--num-eval', type=int, default=5)
args = parser.parse_args(); pretty_args(args)
main(args)
``` |
{
"source": "jinyeom/torchrt",
"score": 2
} |
#### File: torchrt/torchrt/convert.py
```python
from pathlib import Path
from typing import Tuple, Union
from torch import nn
from .onnx import export_onnx
from .tensorrt import TensorRTModule
def convert(
model: nn.Module,
onnx_path: Union[str, Path],
engine_path: Union[str, Path],
input_shape: Tuple[int, ...],
opset_version: int = 11,
verbose: bool = False,
simplify: bool = True,
max_batch_size: int = 1,
max_workspace_size: int = 1 << 25,
fp16_mode: bool = False,
force_rebuild: bool = False,
) -> TensorRTModule:
input_shape = (max_batch_size, *input_shape)
onnx_path = export_onnx(
model,
onnx_path,
input_shape,
opset_version=opset_version,
verbose=verbose,
simplify=simplify,
)
rt_module = TensorRTModule(
onnx_path,
engine_path,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
fp16_mode=fp16_mode,
force_rebuild=force_rebuild,
)
return rt_module
``` |
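Assuming the package's `export_onnx` and `TensorRTModule` are available and a TensorRT runtime is installed, `convert` wraps an ordinary torch module in a single call. A hedged sketch with a toy model and placeholder file names follows; the call convention on the returned module mirrors the `self.model([inputs])` usage in `retinanet.py` earlier in this collection.

```python
# Sketch: wrapping a toy torch model with the convert() helper above. File names are
# placeholders, a working TensorRT installation is assumed, and the call convention on the
# returned module mirrors the `self.model([inputs])` usage in retinanet.py earlier on.
import torch
from torch import nn
from torchrt.convert import convert

model = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
    nn.Flatten(), nn.Linear(8 * 32 * 32, 10),
)
rt_model = convert(
    model.eval(),
    onnx_path="model.onnx",
    engine_path="model.engine",
    input_shape=(3, 32, 32),      # per-sample shape; the batch axis comes from max_batch_size
    max_batch_size=1,
    fp16_mode=False,
)
out = rt_model([torch.rand(1, 3, 32, 32)])
```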
{
"source": "jinyiabc/china_stock_lib",
"score": 3
} |
#### File: src/helper/mysql_dbconnection.py
```python
import sys
import os
from importlib import resources
from _mysql_connector import MySQLInterfaceError
from sqlalchemy import create_engine
import config
# from helper import config
with resources.path('helper', 'mysql.cfg') as p:
resource_path = str(p)
if os.path.isfile('mysql.cfg'):
cfg = config.Config('mysql.cfg')
else:
cfg = config.Config(resource_path)
def mysql_dbconnection(database=None):
'''
database_url = mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>
'''
if database is None:
database_url = 'mysql+mysqlconnector://{}:{}@{}?charset=utf8mb4'.format(cfg['user'], cfg['password'],
cfg['host'])
else:
database_url = 'mysql+mysqlconnector://{}:{}@{}/{}?charset=utf8mb4'.format(cfg['user'], cfg['password'], cfg['host'], database)
sqlEngine = create_engine(database_url, pool_recycle=3600)
if cfg['password'] == "<PASSWORD>":
print(f"Please modify mysql.cfg file under {resource_path} to connect mysql properly.")
sys.exit()
return sqlEngine.connect()
if __name__ == '__main__':
import pandas as pd
test = 'READ' # Set to 'WRITE' to test.
if test == 'WRITE':
userVitals = {"UserId": ["xxxxx", "yyyyy", "zzzzz", "aaaaa", "bbbbb", "ccccc", "ddddd"],
"UserFavourite": ["Greek Salad", "Philly Cheese Steak", "Turkey Burger", "Crispy Orange Chicken",
"Atlantic Salmon", "Pot roast", "Banana split"],
"MonthlyOrderFrequency": [5, 1, 2, 2, 7, 6, 1],
"HighestOrderAmount": [30, 20, 16, 23, 20, 26, 9],
"LastOrderAmount": [21, 20, 4, 11, 7, 7, 7],
"LastOrderRating": [3, 3, 3, 2, 3, 2, 4],
"AverageOrderRating": [3, 4, 2, 1, 3, 4, 3],
"OrderMode": ["Web", "App", "App", "App", "Web", "Web", "App"],
"InMedicalCare": ["No", "No", "No", "No", "Yes", "No", "No"]};
tableName = "UserVitals"
dataFrame = pd.DataFrame(data=userVitals)
dbConnection = mysql_dbconnection(database='test1')
try:
frame = dataFrame.to_sql(tableName, dbConnection, if_exists='fail');
except ValueError as vx:
print(vx)
except Exception as ex:
print(ex)
else:
print("Table %s created successfully." % tableName);
finally:
dbConnection.close()
pass
if test == 'READ':
dbConnection = mysql_dbconnection('test1')
frame = pd.read_sql("select * from uservitals", dbConnection);
pd.set_option('display.expand_frame_repr', False)
print(frame)
dbConnection.close()
```
#### File: src/helper/script.py
```python
import argparse
from helper.WSDLoader import WSDLoader
from helper.WSETLoader import WSETLoader
from helper.WSSLoader import WSSLoader
from helper.mysql_dbconnection import mysql_dbconnection
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Wind API.')
parser.add_argument('-s',
'--start_date',
# action='store_true',
dest='start_date',
required=True,
help='start date')
parser.add_argument('-e',
'--end_date',
# action='store_true',
dest='end_date',
required=True,
help='end date')
parser.add_argument('-t',
'--table_name',
# action='store_true',
dest='table_name',
required=True,
help='table name')
parser.add_argument('-d',
'--database',
# action='store_true',
dest='database',
required=True,
help='database')
parser.add_argument('-se',
'--sector',
# action='store_true',
dest='sector',
required=False,
help='sector')
parser.add_argument('--github', action='store_true', default=False,
help='Updoad to Github')
parser.add_argument('--wind_codes', '-w', action='store',
required=False, default='000001.SZ,110034.SH,110055.SH',
help='wind codes')
return parser.parse_args(args)
def run_wsd(args=None):
res = parse_args(args)
loader = WSDLoader(res.start_date, res.end_date, res.database, res.table_name)
wind_codes = loader.get_windcodes()
if type(wind_codes) is not int:
loader.fetch_historical_data(wind_codes)
else:
print('ErrorCode:', wind_codes)
def run_wset(args=None):
res = parse_args(args)
loader = WSETLoader(res.start_date, res.end_date, res.database, res.table_name, res.sector)
loader.fetch_historical_data(UPLOAD_GITHUB=res.github)
def run_wss(args=None):
res = parse_args(args)
wind_codes = res.wind_codes.split(sep=',')
loader = WSSLoader(res.start_date, res.end_date, res.database, res.table_name)
loader.fetch_historical_data(wind_codes)
keyarg = {'table_name': res.table_name}
set_data_type = (
"ALTER TABLE {table_name} MODIFY `wind_code` VARCHAR(10);"
).format(**keyarg)
set_primary = (
"ALTER TABLE {table_name} ADD PRIMARY KEY (`rpt_date`,`wind_code`);"
).format(**keyarg)
db_engine = mysql_dbconnection(database=res.database)
with db_engine.connect() as con:
con.execute(set_data_type)
con.execute(set_primary)
```
#### File: test/bond/test_time_factor.py
```python
from src.helper.bond import time_factors
def test_d30360e():
settlement_date = (2018, 12, 15)
coupon_schedule = [(2019,3,1),
(2020,3,1),
(2021,3,1),
(2022,3,1),
(2023,3,1)]
coupon_freq = 1
time_factors0 = time_factors(settlement_date,
coupon_schedule,
coupon_freq, convention='d30360e')
print(time_factors0) # 0.2111111111111111
assert time_factors0[0] == 0.2111111111111111
def test_act_isda():
settlement_date = (2018, 12, 15)
coupon_schedule = [(2019,3,1),
(2020,3,1),
(2021,3,1),
(2022,3,1),
(2023,3,1)]
coupon_freq = 1
time_factors0 = time_factors(settlement_date,
coupon_schedule,
coupon_freq, convention='act_isda')
print(time_factors0) # 0.2054794520547945
assert time_factors0[0] == 0.2054794520547945
def test_act_afb():
settlement_date = (2018, 12, 15)
coupon_schedule = [(2019,3,1),
(2020,3,1),
(2021,3,1),
(2022,3,1),
(2023,3,1)]
coupon_freq = 1
time_factors0 = time_factors(settlement_date,
coupon_schedule,
coupon_freq, convention='act_afb') # unexpected keyword argument 'matu'
print(time_factors0) # 0.2054794520547945
assert time_factors0[0] == 0.2054794520547945
def test_30365():
settlement_date = (2018, 12, 15)
coupon_schedule = [(2019,3,1),
(2020,3,1),
(2021,3,1),
(2022,3,1),
(2023,3,1)]
coupon_freq = 1
time_factors0 = time_factors(settlement_date,
coupon_schedule,
coupon_freq, convention='30365') # unexpected keyword argument 'matu'
print(time_factors0) # 0.20821917808219179
assert time_factors0[0] == 0.20821917808219179
def test_default():
settlement_date = (2018, 12, 15)
coupon_schedule = [(2019,3,1),
(2020,3,1),
(2021,3,1),
(2022,3,1),
(2023,3,1)]
coupon_freq = 1
time_factors0 = time_factors(settlement_date,
coupon_schedule,
coupon_freq)
print(time_factors0) # 0.2111111111111111
assert time_factors0[0] == 0.2111111111111111
``` |
{
"source": "jinyiabc/machine_learning",
"score": 3
} |
#### File: jinyiabc/machine_learning/homework5-8and9.py
```python
import numpy as np
# machine limits epsilon
eps = np.finfo(float).eps
def gradient_descent(w, x, y):
"Logistic regression: e(w) = ln(1+e**(-y*wT*x)"
err0 = -y*x[0]/(1 + np.exp(y*np.dot(w, x)))
err1 = -y*x[1]/(1 + np.exp(y*np.dot(w, x)))
err2 = -y*x[2]/(1 + np.exp(y*np.dot(w, x)))
return np.array([err0, err1, err2])
sum_out = 0
sum_iter = 0
for i in range(100):
data_set = np.random.uniform(low=-1, high=1.0+eps, size=(2,2))
# y = m*x + c
A = np.column_stack((data_set[:,0], np.ones(2)))
y = data_set[:,1]
m, c = np.linalg.lstsq(A, y, rcond=None)[0]
w0 = np.array([m, -1, c])
sample = np.random.uniform(low=-1, high=1.0+eps, size=(100,2))
X = np.column_stack((sample, np.ones(100)))
Y = np.sign(np.dot(X, w0))
cur_w = np.zeros(3)
pre_w1 = np.ones(3)
precison = 0.01
iter = 0
while np.linalg.norm((pre_w1 - cur_w)) >= precison:
pre_w1 = cur_w
shuffle = np.random.choice(100, 100, replace=False)
eta = 0.01
for i in shuffle:
pre_w = cur_w
GD = gradient_descent(pre_w, X[i], Y[i])
cur_w = pre_w - eta*GD
iter +=1
#print(cur_w, np.linalg.norm((pre_w - cur_w)))
A0 = np.column_stack((data_set, np.ones(2)))
#print("test:",np.dot(A0, cur_w),np.dot(A0, w0))
#print("precision", np.linalg.norm((pre_w - cur_w)))
#print("iteration:", iter)
sum_iter = sum_iter + iter
def err(w, x, y):
"cross entropy"
result = np.log((1+np.exp(-y*np.dot(w, x))))
return result
N1 = 100
out_of_sample = np.random.uniform(low=-1, high=1.0+eps, size=(N1, 2))
#X2 = out_of_sample[:,0]
#Y2 = out_of_sample[:,1]
#YY = np.sign(Y2 - m*X2 - c)
XX = np.column_stack((out_of_sample, np.ones(N1)))
YY = np.sign(np.dot(XX, w0))
sum_err = 0
for j in range(N1):
sum_err = sum_err + err(cur_w, XX[j], YY[j])
#print("E_out:", sum_err/N1)
sum_out = sum_out + sum_err/N1
print("E_out:", sum_out/100)
print("iteration:", sum_iter/100)
# E_out: 0.10693970292826682
# iteration: 336.27
```
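The per-example error optimized above is e(w) = ln(1 + e^(-y w^T x)), and `gradient_descent` returns its analytic gradient -y x / (1 + e^(y w^T x)). A small self-contained check of that formula against a central-difference estimate (the random w, x and y = 1 below are arbitrary):

```python
# Self-contained check of the analytic gradient used by gradient_descent() above:
# e(w) = ln(1 + exp(-y * w.T x)) has gradient -y * x / (1 + exp(y * w.T x)).
import numpy as np

def err(w, x, y):
    return np.log(1 + np.exp(-y * np.dot(w, x)))

def grad(w, x, y):
    return -y * x / (1 + np.exp(y * np.dot(w, x)))   # matches the err0/err1/err2 terms above

rng = np.random.RandomState(0)
w, x, y = rng.randn(3), rng.randn(3), 1.0

h = 1e-6
numeric = np.array([(err(w + h * e, x, y) - err(w - h * e, x, y)) / (2 * h) for e in np.eye(3)])
print(np.max(np.abs(grad(w, x, y) - numeric)))       # tiny, on the order of 1e-9
```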
#### File: jinyiabc/machine_learning/homeworks6-6.py
```python
import numpy as np
import pandas as pd
from numpy.linalg import inv
from numpy import transpose, matmul, dot
df1 = pd.read_csv('in.csv')
df2 = pd.read_csv('out.csv')
#df1['x1']
#df1.loc[32]
c = np.ones(35)
x1 = df1['x1']
x2 = df1['x2']
x1_power = df1['x1']**2
x2_power = df1['x2']**2
x1x2 = df1['x1']*df1['x2']
x1_x2 = abs(df1['x1'] - df1['x2'])
x1__x2 = abs(df1['x1'] + df1['x2'])
y = df1['y']
# df2 = pd.DataFrame(np.column_stack((c, x1, x2, x1_power, x2_power, x1x2, x1_x2, x1__x2)), columns=['1', 'x1', 'x2', 'x1_power', 'x2_power', 'x1x2', 'x1-x2', 'x1+x2'])
A = np.column_stack((c, x1, x2, x1_power, x2_power, x1x2, x1_x2, x1__x2))
# A*w = y
#E_IN = np.count_nonzero(np.sign(np.dot(A, w)) != y)/35.0
#print(E_IN)
#k = -1
def err(z, y, w, k):
result = np.dot(np.transpose(np.dot(z, w) - y), (np.dot(z, w) - y))/(y.size)
lamda = 10**k
#lamda = 0
result+=lamda/y.size*np.dot(w, w)
return result
def der(z, y, w):
result = np.dot(np.transpose(z), (np.dot(z, w) - y))/(y.size)
return result
k = 1
cur_w = np.linalg.lstsq(A, y, rcond=None)[0]
pre_w = np.zeros(8)
#cur_w = np.zeros(8)
#print("initialization E_IN", err(A, y, cur_w, k))
eta = 0.01
precision = 10**(-7)
N = y.size
lamda = 10**(-k)
max_iters = 10000
itr = 0
# Slides 12: 11/21
temp = inv(dot(transpose(A), A) + lamda*np.identity(8))
cur_w = matmul(matmul(temp, transpose(A)), y)
E_IN = np.count_nonzero(np.sign(np.dot(A, cur_w)) != y)/35.0
print("E_IN:", E_IN)
#print("regression err:", err(A, y, cur_w, -k))
print("k:", -k, "iteration:", itr)
c = np.ones(250)
x1 = df2['x1']
x2 = df2['x2']
x1_power = df2['x1']**2
x2_power = df2['x2']**2
x1x2 = df2['x1']*df2['x2']
x1_x2 = abs(df2['x1'] - df2['x2'])
x1__x2 = abs(df2['x1'] + df2['x2'])
y = df2['y']
A = np.column_stack((c, x1, x2, x1_power, x2_power, x1x2, x1_x2, x1__x2))
E_OUT = np.count_nonzero(np.sign(np.dot(A, cur_w)) != y)/250.0
print("E_OUT:", E_OUT)
#E_IN: 0.02857142857142857
#k: -1 iteration: 0
#E_OUT: 0.056
``` |
{
"source": "jinyier/ai_pointnet_attack",
"score": 2
} |
#### File: jinyier/ai_pointnet_attack/attack.py
```python
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import argparse
import importlib
import logging
import os
import re
import sys
import numpy as np
import random as rd
import tensorflow as tf
import argsutils
import meshio
import modelnet_dataset
from tf_ops.grouping import tf_grouping
from tf_ops.interpolation_3d import tf_interpolate
from tf_ops.sampling import tf_sampling
def load_graph(frozen_graph, raw_num_points, num_points, random_distortions):
graph = tf.Graph()
with graph.as_default():
# Input placeholders
# Point cloud (Point set)
# The placeholder has no batch axis since we only deal with a pointcloud in each attack process
pointcloud_pl = tf.placeholder(tf.float32, shape=(raw_num_points, 3), name='pointcloud_orig')
# Normal vector of point clouds
pointnormal_pl = tf.placeholder(tf.float32, shape=(raw_num_points, 3), name='pointnormal_orig')
# Rotation angle in degree [0, 360), used for random distortion
rotate_matrix_pl = tf.placeholder(tf.float32, shape=(None, 3, 3), name='rotate_matrix')
# Used for batch normalization layer
is_training_pl = tf.placeholder_with_default(False, shape=(), name='is_training_default')
# L0 mask for perturbation
l0_mask_pl = tf.placeholder(tf.float32, shape=(raw_num_points), name='l0_mask')
l0_mask = tf.stack([l0_mask_pl] * 3, axis=-1)
# Variable to optimize
perturb = tf.Variable(np.zeros((raw_num_points, 3)), dtype=tf.float32, name='perturb')
# l0 masked perturbation
perturb_masked = perturb * l0_mask
# Modified point clouds
pointcloud = tf.math.add(pointcloud_pl, perturb_masked)
# Output of adversarial pointclouds
assert pointcloud.shape[0].value == raw_num_points
pointcloud_output = tf.identity(pointcloud, name='pointcloud_pert')
# Random sample for model input
assert pointcloud.shape[0].value == raw_num_points
pointcloud_sampled = random_sample_pointcloud(pointcloud, num_samples=num_points)
pointcloud_sampled = tf.identity(pointcloud_sampled, name='pointcloud_sampled')
# Random sample for knn distance
assert pointcloud.shape[0].value == raw_num_points
pointcloud_knn_sampled = tf.identity(pointcloud_sampled, name='pointcloud_knn_sampled')
# Normalize
assert pointcloud_sampled.shape[0].value == num_points
pointcloud = normalize_pointcloud(pointcloud_sampled)
if random_distortions > 0:
batch_size = tf.shape(rotate_matrix_pl)[0]
pointclouds = tf.broadcast_to(pointcloud, shape=(batch_size, num_points, 3))
pointclouds = tf.linalg.matmul(pointclouds, rotate_matrix_pl)
else:
pointclouds = tf.expand_dims(pointcloud, axis=0)
assert pointclouds.shape[1].value == num_points
graphdef = tf.GraphDef()
with tf.io.gfile.GFile(frozen_graph, 'rb') as fid:
serialized_graph = fid.read()
graphdef.ParseFromString(serialized_graph)
tf.import_graph_def(graphdef, name='', input_map={
'pointclouds': pointclouds,
'is_training': is_training_pl})
with graph.as_default():
feats = graph.get_tensor_by_name('fc2/BiasAdd:0')
feats = tf.identity(feats, name='feats')
logits = graph.get_tensor_by_name('fc3/BiasAdd:0')
logits = tf.identity(logits, name='logits')
probs = tf.nn.softmax(logits, axis=-1, name='probs')
with graph.as_default():
tensors = {'pc_orig': graph.get_tensor_by_name('pointcloud_orig:0'),
'pc_pert': graph.get_tensor_by_name('pointcloud_pert:0'),
'pc_samp': graph.get_tensor_by_name('pointcloud_sampled:0'),
'knn_samp': graph.get_tensor_by_name('pointcloud_knn_sampled:0'),
'nv_orig': graph.get_tensor_by_name('pointnormal_orig:0'),
'rot_mat': graph.get_tensor_by_name('rotate_matrix:0'),
'l0_mask': graph.get_tensor_by_name('l0_mask:0'),
'logits': graph.get_tensor_by_name('logits:0'),
'probs': graph.get_tensor_by_name('probs:0'),
'feats': graph.get_tensor_by_name('feats:0'),
'pert': perturb}
return graph, tensors
def detect_model_parameters(frozen_graph):
graph = tf.Graph()
with graph.as_default():
graph_def = tf.GraphDef()
with tf.io.gfile.GFile(frozen_graph, 'rb') as fid:
serialized_graph = fid.read()
graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(graph_def, name='imported')
ph = graph.get_tensor_by_name('imported/pointclouds:0')
num_points = ph.get_shape()[1].value
use_normal = (ph.get_shape()[2].value == 6)
return num_points, use_normal
def load_pointcloud(fn, shape2id, logger):
logger.info('Reading file {}...'.format(fn))
bn = os.path.basename(fn)
shapename = re.split('[._]', bn)[0]
groundtruth = shape2id[shapename]
logger.info(' Groundtruth: {}'.format(shapename))
pointcloud = meshio.loadmesh(fn)
pointnormal = pointcloud[..., 3:]
pointcloud = pointcloud[..., :3]
if pointnormal.size == 0:
logger.warning(' Warning: Input data has no normal information')
logger.warning(' -> Fill them with all zeros')
pointnormal = np.zeros_like(pointcloud)
return groundtruth, pointcloud, pointnormal
def random_sample_and_add_points(points, normals, raw_num_points, logger):
# raw_num_points: final number of points
num_points = points.shape[0]
num_adds = raw_num_points - num_points
if num_adds > 0:
logger.info(' Expected #points > current #points. Points added: {}'.format(num_adds))
round_add = num_adds // num_points + 1
points = np.concatenate([points] * round_add, axis=0)
normals = np.concatenate([normals] * round_add, axis=0)
ind_fixed = np.arange(num_points)
ind_range = np.arange(points.shape[0], dtype=np.int32)
np.random.shuffle(ind_range)
ind_range = np.concatenate([ind_fixed, ind_range], axis=0)
points = points[ind_range[:raw_num_points]]
normals = normals[ind_range[:raw_num_points]]
else:
logger.info(' Expected #points <= current #points. Choose a subset')
ind_range = np.arange(points.shape[0], dtype=np.int32)
np.random.shuffle(ind_range)
points = points[ind_range[:raw_num_points]]
normals = normals[ind_range[:raw_num_points]]
return points, normals
def random_sample_pointcloud(points, num_samples):
# points: shape n x 3, where n is num_points
num_pts = points.get_shape()[0].value
ind = tf.range(num_pts)
rind = tf.random_shuffle(ind)[:num_samples]
rpoints = tf.gather(points, rind, axis=0)
return rpoints
def normalize_pointcloud(points):
# points: shape n x 3, where n is num_points
# tf implementation of modelnet_dataset.pc_normalize()
num_pts = points.get_shape()[0].value
centroid = tf.reduce_mean(points, axis=0)
points = points - centroid
max_pts = tf.reduce_max(tf.reduce_sum(tf.square(points), axis=1))
points = tf.math.divide(points, max_pts)
return points
def chamfer_distance(points_x, points_y):
# chamfer distance from point set x to point set y
    # x will be tiled to have shape [#pt_x, #pt_y, 3]
num_points = tf.shape(points_y)[0]
points_x = tf.expand_dims(points_x, axis=1)
points_x = tf.tile(points_x, multiples=[1, num_points, 1])
chamfer = tf.square(points_x - points_y) # n x n x 3
chamfer = tf.reduce_sum(chamfer, axis=-1) # n x n
chamfer = tf.reduce_min(chamfer, axis=-1) # n
chamfer = tf.reduce_sum(chamfer, axis=-1) # 1
return chamfer
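# Quick intuition for the one-sided distance above (illustrative sketch only): with identical
# point sets it evaluates to zero, and nudging one point of points_x by a small offset d while
# its nearest neighbor in points_y stays the same adds roughly ||d||^2 to the sum, e.g.
#   pts = tf.constant([[0., 0., 0.], [1., 0., 0.]])
#   chamfer_distance(pts + [[0.1, 0., 0.], [0., 0., 0.]], pts)  # evaluates to ~0.01 in a session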
def knn_outlier_distance(points, points_all, k=5, alpha=1.05):
# points: shape n x 3, where n is num_points
num_points = points_all.shape[0].value
points_now = tf.expand_dims(points, axis=1)
points_now = tf.tile(points_now, multiples=[1, num_points, 1])
distance = tf.square(points_now - points_all) # n x n x 3
distance = tf.reduce_sum(distance, axis=-1) # n x n
values, indices = tf.nn.top_k(tf.negative(distance), k=k+1)
values, indices = values[..., 1:], indices[..., 1:] # n x k
values = tf.negative(values)
avg_distance = tf.reduce_mean(values, axis=-1) # n
knn_mean = tf.reduce_mean(avg_distance) # 1
knn_stddev = tf.math.reduce_std(avg_distance) # 1
threshold = knn_mean + alpha * knn_stddev
condition = tf.math.greater_equal(avg_distance, threshold)
penalty = tf.where(condition, avg_distance, tf.zeros_like(avg_distance))
penalty = tf.reduce_sum(penalty)
return penalty
def gradients_clipping(gradvars, normals):
# gradvars: a list returned by tf.train.Optimizer.compute_gradients()
# normals: shape n x 3, normal vector of the object
assert len(gradvars) == 1 # w.r.t. perturbation
gradvalue = gradvars[0][0]
gradname = gradvars[0][1]
inner_prod = tf.reduce_sum(tf.multiply(tf.negative(gradvalue), normals))
preserved = tf.math.greater_equal(inner_prod, tf.constant(0.0))
gradvalue = tf.where(preserved, gradvalue, tf.zeros_like(gradvalue))
return [(gradvalue, gradname)]
def generate_random_rotations(batch_size):
degs = []
mats = []
for i in range(batch_size):
degs.append(rd.randrange(0, 360))
for deg in degs:
rad = np.deg2rad(deg)
cosval = np.cos(rad)
sinval = np.sin(rad)
mats.append([
[ cosval, 0.0, sinval],
[ 0.0, 1.0, 0.0],
[-sinval, 0.0, cosval]])
return np.array(mats)
def build_perturbation_clipping_network(graph, tensors, project='dir'):
if project not in ['dir', 'norm', 'none']:
raise ValueError('Invalid projection type: {}'.format(project))
with graph.as_default():
cc_linf_pl = tf.placeholder(tf.float32, shape=(), name='cc_linf')
tensors['cc_linf'] = cc_linf_pl
normal = tensors['nv_orig']
perturb = tensors['pert']
# compute inner product
inner_prod = tf.reduce_sum(normal * perturb, axis=-1) # shape: n
condition_inner = tf.math.greater_equal(inner_prod, tf.constant(0.0))
if project == 'dir':
# 1) vng = Normal x Perturb
# 2) vref = vng x Normal
# 3) Project Perturb onto vref
# Note that the length of vref should be greater than zero
vng = tf.linalg.cross(normal, perturb)
vng_len = tf.sqrt(tf.reduce_sum(tf.square(vng), axis=-1))
vref = tf.linalg.cross(vng, normal)
vref_len = tf.sqrt(tf.reduce_sum(tf.square(vref), axis=-1))
vref_len_stack = tf.stack([vref_len] * 3, axis=-1)
# add 1e-6 to avoid dividing by zero
perturb_projected = perturb * vref / (vref_len_stack + 1e-6)
# if the length of vng < 1e-6, let projected vector = (0, 0, 0)
            # i.e. Normal and Perturb point in exactly opposite directions
condition_vng = tf.math.greater(vng_len, tf.constant(1e-6))
perturb_projected = tf.where(condition_vng, perturb_projected, tf.zeros_like(perturb_projected))
# if inner_prod < 0, let perturb be the projected ones
perturb = tf.where(condition_inner, perturb, perturb_projected)
elif project == 'norm':
# 1) Project Perturb onto normal
# 2) Choose based on inner product
normal_len = tf.sqrt(tf.reduce_sum(tf.square(normal), axis=-1))
normal_len_stacked = tf.stack([normal_len] * 3, axis=-1)
# the length of normal vector should always be one
perturb_projected = perturb * normal / (normal_len_stacked + 1e-6)
# if inner_prod < 0, let perturb be the projected ones
perturb = tf.where(condition_inner, perturb_projected, tf.zeros_like(perturb_projected))
else:
# without projection, let the perturb be (0, 0, 0) if inner_prod < 0
perturb = tf.where(condition_inner, perturb, tf.zeros_like(perturb))
# compute vector length
# if length > cc_linf, clip it
lengths = tf.sqrt(tf.reduce_sum(tf.square(perturb), axis=-1))
lengths_stacked = tf.stack([lengths] * 3, axis=-1) # shape: n x 3
# scale the perturbation vectors to length cc_linf
# except the ones with zero length
condition = tf.math.greater(lengths, tf.constant(1e-6))
perturb_scaled = tf.where(condition, perturb / lengths_stacked * cc_linf_pl, tf.zeros_like(perturb))
# check the length and clip if necessary
condition = tf.math.less_equal(lengths, cc_linf_pl)
perturb = tf.where(condition, perturb, perturb_scaled)
        # assign operator for updating the perturbation variable
perturb_assign = tf.assign(tensors['pert'], perturb)
tensors['pert_assign'] = perturb_assign
return graph, tensors
def build_normal_estimate_network(graph, tensors, k=3):
# k: number of neighbors used in kNN algorithm
with graph.as_default():
# Note that the first dimension should be the same between points_orig and normals_orig
points_pert = tf.placeholder(tf.float32, shape=(3), name='points_pert_single')
points_orig = tensors['pc_orig']
normals_orig = tensors['nv_orig']
distance = tf.square(points_orig - points_pert)
distance = tf.reduce_sum(distance, axis=-1)
values, indices = tf.nn.top_k(tf.negative(distance), k=k)
values = tf.negative(values) # k
normals_top1 = tf.gather(normals_orig, indices[0]) # 3
avg1_normals = tf.identity(normals_top1)
normals_topk = tf.gather(normals_orig, indices) # k x 3
avgk_normals = tf.reduce_mean(normals_topk, axis=0) # 3
avgk_lengths = tf.sqrt(tf.reduce_sum(tf.square(avgk_normals), axis=-1))
avgk_lengths = tf.stack([avgk_lengths] * 3, axis=-1)
avgk_normals = tf.divide(avgk_normals, avgk_lengths) # 3 (normalize the vector)
# If the points are not modified (distance = 0), use the normal directly from the original
# one. Otherwise, use the mean of the normals of the k-nearest points.
exact = tf.math.less(tf.math.abs(values[0]), tf.constant(1e-6))
normals_pert = tf.where(exact, avg1_normals, avgk_normals)
normals_pert = tf.identity(normals_pert, 'pointnormal_pert')
tensors['pc_pert_single'] = points_pert
tensors['nv_pert'] = normals_pert
return graph, tensors
def build_knn_centroid_network(graph, tensors, k=5):
with graph.as_default():
points_inp = tf.placeholder(tf.float32, shape=(None, 3), name='knn_res')
points_ref = tf.placeholder(tf.float32, shape=(None, 3), name='knn_ref')
num_inp = tf.shape(points_inp)[0]
num_ref = tf.shape(points_ref)[0]
tiled_inp = tf.expand_dims(points_inp, axis=1)
tiled_inp = tf.tile(tiled_inp, multiples=[1, num_ref, 1])
distance = tf.square(tiled_inp - points_ref) # np x no x 3
distance = tf.reduce_sum(distance, axis=-1) # np x no
values, indices = tf.nn.top_k(tf.negative(distance), k=k)
ref_points = tf.gather(points_ref, indices)
ref_points = tf.reduce_mean(ref_points, axis=1)
ref_points = tf.identity(ref_points, name='knn_cent_pt')
tensors['knn_inp'] = points_inp
tensors['knn_ref'] = points_ref
tensors['knn_cent'] = ref_points
return graph, tensors
def get_feat_vectors(sess, graph, tensors, guide_points):
feats = sess.run(tensors['feats'], feed_dict={
tensors['pc_orig']: guide_points,
tensors['l0_mask']: np.zeros(guide_points.shape[0])})
return feats[0]
def filter_inputfiles(inputfiles):
filtered_inputs = []
for fn in inputfiles:
success = True
try:
data = meshio.loadmesh(fn)
except:
success = False
if success:
filtered_inputs.append(fn)
return filtered_inputs
def random_select(iterable):
length = len(iterable)
i = rd.randint(0, length - 1)
return iterable[i]
def create_logger(logfile):
log_format = '%(asctime)s %(levelname)5s %(message)s'
logging.basicConfig(level=logging.INFO,
format=log_format,
filename=logfile)
return logging.getLogger(__name__)
def attack(inputfiles, model_path, raw_num_points, shape_names, attack_target, **kwargs):
# Uncomment this line for more logs from Tensorflow
tf.logging.set_verbosity(tf.logging.ERROR)
logfile = kwargs.get('logfile')
logger = create_logger(logfile)
normalize = kwargs.get('normalize')
clip_grad = kwargs.get('clip_grad')
loss_type = kwargs.get('loss_type')
random_distortions = kwargs.get('random_distortions')
logits_lower_bound = kwargs.get('logits_lower_bound')
update_period = kwargs.get('update_period')
optim_method = kwargs.get('optim_method')
max_iter = kwargs.get('max_iter')
learning_rate = kwargs.get('learning_rate')
cc_knn = kwargs.get('cc_knn')
cc_chamfer = kwargs.get('cc_chamfer')
cc_feats = kwargs.get('cc_feats')
cc_linf = kwargs.get('cc_linf')
outputdir = kwargs.get('outputdir')
batch_size = max(random_distortions, 1)
logger.info('Batch size: {} ({} random distortions)'.format(
batch_size, 'with' if random_distortions > 0 else 'without'))
if cc_linf is None:
cc_linf = 1e6
logger.info('Number of points selected from point clouds: {}'.format(raw_num_points))
logger.info('Loading graph at {}...'.format(model_path))
num_points, normal_channel = detect_model_parameters(model_path)
graph, tensors = load_graph(model_path, raw_num_points, num_points, random_distortions)
num_classes = len(shape_names)
shape2id = {}
for idx, name in enumerate(shape_names):
shape2id[name] = idx
logger.info('Build attack network...')
with graph.as_default():
# Input placeholders
pointcloud_pl = tensors['pc_orig']
pointnormal_pl = tensors['nv_orig']
rotate_matrix_pl = tensors['rot_mat']
l0_mask_pl = tensors['l0_mask']
# Groundtruth labels
label_pl = tf.placeholder(tf.int32, shape=(), name='label')
tensors['label'] = label_pl
# Attack type
targeted_attack_pl = tf.placeholder(tf.bool, shape=(), name='targeted_attack')
tensors['targeted'] = targeted_attack_pl
# Feature guide
guide_pl = tf.placeholder(tf.float32, shape=(256,), name='pointcloud_guide_pl')
clean_pl = tf.placeholder(tf.float32, shape=(256,), name='pointcloud_orig_feats_pl')
# Logits & probs layer
logits = tensors['logits']
probs = tensors['probs']
# Define losses
losses = {}
# Define logits loss
logger.info(' Logit loss type: {}'.format(
'<NAME>' if loss_type == 'cw' else 'Cross Entropy'))
target_onehot = tf.one_hot(label_pl, depth=num_classes)
if loss_type == 'cw':
real = tf.reduce_sum(logits * target_onehot, axis=-1)
other = tf.reduce_max(logits * (1.0 - target_onehot) - 1e6 * target_onehot, axis=-1)
logits_loss_untarget = tf.reduce_mean(real - other)
logits_loss_target = tf.reduce_mean(other - real)
logits_loss = tf.where(targeted_attack_pl, logits_loss_target, logits_loss_untarget)
logits_loss = tf.math.maximum(logits_loss, tf.constant(logits_lower_bound, dtype=tf.float32))
else:
ce_untarget = tf.argmax(logits - 1e6 * target_onehot, axis=-1)
ce_untarget = tf.one_hot(ce_untarget, depth=num_classes)
logits_loss_untarget = tf.losses.softmax_cross_entropy(onehot_labels=ce_untarget, logits=logits)
logits_loss_target = tf.losses.softmax_cross_entropy(onehot_labels=target_onehot, logits=logits)
logits_loss = tf.where(targeted_attack_pl, logits_loss_target, logits_loss_untarget)
losses['logits'] = logits_loss
feats = tensors['feats']
feats_target = tf.reduce_mean(tf.abs(feats - guide_pl))
feats_orig = tf.reduce_mean(tf.abs(feats - clean_pl))
feats_loss = tf.math.maximum(feats_target - feats_orig, tf.constant(0.0))
feats_loss = tf.where(targeted_attack_pl, feats_loss, tf.zeros_like(feats_loss))
losses['feat'] = cc_feats * feats_loss
# Define loss using Chamfer pseudo distance
pc_orig = tensors['pc_orig']
pc_pert = tensors['pc_samp'] # To avoid using a lot of memory, only consider sampled points
chamfer = chamfer_distance(pc_pert, pc_orig) # only consider distance of pert -> orig
chamfer_loss = cc_chamfer * chamfer
losses['chamfer'] = chamfer_loss
pc_pert = tensors['knn_samp']
pc_all = tensors['pc_pert']
knn_distance = knn_outlier_distance(pc_pert, pc_all, k=5, alpha=1.05)
knn_loss = cc_knn * knn_distance
losses['knn'] = knn_loss
# Total attack loss
attack_loss = tf.add_n(list(losses.values()))
losses['total'] = attack_loss
# Define optimizer
if optim_method == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
elif optim_method == 'adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
elif optim_method == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif optim_method == 'graddesc':
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
elif optim_method == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate)
elif optim_method == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
else:
raise ValueError('Unsupported optimizer: {}'.format(optim_method))
if clip_grad:
gradvars = optimizer.compute_gradients(attack_loss, var_list=[tensors['pert']])
gradvars = gradients_clipping(gradvars, pointnormal_pl)
train_step = optimizer.apply_gradients(gradvars)
else:
train_step = optimizer.minimize(attack_loss, var_list=[tensors['pert']])
tensors['pert_init'] = tf.variables_initializer([tensors['pert']])
tensors['optim_init'] = tf.variables_initializer(optimizer.variables())
tensors['init'] = tf.group([tensors['pert_init'], tensors['optim_init']])
# Build perturbation clipping network
graph, tensors = build_perturbation_clipping_network(graph, tensors)
# Build normal estimation network
graph, tensors = build_normal_estimate_network(graph, tensors)
# Create a Tensorflow session
logger.info('Create Tensorflow session...')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
with graph.as_default():
tensors['global_init'] = tf.global_variables_initializer()
sess = tf.Session(graph=graph, config=config)
sess.run(tensors['global_init'])
for fn in inputfiles:
# First initialize the variables including perturbation and the state of the optimizer
sess.run(tensors['global_init'])
l0_mask = np.ones(raw_num_points, dtype=np.float32)
# Prepare input data (one point cloud a time)
# Note that the point clouds returned may have different shapes (num_points)
# Always load normal channels
groundtruth, pointcloud, pointnormal = load_pointcloud(fn, shape2id, logger)
pointcloud, pointnormal = random_sample_and_add_points(
pointcloud, pointnormal, raw_num_points, logger)
        # Resolve the attack target for this point cloud (untargeted if no target class is given)
        attack_target_name = str(attack_target).lower()
        if attack_target is None or attack_target_name == 'none':
            label = groundtruth
            is_targeted = False
            logger.info('Untargeted attack (attack target: {})'.format(attack_target_name))
        elif attack_target_name in shape_names:
            label = shape2id[attack_target_name]
            is_targeted = True
            logger.info('Targeted attack (attack target: {})'.format(attack_target_name))
        else:
            raise ValueError('Attack target cannot be recognized: {}'.format(attack_target))
groundtruth_text = shape_names[groundtruth]
if attack_target_name == groundtruth_text:
logger.warning('Attack target is equal to groundtruth, skipped')
continue
# Get guide feats
clean_points_path = './data/modelnet40_normal_resampled/'
target_shape_name = shape_names[label]
target_file = os.listdir(os.path.join(clean_points_path, target_shape_name))
target_file = os.path.join(clean_points_path, target_shape_name, random_select(target_file))
guide_points = meshio.loadmesh(target_file)[..., :3]
guide_feats = get_feat_vectors(sess, graph, tensors, guide_points)
clean_feats = get_feat_vectors(sess, graph, tensors, pointcloud)
# Optimize
for it in range(0, max_iter + 1):
if it % update_period == 0:
logger.info('File {} > Target {} > Iter {} / {}:'.format(
os.path.basename(fn),
attack_target_name,
it,
max_iter))
rot_matrix = generate_random_rotations(batch_size)
sess.run(train_step, feed_dict={
pointcloud_pl: pointcloud,
pointnormal_pl: pointnormal,
rotate_matrix_pl: rot_matrix,
l0_mask_pl: l0_mask,
label_pl: label,
guide_pl: guide_feats,
clean_pl: clean_feats,
targeted_attack_pl: is_targeted})
if it % update_period == 0:
sess.run(tensors['pert_assign'], feed_dict={
tensors['nv_orig']: pointnormal,
tensors['cc_linf']: cc_linf})
# show modified new infinity norm
newpert = sess.run(tensors['pert'])
newpert = np.max(np.sqrt(np.sum(np.square(newpert), axis=-1)))
logger.info(' Current infinity norm: {}'.format(newpert))
if it % update_period == 0:
loss_value = sess.run(losses, feed_dict={
pointcloud_pl: pointcloud,
pointnormal_pl: pointnormal,
rotate_matrix_pl: rot_matrix,
l0_mask_pl: l0_mask,
label_pl: label,
guide_pl: guide_feats,
clean_pl: clean_feats,
targeted_attack_pl: is_targeted})
logger.info(' Loss: {}'.format(loss_value))
if it % update_period == 0:
prob_value = sess.run(probs, feed_dict={
pointcloud_pl: pointcloud,
pointnormal_pl: pointnormal,
rotate_matrix_pl: rot_matrix,
l0_mask_pl: l0_mask})
predict_id = np.argmax(prob_value, axis=-1)
predict_text = [shape_names[predid] for predid in predict_id]
predict_string = ' '.join(['({}, {})'.format(i, t) for i, t in zip(predict_id, predict_text)])
logger.info(' Predictions: {}'.format(predict_string))
if outputdir is not None:
logger.info('Writing result to directory: {}'.format(outputdir))
os.makedirs(outputdir, exist_ok=True)
# Get perturbed point cloud
pointcloud_update, pointcloud_perturb = sess.run(
[tensors['pc_pert'], tensors['pert']],
feed_dict={
pointcloud_pl: pointcloud,
l0_mask_pl: l0_mask})
# Get estimated point normals
pointnormal_update = []
for i in range(raw_num_points):
est_normal = sess.run(tensors['nv_pert'], feed_dict={
tensors['pc_pert_single']: pointcloud_update[i],
tensors['pc_orig']: pointcloud,
tensors['nv_orig']: pointnormal})
pointnormal_update.append(est_normal)
pointnormal_update = np.array(pointnormal_update)
pointcloud = pointcloud_update
pointnormal = pointnormal_update
# Concatenate
pointcloud = np.concatenate([pointcloud, pointnormal], axis=-1)
logger.info(' Output pointcloud shape: {}'.format(pointcloud.shape))
outfnid = re.split('[._]', os.path.basename(fn))[1]
outfn = '{}_{}_{}.xyz'.format(groundtruth_text, attack_target_name, outfnid)
outfn = os.path.join(outputdir, outfn)
logger.info(' Write file {} to {}'.format(fn, outfn))
meshio.savemesh(outfn, pointcloud)
sess.close()
return
def main():
# Argument Parser
parser = argparse.ArgumentParser(description='Adversarial Attack against PointNet++',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', type=str, required=True,
help='Path to the model checkpoint')
parser.add_argument('--raw-num-points', type=int, default=10000,
help='Number of points to select from the input point clouds')
parser.add_argument('--without-normalize', action='store_true',
                        help='No normalization will be applied on the input point clouds')
parser.add_argument('--optimizer', type=str, default='adam',
choices=['adadelta', 'adagrad', 'adam', 'graddesc', 'momentum', 'rmsprop'],
help='Optimizer to use (from Tensorflow)')
parser.add_argument('--learning-rate', type=float, default=1e-3,
help='Learning rate for the optimizer')
parser.add_argument('--clip-grad', action='store_true',
help='Clip the gradients to prevent moving points inside the object')
parser.add_argument('--target', type=str, default=None,
help='Attack target class for targeted attack')
parser.add_argument('--loss-type', type=str, default='cw', choices=['cw', 'ce'],
help='Loss to use, cw: Carlini and Wagner, ce: cross entropy')
parser.add_argument('--max-iter', type=int, default=2500,
help='Max iterations for optimization')
parser.add_argument('--random-distortions', type=int, default=0,
help='Number of random distortions (rotations) applied when optimizing, 0 to disable')
parser.add_argument('--logits-lower-bound', type=float, default=(-15.0),
help='Lower bound of the attack confidence')
parser.add_argument('--update-period', type=int, default=10,
help='Number of iterations to print information')
parser.add_argument('--cc-knn', type=float, default=5.0,
help='Coefficient for kNN smoothing loss')
parser.add_argument('--cc-chamfer', type=float, default=3.0,
help='Coefficient for Chamfer distance')
parser.add_argument('--cc-feats', type=float, default=0.0,
help='Coefficient for feature vector loss')
parser.add_argument('--cc-linf', type=float, default=0.1,
help='Coefficient for infinity norm')
parser.add_argument('--outputdir', type=str, default=None,
help='Output directory')
parser.add_argument('--log', type=str, default=None,
help='Path to log file')
parser.add_argument('files', type=str, nargs='+',
help='Point cloud files to process')
args = parser.parse_args()
shape_names = [line.strip() for line in
open('./labels/shape_names.txt')]
model_path = args.model
raw_num_points = args.raw_num_points
normalize = (not args.without_normalize)
attack_target = args.target.lower() if args.target is not None else None
inputfiles = argsutils.get_input_files(args.files)
attack(inputfiles,
model_path,
raw_num_points,
shape_names,
attack_target,
optim_method=args.optimizer,
learning_rate=args.learning_rate,
normalize=normalize,
clip_grad=args.clip_grad,
loss_type=args.loss_type,
max_iter=args.max_iter,
random_distortions=args.random_distortions,
logits_lower_bound=args.logits_lower_bound,
update_period=args.update_period,
cc_knn=args.cc_knn,
cc_chamfer=args.cc_chamfer,
cc_feats=args.cc_feats,
cc_linf=args.cc_linf,
outputdir=args.outputdir,
logfile=args.log)
if __name__ == '__main__':
main()
``` |
{
"source": "jinyier/EMSim",
"score": 3
} |
#### File: src/Current Analysis/logic_cell_modeling.py
```python
import argparse
import re
import sys
import multiprocessing as mp
import numpy as np
import time
import math
start = time.process_time()
def get_cell_pin_info(parasitic_netlist_path):
"""
Get the pin locations of each logic cell
    :param parasitic_netlist_path: path to the parasitic netlist (.dspf) file
    :return: cell names and their x and y pin coordinates
"""
parasitic_netlist_file = open(parasitic_netlist_path, 'r')
# cell name in the parasitic netlist
name = []
# cell pin_x
X = []
# cell pin_y
Y = []
for line in parasitic_netlist_file.readlines():
# search the instance pin line
        if re.search(r'\*|I \(X.*', line):
# split on spaces
split_line = line.split(" ")
if len(split_line) > 6:
# judge if the line has string 'VDD' or 'VSS'
judge_false = "VDD" == split_line[3] or "VSS" == split_line[3]
# judge if the line has string 'X..' (cell name)
judge_true = "X" in split_line[2]
# exclude the line with string 'VDD' and 'VSS'
judge_true_again = "X" in split_line[4]
# exclude the parasitic transistor
if judge_false:
continue
elif judge_true and judge_true_again:
# update data
name.append(split_line[2])
X.append(float(split_line[6]))
Y.append(float(split_line[7].strip(')\n')))
parasitic_netlist_file.close()
X = np.asarray(X)
Y = np.asarray(Y)
name_reorder = []
X_reorder = []
Y_reorder = []
name_classify = list(set(name))
# exclude the repetitive data
for current_num_classify in range(len(name_classify)):
index_list = [index for index, item in enumerate(name) if item == name_classify[current_num_classify]]
# the minimum data is preserved
X_min = np.min(X[index_list])
Y_min = np.min(Y[index_list])
name_reorder.append(name[index_list[0]])
X_reorder.append(X_min)
Y_reorder.append(Y_min)
assert len(X_reorder) == len(Y_reorder), "Error: please check the instance reordering"
return name_reorder, X_reorder, Y_reorder
def match_cell_info(def_path, cell_name, cell_pin_X, cell_pin_Y):
'''
    match the cell info from the parasitic netlist file and the def file
    :param def_path: path to the design .def file
    :param cell_name: cell names of the design
    :param cell_pin_X: x pin coordinates of all cells
    :param cell_pin_Y: y pin coordinates of all cells
    :return: reordered cell names and hierarchical instance names
'''
# open and read the def file
def_file = open(def_path, 'r')
# create an empty list
instance_hierarchy = []
instance_type = []
instance_x0 = []
instance_y0 = []
# initialize the counter for instance
instance_count = 0
start_component = False
for line in def_file:
# the end of the component lines to check
if re.match(r'END COMPONENTS', line):
start_component = False
if start_component:
if 'FILL' in line:
continue
split_line = line.split(" ")
# check if the data length > 7
if len(split_line) > 7 and split_line[5] in ['TIMING', 'DIST']:
instance_hierarchy.append(split_line[1])
instance_type.append(split_line[2])
instance_x0.append(float(split_line[9])/2000)
instance_y0.append(float(split_line[10])/2000)
instance_count += 1
elif len(split_line) > 7:
instance_hierarchy.append(split_line[1])
instance_type.append(split_line[2])
instance_x0.append(float(split_line[6])/2000)
instance_y0.append(float(split_line[7])/2000)
instance_count += 1
else:
continue
# the beginning of the component lines to check
elif re.match(r'COMPONENTS [\d]+', line):
start_component = True
print('Total number of logic cells:', instance_count)
instance_y0 = np.asarray(instance_y0)
instance_x0 = np.asarray(instance_x0)
instance_hierarchy = np.asarray(instance_hierarchy)
# the index of sorted data, from small to large
sort_index_vertical = np.argsort(instance_y0)
instance_y0_reorder = instance_y0[sort_index_vertical]
instance_x0_reorder = instance_x0[sort_index_vertical]
instance_hierarchy_reorder = instance_hierarchy[sort_index_vertical]
# the unique data without repetition
sort_unique = np.unique(instance_y0_reorder)
cell_name = np.asarray(cell_name)
cell_pin_X= np.asarray(cell_pin_X)
cell_pin_Y= np.asarray(cell_pin_Y)
# the index of sorted data, from small to large
sort_index_vertical = np.argsort(cell_pin_Y)
cell_pin_Y_reorder = cell_pin_Y[sort_index_vertical]
cell_pin_X_reorder = cell_pin_X[sort_index_vertical]
cell_name_reorder = cell_name[sort_index_vertical]
# match the cell name and the hierarchy instance name
end_num = 0
for current_num in range(len(sort_unique)):
start_num = end_num
end_num = start_num + np.count_nonzero(instance_y0_reorder == sort_unique[current_num])
tmp_x0 = instance_x0_reorder[start_num: end_num]
tmp_instance_hierarchy = instance_hierarchy_reorder[start_num: end_num]
sort_index_horizontal = np.argsort(tmp_x0)
instance_x0_reorder[start_num: end_num] = tmp_x0[sort_index_horizontal]
instance_hierarchy_reorder[start_num: end_num] = tmp_instance_hierarchy[sort_index_horizontal]
tmp_pin_X = cell_pin_X_reorder[start_num: end_num]
tmp_cell_name = cell_name_reorder[start_num: end_num]
sort_index_horizontal = np.argsort(tmp_pin_X)
cell_pin_X_reorder[start_num: end_num] = tmp_pin_X[sort_index_horizontal]
cell_name_reorder[start_num: end_num] = tmp_cell_name[sort_index_horizontal]
return cell_name_reorder, instance_hierarchy_reorder
def process_time_interval_use(desired_time_interval, desired_time_scale):
"""
process the time interval to use
:param desired_time_interval: desired time interval to use
:param desired_time_scale: desired time scale used in power analysis
:return: final time interval for subsequent step
"""
if desired_time_scale < 1:
intermediate = '%e' % desired_time_scale
multiple = math.pow(10, int(intermediate.partition('-')[2]))
final_time_interval = int(desired_time_interval * multiple)
final_time_divider = int(desired_time_scale * multiple)
else:
final_time_interval = int(desired_time_interval)
final_time_divider = int(desired_time_scale)
return final_time_interval, final_time_divider
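# Worked example for process_time_interval_use with the script defaults
# (desired_time_interval=40, desired_time_scale=0.4):
#   '%e' % 0.4 == '4.000000e-01', so multiple = 10**1 = 10
#   final_time_interval = int(40 * 10) = 400, final_time_divider = int(0.4 * 10) = 4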
def process_power_report_use(num_plaintexts, start_time_point, desired_time_interval, power_report_init_path, power_report_path):
"""
process the power report from power analysis
:param num_plaintexts: the number of the plaintexts
:param start_time_point: start time point in the initial power report
:param desired_time_interval: final time interval for subsequent step
:param power_report_init_path: path to the init power report file
:param power_report_path: path to the power report file
:return: combined power report
"""
power_report_file = open(power_report_path, 'w', newline='\n')
# create patterns used to extract parameters
pattern_power = r'[\d]+ [\d]+.*'
pattern_time_point = r'^\d+\n$'
start_enable = True
for current_num in range(num_plaintexts):
power_report_init_file = open(power_report_init_path + str(current_num) + '.out', 'r')
left_time_interval = start_time_point
if current_num == 0:
for line in power_report_init_file.readlines():
if re.match(pattern_time_point, line):
current_time_point = int(line.strip('\n')) - left_time_interval
if current_time_point >= desired_time_interval:
start_enable = False
break
elif current_time_point < 0:
start_enable = False
else:
start_enable = True
new_time_point = current_time_point + desired_time_interval * current_num
power_report_file.write(str(new_time_point) + "\n")
elif start_enable:
power_report_file.write(line)
else:
for line in power_report_init_file.readlines():
if re.match(pattern_time_point, line):
current_time_point = int(line.strip('\n')) - left_time_interval
if current_time_point >= desired_time_interval:
start_enable = False
break
elif current_time_point < 0:
start_enable = False
else:
start_enable = True
new_time_point = current_time_point + desired_time_interval * current_num
power_report_file.write(str(new_time_point) + "\n")
elif re.match(pattern_power, line) and start_enable:
power_report_file.write(line)
power_report_init_file.close()
power_report_file.close()
def logic_cell_modeling(top_cell, num_plaintexts, desired_time_interval, final_time_divider, power_report_path, instance_hierarchy, power_supply_voltage):
"""
modeling the current waveform of logic cells
:param top_cell: top cell in the design
:param num_plaintexts: the number of the plaintexts
:param desired_time_interval: final time interval for subsequent step
:param final_time_divider: final time divider used for final time interval
    :param power_report_path: path to the combined power report file
:param instance_hierarchy: instance with design hierarchy
:param power_supply_voltage: supply voltage for logic cells
:return: the current waveform of logic cells
"""
power_file = open(power_report_path, 'r')
print("Load power data, finished.")
# create empty list for cell name, keyword and time point
cell_hierarchy = []
cell_keyword = []
time_point = []
# create patterns used to extract parameters
pattern_keyword = r'.index Pc\(.*?\) [\d]+ Pc'
pattern_time_point = r'^\d+\n$'
# add matched parameters into list
for line in power_file.readlines():
if re.match(pattern_keyword, line):
tmp = line.split()
cell_hierarchy.append(tmp[1])
cell_keyword.append(int(tmp[2]))
elif re.match(pattern_time_point, line):
time_point.append(int(line.strip('\n')))
power_file.seek(0)
# create the map list between the cell hierarchy (from .out file) and instance_hierarchy (from .dspf or .def file)
cell_map = []
for current_num in range(len(instance_hierarchy)):
tmp = 'Pc(' + str(top_cell) + '/' + str(instance_hierarchy[current_num]) + ')'
try:
cell_map.append(cell_hierarchy.index(tmp))
except ValueError:
continue
# print("The amount of the logic cells is ", len(cell_map))
print("The maximum of the time points is ", time_point[-1])
time_point = np.asarray(time_point)
cell_keyword = np.asarray(cell_keyword)
# create array for all power traces
clock_period = desired_time_interval-0
off_clock_cycles = 1
start_time_point = 0
power_trace_all = np.full((len(cell_keyword), num_plaintexts, int(clock_period/final_time_divider)), 1e-11, dtype=np.float32)
# print("The shape of the current trace is ", np.shape(power_trace_all))
# create pattern used to extract power traces
pattern_power = r'[\d]+ [\d]+.*'
tracked_time_point = 0
num_plaintexts_recorded = 0
# add matched power values into the array
for line in power_file.readlines():
if num_plaintexts_recorded > num_plaintexts:
break
if re.match(pattern_time_point, line):
current_time_point = int(line.strip('\n'))
tracked_time_point = current_time_point - start_time_point - (clock_period * off_clock_cycles * num_plaintexts_recorded)
if current_time_point > start_time_point + num_plaintexts * off_clock_cycles * clock_period:
break
elif tracked_time_point >= clock_period:
num_plaintexts_recorded += 1
tracked_time_point = current_time_point - start_time_point - (clock_period * off_clock_cycles * num_plaintexts_recorded)
if re.match(pattern_power, line):
if tracked_time_point < 0:
continue
tmp = line.split()
current_keyword = int(tmp[0]) - 1
power_trace_all[current_keyword, num_plaintexts_recorded, int(tracked_time_point/final_time_divider)] = float(tmp[1])
power_file.close()
# map power traces for required cell
power_trace_all = power_trace_all[cell_map, :, :]
# post-process the power trace (replace the 0 value)
# pool = mp.Pool(mp.cpu_count()-1)
for current_time_point in range(np.shape(power_trace_all)[2]):
for plaintext in range(num_plaintexts):
vertical = np.asarray(power_trace_all[:, plaintext, current_time_point] == 1e-11).nonzero()
if current_time_point >= 1:
power_trace_all[vertical, plaintext, current_time_point] = power_trace_all[vertical, plaintext, current_time_point - 1]
else:
power_trace_all[vertical, plaintext, current_time_point] = power_trace_all[vertical, plaintext, current_time_point]
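    # convert power traces to current traces, assuming a constant supply voltage (I = P / V)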
power_trace_all = power_trace_all / power_supply_voltage
print("The shape of the current trace is ", np.shape(power_trace_all))
return power_trace_all
def main(top_cell, parasitic_netlist_path, def_path, power_report_path, num_plaintexts, start_time_point,
desired_time_interval, desired_time_scale, power_report_init_path, power_supply_voltage):
cell_name, cell_pin_X, cell_pin_Y = get_cell_pin_info(parasitic_netlist_path)
cell_name, instance_hierarchy = match_cell_info(def_path, cell_name, cell_pin_X, cell_pin_Y)
final_time_interval, final_time_divider = process_time_interval_use(desired_time_interval, desired_time_scale)
process_power_report_use(num_plaintexts, start_time_point, final_time_interval, power_report_init_path, power_report_path)
current_trace = logic_cell_modeling(top_cell, num_plaintexts, final_time_interval, final_time_divider, power_report_path, instance_hierarchy, power_supply_voltage)
np.save('instance_hierarchy.npy', instance_hierarchy)
np.save('cell_name.npy', cell_name)
np.save("current_trace.npy", current_trace)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--top_cell", type=str, default="aes_top",
help="Top cell in the design")
parser.add_argument("--parasitic_netlist_path", type=str, default="aes_top.dspf",
help="Path to the parasitic info file, should end in .dspf")
parser.add_argument("--def_path", type=str, default="aes_top.def",
help="Path to the def file, should end in .def")
parser.add_argument("--power_report_path", type=str, default="vcd_to_use.out",
help="Path to the power report file, should end in .out")
parser.add_argument("--num_plaintexts", type=int, default=10,
help="Number of required plaintexts")
parser.add_argument("--start_time_point", type=int, default=(1120 + 6880 - 1120 - 6880)*10,
help="Start time point in the initial power report")
parser.add_argument("--desired_time_interval", type=int, default=40,
help="Desired time interval to use, timescale 1ns/1ns")
parser.add_argument("--desired_time_scale", type=float, default=0.4,
help="Desired time scale used in power analysis")
parser.add_argument("--power_report_init_path", type=str, default="power_reports/vcd_",
help="Path to the init power report file, should end in vcd_")
parser.add_argument("--power_supply_voltage", type=float, default=1.8,
help="Supply voltage for logic cells")
args = parser.parse_args()
top_cell = args.top_cell
parasitic_netlist_path = args.parasitic_netlist_path
def_path = args.def_path
power_report_path = args.power_report_path
num_plaintexts = args.num_plaintexts
start_time_point = args.start_time_point
desired_time_interval = args.desired_time_interval
desired_time_scale = args.desired_time_scale
power_report_init_path = args.power_report_init_path
power_supply_voltage = args.power_supply_voltage
start_time = time.time()
try:
sys.exit(main(top_cell, parasitic_netlist_path, def_path, power_report_path, num_plaintexts, start_time_point,
desired_time_interval, desired_time_scale, power_report_init_path, power_supply_voltage))
except KeyboardInterrupt:
sys.exit()
finally:
print("Running time:", time.time() - start_time, "seconds")
``` |
{
"source": "jinyiY/desitarget",
"score": 2
} |
#### File: py/desitarget/uratmatch.py
```python
import os
import numpy as np
import fitsio
import requests
import pickle
from pkg_resources import resource_filename
from time import time
from astropy.io import ascii
from glob import glob
import healpy as hp
from desitarget.internal import sharedmem
from desimodel.footprint import radec2pix
from desitarget.geomask import add_hp_neighbors, radec_match_to
# ADM set up the DESI default logger
from desiutil.log import get_logger
log = get_logger()
# ADM start the clock
start = time()
# ADM columns contained in our version of the URAT fits files.
uratdatamodel = np.array([], dtype=[
('URAT_ID', '>i8'), ('RA', '>f8'), ('DEC', '>f8'),
('APASS_G_MAG', '>f4'), ('APASS_G_MAG_ERROR', '>f4'),
('APASS_R_MAG', '>f4'), ('APASS_R_MAG_ERROR', '>f4'),
('APASS_I_MAG', '>f4'), ('APASS_I_MAG_ERROR', '>f4'),
('PMRA', '>f4'), ('PMDEC', '>f4'), ('PM_ERROR', '>f4')
])
def _get_urat_dir():
"""Convenience function to grab the URAT environment variable.
Returns
-------
:class:`str`
The directory stored in the $URAT_DIR environment variable.
"""
# ADM check that the $URAT_DIR environment variable is set.
uratdir = os.environ.get('URAT_DIR')
if uratdir is None:
msg = "Set $URAT_DIR environment variable!"
log.critical(msg)
raise ValueError(msg)
return uratdir
def _get_urat_nside():
"""Grab the HEALPixel nside to be used throughout this module.
Returns
-------
:class:`int`
The HEALPixel nside number for URAT file creation and retrieval.
"""
nside = 32
return nside
def scrape_urat(url="http://cdsarc.u-strasbg.fr/ftp/I/329/URAT1/v12/",
nfiletest=None):
"""Retrieve the binary versions of the URAT files.
Parameters
----------
url : :class:`str`
The web directory that hosts the archived binary URAT files.
nfiletest : :class:`int`, optional, defaults to ``None``
If an integer is sent, only retrieve this number of files, for testing.
Returns
-------
Nothing
But the archived URAT files are written to $URAT_DIR/binary.
Notes
-----
- The environment variable $URAT_DIR must be set.
- Runs in about 50 minutes for 575 URAT files.
"""
# ADM check that the URAT_DIR is set and retrieve it.
uratdir = _get_urat_dir()
# ADM construct the directory to which to write files.
bindir = os.path.join(uratdir, 'binary')
# ADM the directory better be empty for the wget!
if os.path.exists(bindir):
if len(os.listdir(bindir)) > 0:
msg = "{} should be empty to wget URAT binary files!".format(bindir)
log.critical(msg)
raise ValueError(msg)
# ADM make the directory, if needed.
else:
log.info('Making URAT directory for storing binary files')
os.makedirs(bindir)
index = requests.get(url)
# ADM retrieve any file name that starts with z.
# ADM the [1::2] pulls back just the odd lines from the split list.
garbled = index.text.split("z")[1::2]
filelist = ["z{}".format(g[:3]) for g in garbled]
# ADM if nfiletest was passed, just work with that number of files.
test = nfiletest is not None
if test:
filelist = filelist[:nfiletest]
nfiles = len(filelist)
# ADM loop through the filelist.
start = time()
for nfile, fileinfo in enumerate(filelist):
# ADM make the wget command to retrieve the file and issue it.
cmd = 'wget -q {} -P {}'.format(os.path.join(url, fileinfo), bindir)
print(cmd)
os.system(cmd)
if nfile % 25 == 0 or test:
elapsed = time() - start
rate = nfile / elapsed
log.info(
'{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed'
.format(nfile+1, nfiles, rate, elapsed/60.)
)
log.info('Done...t={:.1f}s'.format(time()-start))
return
def urat_binary_to_csv():
"""Convert files in $URAT_DIR/binary to files in $URAT_DIR/csv.
Returns
-------
Nothing
But the archived URAT binary files in $URAT_DIR/binary are
converted to CSV files in the $URAT_DIR/csv.
Notes
-----
- The environment variable $URAT_DIR must be set.
- Relies on the executable urat/fortran/v1dump, which is only
tested at NERSC and might need compiled by the user.
- Runs in about 40 minutes for 575 files.
"""
# ADM check that the URAT_DIR is set.
uratdir = _get_urat_dir()
# ADM a quick check that the csv directory is empty before writing.
csvdir = os.path.join(uratdir, 'csv')
if os.path.exists(csvdir):
if len(os.listdir(csvdir)) > 0:
msg = "{} should be empty to make URAT files!".format(csvdir)
log.critical(msg)
raise ValueError(msg)
# ADM make the directory, if needed.
else:
log.info('Making URAT directory for storing CSV files')
os.makedirs(csvdir)
log.info('Begin converting URAT files to CSV...t={:.1f}s'
.format(time()-start))
# ADM check the v1dump executable has been compiled.
readme = resource_filename('desitarget', 'urat/fortran/README')
cmd = resource_filename('desitarget', 'urat/fortran/v1dump')
if not (os.path.exists(cmd) and os.access(cmd, os.X_OK)):
msg = "{} must have been compiled (see {})".format(cmd, readme)
log.critical(msg)
raise ValueError(msg)
# ADM execute v1dump.
os.system(cmd)
log.info('Done...t={:.1f}s'.format(time()-start))
return
def urat_csv_to_fits(numproc=5):
"""Convert files in $URAT_DIR/csv to files in $URAT_DIR/fits.
Parameters
----------
numproc : :class:`int`, optional, defaults to 5
The number of parallel processes to use.
Returns
-------
Nothing
But the archived URAT CSV files in $URAT_DIR/csv are converted
to FITS files in the directory $URAT_DIR/fits. Also, a look-up
table is written to $URAT_DIR/fits/hpx-to-files.pickle for which
each index is an nside=_get_urat_nside(), nested scheme HEALPixel
        and each entry is a list of the FITS files that touch that HEALPixel.
Notes
-----
- The environment variable $URAT_DIR must be set.
- if numproc==1, use the serial code instead of the parallel code.
- Runs in about 10 minutes with numproc=25 for 575 files.
"""
# ADM the resolution at which the URAT HEALPix files should be stored.
nside = _get_urat_nside()
# ADM check that the URAT_DIR is set.
uratdir = _get_urat_dir()
log.info("running on {} processors".format(numproc))
# ADM construct the directories for reading/writing files.
csvdir = os.path.join(uratdir, 'csv')
fitsdir = os.path.join(uratdir, 'fits')
# ADM make sure the output directory is empty.
if os.path.exists(fitsdir):
if len(os.listdir(fitsdir)) > 0:
msg = "{} should be empty to make URAT FITS files!".format(fitsdir)
log.critical(msg)
raise ValueError(msg)
# ADM make the output directory, if needed.
else:
log.info('Making URAT directory for storing FITS files')
os.makedirs(fitsdir)
# ADM construct the list of input files.
infiles = glob("{}/*csv*".format(csvdir))
nfiles = len(infiles)
# ADM the critical function to run on every file.
def _write_urat_fits(infile):
"""read an input name for a csv file and write it to FITS"""
outbase = os.path.basename(infile)
outfilename = "{}.fits".format(outbase.split(".")[0])
outfile = os.path.join(fitsdir, outfilename)
# ADM astropy understands without specifying format='csv'.
fitstable = ascii.read(infile)
# ADM map the ascii-read csv to typical DESI quantities.
nobjs = len(fitstable)
done = np.zeros(nobjs, dtype=uratdatamodel.dtype)
# ADM have to do this one-by-one, given the format.
done["RA"] = fitstable['col1']/1000./3600.
done["DEC"] = fitstable['col2']/1000./3600. - 90.
done["PMRA"] = fitstable['col16']/10.
done["PMDEC"] = fitstable['col17']/10.
done["PM_ERROR"] = fitstable['col18']/10.
done["APASS_G_MAG"] = fitstable['col36']/1000.
done["APASS_R_MAG"] = fitstable['col37']/1000.
done["APASS_I_MAG"] = fitstable['col38']/1000.
done["APASS_G_MAG_ERROR"] = fitstable['col41']/1000.
done["APASS_R_MAG_ERROR"] = fitstable['col42']/1000.
done["APASS_I_MAG_ERROR"] = fitstable['col43']/1000.
done["URAT_ID"] = fitstable['col46']
fitsio.write(outfile, done, extname='URATFITS')
# ADM return the HEALPixels that this file touches.
pix = set(radec2pix(nside, done["RA"], done["DEC"]))
return [pix, os.path.basename(outfile)]
# ADM this is just to count processed files in _update_status.
nfile = np.zeros((), dtype='i8')
t0 = time()
def _update_status(result):
"""wrapper function for the critical reduction operation,
that occurs on the main parallel process"""
if nfile % 25 == 0 and nfile > 0:
rate = nfile / (time() - t0)
elapsed = time() - t0
log.info(
'{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed'
.format(nfile, nfiles, rate, elapsed/60.)
)
nfile[...] += 1 # this is an in-place modification
return result
# - Parallel process input files...
if numproc > 1:
pool = sharedmem.MapReduce(np=numproc)
with pool:
pixinfile = pool.map(_write_urat_fits, infiles, reduce=_update_status)
# ADM ...or run in serial.
else:
pixinfile = list()
for file in infiles:
pixinfile.append(_update_status(_write_urat_fits(file)))
# ADM create a list for which each index is a HEALPixel and each
# ADM entry is a list of files that touch that HEALPixel.
npix = hp.nside2npix(nside)
pixlist = [[] for i in range(npix)]
for pixels, file in pixinfile:
for pix in pixels:
pixlist[pix].append(file)
# ADM write out the HEALPixel->files look-up table.
outfilename = os.path.join(fitsdir, "hpx-to-files.pickle")
outfile = open(outfilename, "wb")
pickle.dump(pixlist, outfile)
outfile.close()
log.info('Done...t={:.1f}s'.format(time()-t0))
return
def urat_fits_to_healpix(numproc=5):
"""Convert files in $URAT_DIR/fits to files in $URAT_DIR/healpix.
Parameters
----------
numproc : :class:`int`, optional, defaults to 5
The number of parallel processes to use.
Returns
-------
Nothing
But the archived URAT FITS files in $URAT_DIR/fits are
rearranged by HEALPixel in the directory $URAT_DIR/healpix.
The HEALPixel sense is nested with nside=_get_urat_nside(), and
each file in $URAT_DIR/healpix is called healpix-xxxxx.fits,
where xxxxx corresponds to the HEALPixel number.
Notes
-----
- The environment variable $URAT_DIR must be set.
- if numproc==1, use the serial code instead of the parallel code.
- Runs in about 10 minutes with numproc=25.
"""
# ADM the resolution at which the URAT HEALPix files should be stored.
nside = _get_urat_nside()
# ADM check that the URAT_DIR is set.
uratdir = _get_urat_dir()
# ADM construct the directories for reading/writing files.
fitsdir = os.path.join(uratdir, 'fits')
hpxdir = os.path.join(uratdir, 'healpix')
# ADM make sure the output directory is empty.
if os.path.exists(hpxdir):
if len(os.listdir(hpxdir)) > 0:
msg = "{} should be empty to make URAT HEALPix files!".format(hpxdir)
log.critical(msg)
raise ValueError(msg)
# ADM make the output directory, if needed.
else:
log.info('Making URAT directory for storing HEALPix files')
os.makedirs(hpxdir)
# ADM read the pixel -> file look-up table.
infilename = os.path.join(fitsdir, "hpx-to-files.pickle")
infile = open(infilename, "rb")
pixlist = pickle.load(infile)
npixels = len(pixlist)
# ADM include the pixel number explicitly in the look-up table.
pixlist = list(zip(np.arange(npixels), pixlist))
# ADM the critical function to run on every file.
def _write_hpx_fits(pixlist):
"""from files that touch a pixel, write out objects in each pixel"""
pixnum, files = pixlist
# ADM only proceed if some files touch a pixel.
if len(files) > 0:
# ADM track if it's our first time through the files loop.
first = True
# ADM Read in files that touch a pixel.
for file in files:
filename = os.path.join(fitsdir, file)
objs = fitsio.read(filename)
# ADM only retain objects in the correct pixel.
pix = radec2pix(nside, objs["RA"], objs["DEC"])
if first:
done = objs[pix == pixnum]
first = False
else:
done = np.hstack([done, objs[pix == pixnum]])
# ADM construct the name of the output file.
outfilename = 'healpix-{:05d}.fits'.format(pixnum)
outfile = os.path.join(hpxdir, outfilename)
# ADM write out the file.
hdr = fitsio.FITSHDR()
hdr['HPXNSIDE'] = nside
hdr['HPXNEST'] = True
fitsio.write(outfile, done, extname='URATHPX', header=hdr)
return
# ADM this is just to count processed files in _update_status.
npix = np.zeros((), dtype='i8')
t0 = time()
def _update_status(result):
"""wrapper function for the critical reduction operation,
that occurs on the main parallel process"""
if npix % 500 == 0 and npix > 0:
rate = npix / (time() - t0)
elapsed = time() - t0
log.info(
'{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed'
.format(npix, npixels, rate, elapsed/60.)
)
npix[...] += 1 # this is an in-place modification
return result
# - Parallel process input files...
if numproc > 1:
pool = sharedmem.MapReduce(np=numproc)
with pool:
_ = pool.map(_write_hpx_fits, pixlist, reduce=_update_status)
# ADM ...or run in serial.
else:
for pix in pixlist:
_update_status(_write_hpx_fits(pix))
log.info('Done...t={:.1f}s'.format(time()-t0))
return
def make_urat_files(numproc=5, download=False):
"""Make the HEALPix-split URAT files in one fell swoop.
Parameters
----------
numproc : :class:`int`, optional, defaults to 5
The number of parallel processes to use.
download : :class:`bool`, optional, defaults to ``False``
If ``True`` then wget the URAT binary files from Vizier.
Returns
-------
Nothing
But produces:
- URAT DR1 binary files in $URAT_DIR/binary (if download=True).
- URAT CSV files with all URAT columns in $URAT_DIR/csv.
- FITS files with columns from `uratdatamodel` in $URAT_DIR/fits.
- FITS files reorganized by HEALPixel in $URAT_DIR/healpix.
The HEALPixel sense is nested with nside=_get_urat_nside(), and
each file in $URAT_DIR/healpix is called healpix-xxxxx.fits,
where xxxxx corresponds to the HEALPixel number.
Notes
-----
- The environment variable $URAT_DIR must be set.
    - if numproc==1, use the serial code instead of the parallel code.
- Runs in about 2 hours with numproc=25 if download is ``True``.
- Runs in about 1 hour with numproc=25 if download is ``False``.
"""
t0 = time()
log.info('Begin making URAT files...t={:.1f}s'.format(time()-t0))
# ADM check that the URAT_DIR is set.
uratdir = _get_urat_dir()
# ADM a quick check that the fits and healpix directories are empty
# ADM before embarking on the slower parts of the code.
csvdir = os.path.join(uratdir, 'csv')
fitsdir = os.path.join(uratdir, 'fits')
hpxdir = os.path.join(uratdir, 'healpix')
for direc in [csvdir, fitsdir, hpxdir]:
if os.path.exists(direc):
if len(os.listdir(direc)) > 0:
msg = "{} should be empty to make URAT files!".format(direc)
log.critical(msg)
raise ValueError(msg)
if download:
scrape_urat()
log.info('Retrieved URAT files from Vizier...t={:.1f}s'
.format(time()-t0))
urat_binary_to_csv()
log.info('Converted binary files to CSV...t={:.1f}s'.format(time()-t0))
urat_csv_to_fits(numproc=numproc)
log.info('Converted CSV files to FITS...t={:.1f}s'.format(time()-t0))
urat_fits_to_healpix(numproc=numproc)
log.info('Rearranged FITS files by HEALPixel...t={:.1f}s'.format(time()-t0))
return
def find_urat_files(objs, neighbors=True, radec=False):
"""Find full paths to URAT healpix files for objects by RA/Dec.
Parameters
----------
objs : :class:`~numpy.ndarray`
Array of objects. Must contain the columns "RA" and "DEC".
neighbors : :class:`bool`, optional, defaults to ``True``
Also return all pixels that touch the files of interest
to prevent edge effects (e.g. if a URAT source is 1 arcsec
away from a primary source and so in an adjacent pixel).
radec : :class:`bool`, optional, defaults to ``False``
If ``True`` then the passed `objs` is an [RA, Dec] list
instead of a rec array that contains "RA" and "DEC".
Returns
-------
:class:`list`
A list of all URAT files to read to account for objects at
the passed locations.
Notes
-----
- The environment variable $URAT_DIR must be set.
"""
# ADM the resolution at which the URAT HEALPix files are stored.
nside = _get_urat_nside()
# ADM check that the URAT_DIR is set and retrieve it.
uratdir = _get_urat_dir()
hpxdir = os.path.join(uratdir, 'healpix')
# ADM which flavor of RA/Dec was passed.
if radec:
ra, dec = objs
else:
ra, dec = objs["RA"], objs["DEC"]
# ADM convert RA/Dec to co-latitude and longitude in radians.
theta, phi = np.radians(90-dec), np.radians(ra)
# ADM retrieve the pixels in which the locations lie.
pixnum = hp.ang2pix(nside, theta, phi, nest=True)
# ADM if neighbors was sent, then retrieve all pixels that touch each
# ADM pixel covered by the provided locations, to prevent edge effects...
if neighbors:
pixnum = add_hp_neighbors(nside, pixnum)
# ADM reformat file names in the URAT healpix format.
uratfiles = [os.path.join(hpxdir, 'healpix-{:05d}.fits'.format(pn))
for pn in pixnum]
# ADM restrict to only files/HEALPixels actually covered by URAT.
uratfiles = [fn for fn in uratfiles if os.path.exists(fn)]
return uratfiles
def match_to_urat(objs, matchrad=1., radec=False):
"""Match objects to URAT healpix files and return URAT information.
Parameters
----------
objs : :class:`~numpy.ndarray`
Must contain at least "RA" and "DEC".
matchrad : :class:`float`, optional, defaults to 1 arcsec
The radius at which to match in arcseconds.
radec : :class:`bool`, optional, defaults to ``False``
If ``True`` then the passed `objs` is an [RA, Dec] list instead of
a rec array.
Returns
-------
:class:`~numpy.ndarray`
The matching URAT information for each object. The returned
format is as for desitarget.uratmatch.uratdatamodel with
and extra column "URAT_SEP" which is the matching distance
in ARCSECONDS.
Notes
-----
- For objects that do NOT have a match in URAT, the "URAT_ID"
and "URAT_SEP" columns are -1, and other columns are zero.
- Retrieves the CLOSEST match to URAT for each passed object.
- Because this reads in HEALPixel split files, it's (far) faster
for objects that are clumped rather than widely distributed.
"""
# ADM parse whether a structure or coordinate list was passed.
if radec:
ra, dec = objs
else:
ra, dec = objs["RA"], objs["DEC"]
# ADM set up an array of URAT information for the output.
nobjs = len(ra)
done = np.zeros(nobjs, dtype=uratdatamodel.dtype)
# ADM objects without matches should have URAT_ID, URAT_SEP of -1.
done["URAT_ID"] = -1
urat_sep = np.zeros(nobjs) - 1
# ADM determine which URAT files need to be scraped.
uratfiles = find_urat_files([ra, dec], radec=True)
nfiles = len(uratfiles)
# ADM catch the case of no matches to URAT.
if nfiles > 0:
# ADM loop through the URAT files and find matches.
for ifn, fn in enumerate(uratfiles):
if ifn % 500 == 0 and ifn > 0:
log.info('{}/{} files; {:.1f} total mins elapsed'
.format(ifn, nfiles, (time()-start)/60.))
urat = fitsio.read(fn)
idurat, idobjs, dist = radec_match_to(
[urat["RA"], urat["DEC"]], [ra, dec],
sep=matchrad, radec=True, return_sep=True)
# ADM update matches whenever we have a CLOSER match.
ii = (urat_sep[idobjs] == -1) | (urat_sep[idobjs] > dist)
done[idobjs[ii]] = urat[idurat[ii]]
urat_sep[idobjs[ii]] = dist[ii]
# ADM add the separation distances to the output array.
dt = uratdatamodel.dtype.descr + [("URAT_SEP", ">f4")]
output = np.zeros(nobjs, dtype=dt)
for col in uratdatamodel.dtype.names:
output[col] = done[col]
output["URAT_SEP"] = urat_sep
return output
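# Minimal usage sketch for match_to_urat (assumes $URAT_DIR points at the HEALPix-split
# files produced by make_urat_files; the coordinates below are only illustrative):
#     import numpy as np
#     ras, decs = np.array([180.0, 45.2]), np.array([30.0, -5.1])
#     matched = match_to_urat([ras, decs], matchrad=1., radec=True)
#     has_match = matched["URAT_ID"] != -1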
``` |
{
"source": "jinyiyexing518/DAL",
"score": 3
} |
#### File: DAL/utils/box_coder.py
```python
import numpy as np
import torch
class BoxCoder(object):
"""
This class encodes and decodes a set of bounding boxes into
the representation used for training the regressors.
"""
def __init__(self, weights=(10., 10., 10., 5., 15.)):
self.weights = weights
    def encode(self, ex_rois, gt_rois):  # inputs are ROI boxes given as x1, y1, x2, y2, angle (five values)
        ex_widths = ex_rois[:, 2] - ex_rois[:, 0]  # anchor width
        ex_heights = ex_rois[:, 3] - ex_rois[:, 1]  # anchor height
        ex_widths = torch.clamp(ex_widths, min=1)  # clamp width to a minimum of 1
        ex_heights = torch.clamp(ex_heights, min=1)  # clamp height to a minimum of 1
        ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths  # anchor center coordinates x, y
        ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
        ex_thetas = ex_rois[:, 4]  # anchor angle
        gt_widths = gt_rois[:, 2] - gt_rois[:, 0]
        gt_heights = gt_rois[:, 3] - gt_rois[:, 1]
        gt_widths = torch.clamp(gt_widths, min=1)
        gt_heights = torch.clamp(gt_heights, min=1)  # ground-truth width and height
        gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths  # ground-truth center coordinates
        gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
        gt_thetas = gt_rois[:, 4]  # ground-truth angle
wx, wy, ww, wh, wt = self.weights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets_dt = wt * (torch.tan(gt_thetas / 180.0 * np.pi) - torch.tan(ex_thetas / 180.0 * np.pi))
targets = torch.stack(
(targets_dx, targets_dy, targets_dw, targets_dh, targets_dt), dim=1
)
        return targets  # regression offsets
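    # Encoding summary (anchor a = (ax, ay, aw, ah, a_theta), ground truth g, weights w):
    #   dx = wx * (gx - ax) / aw,   dy = wy * (gy - ay) / ah
    #   dw = ww * log(gw / aw),     dh = wh * log(gh / ah)
    #   dt = wt * (tan(g_theta * pi / 180) - tan(a_theta * pi / 180))
    # decode() below inverts these transforms to recover boxes from regressed deltas.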
def decode(self, boxes, deltas, mode='xywht'):
widths = boxes[:, :, 2] - boxes[:, :, 0]
heights = boxes[:, :, 3] - boxes[:, :, 1]
widths = torch.clamp(widths, min=1)
heights = torch.clamp(heights, min=1)
ctr_x = boxes[:, :, 0] + 0.5 * widths
ctr_y = boxes[:, :, 1] + 0.5 * heights
thetas = boxes[:, :, 4]
wx, wy, ww, wh, wt = self.weights
dx = deltas[:, :, 0] / wx
dy = deltas[:, :, 1] / wy
dw = deltas[:, :, 2] / ww
dh = deltas[:, :, 3] / wh
dt = deltas[:, :, 4] / wt
pred_ctr_x = ctr_x if 'x' not in mode else ctr_x + dx * widths
pred_ctr_y = ctr_y if 'y' not in mode else ctr_y + dy * heights
pred_w = widths if 'w' not in mode else torch.exp(dw) * widths
pred_h = heights if 'h' not in mode else torch.exp(dh) * heights
pred_t = thetas if 't' not in mode else torch.atan(torch.tan(thetas / 180.0 * np.pi) + dt) / np.pi * 180.0
pred_boxes_x1 = pred_ctr_x - 0.5 * pred_w
pred_boxes_y1 = pred_ctr_y - 0.5 * pred_h
pred_boxes_x2 = pred_ctr_x + 0.5 * pred_w
pred_boxes_y2 = pred_ctr_y + 0.5 * pred_h
pred_boxes = torch.stack([
pred_boxes_x1,
pred_boxes_y1,
pred_boxes_x2,
pred_boxes_y2,
pred_t], dim=2
)
        return pred_boxes  # decoded boxes: two corner coordinates plus the angle
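# Minimal usage sketch (shapes are assumptions: anchors as an (N, 5) tensor, deltas as (B, N, 5)):
#     coder = BoxCoder()
#     targets = coder.encode(anchor_rois, gt_rois)             # regression targets for training
#     boxes = coder.decode(anchor_rois.unsqueeze(0), deltas)   # decoded (x1, y1, x2, y2, theta)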
``` |
{
"source": "JinyongJeong/Deeplabv3_pytorch_Cityscape_and_Apolloscape",
"score": 2
} |
#### File: JinyongJeong/Deeplabv3_pytorch_Cityscape_and_Apolloscape/inference_apolloscape.py
```python
import sys
import os
default_path = os.path.dirname(os.path.abspath(__file__))
from datasets_apolloscape import DatasetTrain, DatasetVal # (this needs to be imported before torch, because cv2 needs to be imported before torch for some reason)
sys.path.append(os.path.join(default_path,'model'))
from deeplabv3_apolloscape_class_8 import DeepLabV3
sys.path.append(os.path.join(default_path,'utils'))
from utils import add_weight_decay
from utils import label_img_to_color
from utils import label_img_to_color_apolloscape
import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2
import time
import glob
def getEpoch(checkpoint_name):
filename_w_ext = os.path.basename(checkpoint_name)
filename, file_extension = os.path.splitext(filename_w_ext)
filenames = filename.split("_")
return filenames[3]
# NOTE! NOTE! change this to not overwrite all log data when you train the model:
model_ids = [2,3,5,6,9]
eval_batch_size = 1
for model_id in model_ids:
print("model_id: " + str(model_id))
logs_dir = os.path.join(default_path, 'training_logs')
checkpoints_dir = os.path.join(default_path, 'training_logs', 'model_' + str(model_id), 'checkpoints')
model_dir = os.path.join(default_path, 'training_logs', 'model_' + str(model_id))
#network = DeepLabV3(model_id, project_dir=default_path).cuda()
network = DeepLabV3(model_id, project_dir=default_path)
network = nn.DataParallel(network)
network = network.cuda()
#check last checkpoint
data_list = glob.glob(os.path.join(checkpoints_dir,'model_'+str(model_id)+'_*.pth'))
#find latest checkpoint
start_epoch = 0
for name in list(data_list):
if start_epoch < int(getEpoch(name)):
start_epoch = int(getEpoch(name))
if start_epoch != 0:
network.load_state_dict(torch.load(os.path.join(checkpoints_dir,"model_" + str(model_id) +"_epoch_" + str(start_epoch) + ".pth")))
print("Recorver check point of epoch: " + str(start_epoch))
else:
print("Can't find checkpoint for loading")
quit()
val_dataset = DatasetVal()
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=eval_batch_size, shuffle=False,
num_workers=30)
############################################################################
# inference:
############################################################################
network.eval() # (set in evaluation mode, this affects BatchNorm and dropout)
save_path = os.path.join(default_path,'inference/apolloscape', 'model_' + str(model_id))
if not os.path.exists(save_path):
os.makedirs(save_path)
img_index = 0
print("Start inference")
for step, (imgs, label_imgs) in enumerate(val_loader):
print("Eval step: " + str(step))
with torch.no_grad(): # (corresponds to setting volatile=True in all variables, this is done during inference to reduce memory consumption)
imgs = Variable(imgs).cuda() # (shape: (batch_size, 3, img_h, img_w))
#label_imgs = Variable(label_imgs.type(torch.LongTensor)).cuda() # (shape: (batch_size, img_h, img_w))
outputs = network(imgs) # (shape: (batch_size, num_classes, img_h, img_w))
# compute the loss:
#outputs = outputs.data.cpu().numpy() # (shape: (batch_size, num_classes, img_h, img_w))
outputs = torch.argmax(outputs, dim=1)
#pred_label_imgs = np.argmax(outputs, axis=1) # (shape: (batch_size, img_h, img_w))
pred_label_imgs = outputs.data.cpu().numpy()
pred_label_imgs = pred_label_imgs.astype(np.uint8)
for i in range(pred_label_imgs.shape[0]):
pred_label_img = pred_label_imgs[i] # (shape: (img_h, img_w))
img = imgs[i] # (shape: (3, img_h, img_w))
img = img.data.cpu().numpy()
img = np.transpose(img, (1, 2, 0)) # (shape: (img_h, img_w, 3))
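                # undo ImageNet-style normalization (multiply by std, add mean) and rescale to 0-255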
img = img*np.array([0.229, 0.224, 0.225])
img = img + np.array([0.485, 0.456, 0.406])
img = img*255.0
img = img.astype(np.uint8)
pred_label_img_color = label_img_to_color_apolloscape(pred_label_img)
overlayed_img = 0.35*img + 0.65*pred_label_img_color
overlayed_img = overlayed_img.astype(np.uint8)
save_file_path = os.path.join(save_path, str(img_index) + '.png')
cv2.imwrite(save_file_path, overlayed_img)
img_index += 1
``` |
{
"source": "jinyonglner00/crawl-zsxq2",
"score": 3
} |
#### File: jinyonglner00/crawl-zsxq2/crawl.py
```python
import re
import requests
import json
import os
import pdfkit
from bs4 import BeautifulSoup
from urllib.parse import quote
html_template = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
</head>
<body>
<h1>{title}</h1>
<p>{text}</p>
</body>
</html>
"""
htmls = []
num = 0
def get_data(url):
global htmls, num
headers = {
'Authorization': '<PASSWORD>',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
rsp = requests.get(url, headers=headers)
    with open('test.json', 'w', encoding='utf-8') as f:  # write the response to test.json for easy inspection
f.write(json.dumps(rsp.json(), indent=2, ensure_ascii=False))
with open('test.json', encoding='utf-8') as f:
for topic in json.loads(f.read()).get('resp_data').get('topics'):
content = topic.get('question', topic.get('talk', topic.get('task', topic.get('solution'))))
# print(content)
text = content.get('text', '')
text = re.sub(r'<[^>]*>', '', text).strip()
text = text.replace('\n', '<br>')
title = str(num) + text[:9]
num += 1
if content.get('images'):
soup = BeautifulSoup(html_template, 'html.parser')
for img in content.get('images'):
url = img.get('large').get('url')
img_tag = soup.new_tag('img', src=url)
soup.body.append(img_tag)
html_img = str(soup)
html = html_img.format(title=title, text=text)
else:
html = html_template.format(title=title, text=text)
if topic.get('question'):
answer = topic.get('answer').get('text', "")
soup = BeautifulSoup(html, 'html.parser')
answer_tag = soup.new_tag('p')
answer_tag.string = answer
soup.body.append(answer_tag)
html_answer = str(soup)
html = html_answer.format(title=title, text=text)
htmls.append(html)
next_page = rsp.json().get('resp_data').get('topics')
if next_page:
create_time = next_page[-1].get('create_time')
if create_time[20:23] == "000":
end_time = create_time[:20]+"999"+create_time[23:]
else :
res = int(create_time[20:23])-1
end_time = create_time[:20]+str(res).zfill(3)+create_time[23:] # zfill 函数补足结果前面的零,始终为3位数
end_time = quote(end_time)
if len(end_time) == 33:
end_time = end_time[:24] + '0' + end_time[24:]
next_url = start_url + '&end_time=' + end_time
print(next_url)
get_data(next_url)
return htmls
def make_pdf(htmls):
html_files = []
for index, html in enumerate(htmls):
file = str(index) + ".html"
html_files.append(file)
with open(file, "w", encoding="utf-8") as f:
f.write(html)
options = {
"user-style-sheet": "test.css",
"page-size": "Letter",
"margin-top": "0.75in",
"margin-right": "0.75in",
"margin-bottom": "0.75in",
"margin-left": "0.75in",
"encoding": "UTF-8",
"custom-header": [("Accept-Encoding", "gzip")],
"cookie": [
("cookie-name1", "cookie-value1"), ("cookie-name2", "cookie-value2")
],
"outline-depth": 10,
}
try:
pdfkit.from_file(html_files, "电子书.pdf", options=options)
except Exception as e:
pass
for file in html_files:
os.remove(file)
print("已制作电子书在当前目录!")
if __name__ == '__main__':
start_url = 'https://api.zsxq.com/v1.10/groups/8424258282/topics?scope=digests&count=20'
make_pdf(get_data(start_url))
``` |
{
"source": "jinyoungbang/CS501_Haverhill",
"score": 3
} |
#### File: haverhill_311_function/modules/qalert.py
```python
from io import BytesIO
from . import settings
from . import db
import requests
import ijson
def pull():
"""
Makes a GET request to fetch 311 Data from QAlert API and returns the data.
"""
data = []
if settings.TEST:
url = settings.QALERT_REQUEST_ENDPOINT_TEST
else:
url = "{endpoint}?key={api_key}&count={count}&sort={sort}".format(
endpoint=settings.QALERT_REQUEST_ENDPOINT,
api_key=settings.QALERT_API_KEY,
count=-1,
sort="[createdate] asc,"
)
with db.QAlertAuditDB() as audit_db:
latest_request = audit_db.get_latest_request()
if latest_request is not None:
url += f"&createDateMin={latest_request.create_date}"
payload = {}
headers = {'User-Agent': 'Custom'}
response = requests.request(
"GET", url, headers=headers, data=payload
)
if response.status_code != 200:
return data
data = ijson.items(BytesIO(response.content), 'item')
return data
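# Minimal usage sketch (assumes settings.QALERT_* and the audit DB are configured; the
# structure of each returned item depends on the QAlert API payload):
#     for request in pull():
#         print(request)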
```
#### File: tests/unit/test_db.py
```python
from haverhill_311_function.modules import db
import pytest
@pytest.fixture
def qalert_db():
return db.QAlertDB(
host='localhost',
port=5432,
user='docker',
password='<PASSWORD>',
database='qalert_test'
)
def test_init(qalert_db):
assert getattr(qalert_db, 'session', None) is None
assert qalert_db.host == 'localhost'
assert qalert_db.port == 5432
assert qalert_db.user == 'docker'
assert qalert_db.password == '<PASSWORD>'
assert qalert_db.database == 'qalert_test'
def test_connection(qalert_db):
with qalert_db:
assert(qalert_db.session is not None)
def test_save(qalert_db):
qalert_request = db.QAlertRequest(
id=1,
latitude=1.1,
longitude=1.1
)
with qalert_db:
qalert_db.save(qalert_request)
saved_qalert_request = qalert_db.get(request_id=1)
assert saved_qalert_request == qalert_request
def test_save_many(qalert_db):
qalert_requests = [
db.QAlertRequest(
id=1,
latitude=1.1,
longitude=1.1
),
db.QAlertRequest(
id=2,
latitude=1.1,
longitude=1.1
),
db.QAlertRequest(
id=3,
latitude=1.1,
longitude=1.1
),
]
with qalert_db:
qalert_db.save_many(qalert_requests)
assert qalert_db.get(request_id=1) is not None
assert qalert_db.get(request_id=2) is not None
assert qalert_db.get(request_id=3) is not None
```
#### File: tests/unit/test_qalert.py
```python
from haverhill_311_function.modules import settings
from haverhill_311_function.modules import qalert
def test_valid_format():
settings.TEST = True
res = qalert.pull()
for qalert_request in res:
assert type(qalert_request) == dict
def test_using_endpoint():
settings.TEST = True
res = qalert.pull()
assert res is not None
def test_actual_endpoint():
settings.TEST = False
res = qalert.pull()
print(res)
assert res == []
``` |
{
"source": "jinyuanliu23/DCRNN_PyTorch",
"score": 2
} |
#### File: model/pytorch/dcrnn_cell.py
```python
import numpy as np
import torch
# from MultiGAT import GAT
from lib import utils
from gatl import GAT
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
def __init__(self, rnn_network: torch.nn.Module, layer_type: str):
self._rnn_network = rnn_network
self._params_dict = {}
self._biases_dict = {}
self._type = layer_type
def get_weights(self, shape):
if shape not in self._params_dict:
nn_param = torch.nn.Parameter(torch.empty(*shape, device=device))
torch.nn.init.xavier_normal_(nn_param)
self._params_dict[shape] = nn_param
self._rnn_network.register_parameter('{}_weight_{}'.format(self._type, str(shape)),
nn_param)
return self._params_dict[shape]
def get_biases(self, length, bias_start=0.0):
if length not in self._biases_dict:
biases = torch.nn.Parameter(torch.empty(length, device=device))
torch.nn.init.constant_(biases, bias_start)
self._biases_dict[length] = biases
self._rnn_network.register_parameter('{}_biases_{}'.format(self._type, str(length)),
biases)
return self._biases_dict[length]
class GAGRUCell(torch.nn.Module):
def __init__(self, num_units, adj_mx, num_nodes, nonlinearity='tanh', use_ga_for_ru=True):
"""
:param num_units:
:param adj_mx:
:param max_diffusion_step:
:param num_nodes:
:param nonlinearity:
:param filter_type: "laplacian", "random_walk", "dual_random_walk".
:param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
"""
super().__init__()
self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
# support other nonlinearities up here?
self._num_nodes = num_nodes
self._num_units = num_units
self.multi_head_nums = 3
self._supports = []
self._use_ga_for_ru = use_ga_for_ru
self.adj_mx = adj_mx
supports = []
# if filter_type == "laplacian":
# supports.append(utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
# elif filter_type == "random_walk":
# supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
# elif filter_type == "dual_random_walk":
# supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
# supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
# else:
# supports.append(utils.calculate_scaled_laplacian(adj_mx))
# for support in supports:
# self._supports.append(self._build_sparse_matrix(support))
self._fc_params = LayerParams(self, 'fc')
self._gat_params = LayerParams(self, 'gat')
    # @staticmethod
# def _build_sparse_matrix(L):
# L = L.tocoo()
# indices = np.column_stack((L.row, L.col))
    #     indices = indices.astype(float)  # explicit numpy type cast
#
# # this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
# indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
# L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
#
# return L
def forward(self, inputs, hx):
"""Gated recurrent unit (GRU) with Graph Convolution.
:param inputs: (B, num_nodes * input_dim)
:param hx: (B, num_nodes * rnn_units)
:return
- Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`.
"""
output_size = 2 * self._num_units
if self._use_ga_for_ru:
fn = self._GAT
else:
fn = self._fc
value = torch.sigmoid(fn(inputs, hx, output_size,bias_start=1.0))
value = torch.reshape(value, (-1, self._num_nodes, output_size))
r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1)
r = torch.reshape(r, (-1, self._num_nodes * self._num_units))
u = torch.reshape(u, (-1, self._num_nodes * self._num_units))
c = self._GAT(inputs, r * hx, self._num_units)
if self._activation is not None:
c = self._activation(c)
new_state = u * hx + (1.0 - u) * c
return new_state
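    # GRU update computed by forward(), with graph attention (or the fc layer) as fn:
    #   r, u = split(sigmoid(fn([x, h])))     # reset and update gates
    #   c    = tanh(GAT([x, r * h]))          # candidate state (tanh is the default activation)
    #   h'   = u * h + (1 - u) * c            # new hidden state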
@staticmethod
def _concat(x, x_):
x_ = x_.unsqueeze(0)
return torch.cat([x, x_], dim=0)
def _fc(self, inputs, state, output_size, bias_start=0.0):
batch_size = inputs.shape[0]
inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1))
state = torch.reshape(state, (batch_size * self._num_nodes, -1))
inputs_and_state = torch.cat([inputs, state], dim=-1)
input_size = inputs_and_state.shape[-1]
weights = self._fc_params.get_weights((input_size, output_size))
value = torch.sigmoid(torch.matmul(inputs_and_state, weights))
biases = self._fc_params.get_biases(output_size, bias_start)
value += biases
return value
def _GAT(self, inputs, state, output_size, bias_start=0.0):
# Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
batch_size = inputs.shape[0]
inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
state = torch.reshape(state, (batch_size, self._num_nodes, -1))
inputs_and_state = torch.cat([inputs, state], dim=2)
input_size = inputs_and_state.size(2)
x = inputs_and_state
        # NOTE: as in the original code, a fresh GAT layer (assumed to be defined elsewhere
        # in this module) is constructed on every forward pass
        model = GAT(x.size(1), x.size(1), x.size(1), self.multi_head_nums)
        x = model(x, self.adj_mx)
# x = torch.tensor(x)
weights = self._gat_params.get_weights((input_size, output_size))
        x = torch.sigmoid(torch.matmul(x, weights))  # (batch_size, num_nodes, output_size)
biases = self._gat_params.get_biases(output_size, bias_start)
x += biases
# Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
return torch.reshape(x, [batch_size, self._num_nodes * output_size])
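# Minimal usage sketch (illustrative assumption, not part of the original file): it assumes
# `adj_mx` is a [num_nodes, num_nodes] adjacency tensor and that the GAT class used in _GAT
# is available in this module.
# cell = GAGRUCell(num_units=64, adj_mx=adj_mx, num_nodes=207)
# hx = torch.zeros(batch_size, 207 * 64, device=device)
# new_hx = cell(inputs, hx)  # inputs: (batch_size, 207 * input_dim)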
``` |
{
"source": "JinyuanSun/Codes_for_FoldX",
"score": 3
} |
#### File: Codes_for_FoldX/GUI/GUI.py
```python
import os
import ssl
import urllib
import urllib.request
from tkinter import *
from tkinter import messagebox
ssl._create_default_https_context = ssl._create_unverified_context
'''
LARGE_FONT= ("Verdana", 12)
NORM_FONT = ("Helvetica", 10)
SMALL_FONT = ("Helvetica", 8)
'''
os.system("mkdir EasyFoldx")
sourcePath = "EasyFoldx"
os.chdir(sourcePath)
def mutation():
inputname = pdb_text.get()
chainid = pdb_text.get()[5]
pdbname = inputname.split(".")[0] + "_Repair.pdb"
mut = mutation_text.get()
l = mut[0] + chainid + mut[1:]
print(l)
o = "command=PositionScan\npdb=" + pdbname + "\npositions=" + l
of_name = "MT_" + mut + ".cfg"
    of = open(of_name, "w")
print(o, file=of)
#cmd = "./foldx --config " + of_name
#os.system("./foldx -f " + of_name)
os.system("nohup ./foldx -f " + of_name+" &")
def analyze():
inputname = pdb_text.get()
filename = "PS_"+inputname+"_Repair_scanning_output.txt"
'''
file = open(filename)
l = ''
for x in file:
l = l +x+"\n"
mut_out.insert(END,l)
'''
data = []
with open(filename) as f:
for line in f:
data += line.split()
#Create your listbox here.
for i in range(len(data)):
mut_out.insert(i+1, data[i])
#print("./foldx -f " + of_name)
#
def PS():
inputname = pdb_text.get()
pdbname = inputname.split(".")[0] + "_Repair.pdb"
print(pdbname)
# pdbname = args.pdbfile
nt = int(nt_text.get())
# nt = 20
# pdbname="6QG9_A_Repair.pdb"
import os
try:
file = open("SO_" + pdbname.replace("pdb", "fxout"), "r")
except FileNotFoundError:
os.system("./foldx --command=SequenceOnly --pdb=" + pdbname)
file = open("SO_" + pdbname.replace("pdb", "fxout"), "r")
lst = []
for line in file:
l = line.replace("\n", "").split("\t")
if len(l) > 3:
lst.append(l[3] + "a")
t = len(lst) // (nt - 1)
n = 0
for i in range(0, len(lst), t):
b = lst[i:i + t]
l = ""
for x in b:
l = l + x + ","
n = n + 1
o = "command=PositionScan\npdb=" + pdbname + "\npositions=" + l
of_name = "PS_" + str(n) + ".cfg"
        of = open(of_name, "w")
print(o[:-1], file=of)
os.system("nohup ./foldx -f " + of_name + " &")
def populate_list():
print("populate")
def getpdb():
if len(pdb_text.get()) < 5:
pdbid = pdb_text.get()
pdb = pdbid + ".pdb"
urllib.request.urlretrieve('https://files.rcsb.org/download/' + pdb, pdb)
cwd = os.getcwd() + "/" + pdb
messagebox.showinfo("Done", "The pdb file " + pdbid + " has been downloaded to " + cwd)
oname = pdbid
else:
pdbid = pdb_text.get()[0:4]
pdb = pdbid + ".pdb"
urllib.request.urlretrieve('https://files.rcsb.org/download/' + pdb, pdb)
pdbfile = open(pdb)
oname = pdb_text.get() + ".pdb"
        opdb = open(oname, "w")
chainid = pdb_text.get()[5]
for line in pdbfile:
if line.startswith("ATOM"):
l = line.split()
if l[4] == chainid:
print(line, end="", file=opdb)
cwd = os.getcwd() + "/" + oname
messagebox.showinfo("Done", "The pdb file " + oname + " has been downloaded to " + cwd)
return oname
# os.system("")
def repair_pdb():
inputname = pdb_text.get()+".pdb"
pdbname = inputname.split(".")[0] + "_Repair.pdb"
os.system("./foldx --command=RepairPDB --pdb=" + inputname)
cwd = os.getcwd() + "/" + pdbname
messagebox.showinfo("Done", "The Repaired pdb file " + pdbname + " has been saved to " + cwd)
# create window object
app = Tk()
# pdb
pdb_text = StringVar()
pdb_lable = Label(app, text='PDB ID', font=('bold', 14), pady=20)
pdb_lable.grid(row=0, column=0, sticky=W)
pdb_entry = Entry(app, textvariable=pdb_text)
pdb_entry.grid(row=0, column=1)
# mutation
mutation_text = StringVar()
mutation_lable = Label(app, text='Mutation', font=('bold', 14))
mutation_lable.grid(row=2, column=0, sticky=W)
mutation_entry = Entry(app, textvariable=mutation_text)
mutation_entry.grid(row=2, column=1)
# input pdb (note: this entry is bound to the same pdb_text variable as the PDB ID field above)
inpdb_text = StringVar()
inpdb_lable = Label(app, text='Input PDB', font=('bold', 14))
inpdb_lable.grid(row=1, column=0, sticky=W)
inpdb_entry = Entry(app, textvariable=pdb_text)
inpdb_entry.grid(row=1, column=1)
# PS
nt_text = StringVar()
nt_lable = Label(app, text='Number of Threads', font=('bold', 14))
nt_lable.grid(row=3, column=0, sticky=W)
nt_entry = Entry(app, textvariable=nt_text)
nt_entry.grid(row=3, column=1)
# mutation out
mut_out = Listbox(app, height=8, width=50, border=0)
mut_out.grid(row=4, column=0, columnspan=3, rowspan=6, pady=20, padx=20)
# creat scrollbar
scrollbar = Scrollbar(app)
scrollbar.grid(row=4, column=3)
# Buttons
fetch_btn = Button(app, text='Fetch', width=12, command=getpdb)
fetch_btn.grid(row=0, column=2)
repair_btn = Button(app, text='Foldx Repair', width=12, command=repair_pdb)
repair_btn.grid(row=1, column=2)
Position_Scan_btn = Button(app, text='Foldx Position Scan', width=15, command=PS)
Position_Scan_btn.grid(row=3, column=2)
mutation_btn = Button(app, text='Foldx Mutation', width=12, command=mutation)
mutation_btn.grid(row=2, column=2)
analyze_btn = Button(app, text='Show', width=12, command=analyze)
analyze_btn.grid(row=5, column=0)
def pwd():
print(os.getcwd())
pwd_btn = Button(app, text='pwd', width=12, command=pwd)
pwd_btn.grid(row=2, column=3)
# set scroll to listbox
# mut_out.configure(yscrollcommand=scrollbar.set)
# scrollbar.configure(command=mut_out.yview)
app.title('Easy FoldX')
app.geometry('700x500')
# populate
populate_list()
# start program
app.mainloop()
```
#### File: Codes_for_FoldX/GUI/prepare4scan.py
```python
import argparse
parser = argparse.ArgumentParser(description='prepare for PositionScan in FoldX')
parser.add_argument("-i", help="input a fasta file")
parser.add_argument("-p", help="the a pdb file corresponding to the fasta file, a repaired pdbfile mostly")
args = parser.parse_args()
inf = open(args.i)
pdbid = args.p
def readseq(fasta):
seq = ''
for line in fasta:
line = line.strip('\n')
if not line.startswith('>'):
seq += line
return seq
ofile = open('config_scan.cfg', "w")
print('command=PositionScan' + '\n' + 'pdb=' + pdbid + '\n' + 'positions=', end='', file=ofile)
seq = readseq(inf)
for i in range(len(seq)):
if i < len(seq) - 1:
new_word = seq[i] + 'A' + str(i + 1) + 'a,'
print(new_word, end='', file=ofile)
else:
new_word = seq[i] + 'A' + str(i + 1) + 'a'
print(new_word, end='', file=ofile)
```
#### File: test/md/dcd2pdb.py
```python
import mdtraj as mt
def dcd2pdb(dcd_file, topol_file, out_file, stride=1, noWater=True, superimpose=True):
top = mt.load(topol_file)
if noWater:
indices = top.topology.select("protein")
else:
indices = top.topology.select("all")
traj = mt.load_dcd(dcd_file, top=topol_file, stride=stride, atom_indices=indices)
if superimpose:
print("INFO: Superimposing to topology ......")
CA_indices = top.topology.select("protein and name CA")
traj.superpose(top, ref_atom_indices=CA_indices, atom_indices=CA_indices)
traj.save_pdb(out_file)
return None
if __name__ == '__main__':
    import sys
dcd_file, topol_file, out_file = sys.argv[1:]
dcd2pdb(dcd_file, topol_file, out_file)
``` |
{
"source": "JinyuanSun/DDGScan",
"score": 2
} |
#### File: DDGScan/utils/abacus.py
```python
import os
import time
def run_abacus(pdbfilename):
try:
os.mkdir("abacus_jobs")
os.chdir("abacus_jobs")
start_time = time.time()
print("[INFO]: ABACUS started at %s" % (time.ctime()))
os.system("cp ../%s ./" % (pdbfilename))
print("[INFO]: Running ABACUS_prepare.")
os.system("ABACUS_prepare %s" % (pdbfilename))
print("[INFO]: Running ABACUS_S1S2.")
os.system("ABACUS_S1S2 %s" % (pdbfilename))
prepare_end = time.time()
prepare_time = prepare_end - start_time
print("[INFO]: ABACUS prepare took %f seconds." % (prepare_time))
print("[INFO]: Running ABACUS_singleMutationScan.")
os.system("ABACUS_singleMutationScan %s abacus_output.txt" % (pdbfilename))
scan_end = time.time()
scan_time = scan_end - prepare_end
print("[INFO]: ABACUS scan took %f seconds." % (scan_time))
os.chdir("../")
return prepare_time, scan_time
    except FileExistsError:
        os.chdir("abacus_jobs")
        if os.path.exists("./abacus_output.txt"):
            print("[INFO]: ABACUS results found. Skipping.")
            os.chdir("../")
            return 0, 0
        # directory exists but no output yet: return zero timings so callers can still unpack two values
        print("[WARNING]: abacus_jobs/ exists but abacus_output.txt was not found.")
        os.chdir("../")
        return 0, 0
def runOneJob(varlist):
def _1_2_3(x):
d = {
"C": "CYS",
"D": "ASP",
"S": "SER",
"Q": "GLN",
"K": "LYS",
"I": "ILE",
"P": "PRO",
"T": "THR",
"F": "PHE",
"N": "ASN",
"G": "GLY",
"H": "HIS",
"L": "LEU",
"R": "ARG",
"W": "TRP",
"A": "ALA",
"V": "VAL",
"E": "GLU",
"Y": "TYR",
"M": "MET",
}
return d[x]
    # same varlist layout as foldx.runOneJob
pdb, wild, chain, aa, resNum = varlist
MUT = _1_2_3(aa)
output = (
os.popen("singleMutation %s %s %s %s" % (pdb, chain, str(resNum), MUT))
.read()
.split()
)
# print(output)
s1 = float(output[6])
s2 = float(output[8])
pack = float(output[10])
total = s1 + s2 + pack
# self.abacus2_results["_".join([wild, str(resNum), aa])] = total
# print(all_results)
# A 42 GLU->TRP SAI: 0.966 S1: 1.748 S2: 0.212 PACK: -0.009 HB: 0.000
return "_".join([wild, str(resNum), aa]), total
def parse_abacus_out():
try:
os.mkdir("abacus_results")
except FileExistsError:
pass
longer_names = {
"ALA": "A",
"ARG": "R",
"ASN": "N",
"ASP": "D",
"CYS": "C",
"GLU": "E",
"GLN": "Q",
"GLY": "G",
"HIS": "H",
"ILE": "I",
"LEU": "L",
"LYS": "K",
"MET": "M",
"PHE": "F",
"PRO": "P",
"SER": "S",
"THR": "T",
"TRP": "W",
"TYR": "Y",
"VAL": "V",
}
with open("tempfile", "w") as tem:
with open("abacus_jobs/abacus_output.txt") as abacusfile:
for line in abacusfile:
if line.startswith("site"):
wildAA = line.strip().split()[4]
wildAAnum = line.strip().split()[1]
else:
tem.write(wildAA + " " + wildAAnum + " " + line)
with open("abacus_results/All_ABACUS.score", "w+") as complete:
complete.write(
"#Score file formatted by GRAPE from ABACUS.\n#mutation\tscore\tstd\n"
)
with open("tempfile") as abacusfile:
for line in abacusfile:
wildAA1 = line.strip().split()[0]
if wildAA1 in longer_names:
wildAAabr = longer_names[wildAA1]
wildAAnum1 = line.strip().split()[1]
mutAA = line.strip().split()[2]
if mutAA in longer_names:
mutAAabr = longer_names[mutAA]
sef_energy = line.strip().split()[11]
complete.write(
wildAAabr
+ "_"
+ wildAAnum1
+ "_"
+ mutAAabr
+ "\t"
+ sef_energy
+ "\t"
+ str(0)
+ "\n"
)
tem.close()
complete.close()
os.remove("tempfile")
if __name__ == "__main__":
print("Running")
parse_abacus_out()
```
#### File: DDGScan/utils/autofix.py
```python
import os
from openmm import app
from pdbfixer import PDBFixer
################################################################################
# ref:https://python.hotexamples.com/site/file?hash=0xc5901342b2c339b10661d6508\
# 4166c386ffc205460660dd91e9b1ce00555c106&fullName=openmmtools/data/prepare_pdb\
# .py&project=choderalab/openmmtools
################################################################################
def write_file(filename, contents):
outfile = open(filename, 'w')
outfile.write(contents)
outfile.close()
################################################################################
# SET UP SYSTEM
################################################################################
def autofix(pdb, chain_ids_to_keep, risky=False):
os.system("cp %s %s.raw" % (pdb, pdb))
fixer = PDBFixer(filename=pdb)
fixer_1 = PDBFixer(filename=pdb)
fixer_2 = PDBFixer(filename=pdb)
# Build a list of chains to remove.
print('Removing all chains but %s' % chain_ids_to_keep)
# all_chains = list(fixer.topology.chains())
chain_id_list = [c.id for c in fixer.topology.chains()]
chain_ids_to_remove = set(chain_id_list) - set(chain_ids_to_keep)
fixer.removeChains(chainIds=chain_ids_to_remove)
fixer_1.removeChains(chainIds=chain_ids_to_remove)
fixer_2.removeChains(chainIds=chain_ids_to_remove)
if risky:
# Replace nonstandard residues.
print('Replacing nonstandard residues...')
print("It is risky to replace ncAA using pdbfixer!")
# logging.WARNING("It is risky to replace ncAA using pdbfixer!")
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
# Add missing atoms.
print('Adding backbone missing atoms only!')
fixer.findMissingResidues()
# modeller = app.Modeller(self.topology, self.positions)
# modeller.delete(toDelete)
fixer.findMissingAtoms()
print(f"MISSING ATOMS 000: {fixer.missingAtoms}")
missing_atoms = fixer.missingAtoms
# print(missing_atoms)
need_atoms = {}
for residue, atoms in missing_atoms.items():
for atom in atoms:
if atom.name in ["C", "N", "CA", "O"]:
# keep mainchain residues only
need_atoms[residue] = [atom]
fixer_1.missingAtoms = need_atoms
print(f"MISSING ATOMS 111: {fixer_1.missingAtoms}")
print(f"MISSING terminals: =======\n {fixer.missingTerminals} ======\n")
fixer_1.missingTerminals = fixer.missingTerminals
fixer_1.missingResidues = need_atoms.keys()
fixer_1.addMissingAtoms()
# Remove heterogens.
print('Removing heterogens...')
fixer_1.removeHeterogens(keepWater=False)
# Write PDB file.
output_filename = pdb.replace(".pdb", "_fixed.pdb")
print('Writing PDB file to "%s"...' % output_filename)
    # write the structure that received the backbone fixes and heterogen removal (fixer_1)
    app.PDBFile.writeFile(fixer_1.topology, fixer_1.positions, open(output_filename, 'w'))
return output_filename
if __name__ == '__main__':
pdb = '../test_bak/1NWW.pdb' # PDB ID to retrieve
chain_ids_to_keep = ['A'] # chains to keep
autofix(pdb, chain_ids_to_keep)
```
#### File: DDGScan/utils/grape_phaseI.py
```python
import distutils.dir_util
import glob
import json
import logging
import os
import time
import pandas as pd
from joblib import Parallel, delayed
import utils.foldx as foldx
import utils.io as io
import utils.rosetta as rosetta
from utils import abacus
# from utils import autofix
from utils import judge
from utils.common import *
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class GRAPE:
def __init__(self):
self.repaired_pdbfile: str
self.relaxed_prot: str
self.running_time = {
"foldx_repair": 0.0,
"foldx_scan": 0.0,
"rosetta_relax": 0.0,
"rosetta_scan": 0.0,
"abacus_prepare": 0.0,
"abacus_scan": 0.0,
"abacus2": 0.0,
"MD simulations": 0.0,
}
self.abacus2_results = {}
# self.repaired_pdbfile: str
pass
def run_foldx(self, pdb, threads, chain, numOfRuns):
print("[INFO]: FoldX started at %s" % (time.ctime()))
prot_foldx = foldx.FoldX(pdb, "", threads)
repair_start = time.time()
self.repaired_pdbfile = prot_foldx.repairPDB()
repair_end = time.time()
repair_time = repair_end - repair_start
self.running_time["foldx_repair"] = repair_time
print("[INFO]: FoldX Repair took %f seconds." % (repair_time))
prot = io.Protein(self.repaired_pdbfile, chain)
seq, resNumList = io.Protein.pdb2seq(prot)
distutils.dir_util.mkpath(FOLDX_JOBS_DIR)
all_results = []
job_list = []
for i, res in enumerate(seq):
resNum = resNumList[i]
wild = res
for j, aa in enumerate("QWERTYIPASDFGHKLCVNM"):
if aa != wild:
jobID = FOLDX_JOBS_DIR + "_".join([wild, str(resNum), aa])
job_list.append(
[
self.repaired_pdbfile,
wild,
chain,
aa,
resNum,
jobID,
numOfRuns,
]
)
# print("[INFO]: FoldX started at %s" %(time.ctime()))
scan_start = time.time()
Parallel(n_jobs=threads)(delayed(prot_foldx.runOneJob)(var) for var in job_list)
scan_end = time.time()
scan_time = scan_end - scan_start
self.running_time["foldx_scan"] = scan_time
print("[INFO]: FoldX Scan took %f seconds." % (scan_time))
return all_results
def run_rosetta(self, pdb, threads, chain, relax_num, exe, rosettadb):
print("Rosetta started at %s" % (time.ctime()))
# relax_num = 200
prot_rosetta = rosetta.Rosetta(pdb, relax_num, threads, exe, rosettadb)
relax_start = time.time()
relaxed_prot = prot_rosetta.relax()
relax_end = time.time()
relax_time = relax_end - relax_start
self.running_time["rosetta_relax"] = relax_time
print("[INFO]: Rosetta Relax took %f seconds." % (relax_time))
prot = io.Protein(pdb, chain)
seq, resNumList = io.Protein.pdb2seq(prot)
distutils.dir_util.mkpath(ROSETTA_JOBS_DIR)
# all_results = []
job_list = []
for i, res in enumerate(seq):
resNum = resNumList[i]
wild = res
for j, aa in enumerate("QWERTYIPASDFGHKLCVNM"):
if aa != wild:
jobID = ROSETTA_JOBS_DIR + "_".join([wild, str(resNum), aa])
job_list.append([wild, aa, str(i + 1), jobID])
scan_start = time.time()
Parallel(n_jobs=threads)(
delayed(prot_rosetta.runOneJob)(var) for var in job_list
)
scan_end = time.time()
scan_time = scan_end - scan_start
self.running_time["rosetta_scan"] = scan_time
print("[INFO]: Rosetta cartesian_ddg Scan took %f seconds." % (scan_time))
return prot_rosetta
def run_abacus2(self, pdb, threads, chain):
print("[INFO]: ABACUS2 started at %s" % (time.ctime()))
distutils.dir_util.mkpath(ABACUS2_JOBS_DIR)
distutils.dir_util.mkpath(ABACUS2_RESULTS_DIR)
prot = io.Protein(pdb, chain)
seq, resNumList = io.Protein.pdb2seq(prot)
# all_results = {}
job_list = []
for i, res in enumerate(seq):
resNum = resNumList[i]
wild = res
for j, aa in enumerate("QWERTYIPASDFGHKLCVNM"):
if aa != wild:
# mutationName = "_".join([wild, str(resNum), aa])
# all_results[mutationName] = 0
job_list.append(
[
pdb,
wild,
chain,
aa,
resNum
]
)
# print("[INFO]: FoldX started at %s" %(time.ctime()))
scan_start = time.time()
abacus2_results = Parallel(n_jobs=threads)(delayed(abacus.runOneJob)(var) for var in job_list)
# mutations, scores = zip(*result)
scan_end = time.time()
scan_time = scan_end - scan_start
self.running_time["abacus2"] = scan_time
print("[INFO]: ABACUS2 Scan took %f seconds." % (scan_time))
# print(self.abacus2_results)
# ABACUS2_RESULTS_DIR + ABACUS2_SCORE_FILE
with open(ABACUS2_RESULTS_DIR + ABACUS2_SCORE_FILE, "w+") as complete:
complete.write(
"#Score file formatted by GRAPE from ABACUS2.\n#mutation\tscore\tstd\n"
)
for pair in abacus2_results:
complete.write("\t".join([pair[0], str(round(pair[1], 4)), "0"]) + "\n")
complete.close()
return self.abacus2_results
def Analysis_foldx(self, pdb, chain, foldx1):
self.repaired_pdbfile = pdb.replace(".pdb", "_Repair.pdb")
distutils.dir_util.mkpath(FOLDX_RESULTS_DIR)
prot = io.Protein(pdb, chain)
seq, resNumList = io.Protein.pdb2seq(prot)
all_results = []
for i, res in enumerate(seq):
resNum = resNumList[i]
wild = res
for j, aa in enumerate("QWERTYIPASDFGHKLCVNM"):
# jobID = "foldx_jobs/" + str(i) + "_" + str(j) + "/"
if aa != wild:
jobID = FOLDX_JOBS_DIR + "_".join([wild, str(resNum), aa])
all_results.append(
foldx1.calScore(wild, resNum, aa, self.repaired_pdbfile, jobID)
)
with open(FOLDX_RESULTS_DIR + FOLDX_SCORE_FILE, "w+") as foldxout:
foldxout.write(
"#Score file formatted by GRAPE from FoldX.\n#mutation\tscore\tstd\n"
)
for line in all_results:
foldxout.write("\t".join([line[0], str(line[1]), str(line[2])]) + "\n")
foldxout.close()
return all_results
def Analysis_rosetta(self, pdb, chain, prot_rosetta):
distutils.dir_util.mkpath(ROSETTA_RESULTS_DIR)
prot = io.Protein(pdb, chain)
seq, resNumList = io.Protein.pdb2seq(prot)
all_results = []
for i, res in enumerate(seq):
resNum = resNumList[i]
wild = res
for j, aa in enumerate("QWERTYIPASDFGHKLCVNM"):
if aa != wild:
# jobID = "foldx_jobs/" + str(i) + "_" + str(j) + "/"
# "_".join([wild, str(resNum), mutation])
rosettaddgfile = (
ROSETTA_JOBS_DIR
+ "_".join([wild, str(resNum), aa])
+ "/mtfile.ddg"
)
all_results.append(
["_".join([wild, str(resNum), aa])]
+ prot_rosetta.read_rosetta_ddgout(rosettaddgfile)
)
with open(ROSETTA_RESULTS_DIR + ROSETTA_SCORE_FILE, "w+") as rosettaout:
rosettaout.write(
"#Score file formatted by GRAPE from Rosetta.\n#mutation\tscore\tstd\n"
)
for line in all_results:
rosettaout.write(
"\t".join([line[0], str(line[1]), str(line[2])]) + "\n"
)
rosettaout.close()
return all_results
def analysisGrapeScore(self, scoreFile, cutoff, result_dir):
result_dict = {"mutation": [], "energy": [], "SD": [], "position": []}
with open(scoreFile, "r") as scorefile:
for line in scorefile:
if line[0] != "#":
lst = line.strip().split("\t")
result_dict["mutation"].append(lst[0].replace("_", ""))
result_dict["energy"].append(float(lst[1]))
result_dict["SD"].append(float(lst[2]))
result_dict["position"].append(int(lst[0].split("_")[1]))
scorefile.close()
# print(result_dict)
CompleteList_df = pd.DataFrame(result_dict)
CompleteList_SortedByEnergy_df = CompleteList_df.sort_values(
"energy"
).reset_index(drop=True)
        def BestPerPosition(df):
position_list = []
length = df.shape[0]
for i in range(length):
if df["position"][i] in position_list:
df = df.drop(index=i)
else:
position_list.append(df["position"][i])
return df.reset_index(drop=True)
def BelowCutOff(df, cutoff):
# position_list = []
length = df.shape[0]
for i in range(length):
if float(df["energy"][i]) > float(cutoff):
df = df.drop(index=i)
else:
continue
return df.reset_index(drop=True)
        BestPerPosition_SortedByEnergy_df = BestPerPosition(
            CompleteList_SortedByEnergy_df
        )
        BestPerPosition_df = BestPerPosition(CompleteList_SortedByEnergy_df)
BelowCutOff_df = BelowCutOff(CompleteList_df, cutoff)
BelowCutOff_SortedByEnergy_df = BelowCutOff(
CompleteList_SortedByEnergy_df, cutoff
)
BestPerPositionBelowCutOff_SortedByEnergy_df = BelowCutOff(
BestPerPosition_SortedByEnergy_df, cutoff
)
BestPerPositionBelowCutOff_df = BelowCutOff(BestPerPosition_df, cutoff)
def out_tab_file(df, name, result_dir):
filename = result_dir + "/MutationsEnergies_" + name[:-3] + ".tab"
with open(filename, "w+") as of:
of.write(
df.to_csv(
columns=["mutation", "energy", "SD"], sep="\t", index=False
)
)
of.close()
out_tab_file(CompleteList_df, "CompleteList_df", result_dir)
out_tab_file(
CompleteList_SortedByEnergy_df, "CompleteList_SortedByEnergy_df", result_dir
)
out_tab_file(
BestPerPosition_SortedByEnergy_df,
"BestPerPosition_SortedByEnergy_df",
result_dir,
)
out_tab_file(BestPerPosition_df, "BestPerPosition_df", result_dir)
out_tab_file(BelowCutOff_df, "BelowCutOff_df", result_dir)
out_tab_file(
BelowCutOff_SortedByEnergy_df, "BelowCutOff_SortedByEnergy_df", result_dir
)
out_tab_file(
BestPerPositionBelowCutOff_SortedByEnergy_df,
"BestPerPositionBelowCutOff_SortedByEnergy_df",
result_dir,
)
out_tab_file(
BestPerPositionBelowCutOff_df, "BestPerPositionBelowCutOff_df", result_dir
)
def readfasta(fastafile):
seq = ""
with open(fastafile) as fasta:
for line in fasta:
if line.startswith(">"):
continue
else:
seq += line.strip()
fasta.close()
def checkseq(seq):
for aa in seq:
if aa in "QWERTYIPASDFGHKLCVNM":
continue
else:
logging.error("Non-canonical amino acids found in sequence!")
exit()
checkseq(seq)
return seq
def selectpdb4md(pdb, softlist, MD):
distutils.dir_util.mkpath("selectpdb/")
selected_dict = {"mutation": [], "score": [], "sd": [], "soft": []}
for soft in softlist:
with open("%s_results/MutationsEnergies_BelowCutOff.tab" % (soft)) as scorefile:
for line in scorefile:
linelist = line.strip().split()
if linelist[0] != "mutation":
selected_dict["mutation"].append(linelist[0])
selected_dict["score"].append(linelist[1])
selected_dict["sd"].append(linelist[2])
selected_dict["soft"].append(soft)
scorefile.close()
selected_df = pd.DataFrame(selected_dict)
selected_df.to_csv("Selected_Mutation.csv")
if MD:
for mutation in set(selected_dict["mutation"]):
mutation = "_".join([mutation[0], mutation[1:-1], mutation[-1]])
mut_pdb = pdb.replace(".pdb", "_Repair_1_0.pdb")
# WORKING_DIR = os.getcwd()
# print(WORKING_DIR)
# print("%s/selectpdb"%WORKING_DIR)
os.system(
f"cp {FOLDX_JOBS_DIR}/%s/%s selectpdb/%s.pdb" % (mutation, mut_pdb, mutation)
)
# os.chdir("%s/selectpdb"%WORKING_DIR)
return selected_dict
else:
logging.info("Switching to FoldX sampled structures!")
for mutation in set(selected_dict["mutation"]):
mutation = "_".join([mutation[0], mutation[1:-1], mutation[-1]])
mut_pdb = os.path.join(FOLDX_JOBS_DIR, mutation, pdb.replace(".pdb", "_Repair_1_*.pdb"))
ref_mut_pdb = pdb.replace(".pdb", "_Repair_1_0.pdb")
os.system(
f"cp {FOLDX_JOBS_DIR}/%s/%s selectpdb/%s.pdb" % (mutation, ref_mut_pdb, mutation)
)
for index, foldx_pdb_file_name in enumerate(glob.glob(mut_pdb)):
logging.info(f"Copyying file {foldx_pdb_file_name}!")
os.system(f"cp {foldx_pdb_file_name} selectpdb/{mutation}_sample_{index}.pdb")
def runMD(platform, selected_dict, md_threads=None):
from utils import mdrelax
os.chdir("selectpdb")
def one_md(mutation):
# repeat 5 100ps mds
mutation = "_".join([mutation[0], mutation[1:-1], mutation[-1]])
mutant = mutation + ".pdb"
for i in range(5):
mdrelax.main(mutant, mutation + f"_sample_{i}.pdb", platform)
os.system(f"rm {mutation}__tip3p.dcd")
if platform == "CUDA":
for mutation in set(selected_dict["mutation"]):
one_md(mutation)
if platform == "CPU":
Parallel(n_jobs=md_threads)(delayed(one_md)(mutation) for mutation in set(selected_dict["mutation"]))
os.system("rm *dcd")
os.chdir("../")
def main1(args):
pdb = args.pdb
chain = args.chain
threads = int(args.threads)
numOfRuns = str(args.numofruns)
relax_num = args.relax_number
foldx_cutoff = -float(args.foldx_cutoff)
rosetta_cutoff = -float(args.rosetta_cutoff)
abacus_cutoff = -float(args.abacus_cutoff)
abacus2_cutoff = -float(args.abacus2_cutoff)
softlist = args.engine
preset = args.preset
md = args.molecular_dynamics
platform = args.platform
fillloop = args.fill_break_in_pdb
seqfile = args.sequence
logging.info("Started at %s" % (time.ctime()))
def checkpdb(pdb, chain, seqfile=None):
"""
only breaks in middle of the chain will be fixed, C- and N- terminal missing
will be ignored!
"""
if not bool(seqfile):
logging.warning("No sequence provided!")
if fillloop:
# if no missing loop found, don't do anything
if judge.main(pdb, chain, None):
from utils import modeller_loop
_seq = modeller_loop.main(pdb, chain)
# exit()
else:
# print("No sequence provided!")
seq = readfasta(seqfile)
if judge.main(pdb, chain, seq): # break found
if fillloop:
from utils import modeller_loop
_seq = modeller_loop.main(pdb, chain, seq)
logging.warning(f"The patched sequence is {_seq}, we modelling the missing part according it!")
# exit()
else:
# print("PDB check Failed!")
logging.warning("Gaps found in your pdb file. PDB check failed. However, the job will continue.")
# exit()
else:
logging.warning("PDB check passed!")
return pdb
if args.mode == "test":
checkpdb(pdb, chain, seqfile)
exit()
exe_dict = {"foldx": "", "relax": "", "cartddg": "", "pmut": "", "abacus": "", "abacus2": ""}
foldx_exe = os.popen("which foldx").read().replace("\n", "")
exe_dict["foldx"] = foldx_exe
pmut_scan_parallel_exe = (
os.popen("which pmut_scan_parallel.mpi.linuxgccrelease")
.read()
.replace("\n", "")
)
# rosettadb = "/".join(pmut_scan_parallel_exe.split("/")[:-3]) + "/database/"
exe_dict["pmut"] = pmut_scan_parallel_exe
for release in ["", ".static", ".mpi", ".default"]:
cartesian_ddg_exe = (
os.popen("which cartesian_ddg%s.linuxgccrelease" % (release))
.read()
.replace("\n", "")
)
if cartesian_ddg_exe != "":
exe_dict["cartddg"] = cartesian_ddg_exe
break
relax_exe = os.popen("which relax.mpi.linuxgccrelease").read().replace("\n", "")
rosettadb = os.popen("echo $ROSETTADB").read().replace("\n", "")
if not rosettadb:
rosettadb = "/".join(relax_exe.split("/")[:-4]) + "/database/"
exe_dict["relax"] = relax_exe
abacus_prep = os.popen("which ABACUS_prepare").read().replace("\n", "")
exe_dict["abacus"] = abacus_prep
singleMutation = os.popen("which singleMutation").read().replace("\n", "")
exe_dict["abacus2"] = singleMutation
for soft in softlist:
if soft == "rosetta":
if exe_dict["relax"] == "":
logging.error("Cannot find Rosetta: relax.mpi.linuxgccrelease!")
exit()
if preset == "slow":
if exe_dict["cartddg"] == "":
                    logging.error(
                        "Cannot find any Rosetta cartesian_ddg.linuxgccrelease build (mpi, default, or static)!")
exit()
if preset == "fast":
if exe_dict["pmut"] == "":
logging.error("Cannot find Rosetta: pmut_scan_parallel.mpi.linuxgccrelease!")
exit()
else:
if exe_dict[soft] == "":
logging.error("Cannot find %s!" % (soft))
exit()
mode = args.mode
grape = GRAPE()
foldx1 = foldx.FoldX(pdb, foldx_exe, threads)
rosetta1 = rosetta.Rosetta(pdb, relax_num, threads, cartesian_ddg_exe, rosettadb)
if mode == "rerun":
os.system("rm -rf *_jobs")
os.system("rm -rf *_results")
os.system("rm -rf *_relax")
os.system("rm -rf selectpdb")
mode = "run"
if mode == "run":
pdb = checkpdb(pdb, chain, seqfile)
# FoldX
if "foldx" in softlist:
grape.run_foldx(pdb, threads, chain, numOfRuns)
grape.Analysis_foldx(pdb, chain, foldx1)
grape.analysisGrapeScore(
FOLDX_RESULTS_DIR + FOLDX_SCORE_FILE, foldx_cutoff, FOLDX_RESULTS_DIR
)
if preset == "slow":
if "rosetta" in softlist:
prot_rosetta = grape.run_rosetta(pdb, threads, chain, relax_num, cartesian_ddg_exe, rosettadb)
grape.Analysis_rosetta(pdb, chain, prot_rosetta)
grape.analysisGrapeScore(
ROSETTA_RESULTS_DIR + ROSETTA_SCORE_FILE,
rosetta_cutoff,
ROSETTA_RESULTS_DIR,
)
if preset == "fast":
if "rosetta" in softlist:
relaxed_pdb = rosetta1.relax()
distutils.dir_util.mkpath(ROSETTA_JOBS_DIR)
os.chdir(ROSETTA_JOBS_DIR)
os.system("cp ../%s/%s ./" % (ROSETTA_RELAX_DIR, relaxed_pdb))
pmut_time = rosetta1.pmut_scan(relaxed_pdb)
grape.running_time["rosetta_scan"] = pmut_time
logging.warning("Rosetta pmut_scan_parallel took %f seconds." % (pmut_time))
os.chdir("..")
distutils.dir_util.mkpath(ROSETTA_RESULTS_DIR)
os.chdir(ROSETTA_RESULTS_DIR)
rosetta1.pmut_scan_analysis(f"../{ROSETTA_JOBS_DIR}pmut.out")
os.chdir("..")
grape.analysisGrapeScore(
ROSETTA_RESULTS_DIR + ROSETTA_SCORE_FILE,
rosetta_cutoff,
ROSETTA_RESULTS_DIR,
)
# prot_rosetta = grape.run_rosetta(pdb, threads, chain, relax_num)
# grape.Analysis_rosetta(pdb, chain, prot_rosetta)
# grape.analysisGrapeScore('rosetta_results/All_rosetta.score', rosetta_cutoff, "rosetta_results/")
if "abacus" in softlist:
abacus_prepare_time, abacus_scan_time = abacus.run_abacus(pdb)
grape.running_time["abacus_prepare"] = abacus_prepare_time
grape.running_time["abacus_scan"] = abacus_scan_time
abacus.parse_abacus_out()
grape.analysisGrapeScore(
ABACUS_RESULTS_DIR + ABACUS_SCORE_FILE, abacus_cutoff, ABACUS_RESULTS_DIR
)
if "abacus2" in softlist:
grape.run_abacus2(pdb, threads, chain)
grape.analysisGrapeScore(
ABACUS2_RESULTS_DIR + ABACUS2_SCORE_FILE, abacus2_cutoff, ABACUS2_RESULTS_DIR
)
if mode == "analysis":
# FoldX
if "foldx" in softlist:
# pdb = pdb.replace(".pdb", "_Repair.pdb")
grape.Analysis_foldx(pdb, chain, foldx1)
grape.analysisGrapeScore(
FOLDX_RESULTS_DIR + FOLDX_SCORE_FILE, foldx_cutoff, FOLDX_RESULTS_DIR
)
if preset == "slow":
if "rosetta" in softlist:
prot_rosetta = rosetta.Rosetta(pdb, relax_num, threads, cartesian_ddg_exe, rosettadb)
grape.Analysis_rosetta(pdb, chain, prot_rosetta)
grape.analysisGrapeScore(
ROSETTA_RESULTS_DIR + ROSETTA_SCORE_FILE,
rosetta_cutoff,
ROSETTA_RESULTS_DIR,
)
if preset == "fast":
if "rosetta" in softlist:
distutils.dir_util.mkpath(ROSETTA_JOBS_DIR)
os.chdir(ROSETTA_JOBS_DIR)
rosetta1.pmut_scan_analysis(f"../{ROSETTA_JOBS_DIR}pmut.out")
os.chdir("..")
grape.analysisGrapeScore(
ROSETTA_RESULTS_DIR + ROSETTA_SCORE_FILE,
rosetta_cutoff,
ROSETTA_RESULTS_DIR,
)
if "abacus" in softlist:
# abacus.run_abacus(pdb)
abacus.parse_abacus_out()
grape.analysisGrapeScore(
ABACUS_RESULTS_DIR + ABACUS_SCORE_FILE, abacus_cutoff, ABACUS_RESULTS_DIR
)
if "abacus2" in softlist:
grape.analysisGrapeScore(
ABACUS2_RESULTS_DIR + ABACUS2_SCORE_FILE, abacus2_cutoff, ABACUS2_RESULTS_DIR
)
logging.info(f"Finished calculation in {mode} mode of grape-fast in {time.ctime()}.\n")
#
if md:
selected_dict = selectpdb4md(pdb, softlist, md)
md_start = time.time()
if platform == "CUDA":
runMD(platform, selected_dict)
if platform == 'CPU':
md_job_num = int(threads) // 2
runMD(platform, selected_dict, md_job_num)
md_end = time.time()
grape.running_time["MD simulations"] = md_end - md_start
logging.info("All MDs took %f seconds." % (md_end - md_start))
else:
selectpdb4md(pdb, softlist, md)
logging.warning("No MDs!")
json_running_time = json.dumps(grape.running_time, indent=4)
with open("timing.json", "w+") as timing:
timing.write(json_running_time)
timing.close()
if __name__ == "__main__":
args = io.Parser().get_args()
main1(args)
logging.info("Ended at %s" % (time.ctime()))
```
#### File: DDGScan/utils/mdrelax.py
```python
from __future__ import print_function
import os
os.environ["OPENMM_CPU_THREADS"] = "2"
from openmm import app
import openmm as mm
from openmm import unit
from pdbfixer import PDBFixer
from openmm.app import PDBFile
import mdtraj as mt
from sys import stdout
def fix(pdbfile):
fixed = pdbfile.replace(".pdb", "_fixed.pdb")
fixer = PDBFixer(pdbfile)
numChains = len(list(fixer.topology.chains()))
fixer.removeChains(range(1, numChains))
fixer.findMissingResidues()
# only add missing residues in the middle of the chain, do not add terminal ones
chains = list(fixer.topology.chains())
keys = fixer.missingResidues.keys()
missingResidues = dict()
for key in keys:
chain = chains[key[0]]
if not (key[1] == 0 or key[1] == len(list(chain.residues()))):
missingResidues[key] = fixer.missingResidues[key]
fixer.missingResidues = missingResidues
fixer.findMissingAtoms()
fixer.addMissingAtoms()
PDBFile.writeFile(
fixer.topology,
fixer.positions,
open(pdbfile.replace(".pdb", "_fixed.pdb"), "w"),
)
return fixed
def production(pdbfilename, platform="CUDA"):
# load in input PDB file and force field XML files
pdb = app.PDBFile(pdbfilename)
forcefield = app.ForceField("amber99sbildn.xml", "tip3p.xml")
# use app.Modeller to add hydrogens and solvent
modeller = app.Modeller(pdb.topology, pdb.positions)
modeller.addHydrogens(forcefield)
modeller.addSolvent(forcefield, model="tip3p", padding=1.0 * unit.nanometers)
topname = pdbfilename.replace("fixed", "modeller_tip3p")
app.PDBFile.writeFile(modeller.topology, modeller.positions, open(topname, "w"))
# prepare system and integrator
system = forcefield.createSystem(
modeller.topology,
nonbondedMethod=app.PME,
nonbondedCutoff=1.0 * unit.nanometers,
constraints=app.HBonds,
rigidWater=True,
ewaldErrorTolerance=0.0005,
)
integrator = mm.LangevinIntegrator(
300 * unit.kelvin, 1.0 / unit.picoseconds, 2.0 * unit.femtoseconds
)
integrator.setConstraintTolerance(0.00001)
# prepare simulation
if platform == "CUDA":
platform = mm.Platform.getPlatformByName(platform)
properties = {"CudaPrecision": "mixed"}
simulation = app.Simulation(
modeller.topology, system, integrator, platform, properties
)
else:
platform = mm.Platform.getPlatformByName("CPU")
simulation = app.Simulation(modeller.topology, system, integrator, platform)
simulation.context.setPositions(modeller.positions)
# minimize
print("Minimizing...")
simulation.minimizeEnergy()
# equilibrate for 100 steps
simulation.context.setVelocitiesToTemperature(300 * unit.kelvin)
print("Equilibrating...")
simulation.step(100)
# append reporters
dcdname = pdbfilename.replace("fixed.pdb", "_tip3p.dcd")
simulation.reporters.append(app.DCDReporter(dcdname, 1000))
simulation.reporters.append(
app.StateDataReporter(
stdout,
1000,
step=True,
potentialEnergy=True,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
totalSteps=50000,
separator="\t",
)
)
# run 100 ps of production simulation
print("Running Production...")
simulation.step(50000)
print("Done!")
return topname, dcdname
def dcd2pdb(dcd_file, topol_file, out_file, stride=100, noWater=True, superimpose=True):
top = mt.load(topol_file)
if noWater:
indices = top.topology.select("protein")
else:
indices = top.topology.select("all")
traj = mt.load_dcd(dcd_file, top=topol_file, stride=stride, atom_indices=indices)
if superimpose:
print("INFO: Superimposing to topology ......")
CA_indices = top.topology.select("protein and name CA")
traj.superpose(top, ref_atom_indices=CA_indices, atom_indices=CA_indices)
no_hydrogen_indices = traj.topology.select("protein and symbol != H")
no_hydrogen = traj.atom_slice(no_hydrogen_indices)
no_hydrogen.save_pdb(out_file)
return None
def main(pdbfile, out_file, platform):
fixed = fix(pdbfile)
    topname, dcdname = production(fixed, platform)
dcd2pdb(dcdname, topname, out_file)
if __name__ == "__main__":
import sys
pdbfile, out_file, platform = sys.argv[1:]
main(pdbfile, out_file, platform)
```
#### File: DDGScan/utils/rosetta.py
```python
import distutils.dir_util
import os
import time
import numpy as np
import utils.common as common
# from .common import *
class Rosetta:
def __init__(
self,
pdbName,
relax_num,
numThreads,
exe,
rosettadb,
):
self.exe = exe
self.pdbname = pdbName
self.relax_num = relax_num
self.threads = numThreads
# self.relaxedpdb = pdbName #for test
self.rosettadb = rosettadb
self.relaxedpdb: str # for test
# self.cutoff = cutOff
self.result = []
def relax(self):
distutils.dir_util.mkpath(common.ROSETTA_RELAX_DIR)
os.system("cp " + self.pdbname + " " + common.ROSETTA_RELAX_DIR)
os.chdir(common.ROSETTA_RELAX_DIR)
# try:
# os.mkdir("rosetta_relax")
# os.system("cp " + self.pdbname + " rosetta_relax/")
# os.chdir("rosetta_relax")
# except FileExistsError:
# os.system("cp " + self.pdbname + " rosetta_relax/")
# os.chdir("rosetta_relax")
# pass
with open("cart2.script", "w+") as cart2:
cart2.write("switch:cartesian\n")
cart2.write("repeat 2\n")
cart2.write("ramp_repack_min 0.02 0.01 1.0 50\n")
cart2.write("ramp_repack_min 0.250 0.01 0.5 50\n")
cart2.write("ramp_repack_min 0.550 0.01 0.0 100\n")
cart2.write("ramp_repack_min 1 0.00001 0.0 200\n")
cart2.write("accept_to_best\n")
cart2.write("endrepeat")
cart2.close()
relax_threads = min([int(self.threads), int(self.relax_num)])
relax_cmd = "".join(
[
"mpirun --allow-run-as-root -n "
+ str(relax_threads)
+ " relax.mpi.linuxgccrelease -s "
+ self.pdbname
+ " -use_input_sc",
" -constrain_relax_to_start_coords -ignore_unrecognized_res",
" -nstruct " + str(self.relax_num),
" -relax:coord_constrain_sidechains",
" -relax:cartesian -score:weights ref2015_cart ",
" -relax:min_type lbfgs_armijo_nonmonotone",
" -relax:script cart2.script 1>/dev/null && sort -nk2 score.sc |head -n 1|awk '{print$22}'",
]
)
print("==" * 20)
print(" Relaxing your Protein: ")
# os.system(relax_cmd)
relaxed_pdb_name = os.popen(relax_cmd).read()
print(" Finished relax! ")
print("==" * 20)
relaxed_pdb_name = os.popen(
"sort -nk2 score.sc |head -n 1|awk '{print$22}'"
).read()
self.relaxedpdb = relaxed_pdb_name.replace("\n", "") + ".pdb"
os.chdir("../")
return relaxed_pdb_name.replace("\n", "") + ".pdb"
def read_rosetta_ddgout(self, rosettaddgfilename):
ddg_dict = {}
ddg_array = []
with open(rosettaddgfilename) as rosettaddg:
for line in rosettaddg:
# print(line.split(":")[2])
if line.split(":")[2].strip() == "WT":
dg_ref = float(line.split(":")[3][1:10])
else:
ddg = float(line.split(":")[3][1:10]) - dg_ref
ddg_array.append(ddg)
rosettaddg.close()
return [
round(np.array(ddg_array).mean(), 4),
round(np.array(ddg_array).std(), 4),
]
def runOneJob(self, varlist: list):
wild, mutation, resNum, jobID = varlist
distutils.dir_util.mkpath(jobID)
os.chdir(jobID)
# try:
# os.mkdir(jobID)
# os.chdir(jobID)
# except FileExistsError:
# os.chdir(jobID)
# os.popen('cp ../../rosetta_relax/' + self.relaxedpdb + ' ./')
os.system("cp ../../" + common.ROSETTA_RELAX_DIR + self.relaxedpdb + " ./")
with open("mtfile", "w+") as mtfile:
mtfile.write("total 1\n")
mtfile.write("1\n")
mtfile.write(wild + " " + str(resNum) + " " + mutation + "\n")
mtfile.close()
argument_list = [
self.exe,
"-database",
self.rosettadb,
"-use_input_sc",
"-s",
self.relaxedpdb,
"-ddg:mut_file",
"mtfile",
"-ddg:iterations",
"3",
"-ddg::cartesian",
"-ddg::dump_pdbs",
"true",
"-ddg:bbnbrs",
"1",
"-score:weights",
"ref2015_cart",
"-relax:cartesian",
"-relax:min_type",
"lbfgs_armijo_nonmonotone",
"-flip_HNQ",
"-crystal_refine",
"-fa_max_dis",
"9.0",
"1>/dev/null",
]
cartddg_cmd = " ".join(argument_list)
starttime = time.time()
os.system(cartddg_cmd)
finishtime = time.time()
print(
"[DEBUG]: Rosetta mutation %s_%s_%s took %f seconds."
% (wild, resNum, mutation, finishtime - starttime)
)
# print(cartddg_cmd)
os.chdir("../../")
# return pid, '_'.join([wild, str(trueResNum), mutation])
def pmut_scan(self, relaxed_pdb):
if os.path.isfile("pmut.out"):
pass
else:
pmut_scan_exe = (
os.popen("which pmut_scan_parallel.mpi.linuxgccrelease")
.read()
.replace("\n", "")
)
rosettadb = "/".join(pmut_scan_exe.split("/")[:-3]) + "/database/"
arg_list = [
"mpirun",
"-np",
str(self.threads),
pmut_scan_exe,
"-database",
rosettadb,
"-s",
relaxed_pdb,
"-ex1",
"-ex2",
"-extrachi_cutoff 1",
"-use_input_sc",
"-ignore_unrecognized_res",
"-no_his_his_pairE",
"-multi_cool_annealer",
"10",
"-mute",
"basic",
"core" ">",
"pmut.out && ls pmut.out",
]
print("[INFO]: Running pmut_scan_parallel")
pmut_start = time.time()
print(" ")
os.system(" ".join(arg_list))
pmut_end = time.time()
pmut_time = pmut_end - pmut_start
return pmut_time
def pmut_scan_analysis(self, pmutoutfile):
with open(pmutoutfile) as pmut_out:
i = -1
start_line = 0
for line in pmut_out:
i += 1
line = line.replace("\x1b[0m", "")
if line.endswith(
"mutation mutation_PDB_numbering average_ddG average_total_energy\n"
):
start_line = 1
if "protocol took" in line:
start_line = 0
if start_line == 1:
mutinfo = line.replace("\n", "").split(")")[1].split()
if mutinfo[2] == "average_ddG":
with open(common.ROSETTA_SCORE_FILE, "w") as scorefile:
scorefile.write(
"#Score file formatted by GRAPE from Rosetta.\n#mutation\tscore\tstd\n"
)
scorefile.close()
else:
with open(common.ROSETTA_SCORE_FILE, "a+") as scorefile:
mut = mutinfo[0].split("-")[1]
scorefile.write(
"_".join([mut[0], mut[1:-1], mut[-1]])
+ "\t"
+ mutinfo[2]
+ "\t"
+ "0\n"
)
scorefile.close()
pmut_out.close()
class rosetta_binder:
def __init__(self):
pass
@staticmethod
def relax(pdbname, threads, relax_num):
distutils.dir_util.mkpath(common.ROSETTA_RELAX_DIR)
os.system("cp " + pdbname + " " + common.ROSETTA_RELAX_DIR)
os.chdir(common.ROSETTA_RELAX_DIR)
with open("cart2.script", "w+") as cart2:
cart2.write("switch:cartesian\n")
cart2.write("repeat 2\n")
cart2.write("ramp_repack_min 0.02 0.01 1.0 50\n")
cart2.write("ramp_repack_min 0.250 0.01 0.5 50\n")
cart2.write("ramp_repack_min 0.550 0.01 0.0 100\n")
cart2.write("ramp_repack_min 1 0.00001 0.0 200\n")
cart2.write("accept_to_best\n")
cart2.write("endrepeat")
cart2.close()
relax_cmd = "".join(
[
"mpirun -n "
+ threads
+ " relax.mpi.linuxgccrelease -s "
+ pdbname
+ " -use_input_sc",
" -constrain_relax_to_start_coords -ignore_unrecognized_res",
" -nstruct " + relax_num,
" -relax:coord_constrain_sidechains",
" -relax:cartesian -score:weights ref2015_cart ",
" -relax:min_type lbfgs_armijo_nonmonotone",
" -relax:script cart2.script 1>/dev/null && sort -nk2 score.sc |head -n 1|awk '{print$22}'",
]
)
print("==" * 20)
print(" Relaxing your Protein: ")
# os.system(relax_cmd)
relaxed_pdb_name = os.popen(relax_cmd).read()
print(" Finished relax! ")
print("==" * 20)
relaxed_pdb_name = os.popen(
"sort -nk2 score.sc |head -n 1|awk '{print$22}'"
).read()
relaxedpdb = relaxed_pdb_name.replace("\n", "") + ".pdb"
os.chdir("../")
return relaxedpdb
@staticmethod
def read_rosetta_ddgout(rosettaddgfilename, wild, mutation, resNum):
ddg_array = []
with open(rosettaddgfilename, 'r') as rosettaddg:
for line in rosettaddg:
# print(line.split(":")[2])
if line.split(":")[2].strip() == "WT":
dg_ref = float(line.split(":")[3][1:10])
else:
ddg = float(line.split(":")[3][1:10]) - dg_ref
ddg_array.append(ddg)
rosettaddg.close()
return [
"_".join([wild, str(resNum), mutation]),
str(round(np.array(ddg_array).mean(), 4)),
str(round(min(np.array(ddg_array)), 4)),
str(round(np.array(ddg_array).std(), 4))
]
@staticmethod
def run_one_job(varlist: list):
wild, mutation, resNum, jobID, relaxedpdb, exe, rosettadb = varlist
path_job_id = common.ROSETTA_JOBS_DIR + jobID
distutils.dir_util.mkpath(path_job_id)
os.chdir(path_job_id)
# try:
# os.mkdir(jobID)
# os.chdir(jobID)
# except FileExistsError:
# os.chdir(jobID)
# os.popen('cp ../../rosetta_relax/' + self.relaxedpdb + ' ./')
os.system("cp ../../" + common.ROSETTA_RELAX_DIR + relaxedpdb + " ./")
with open("mtfile", "w+") as mtfile:
mtfile.write("total 1\n")
mtfile.write("1\n")
mtfile.write(wild + " " + str(resNum) + " " + mutation + "\n")
mtfile.close()
argument_list = [
exe,
"-database",
rosettadb,
"-use_input_sc",
"-s",
relaxedpdb,
"-ddg:mut_file",
"mtfile",
"-ddg:iterations",
"3",
"-ddg::cartesian",
"-ddg::dump_pdbs",
"true",
"-ddg:bbnbrs",
"1",
"-score:weights",
"ref2015_cart",
"-relax:cartesian",
"-relax:min_type",
"lbfgs_armijo_nonmonotone",
"-flip_HNQ",
"-crystal_refine",
"-fa_max_dis",
"9.0",
"1>/dev/null",
]
cartddg_cmd = " ".join(argument_list)
starttime = time.time()
os.system(cartddg_cmd)
finishtime = time.time()
print(
"[DEBUG]: Rosetta mutation %s_%s_%s took %f seconds."
% (wild, resNum, mutation, finishtime - starttime)
)
result = rosetta_binder.read_rosetta_ddgout('mtfile.ddg', wild, mutation, resNum)
# print(cartddg_cmd)
os.chdir("../../")
return result
if __name__ == "__main__":
print("run")
# pdbname = '1PGA.pdb'
# chain = 'A'
# threads = 24
# relax_num = 200
# prot = Rosetta(pdbname, relax_num, 'numThreads')
# score, std = prot.read_rosetta_ddgout('rosetta_jobs/0_1_/mtfile.ddg')
# print(score, std)
# relax_pdb_name = prot.relax(pdbname, threads)
# print("Using: " + relax_pdb_name)
# os.mkdir('rosetta_jobs')
# prot.runOneJob(relax_pdb_name, "M", chain, "Q", 1, 'rosetta_jobs/0_1/')
``` |
{
"source": "JinyuanSun/DEPECT",
"score": 3
} |
#### File: DEPECT/backup/auto_blast_align_analysis.py
```python
import os
import numpy as np
import pandas as pd
# In[ ]:
#1. blastp against the therbase
db = ""
evalue = "1e-5"
cutoff = 30
seqname = ""
def _blast(seqname,db,evalue,cutoff):
outfile = seqname+"_out.txt"
cmd = "blastp -query "+seqname+" -db "+db+" -evalue "+evalue+" -outfmt 6 -out "+outfile
os.system(cmd)
l = os.popen("wc -l "+outfile)
ll = l.read()
print("blast finished, total "+ll+" thermostable sequences found")
    blastout = open(outfile)
hmhits = []
h = 0
for line in blastout:
w = line.replace("\n","").split("\t")
identity = float(w[2])
if identity > cutoff:
            hmhits.append(w[1])
h = h + 1
print("find "+h+" homoglous sequences")
subdb_name = seqname+".prodb"
subdb = open(subdb_name,"w+")
outline = ''
    for x in hmhits:  # extract seq from db for align
        outline += x + "\n"
        # hot_dic is expected to map sequence IDs to sequences of the thermophile database; it must be loaded elsewhere
        outline += hot_dic.get(x) + "\n"
print(outline,file = subdb)
os.system("cat "+seqname+" "+subdb_name+" > seq_hot.fasta")
print("File seq_hot.fasta written!"+"\n"+"Alignment starts!")
return subdb_name
# In[ ]:
# In[ ]:
def Align_analyze(seqname):
ali_filename = seqname+".ali"
os.system("/usr/local/bin/mafft"+" --localpair --maxiterate 16 --clustalout "+subdb_name+" > "+ali_filename)
ali_file = open(ali_filename)
next(ali_file)
seq_dic = {}
mark = ""
for line in ali_file:
if line.startswith(" "):
mark = mark + line[16:].replace("\n", "")
if line.startswith("\n"):
continue
else:
head = line[0:15]
#print(head)
seq = line[16:].replace("\n", "")
#print(seq)
try:
seq_dic[head] += seq
except KeyError:
seq_dic[head] = seq
return seq_dic, mark
# In[ ]:
# note: the hard-coded path below is a leftover from interactive use; call the alignment once and unpack both values
seq_dic, mark = Align_analyze("/Users/jsun/MHET/mhet_30.ali")
#print(Ali_analyze("mhet_30.ali")[1])
import numpy as np
import pandas as pd
import math
import matplotlib
target = "sp|A0A0K8P6T7|P"
def ali_array(seq_dic,mark,target):
l = len(mark)
lst = []
for k in seq_dic:
ls = list(seq_dic[k])
lst.append(ls)
df = pd.DataFrame(lst)
t = seq_dic[target]
target = []
mark_lst = []
cdict = {}
n = 0
s = 0
mapdic = {}
for x in t:
if x == "-":
s = s + 1
else:
target += x
mark_lst += mark[n]
c = mark[n]
mapdic[n] = s
n = n + 1
if c == ".":
cdict[n] = x+"_."
if c == ":":
cdict[n] = x+"_:"
if c == "*":
cdict[n] = x+"_*"
return df,target,mark_lst,cdict,mapdic
df, t, m, cdict, mapdic = ali_array(seq_dic, mark, target)
#print(ali_array(seq_dic,mark,"sp|A0A0K8P6T7|P")[3])
#print(t,"\n",m)
freq_cutoff = 0.9  # conservation frequency cutoff for proposing consensus mutations
n = 0
print("position","WT","MUT")
for i in range(len(df.columns)):
dic = df[i][0:19].value_counts().to_dict()
x = df[i][20]
if x == "-":
continue
else:
n = n + 1
for k in dic:
p = int(dic[k])/19
            if p > freq_cutoff:
if k == "-":
continue
if k == x:
continue
else:
print(n,x,k)
```
#### File: DEPECT/enzde/depect_cst.py
```python
import math
import numpy as np
import os
import struct
def readpdb(pdbfilename):
pdb_format = '6s5s1s4s1s3s1s1s4s1s3s8s8s8s6s6s10s2s3s'
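    # The fixed-width format above unpacks (roughly) the standard PDB ATOM columns:
    # record name, serial, atom name, altLoc, resName, chainID, resSeq, iCode,
    # x, y, z, occupancy, tempFactor, element, charge, with spacer fields in between.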
protein_pdbdict = {}
ligand_pdbdict = {}
pdbfile = open(pdbfilename)
for line in pdbfile:
#try:
#print(type(line))
try:
tmpline = struct.unpack(pdb_format, line.encode())
#print(tmpline)
if tmpline[0].decode().strip() == "ATOM":
#record_name = line[0:6].replace(" ","")
atom_num = tmpline[1].decode().strip()
atom = tmpline[3].decode().strip()
altLoc = tmpline[4].decode().strip()
res = tmpline[5].decode().strip()
chainid = tmpline[7].decode().strip()
resseq = tmpline[8].decode().strip()
icode = tmpline[10].decode().strip()
x = float(tmpline[11].decode().strip())
y = float(tmpline[12].decode().strip())
z = float(tmpline[13].decode().strip())
occ = tmpline[14].decode().strip()
tfactor = tmpline[15].decode().strip()
element = tmpline[17].decode().strip()
charge = tmpline[18].decode().strip()
try:
protein_pdbdict[chainid][resseq][atom] = np.array([x,y,z])
except KeyError:
try:
protein_pdbdict[chainid][resseq] = {}
protein_pdbdict[chainid][resseq]["res"] = res
protein_pdbdict[chainid][resseq][atom] = np.array([x,y,z])
except KeyError:
protein_pdbdict[chainid] = {resseq:{atom:np.array([x,y,z])}}
if tmpline[0].decode().strip() == "HETATM":
#record_name = line[0:6].replace(" ","")
atom_num = tmpline[1].decode().strip()
atom = tmpline[3].decode().strip()
altLoc = tmpline[4].decode().strip()
res = tmpline[5].decode().strip()
chainid = tmpline[7].decode().strip()
resseq = tmpline[8].decode().strip()
icode = tmpline[10].decode().strip()
x = tmpline[11].decode().strip()
y = tmpline[12].decode().strip()
z = tmpline[13].decode().strip()
occ = tmpline[14].decode().strip()
tfactor = tmpline[15].decode().strip()
element = tmpline[17].decode().strip()
charge = tmpline[18].decode().strip()
try:
ligand_pdbdict[chainid][atom_num][atom] = np.array([x,y,z])
except KeyError:
try:
ligand_pdbdict[chainid][atom_num] = {}
ligand_pdbdict[chainid][atom_num]["res"] = res
ligand_pdbdict[chainid][atom_num][atom] = np.array([x,y,z])
except KeyError:
ligand_pdbdict[chainid] = {resseq:{atom:np.array([x,y,z])}}
except struct.error:
pdb_format = '6s5s1s4s1s3s1s1s4s1s3s8s8s8s6s6s10s2s1s'
try:
tmpline = struct.unpack(pdb_format, line.encode())
if tmpline[0].decode().strip() == "ATOM":
#record_name = line[0:6].replace(" ","")
atom_num = tmpline[1].decode().strip()
atom = tmpline[3].decode().strip()
altLoc = tmpline[4].decode().strip()
res = tmpline[5].decode().strip()
chainid = tmpline[7].decode().strip()
resseq = tmpline[8].decode().strip()
icode = tmpline[10].decode().strip()
x = float(tmpline[11].decode().strip())
y = float(tmpline[12].decode().strip())
z = float(tmpline[13].decode().strip())
occ = tmpline[14].decode().strip()
tfactor = tmpline[15].decode().strip()
element = tmpline[17].decode().strip()
charge = tmpline[18].decode().strip()
try:
protein_pdbdict[chainid][resseq][atom] = np.array([x,y,z])
except KeyError:
try:
protein_pdbdict[chainid][resseq] = {}
protein_pdbdict[chainid][resseq]["res"] = res
protein_pdbdict[chainid][resseq][atom] = np.array([x,y,z])
except KeyError:
protein_pdbdict[chainid] = {resseq:{atom:np.array([x,y,z])}}
except struct.error:
#print(line)
continue
return {"protein":protein_pdbdict, "ligand":ligand_pdbdict}
def readpdbqt(pdbqtfilename):
pdbqtfile = open(pdbqtfilename)
pdbqt_dic = {}
for line in pdbqtfile:
try:
tmp = line.split()
type = tmp[0]
atom_num = tmp[1]
atom = tmp[2]
lig = tmp[3]
coordx = float(tmp[5])
coordy = float(tmp[6])
coordz = float(tmp[7])
if type == "HETATM":
pdbqt_dic[atom_num] = np.array([coordx,coordy,coordz])
except IndexError:
continue
except ValueError:
continue
return pdbqt_dic
def readcstfile(cstfilename):
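    # Expected .cst line format, inferred from the parsing below (illustrative, not from original docs):
    #   dist_1:protein|A|100|OG,ligand|12
    #   angle_1:protein|A|100|OG,ligand|12,ligand|13
    # i.e. "<name>:<atom>,<atom>[,...]" where each atom is "protein|chain|resSeq|atomName" or
    # "ligand|atom_serial"; 2 atoms give a distance, 3 an angle, 4 a dihedral (see calgeovalue()).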
cstfile = open(cstfilename)
cstparadict = {}
#liganddict = readpdbqt(ligandfilename)
#proteindict = readpdb(proteinfilename)
for line in cstfile:
atomlst = []
tmp = line.replace("\n","").split(":")
head = tmp[0]
atoms = tmp[1].split(",")
for atom in atoms:
atmp = atom.split("|")
if atmp[0] == "protein":
#coordlst.append(proteindict["protein"][atmp[1]][atmp[2]][atmp[3]])
atomlst.append(["protein",atmp[1],atmp[2],atmp[3]])
if atmp[0] == "ligand":
atomlst.append(["ligand",atmp[1]])
cstparadict[head] = atomlst
return cstparadict
def cal_dihedral(p):
"""Praxeolitic formula
1 sqrt, 1 cross product"""
p0 = p[0]
p1 = p[1]
p2 = p[2]
p3 = p[3]
b0 = -1.0*(p1 - p0)
b1 = p2 - p1
b2 = p3 - p2
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
b1 /= np.linalg.norm(b1)
# vector rejections
# v = projection of b0 onto plane perpendicular to b1
# = b0 minus component that aligns with b1
# w = projection of b2 onto plane perpendicular to b1
# = b2 minus component that aligns with b1
v = b0 - np.dot(b0, b1)*b1
w = b2 - np.dot(b2, b1)*b1
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.degrees(np.arctan2(y, x))
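# Quick sanity check (illustrative only): a planar zig-zag of four points gives a dihedral of ~180 degrees, e.g.
# cal_dihedral([np.array([0., 0., 0.]), np.array([1., 0., 0.]),
#               np.array([1., 1., 0.]), np.array([2., 1., 0.])])  # -> 180.0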
def getcoords(cstparadict,proteindict,liganddict):
cstcoorddict = {}
for head in cstparadict:
atomlst = cstparadict[head]
coordlst = []
for atom in atomlst:
if atom[0] == "protein":
coordlst.append(proteindict["protein"][atom[1]][atom[2]][atom[3]])
if atom[0] == "ligand":
coordlst.append(liganddict[atom[1]])
cstcoorddict[head] = coordlst
return cstcoorddict
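def getcos(v1, v2):
    # minimal helper assumed by calgeovalue() below (not present in the original file):
    # cosine of the angle between two vectors
    return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))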
def calgeovalue(cstcoorddict,proteindict,liganddict):
geovaldict = {}
for key in cstcoorddict:
coordlst = cstcoorddict[key]
#print(coordlst)
if len(coordlst) == 2:
geovaldict[key] = str(round(np.linalg.norm(coordlst[0]-coordlst[1]),3))
if len(coordlst) == 3:
geovaldict[key] = str(round(math.acos(getcos(coordlst[0]-coordlst[1], coordlst[1]-coordlst[2])) * 180 / math.pi,3))
if len(coordlst) == 4:
geovaldict[key] = str(round(cal_dihedral(coordlst),3))
return geovaldict
def run_geo_cst(depectfilename,cstfilename,ligand):
depectfile = open(depectfilename)
cstparadict = readcstfile(cstfilename)
cstfile = open(cstfilename)
#liganddict = readpdbqt(ligandfilename)
head_line = ''
for line in cstfile:
head_line = head_line+","+line.split(":")[0]
for line in depectfile:
if line.startswith("#"):
with open("depect_enzde_geo.sc","w+") as newoutfile:
newoutfile.write("#receptor,vina_affinity"+head_line+"\n")
newoutfile.close()
else:
tmp = line.split(",")
receptor = proteinfilename = tmp[0]
ligandfilename = ligand.replace(".pdbqt","@_"+receptor+"_out.pdbqt")
#print(ligandfilename)
#ligandfile = ligand.replace(".pdbqt",receptor+"_out.pdbqt")
proteindict = readpdb(proteinfilename)
liganddict = readpdbqt(ligandfilename)
cstcoorddict = getcoords(cstparadict,proteindict,liganddict)
#print(cstcoorddict)
geovaldict = calgeovalue(cstcoorddict,proteindict,liganddict)
#print(geovaldict)
with open("depect_enzde_geo.sc","a+") as newoutfile:
newoutfile.write(line.replace("\n",","+",".join(list(geovaldict.values()))+"\n"))
newoutfile.close()
#cstparadict = readcstfile(cstfilename,ligandfilename,proteinfilename)
#geovaldict = calgeovalue(cstparadict)
if __name__ == '__main__':
import sys
try:
depectfilename = sys.argv[1]
cstfilename = sys.argv[2]
ligand = sys.argv[3]
run_geo_cst(depectfilename,cstfilename,ligand)
except IndexError:
print("Usage: python3 depect_cst.py depect_enzde.sc zlj.cst RPBE_ref.pdbqt")
#try:
#depectfilename = sys.argv[1]
#cstfilename = sys.argv[2]
#ligand = sys.argv[3]
'''
try:
open(depectfilename)
except FileNotFoundError:
print("Trying to run demo!")
depectfilename = "depect_enzde.sc"
cstfilename = "zlj.cst"
ligand = "RPBE_ref.pdbqt"
run_geo_cst(depectfilename,cstfilename,ligand)
'''
```
#### File: DEPECT/hybrid_redesign/ca2mutfile.py
```python
import os
import argparse
def ca2mutfilelist(depectcafile):
cafile = open(depectcafile)
i = 0
mutfilelist = []
for line in cafile:
if line.startswith("#"):
continue
else:
lst = line.split("\t")
mut_unit = lst[0]+" "+lst[1]+" "+lst[2]
i = i + 1
mutfilelist.append(mut_unit)
return mutfilelist
def mutfilelist2rosettamutfile(mutfilelist,mutfilename):
#mutfilename = depectcafile.split(".")[0]+".mutfile"
    with open(mutfilename, "w") as mutfile:
        mutfile.write("total " + str(len(mutfilelist)) + "\n")
        for mutunit in mutfilelist:
            mutfile.write("1\n" + mutunit + "\n")
def chunk(lst,nt,prefix):
outfilenamelist = []
t = len(lst)//(int(nt)-1)
n = 0
for i in range(0,len(lst),t):
b=lst[i:i+t]
n = n + 1
mutfilelist2rosettamutfile(b,prefix+"_"+str(n)+".mutfile")
c = prefix+"_"+str(n)+".mutfile"
outfilenamelist.append(c)
return outfilenamelist
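# Usage sketch for chunk: split a mutation list across mutfiles named <prefix>_<n>.mutfile.
# The entries and prefix below are illustrative, in the "wildtype resnum mutation" format
# that mutfilelist2rosettamutfile expects:
# demo = ["A 10 G", "L 25 V", "F 88 Y", "K 120 R"]
# chunk(demo, 3, "demo")  # writes demo_1.mutfile and demo_2.mutfile, returns their names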
def get_parser():
parser = argparse.ArgumentParser(description='convert depect consensus analysis output .ca file to rosetta mutfile')
#parser.parse_args()
parser.add_argument("-in", "--input", help="depect consensus analysis output .ca file")
parser.add_argument("-nt", "--number_splited", help="number of output mutfile, default is 1",default=1)
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
depectcafile = args.input
nt = int(args.number_splited)
mutfilelist = ca2mutfilelist(depectcafile)
#print(nt)
outfilename = prefix = depectcafile.split(".")[0]
if nt == 1:
mutfilelist2rosettamutfile(mutfilelist,outfilename)
if nt > 1:
chunk(mutfilelist,nt,prefix)
```
#### File: DEPECT/hybrid_redesign/hybrid_redesign.py
```python
import os
import math
import argparse
import time
import depect_ca_local as dcl
#import rosetta_cartddg_filter
import ca2mutfile
#cartesian_ddg.linuxgccrelease -s 4eb0_clean.pdb @cart_ddg_flag -ddg:mut_file 4EB01.mutfile
Cartesian_ddG_exe = "cartesian_ddg.linuxgccrelease"
Cartesian_ddG_opt = "-ddg:iterations 1 -ddg::cartesian -ddg::dump_pdbs true -ddg:bbnbrs 1 -fa_max_dis 9.0 -score:weights ref2015_cart"
def get_parser():
parser = argparse.ArgumentParser(description='consensus analysis based on relative entropy score and fruther filter by ddg')
#parser.parse_args()
parser.add_argument("-s", "--structure", help="target structure which must be provided")
parser.add_argument("-cid", "--chain", help="chain id for sequence extraction, default is A",default = "A")
parser.add_argument("-db", "--database", help="database name for blastp")
parser.add_argument("-ic", '--identity_cutoff',default=30,
help='the identity cutoff when build the sub-database')
parser.add_argument("-e", '--evalue', default=1e-5,
help='the evalue cutoff when blast')
parser.add_argument("-ds", "--dataset", help="dataset to do consensus analysis")
parser.add_argument("-sc", '--score_cutoff', default=2,
help='the score cutoff for selection, default is 2')
parser.add_argument("-nt", '--num_threads', default=8,
help='number of threads used to run blastp and ddg calculation software, default is 8')
parser.add_argument("-m", '--mode', help='blast mode require input -ic, -db and -e, analysis mode require -ds')
parser.add_argument("-en", '--engine', default="rcd",
help='software used for ddg calculation, currently supporting FoldX (f) and Rosetta_Cartesian_ddG (rcd). Default is Rosetta_Cartesian_ddG')
parser.add_argument("-dc", '--ddg_cutoff', help='cutoff of ddg for output')
#args = parser.parse_args()
return parser
def pdb2seq(structure,chain):
pdbfile = open(structure)
seq = ''
#pdb_seq_out_dict = {}
longer_names = {'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D',
'CYS': 'C', 'GLU': 'E', 'GLN': 'Q', 'GLY': 'G',
'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',
'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V'}
#pdb = open(pdbname)
resseqlst = []
for line in pdbfile:
if line.startswith("ATOM") and "CA" in line.split():
amino_acid = line[17:20]
resseq = int(line[22:26].replace(" ",""))
if chain == line[21:22] and resseq not in resseqlst:
resseqlst.append(resseq)
seq = seq + longer_names[amino_acid]
#if line[0:3] == "TER":
#seq = seq + "\n"
#pdb_seq_out_dict[chain] = seq[:-1]
seqfilename = structure.replace("pdb","fasta")
with open(seqfilename,"w+") as seqfile:
seqfile.write(">seq\n"+seq+"\n")
seqfile.close()
return seqfilename
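# Usage sketch for pdb2seq: extract the chain-A sequence of a cleaned structure into a FASTA
# file next to it (the file name here is illustrative):
# pdb2seq("4eb0_clean.pdb", "A")  # writes 4eb0_clean.fasta and returns its name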
def runconsensusanalysis(seq,db_name,identity_cutoff,evalue,nt,score_cutoff,mode):
#parser = dcl.get_parser()
#args = parser.parse_args()
#struct = args.structure
#chain = args.chain
#seq = pdb2seq(struct,chain)
##seq = args.structure.replace("pdb","fasta")
#db_name = args.database
#identity_cutoff = args.identity_cutoff
#evalue = args.evalue
#score_cutoff = args.score_cutoff
#mode = args.mode
#nt = str(args.num_threads)
if mode == "blast":
t0 =time.time()
aln = dcl.build_sub_db(seq,db_name,identity_cutoff,evalue,nt)
print("Blast search finished in",str(time.time()-t0)[0:3]+"s")
score_dict,map_dict,raw_target = dcl.build_matrix(aln,seq)
outlist = dcl.select(score_dict,map_dict,score_cutoff,raw_target)
curtime = time.asctime( time.localtime(time.time()))
depectcafile = seq+".ca"
with open(depectcafile,"w+") as outf:
outf.write("#Back-to-consensus mutations suggested by DEPECT consensus analysis.\n#"+curtime+"\n#wildtype\tnumber\tback-to-consensus\tscore"+"\n")
outf.close()
with open(depectcafile,"a+") as outf:
for mutations in outlist:
outf.write(mutations+"\n")
outf.close()
#print(out_line)
if mode == "analysis":
dataset = args.dataset
os.system("cat "+seq+" "+dataset+" > "+seq+".db")
os.system("muscle -in "+seq+".db -out "+seq+".aln -maxiters 99")
aln = seq+".aln"
score_dict,map_dict,raw_target = dcl.build_matrix(aln,seq)
outlist = dcl.select(score_dict,map_dict,score_cutoff,raw_target)
curtime = time.asctime( time.localtime(time.time()))
depectcafile = seq+".ca"
with open(depectcafile,"w+") as outf:
outf.write("#Back-to-consensus mutations suggested by DEPECT consensus analysis.\n#"+curtime+"\n#wildtype\tnumber\tback-to-consensus\tscore"+"\n")
outf.close()
with open(depectcafile,"a+") as outf:
for mutations in outlist:
outf.write(mutations+"\n")
outf.close()
return depectcafile
def generaterosettamutfile(depectcafile,nt):
#parser = dcl.get_parser()
#args = parser.parse_args()
nt = int(nt)
mutfilelist = ca2mutfile.ca2mutfilelist(depectcafile)
#print(nt)
outfilename = prefix = depectcafile.split(".")[0]
if nt == 1:
ca2mutfile.mutfilelist2rosettamutfile(mutfilelist,outfilename)
if nt > 1:
mutfilenamelist = ca2mutfile.chunk(mutfilelist,nt,prefix)
return mutfilenamelist
def runrosetta(mutfilenamelist,struct):
#parser = dcl.get_parser()
#args = parser.parse_args()
#struct = args.structure
for mutfile in mutfilenamelist:
os.system("nohup "+Cartesian_ddG_exe+" -s "+struct+" "+Cartesian_ddG_opt+" -ddg:mut_file "+mutfile+" &")
if __name__ == '__main__':
print(
"""
#########################################################
# DEPECT #
# thermostability hybrid redesign #
# Design and Engineering of Proteins and Enzymes #
# by Computational Tools #
# #
# Author: <NAME> #
# E-mail: <EMAIL> #
#########################################################
"""
)
    parser = get_parser()
args = parser.parse_args()
struct = args.structure
chain = args.chain
seq = pdb2seq(struct,chain)
#seq = args.structure.replace("pdb","fasta")
db_name = args.database
identity_cutoff = args.identity_cutoff
evalue = args.evalue
score_cutoff = args.score_cutoff
mode = args.mode
nt = str(args.num_threads)
depectcafile = runconsensusanalysis(seq,db_name,identity_cutoff,evalue,nt,score_cutoff,mode)
mutfilenamelist = generaterosettamutfile(depectcafile,nt)
runrosetta(mutfilenamelist,struct)
``` |
{
"source": "JinyuanSun/DETECT",
"score": 2
} |
#### File: DETECT/enzde/fvdesign.py
```python
import os
import time
import numpy as np
import pandas as pd
import argparse
from joblib import Parallel, delayed
class FoldX:
def __init__(self, exe=""):
if exe:
self.exe = exe
else:
self.exe = os.popen("which foldx").read().replace("\n", "")
# AlaScan
# AnalyseComplex
# BuildModel
# CrystalWaters
# Dihedrals
# DNAContact
# DNAScan
# RNAScan
# MetalBinding
# Optimize
# PDBFile
# PositionScan
# PrintNetworks
# Pssm
# QualityAssessment
# ReconstructSideChains
# RepairPDB
# Rmsd
# SequenceDetail
# SequenceOnly
# Stability
def repair(self, input_file):
cmd_list = [self.exe, "--command=RepairPDB", "--pdb=%s" % input_file]
def build_model(self, input_file, mutation, numberOfRuns=1):
# mutation = "A_126_M"
start_time = time.time()
cmd_list = [
self.exe,
"--command=BuildModel",
"--pdb=%s" % input_file,
"--mutant-file=individual_list.txt",
"--numberOfRuns=%s 1>/dev/null" % str(numberOfRuns),
]
with open("individual_list.txt", "w+") as indifile:
indifile.write(mutation + ";\n")
indifile.close()
# os.popen(" ".join(cmd_list))
if test_on_mac:
print(" ".join(cmd_list))
else:
os.system(" ".join(cmd_list))
end_time = time.time()
return round(end_time - start_time, 3)
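# Usage sketch for FoldX.build_model above (assumes the foldx binary is on PATH and a repaired
# "WT_protein.pdb" sits in the working directory, as in Pipeline.build_model below; note that
# build_model reads the module-level test_on_mac flag set in __main__):
# seconds_taken = FoldX().build_model("WT_protein.pdb", "EA243Q,EB243Q", numberOfRuns=5)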
class Autodock:
def __init__(self, pythonsh_path, ADTU_path, box_cache, ligand_cache):
# /home/jsun/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24
self.ADTU_path = ADTU_path
self.pythonsh_path = pythonsh_path
self.box_cache = box_cache
self.ligand_cache = ligand_cache
def run_local_dock(self, receptor, i):
os.system(
"%s %s/prepare_receptor4.py -r %s -A checkhydrogens"
% (self.pythonsh_path, self.ADTU_path, receptor)
)
# os.popen("pythonsh %s/prepare_ligand.py -l %s " %(self.ADTU_path, ligand))
with open("box.cfg", "w+") as box:
box.write(self.box_cache)
box.close()
with open("ligand.pdbqt", "w+") as ligand:
ligand.write(self.ligand_cache)
ligand.close()
local_out = os.popen(
"vina --config box.cfg --receptor=%s --ligand=ligand.pdbqt --local_only --cpu=1 --out=local_docked_%s.pdbqt"
% (receptor + "qt", str(i))
)
for line in local_out.read().split("\n"):
if line.startswith("Affinity:"):
affinity = line.split(":")[1].split("(")[0].strip()
return affinity
def _3_2_1(x):
d = {
"CYS": "C",
"ASP": "D",
"SER": "S",
"GLN": "Q",
"LYS": "K",
"ILE": "I",
"PRO": "P",
"THR": "T",
"PHE": "F",
"ASN": "N",
"GLY": "G",
"HIS": "H",
"LEU": "L",
"ARG": "R",
"TRP": "W",
"ALA": "A",
"VAL": "V",
"GLU": "E",
"TYR": "Y",
"MET": "M",
}
assert x in d, "%s is not in 20 canonical amino acids!" % (x)
return d[x]
def __remove_chain_from_individual_list_mutation(mutation):
# EB243Q -> [[E,B,243,Q]]
# EA243Q,EB243Q -> [[E,A,243,Q],[E,B,243,Q]]
mutation_list = mutation.split(",")
converted = []
for mut in mutation_list:
wild_type = mut[0]
mut_type = mut[-1]
chain = mut[1]
num = mut[2:-1]
converted.append([wild_type, chain, num, mut_type])
return converted
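# Worked example of the parsing documented above:
# __remove_chain_from_individual_list_mutation("EA243Q,EB243Q")
#   -> [['E', 'A', '243', 'Q'], ['E', 'B', '243', 'Q']]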
class Pipeline:
def __init__(
self,
position_list_filename,
enzyme_wildtype_structure_filename,
substrate_filename,
chain_list,
numberOfRuns,
box_cfg,
ADTU_path="",
pythonsh_path="",
verbose=False,
):
self.text = 0
self.position_list_filename = position_list_filename
self.chain_dict = {}
self.root_dir = os.getcwd()
self.enzyme = "/".join([self.root_dir, enzyme_wildtype_structure_filename])
self.substrate = "/".join([self.root_dir, substrate_filename])
self.lig = open(self.substrate, "r").read()
self.verbose = verbose
self.cachefile = open(enzyme_wildtype_structure_filename, "r").read()
# self.docking_score = {}
self.box_cache = open(box_cfg, "r").read()
self.numberOfRuns = numberOfRuns
self.chain_list = chain_list
# self.foldx_energy = {}
if ADTU_path:
self.ADTU_path = ADTU_path
else:
self.ADTU_path = (
os.popen("locate MGLToolsPckgs/AutoDockTools/Utilities24")
.read()
.split("\n")[0]
)
if pythonsh_path:
self.pythonsh_path = pythonsh_path
else:
self.pythonsh_path = os.popen("which pythonsh").read().replace("\n", "")
def read_in_positions(self):
# mutation list generated from "pymol selection sele, output=S"
with open(self.position_list_filename, "r") as pos_file:
for line in pos_file:
if line[0] != "#":
protein_name, chain, wild_type_3l, res_num = line.replace(
"\n", ""
).split("/")
# self.position_list.append([chain, wild_type, res_num])
if chain in self.chain_dict:
self.chain_dict[chain].append((wild_type_3l, res_num))
else:
self.chain_dict[chain] = [(wild_type_3l, res_num)]
pos_file.close()
def generate_all_mutation(self):
# Automate determine how many chains to use depending on input list file
docking_score = {}
foldx_energy = {}
if self.chain_list:
chains = self.chain_list.split(",")
else:
chains = list(self.chain_dict.keys())
poses = []
for chain in self.chain_dict:
poses += self.chain_dict[chain]
all_mutation = []
for pos in poses:
pos_mutation_info = []
wild_type_3l, res_num = pos
wild_type_1l = _3_2_1(wild_type_3l)
for mut_type in "QWERTYIPASDFGHKLCVNM":
if mut_type != wild_type_1l:
a_mutation = []
for chain in chains:
a_mutation.append(
"".join([wild_type_1l, chain, res_num, mut_type])
)
pos_mutation_info.append(",".join(a_mutation))
docking_score[",".join(a_mutation)] = np.zeros(
self.numberOfRuns
)
foldx_energy[",".join(a_mutation)] = {}
all_mutation += pos_mutation_info
return all_mutation, docking_score, foldx_energy
def convert_mut_file_to_mutations(self):
pass
def calScore(self, mutation, foldx_energy):
# fxout_name = jobID + "/Dif_" + pdbfile.replace(".pdb", ".fxout")
fxout_name = os.popen("ls Dif*.fxout").read().replace("\n", "")
# print(fxout_name)
df = pd.read_table(fxout_name, sep="\t", skiprows=8)
score = round(df["total energy"].mean(), 4)
sd = round(df["total energy"].std(), 4)
foldx_energy[mutation] = {"score": score, "SD": sd}
with open("foldx_energy.txt", 'w+') as output:
output.write("\t".join([mutation, str(score), str(sd)]) + "\n")
output.close()
# return ["_".join([wild, str(resNum), mutation]), score, sd]
def build_model(self, varlist):
mutation, docking_score, foldx_energy = varlist
os.mkdir(mutation)
os.chdir(mutation)
os.mkdir("build_model")
os.chdir("build_model")
with open("WT_protein.pdb", "w+") as enzymefile:
enzymefile.write(self.cachefile)
enzymefile.close()
runtime = FoldX().build_model("WT_protein.pdb", mutation, self.numberOfRuns)
self.calScore(mutation, foldx_energy)
if self.substrate:
i = 0
aff_arr = np.zeros(self.numberOfRuns)
while i in range(self.numberOfRuns):
affinity = Autodock(
pythonsh_path=self.pythonsh_path,
ADTU_path=self.ADTU_path,
box_cache=self.box_cache,
ligand_cache=self.lig,
).run_local_dock("WT_protein_1_%s.pdb" % str(i), i)
# docking_score[mutation][i] = affinity
aff_arr[i] = affinity
i += 1
with open("docking_energy.txt", 'w+') as output:
output.write("\t".join([mutation, str(round(aff_arr.mean(), 4)), str(round(aff_arr.std(), 4))]) + "\n")
os.chdir("../")
if self.verbose:
print("[DEBUG]: ")
def build_scan(self, mutations, docking_score, foldx_energy, threads):
Parallel(n_jobs=threads)(
delayed(self.build_model)([mutation, docking_score, foldx_energy]) for mutation in mutations
)
return 0
def get_args():
parser = argparse.ArgumentParser(
description="Run FoldX, AutoDock Vina for substrate binding pocket redesign."
)
parser.add_argument("enzyme", help="Input enzyme PDB")
parser.add_argument(
"chain_list", help='list of chains, capital letters seperated with comma: "A,B"'
)
parser.add_argument("substrate", help="Input substrate PDBQT")
parser.add_argument("box_cfg", help="box information for local docking")
parser.add_argument("position_list", help="position list file output from pymol")
parser.add_argument("-T", "--threads", help="Number of threads to use", default=16)
parser.add_argument(
"-N",
"--num_of_runs",
help="Number of model foldx to build for each mutation",
default=5,
)
parser.add_argument("-mp", "--mgltools_path", help="path to mgltools")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
test_on_mac = False
position_list_filename = args.position_list
enzyme_wildtype_structure_filename = args.enzyme
substrate_filename = args.substrate
box_cfg = args.box_cfg
chain_list = args.chain_list
numberOfRuns = int(args.num_of_runs)
threads = int(args.threads)
mgltools_path = args.mgltools_path
scan_pipeline = Pipeline(
position_list_filename,
enzyme_wildtype_structure_filename,
substrate_filename,
chain_list,
numberOfRuns,
box_cfg,
mgltools_path,
)
def scan_and_dock():
scan_pipeline.read_in_positions()
all_mutations, docking_score, foldx_energy = scan_pipeline.generate_all_mutation()
scan_pipeline.build_scan(all_mutations[:6], docking_score, foldx_energy, threads)
scan_and_dock()
``` |
{
"source": "JinyuanSun/MarBGC",
"score": 3
} |
#### File: JinyuanSun/MarBGC/gbk2fasta.py
```python
def gbkfile2fasta(gbkfilename):
idlist = []
seqlist = []
with open(gbkfilename,'r') as gbkfile:
filetext = gbkfile.read()
feature_list = filetext.split("FEATURES")[1].replace(" ","").split("\n/")
#print(feature_list)
for ele in feature_list:
if "=" in ele:
ele_list = ele.split("=")
key = ele_list[0]
if key == 'protein_id':
try:
value = ele_list[1].replace(" ","").replace("\n","").split('"')[1]
idlist.append(value)
#seqlist.append(value)
except IndexError:
value = ele_list[1].replace(" ","").replace("\n","").split('"')[0]
idlist.append(value)
#seqlist.append(value)
if key == 'translation':
try:
value = ele_list[1].replace(" ","").replace("\n","").split('"')[1]
#idlist.append(key)
seqlist.append(value)
except IndexError:
value = ele_list[1].replace(" ","").replace("\n","").split('"')[0]
#idlist.append(key)
seqlist.append(value)
#print(key,value)
gbkfile.close()
#print(idlist)
fastafilename = gbkfilename.split(".")[0]+".fasta"
i = 0
with open(fastafilename,"a+") as fastafile:
while i in range(len(idlist)):
fastafile.write(">"+idlist[i]+"\n"+seqlist[i]+"\n")
i += 1
fastafile.close()
with open("gbk.txt","r") as f:
for line in f:
filename=line.strip()
gbkfile2fasta(filename)
``` |
{
"source": "JinyuanSun/my_bio_script",
"score": 3
} |
#### File: my_bio_script/basic/fasta.py
```python
def fasta2dic(fastafilename): #read a fasta file into a dict
fasta_dict = {}
with open(fastafilename) as fastafile:
for line in fastafile:
if line[0] == ">":
head = line.strip()
fasta_dict[head] = ''
else:
fasta_dict[head] += line.strip()
fastafile.close()
return fasta_dict
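# Usage sketch for fasta2dic (hypothetical file): a FASTA with two records becomes a dict
# whose keys keep the leading ">" of each header, e.g.
# fasta2dic("example.fasta")  # {">seq1": "MKV...", ">seq2": "MAT..."}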
def output_single_fa(fasta_dict): #split a fastadict into fasta file of single seq
for key in fasta_dict:
filename = key[1:]+".fa"
with open(filename, "w+") as outfile:
outfile.write(key+"\n"+fasta_dict[key]+"\n")
outfile.close()
return filename
def split_fasta(fastafilename):
output_single_fa(fasta2dic(fastafilename))
def read_a3m():
return []
def align_2_seq(raw_seq1,raw_seq2):
from Bio import pairwise2
import pickle
with open("BLOSUM62.pkl", "rb") as tf:
matrix = pickle.load(tf)
tf.close()
seq1 = raw_seq1
seq2 = raw_seq2
alignments = pairwise2.align.globalds(seq1, seq2, matrix, -10, -0.5)
seq1 = alignments[0][0]
seq2 = alignments[0][1]
resnum = 0
#index = 0
aligned_seq2 = ''
for index in range(len(seq1)):
if seq1[index] == "-":
continue
else:
aligned_seq2 += seq2[index]
resnum += 1
if seq1[index] == seq2[index]:
continue
#else:
#print(seq1[index],resnum,seq2[index])
index += 1
#print(raw_seq1+"\n"+aligned_seq2)
return aligned_seq2
```
#### File: my_bio_script/explore/explore_motif_diversity.py
```python
import argparse
import re
parser = argparse.ArgumentParser(description='a script to explore motif diversity')
parser.add_argument("-i", '--input', help="input a fasta file")
parser.add_argument("-m", '--motif', help="input a motif in regular expression")
parser.add_argument("-o", '--output', help="the out put file name")
args = parser.parse_args()
inf = open(args.input)
ouf = args.output
motif = args.motif
regex = re.compile(motif)
def readseq(fasta):# return a dict in which headers are keys and sequences are value
seq = {}
for line in fasta:
if line.startswith(">"):
name = line.replace('>','').split()[0]
seq[name] = ''
else:
seq[name]+=line.replace('\n','').strip()
return seq
seq = readseq(inf)
ofile = open(ouf,'w')
print('header'+' '+'tax',file=ofile)
for key in seq:
sequence = str(seq[key])
if re.search(regex,str(seq[key])) != None:
fragment = re.findall(regex,str(seq[key]))[0]
header = key.split('|')[1] #This line is to deal with raw uniprot ID :>tr|ID|annotations
print(header+' '+fragment[-3:],file=ofile)
ofile.close()
``` |
{
"source": "JinyuanSun/SeqDDG",
"score": 3
} |
#### File: seqddg/utilities/feature_maker.py
```python
import pandas as pd
import numpy as np
class feature:
__a = """AAa H V P IP HT ST GSI F0 F1 F2 C B EC
A -0.171 −0.677 −0.680 −0.170 0.900 −0.476 −0.350 −0.044 −0.234 −0.269 0.587 −0.099 0.829
D -0.767 −0.281 −0.417 −0.900 −0.155 −0.635 −0.213 −0.103 0.900 0.014 −0.475 −0.082 0.247
C 0.508 −0.359 −0.329 −0.114 −0.652 0.476 −0.140 −0.642 −0.773 −0.035 −0.433 0.094 −0.388
E -0.696 −0.058 −0.241 −0.868 0.900 −0.582 −0.230 0.347 0.480 0.021 −0.900 0.105 0.565
F 0.646 0.412 0.373 −0.272 0.155 0.318 0.363 −0.863 −0.504 −0.113 −0.673 0.721 0.035
G -0.342 −0.900 −0.900 −0.179 −0.900 −0.900 −0.900 0.701 0.527 −0.050 0.378 −0.900 0.829
H −0.271 0.138 0.110 0.195 −0.031 −0.106 0.384 −0.480 −0.186 −0.255 −0.297 0.115 −0.088
I 0.652 −0.009 −0.066 −0.186 0.155 0.688 0.900 −0.332 −0.662 −0.411 −0.288 0.879 −0.900
K −0.889 0.163 0.066 0.727 0.279 −0.265 −0.088 0.339 0.844 0.900 −0.375 0.317 0.547
L 0.596 −0.009 −0.066 −0.186 0.714 −0.053 0.213 −0.590 −0.115 −0.064 −0.288 0.879 0.865
M 0.337 0.087 0.066 −0.262 0.652 −0.001 0.110 −0.738 −0.900 −0.893 −0.205 0.370 0.724
N −0.674 −0.243 −0.329 −0.075 −0.403 −0.529 −0.213 0.516 0.242 0.000 −0.166 0.031 0.265
P 0.055 −0.294 −0.900 −0.010 −0.900 0.106 0.247 0.059 0.868 0.014 0.900 0.487 0.212
Q −0.464 −0.020 −0.110 −0.276 0.528 −0.371 −0.230 0.870 0.416 −0.319 −0.403 0.192 0.529
R −0.900 0.466 0.373 0.900 0.528 −0.371 0.105 −0.066 0.416 −0.206 0.430 0.175 −0.106
S −0.364 −0.544 −0.637 −0.265 −0.466 −0.212 −0.337 0.900 0.575 −0.050 −0.024 −0.300 0.600
T −0.199 −0.321 −0.417 −0.288 −0.403 0.212 0.402 0.192 0.599 0.028 −0.212 0.323 0.406
V 0.331 −0.232 −0.285 −0.191 −0.031 0.900 0.677 −0.480 −0.385 −0.120 −0.127 0.896 0.794
W 0.900 0.900 0.900 −0.209 0.279 0.529 0.479 −0.900 −0.464 −0.900 −0.074 0.900 0.900
Y 0.188 0.541 0.417 −0.274 −0.155 0.476 0.363 −0.634 −0.361 −0.659 −0.738 0.546 0.582"""
vloumn = {"A":88.6,"R":173.4,"N":114.1,"D":111.1,"C":108.5,
"Q":143.8,"E":138.4,"G":60.1,"H":153.2,"I":166.7,
"L":166.7,"K":168.6,"M":162.9,"F":189.9,"P":112.7,
"S":89.0,"T":116.1,"W":227.8,"Y":193.6,"V":140.0}
hydropathy_index = {"R":-2.5,"K":-1.5,"D":-0.9,"Q":-0.85,"N":-0.78,
"E":-0.74,"H":-0.4,"S":-0.18,"T":-0.05,"P":0.12,
"Y":0.26,"C":0.29,"G":0.48,"A":0.62,"M":0.64,
"W":0.81,"L":1.1,"V":1.1, "F":1.2,"I":1.4}
def get_al_dd(self):
al_dd = {"A":{},"D":{},"C":{},"E":{},"F":{},
"G":{},"H":{},"I":{},"K":{},"L":{},
"M":{},"N":{},"P":{},"Q":{},"R":{},
"S":{},"T":{},"V":{},"W":{},"Y":{}}
for line in self.__a.replace("−","-").split("\n"):
lst = line.split()
if lst[0] == "AAa":
name_lst = lst[1:]
else:
al_dd[lst[0]] = dict(zip(name_lst,lst[1:]))
#al_df = pd.DataFrame(al_dd).T
return al_dd
feature = feature()
al_dd = feature.get_al_dd()
vloumn = feature.vloumn
hydropathy_index = feature.hydropathy_index
alphabet = "ARNDCQEGHILKMFPSTWYV-"
states = len(alphabet)
a2n = {}
for a,n in zip(alphabet,range(states)):
a2n[a] = n
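# a2n maps each residue letter (and the gap symbol "-") to its column index in `alphabet`,
# e.g. a2n["A"] == 0, a2n["R"] == 1 and a2n["-"] == 20; getStatical below relies on this indexing.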
def get_MSA(seqa3m):
import string
rm_lc = str.maketrans(dict.fromkeys(string.ascii_lowercase))
ali_dict = {}
a3mfile = open(seqa3m)
for line in a3mfile:
if line[0] == ">":
head = line.strip()
ali_dict[head] = ""
#print(line.strip())
else:
line = line.translate(rm_lc).strip()
lst = []
for x in line:
lst.append(x)
ali_dict[head] = lst
#print(lst)
df = pd.DataFrame(ali_dict)
seq_length = df.shape[0]
seq_num = df.shape[1]
df_msa = df.T
alphabet = "ARNDCQEGHILKMFPSTWYV-"
p_msa = np.zeros([21,seq_length])
for p in range(seq_length):
for num,AA in enumerate(alphabet):
if AA != "-":
count = df_msa[p].value_counts().get(AA)
if count == None:
p_msa[num][p] = 0
else:
p_msa[num][p] = count/seq_num
#print(num,p,count/seq_num)
p = p + 1
return p_msa
def getSeq(fasta):
fasta = open(fasta)
seq = ''
for line in fasta:
if line[0] == ">":
continue
else:
seq += line.strip().replace("X","A")
return seq
def getStatic(wpm):
dP = 0.5*(-float(al_dd[wpm[0]]['P'])+float(al_dd[wpm[2]]['P']))
dF1 = -0.5*(-float(al_dd[wpm[0]]['F1'])+float(al_dd[wpm[2]]['F1']))
dB = 0.5*(-float(al_dd[wpm[0]]['B'])+float(al_dd[wpm[2]]['B']))
dST = 0.5*(-float(al_dd[wpm[0]]['ST'])+float(al_dd[wpm[2]]['ST']))
dHT = 0.5*(-float(al_dd[wpm[0]]['HT'])+float(al_dd[wpm[2]]['HT']))
dV = -0.007*(vloumn[wpm[0]]-vloumn[wpm[2]])
dH = -0.25*(hydropathy_index[wpm[0]]-hydropathy_index[wpm[2]])
return [dP, dF1, dB, dST, dHT, dV, dH]
def getStatical(msa,v_out,w_out,seq,mutation):
wild = mutation[0]
pos = int(mutation[1])
mut = mutation[2]
wild_score = []
mut_score = []
i = 0
wild_v = v_out[pos-1][a2n[wild]]
mut_v = v_out[pos-1][a2n[mut]]
for aa in seq:
wild_score.append(w_out[i][pos-1][:-1].reshape(21,21)[a2n[aa]][a2n[wild]])
mut_score.append(w_out[i][pos-1][:-1].reshape(21,21)[a2n[aa]][a2n[mut]])
i += 1
return [-0.1*(wild_v-mut_v),
-0.5*(msa[a2n[wild]][pos-1]-msa[a2n[mut]][pos-1]),
-0.5*(max(wild_score) - max(mut_score)),
0.5*(min(wild_score) - min(mut_score)),
-0.15*(sum(wild_score) - sum(mut_score)),
-0.1*(np.linalg.norm((np.array(wild_score)), ord=1)-np.linalg.norm((np.array(mut_score)), ord=1)),
-0.5*(np.linalg.norm((np.array(wild_score)), ord=2)-np.linalg.norm((np.array(mut_score)), ord=2))]
if __name__ == '__main__':
    # Demo wiring: fasta, seqa3m, wpm, mutation, v_out and w_out must be supplied by the caller;
    # getSeq, get_MSA, getStatic and getStatical are module-level helpers, not methods of `feature`.
    al_dd = feature.get_al_dd()
    seq = getSeq(fasta)
    msa = get_MSA(seqa3m)
    static_features = getStatic(wpm)
    statical_features = getStatical(msa, v_out, w_out, seq, mutation)
```
#### File: seqddg/utilities/HHsearch.py
```python
from os import popen
import subprocess
import time
#subprocess.call('a.exe')
def hhsearch(seqfilename, iter_num, path_to_database, num_threads):
searchcmd = "hhblits -i " + seqfilename + " -o " + seqfilename + ".hhr -oa3m " + seqfilename + ".a3m -n " + str(
iter_num) + " -d " + path_to_database + " -cpu " + str(num_threads)
# print("grep \">\" " + seqfilename + ".a3m|wc -l")
search = subprocess.Popen(searchcmd,shell=True)
    while search.poll() is None:
time.sleep(1)
#if search.poll() == 0:
hits_num = popen("grep \">\" " + seqfilename + ".a3m|wc -l").read()
print("Found " + hits_num + "hits!")
a3mfilename = seqfilename + ".a3m"
return a3mfilename
#else:
#search.wait(1)
if __name__ == '__main__':
# print_hi('PyCharm')
seqfilename = "g.fasta"
iter_num = "3"
path_to_database = "/ydata/jsun/database/UniRef30_2020_03"
num_threads = 8
hhsearch(seqfilename, iter_num, path_to_database, num_threads)
```
#### File: seqddg/utilities/parsermodule.py
```python
import argparse
def get_parser():
parser = argparse.ArgumentParser(description='sequence based ∆∆G prediction')
parser.add_argument("-s",
"--sequence",
help="target sequence which must be provided")
parser.add_argument("-a",
"--a3mfile",
help="the sequence alignment file in a3m format")
parser.add_argument("-db",
"--database",
help="path/to/UniRep30_2020_03 for hhblits")
parser.add_argument("-ni",
'--iteration_number',
default=3,
help='the iteration number of hhblits search')
parser.add_argument("-nt",
'--num_threads',
default=4,
help='number of threads used to run hhblist, default is 4')
parser.add_argument("-m",
'--mutation',
help='mutation to predict, in form of wild_resnum_mut, like A_24_G')
parser.add_argument("-ml",
'--mutation_list',
help='list of mutation to predict, one mutation per line')
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
seqfilename = args.sequence
a3mfilename = args.a3mfile
path_to_database = args.database
iter_num = args.iteration_number
num_threads = args.num_threads
mutation = args.mutation
mutation_list = args.mutation_list
print(seqfilename,a3mfilename)
``` |
{
"source": "JinyuanSun/song_of_fire",
"score": 2
} |
#### File: JinyuanSun/song_of_fire/many_models.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
df = pd.read_csv("data4",sep="\t")
df.head()
print(df.describe())
print(df[df['target']=='unstable'].describe())
print(df[df['target']=='stable'].describe())
print(df[df['target']=='unchanged'].describe())
# In[27]:
df.plot(kind = 'box', subplots = True, layout = (4, 4), sharex = False, sharey = False)
plt.show()
# In[28]:
df.hist()
his = plt.gcf()
his.set_size_inches(12, 6)
plt.show()
# In[29]:
sns.set_style('whitegrid')
sns.FacetGrid(df, hue = 'target', size = 6).map(plt.scatter, ':', '*').add_legend()
plt.show()
# In[30]:
plt.close()
sns.pairplot(df, hue = 'target', height = 2, diag_kind = 'kde')
plt.show()
# In[31]:
plt.figure(figsize=(15,10))
plt.subplot(2,2,1)
sns.violinplot(x='target',y='+',data=df)
plt.subplot(2,2,2)
sns.violinplot(x='target',y='.',data=df)
plt.subplot(2,2,3)
sns.violinplot(x='target',y=':',data=df)
plt.subplot(2,2,4)
sns.violinplot(x='target',y='*',data=df)
plt.show()
# In[32]:
# Import modules
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
# In[33]:
df.head()
# In[34]:
plt.figure(figsize=(7,5))
sns.heatmap(df.corr(),annot=True,cmap='RdYlGn_r')
plt.show()
# In[35]:
#spliting the data
test_size = 0.30
seed = 7
score = 'accuracy'
# Implementation of different ML Algorithms
def models(X_train, Y_train,score):
clfs = []
result = []
names = []
clfs.append(('LR', LogisticRegression()))
clfs.append(('LDA', LinearDiscriminantAnalysis()))
clfs.append(('KNN', KNeighborsClassifier()))
clfs.append(('CART', DecisionTreeClassifier()))
clfs.append(('NB', GaussianNB()))
clfs.append(('SVM', SVC()))
for algo_name, clf in clfs:
k_fold = model_selection.KFold(n_splits=10, random_state=seed)
cv_score = model_selection.cross_val_score(clf, X_train, Y_train, cv=k_fold, scoring=score)
#result = "%s: %f (%f)" % (algo_name, cv_score.mean(), cv_score.std())
result.append((algo_name,cv_score.mean(), cv_score.std()))
names.append(algo_name)
return (result)
# In[36]:
X_all = df.iloc[:,:4]
Y_all = df.iloc[:,4]
# In[37]:
X_train_all, X_test_all, Y_train_all, Y_test_all = model_selection.train_test_split(X_all, Y_all, test_size=test_size, random_state=14)
# In[38]:
models(X_train_all, Y_train_all, score)
# In[39]:
# Evaluation of the Classifier
# Predictions on test dataset
svm = SVC()
svm.fit(X_train_all, Y_train_all)
pred = svm.predict(X_test_all)
print(accuracy_score(Y_test_all, pred))
print(confusion_matrix(Y_test_all, pred))
print(classification_report(Y_test_all, pred))
# In[19]:
X_sep = df[['*','.']]
Y_sep = df.target
# In[20]:
X_train_sep, X_test_sep, Y_train_sep, Y_test_sep = model_selection.train_test_split(X_sep, Y_sep, test_size=test_size, random_state=seed)
models(X_train_sep, Y_train_sep, score)
# In[22]:
svm = SVC()
svm.fit(X_train_sep, Y_train_sep)
pred = svm.predict(X_test_sep)
print(accuracy_score(Y_test_sep, pred))
print(confusion_matrix(Y_test_sep, pred))
print(classification_report(Y_test_sep, pred))
# In[23]:
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
# In[24]:
a = pd.read_csv('data4',sep="\t", header = None)
i = pd.DataFrame(a)
mut = i.values
# In[104]:
print(mut)
X = mut[1:, 0:4].astype(float)
print(X)
Y = mut[1:, 4]
print(Y)
# In[25]:
X[0:5]
# In[106]:
Y[0:5]
# In[131]:
from sklearn.model_selection import train_test_split
x=df.iloc[:,:-1]
y=df.iloc[:,4]
x_train,x_test, y_train, y_test=train_test_split(x,y)
print(x_train,x_test,y_train,y_test)
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(max_iter=100)
# In[114]:
mlp = MLPClassifier(solver='sgd', activation='relu',alpha=1e-4,hidden_layer_sizes=(50,50), random_state=1,max_iter=10,verbose=10,learning_rate_init=.1)
mlp.fit(x_train, y_train)
# In[115]:
print(mlp.score(x_test,y_test))
# In[116]:
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(max_iter=100)
parameter_space = {
'hidden_layer_sizes': [(50,50,50), (50,100,50), (100,)],
'activation': ['tanh', 'relu'],
'solver': ['sgd', 'adam'],
'alpha': [0.0001, 0.05],
'learning_rate': ['constant','adaptive'],
}
from sklearn.model_selection import GridSearchCV
clf = GridSearchCV(mlp, parameter_space, n_jobs=-1, cv=3)
clf.fit(x_train, y_train)
# In[117]:
# Best paramete set
print('Best parameters found:\n', clf.best_params_)
# All results
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
# In[118]:
y_true, y_pred = y_test , clf.predict(x_test)
print(set(y_test) - set(y_pred))
from sklearn.metrics import classification_report
print('Results on the test set:')
print(classification_report(y_test, y_pred))
# In[133]:
svm = SVC()
svm.fit(x_train, y_train)
pred = svm.predict(x_test)
print(set(y_test) - set(y_pred))
print(accuracy_score(y_test, pred))
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
# In[ ]:
``` |
{
"source": "JinyuGuan/JINYU",
"score": 3
} |
#### File: JinyuGuan/JINYU/BiliBiliaidGrapeTest1.py
```python
import requests
import math
from bs4 import BeautifulSoup
import csv
import random
from selenium import webdriver
import time
Tp = 59 #The total page number
i = 2 #The start page number
aidArray = [0 for x in range(2000)] #Define an array for storing aids
path = "C:/Users/guanj/Documents/PythonEx/chromedriver_win32/chromedriver.exe" #Get Chromedriver.exe path
driver = webdriver.Chrome(executable_path=path) #Drive Chrome
while i < Tp + 1 : #
j = 0
s = str(i) # The current page
url = "https://space.bilibili.com/10330740?from=search&seid=12264074931239145067#/video?tid=0&page=" + s + "&keyword=&order=pubdate"
#The homepage link for one upper whose uid is 10330740
driver.get(url)
#Load the url
time.sleep(5)
#Delay 5s
while 1 : #Check out HTML entire page source codes for each page
pageSourceThree = driver.page_source
PageSourceHtml = BeautifulSoup(pageSourceThree,"html.parser")
PageSourceBodyHtml = PageSourceHtml.find('ul', attrs={'class': 'list-list'})
        #Find out the information of all videos under label '<ul> class = 'list-list'
        if(str(PageSourceBodyHtml) == 'None'): #If that information cannot be obtained, delay 5s, then try again.
time.sleep(5)
else:
detial = PageSourceBodyHtml.findAll('li', attrs = {'class':'list-item clearfix fakeDanmu-item'})
            #If that information was found, collect all videos' details under label <li> class = 'list-item clearfix fakeDanmu-item'
if(str(detial) != '[]'): #If the detial is not empty, break the loop and start next page's work.
break
else: #If it is, sleep 5s
time.sleep(5)
i = i + 1 #The current page number plus one.
while j < 30 : #Find all 30 aids from each page's HTML source codes
aidStart = str(detial[j]).find('aid') + 5
aidEnd = str(detial[j]).find('"><a')
aid = ''
index = 0
while index < (aidEnd - aidStart):
aid = aid + str(detial[j])[aidStart + index]
index += 1
aidArray[j+30*(i-3)] = int(aid)
j = j + 1
time.sleep(random.randrange(9))
#######################################################################
#Get Reply Number
#Create CSV file first
csvFile = open("C:/Users/guanj/Documents/PythonEx/testnetg.csv",'w+', newline = "")
writer = csv.writer(csvFile)
def REPLYG(str):
DOWNLOAD_URL = 'http://api.bilibili.com/archive_stat/stat?aid=' + str
data = requests.get(DOWNLOAD_URL).content
soup = BeautifulSoup(data, "html.parser", from_encoding='utf-8')
text = soup.get_text()
num_r = text.find('reply')
num_f = text.find(',"favorite"')
index = 0
replyc = ''
while index < (num_f - (num_r + 7)) :
replyc = replyc + text[num_r + 7 + index]
index += 1
return replyc
i = 0
while i < (Tp - 1)*30 :
s = str(aidArray[i])
r = REPLYG(s)
    writer.writerow([aidArray[i], int(r)])
i = i + 1
```
#### File: JinyuGuan/JINYU/BilibiliAid.py
```python
import requests
import math
from bs4 import BeautifulSoup
import datetime
import random
from selenium import webdriver
import time
#upper_id = '10330740'
def aid_tget(upper_id, Tp):
#aidArray is used to stroe aids
#Tp = 2
aidArray = []
#you can download other upper's video by changing this parameter
path = "./chromedriver.exe" #Get Chromedriver.exe path
driver = webdriver.Chrome(executable_path=path) #Drive Chrome
#print('stage 1: obtain aids')
print('opening the chrmodriver...')
for page in range(1, Tp + 1) : #
page_video = 0 #video index in current page
print('opening page: ' + str(page))
url = "https://space.bilibili.com/" + upper_id + "?from=search&seid=12264074931239145067#/video?tid=0&page=" + str(page) + "&keyword=&order=pubdate"
#The homepage link for one upper whose uid is 10330740
driver.get(url)
#Load the url
time.sleep(5)
#Delay 5s
while True : #Check out HTML entire page source codes for each page
pageSourceThree = driver.page_source
PageSourceHtml = BeautifulSoup(pageSourceThree,"html.parser")
PageSourceBodyHtml = PageSourceHtml.find('ul', attrs={'class': 'list-list'})
            #Find out the information of all videos under label '<ul> class = 'list-list'
            if(str(PageSourceBodyHtml) == 'None'): #If that information cannot be obtained, delay 0.5s, then try again.
time.sleep(0.5)
else:
                #If that information was found, collect all videos' details under label <li> class = 'list-item clearfix fakeDanmu-item'
detial_old = PageSourceBodyHtml.findAll('li', attrs = {'class':'list-item clearfix fakeDanmu-item'})
detial = PageSourceBodyHtml.findAll('li', attrs = {'class':'list-item clearfix fakeDanmu-item new'})
#Add videos under lable <new>
detial = detial + detial_old
if(str(detial) != '[]'): #If the detial is not empty, break the loop and start next page's work.
break
else: #If it is, sleep 0.5s
time.sleep(0.5)
print('Finished Page:' + str(page))
while True:
try : #Find all aids from each page's HTML source codes
aidStart = str(detial[page_video]).find('aid') + 5
aidEnd = str(detial[page_video]).find('"><a')
#find the aid of this video
aid = str(detial[page_video])[aidStart : aidEnd]
aidArray.append(str(detial[page_video])[aidStart : aidEnd])
#print('found video number: ' + aid)
page_video = page_video + 1
#time.sleep(0.5)
except:
#if all the video aids are already found, break and go to the next page
break
return aidArray
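# Usage sketch: collect the video aids from the first 2 pages of upper 10330740
# (requires chromedriver.exe next to this script; the uid and page count are illustrative):
# aids = aid_tget('10330740', 2)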
``` |
{
"source": "Jin-Yuhan/manimpy",
"score": 3
} |
#### File: manimpy/manimpy/right_angle.py
```python
from manimlib.imports import *
class RightAngle(VGroup):
CONFIG = {
'size': 0.25,
'stroke_color': WHITE,
'stroke_width': 3.2,
'fill_color': BLUE,
'fill_opacity': 0.5,
'on_the_right': True,
}
def __init__(self, corner=ORIGIN, angle=0, **kwargs):
VGroup.__init__(self, **kwargs)
self.corner = ORIGIN
self.angle = 0
r = UR if self.on_the_right else UL
self.add(
Polygon(ORIGIN, RIGHT * self.size * r, UR * self.size * r, UP * self.size * r, stroke_width=0,
fill_color=self.fill_color, fill_opacity=self.fill_opacity),
Line(RIGHT * self.size * r, UR * self.size * r + UP * self.stroke_width / 100 / 2 * 0.8,
stroke_width=self.stroke_width, stroke_color=self.stroke_color),
Line(UR * self.size * r + RIGHT * self.stroke_width / 100 / 2 * r * 0.8, UP * self.size * r,
stroke_width=self.stroke_width, stroke_color=self.stroke_color),
)
self.move_corner_to(corner)
self.change_angle_to(angle)
def move_corner_to(self, new_corner):
self.shift(new_corner - self.corner)
self.corner = new_corner
return self
def change_angle_to(self, new_angle):
self.rotate(new_angle - self.angle, about_point=self.corner)
self.angle = new_angle
return self
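# Usage sketch inside a Scene (corner point and angle are illustrative):
# mark = RightAngle(corner=ORIGIN, angle=PI / 4, size=0.3, on_the_right=True)
# self.add(mark)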
```
#### File: manimpy/manimpy/trail.py
```python
from manimlib.imports import *
from manimpy.vectors import *
class Trail(VGroup):
class Position:
def __init__(self, pos, life_time):
super().__init__()
self.pos = pos
self.time = life_time
self.life_time = life_time
def update_life_time(self, dt):
self.time -= dt
def get_opacity(self):
return self.time / self.life_time
@property
def should_remove(self):
return self.life_time <= 0
CONFIG = {
        'life_time': 1,  #* lifetime of the trail, in seconds
        'min_width': 5,  #* minimum stroke width of the trail
        'trail_color': None,  #* defaults to the color of the target object
        'width': None,  #* defaults to the width of the target object when that exceeds min_width
'rate_func': linear
}
def __init__(self, obj: Mobject, **kwargs):
super().__init__(obj, VGroup(), **kwargs)
self.all_pos = []
self.last_pos = obj.get_center()
if self.trail_color is None:
self.trail_color = obj.color
if not self.width:
self.width = obj.get_width() if obj.get_width() > self.min_width else self.min_width
def update_point_list(self, dt, pos=None):
for i in range(len(self.all_pos)):
self.all_pos[i].update_life_time(dt)
while len(self.all_pos) > 0 and self.all_pos[0].should_remove:
self.all_pos.pop(0)
if not (pos is None):
self.all_pos.append(Trail.Position(pos, self.life_time))
return self
def get_path(self):
return VGroup(*[
Line(
self.all_pos[i].pos, self.all_pos[i + 1].pos,
stroke_color=self.trail_color,
stroke_opacity=self.rate_func(self.all_pos[i].get_opacity()),
plot_depth=self.rate_func(self.all_pos[i].get_opacity()),
stroke_width=self.width * self.rate_func(self.all_pos[i].get_opacity())
)
for i in range(len(self.all_pos) - 1)
]) if len(self.all_pos) > 1 else VGroup()
def update_trail(self, trail, dt):
pos = self[0].get_center()
self.update_point_list(dt, pos if sqr_distance(pos, self.last_pos) > 0 else None)
self.last_pos = pos
trail.become(self.get_path())
def enable(self):
self[1].add_updater(self.update_trail)
return self
def disable(self):
self[1].remove_updater(self.update_trail)
return self
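# Usage sketch inside a Scene (the dot and its animation are illustrative, using the
# classic manimlib play(method, args) style):
# dot = Dot(color=YELLOW)
# trail = Trail(dot, life_time=0.5).enable()
# self.add(trail)
# self.play(dot.shift, RIGHT * 3)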
```
#### File: manimpy/manimpy/vectors.py
```python
from manimlib.imports import *
from typing import Union
def random_direction(scale: Union[float, np.ndarray] = 1, allow_z_directions: bool = False) -> np.ndarray:
"""
获取一个随机的位移向量
:param scale: 对向量大小的缩放
:param allow_z_directions: 是否允许返回z轴的位移向量
:return: 随机的位移向量
"""
dirs = [LEFT, RIGHT, UP, DOWN, UR, UL, DR, DL]
if allow_z_directions:
dirs.append(IN)
dirs.append(OUT)
return random.choice(dirs) * scale
def vec(*args: float) -> np.ndarray:
"""
构建一个n维向量
:param args: 向量的所有分量
:return: 根据参数构建的n维向量
"""
return np.array(args)
def sqr_magnitude(v: np.ndarray) -> float:
"""
计算向量的模长的平方
:param v: 向量
:return: 向量的模长的平方
"""
result = 0
for i in range(len(v)):
result += v[i] ** 2
return result
def magnitude(v: np.ndarray) -> float:
"""
计算向量的模长
:param v: 向量
:return: 向量的模长
"""
return np.sqrt(sqr_magnitude(v))
def normalize(v: np.ndarray) -> np.ndarray:
"""
计算归一化向量(方向不变,模长为1)
:param v: 向量
:return: 归一化后的向量
"""
return v / magnitude(v)
def sqr_distance(a: np.ndarray, b: np.ndarray) -> float:
"""
计算两个点间距离的平方
:param a: 点1
:param b: 点2
:return: a和b的距离的平方
"""
return sqr_magnitude(a - b)
def distance(a: np.ndarray, b: np.ndarray) -> float:
"""
计算两个点间距离
:param a: 点1
:param b: 点2
:return: a和b的距离
"""
return np.sqrt(sqr_distance(a, b))
def dot(v1: np.ndarray, v2: np.ndarray) -> float:
"""
计算两个向量的点乘
:param v1: 向量1
:param v2: 向量2
:return: v1, v2的点乘结果
"""
result = 0.
for i in range(np.min(len(v1), len(v2))):
result += v1[i] * v2[i]
return result
def angle(v1: np.ndarray, v2: np.ndarray) -> float:
"""
计算两个向量的夹角度数(弧度制)
:param v1: 向量1
:param v2: 向量2
:return: v1和v2间的角度
"""
dot_product = dot(v1, v2)
cos_value = dot_product / np.sqrt(sqr_magnitude(v1) * sqr_magnitude(v2))
return np.arccos(cos_value)
def reflect(in_dir: np.ndarray, normal=DOWN) -> np.ndarray:
"""
计算反射向量
:param in_dir: 入射向量
:param normal: 法线向量
:return: 反射向量
"""
return (-2 * dot(in_dir, normalize(normal))) * normal + in_dir
def cross(v1: np.ndarray, v2: np.ndarray) -> np.ndarray:
"""
计算两个三维向量的叉乘
:param v1: 向量1
:param v2: 向量2
:return: v1和v2的叉乘
"""
return vec(v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0])
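# Quick orientation check for cross: the standard basis satisfies cross(RIGHT, UP) == OUT,
# i.e. cross(vec(1, 0, 0), vec(0, 1, 0)) -> array([0, 0, 1]).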
def vertical_vec(v: np.ndarray) -> np.ndarray:
"""
计算与当前向量垂直的向量
:param v: 向量
:return: 与当前向量垂直的向量
"""
return vec(v[1], -v[0], v[2])
def x_of(v: np.ndarray) -> float:
"""
获取向量的x分量
:param v: 向量
:return: 向量的x分量
"""
return v[0]
def y_of(v: np.ndarray) -> float:
"""
获取向量的y分量
:param v: 向量
:return: 向量的y分量
"""
return v[1]
def z_of(v: np.ndarray) -> float:
"""
获取向量的z分量
:param v: 向量
:return: 向量的z分量
"""
return v[2]
``` |
{
"source": "JinyuJinyuJinyu/masterproject",
"score": 2
} |
#### File: JinyuJinyuJinyu/masterproject/utils.py
```python
import os
import numpy as np
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import cv2
import time
image_size = (224,224)
# training images path
img_path = 'imageNet_val/ILSVRC2010_images_val/val'
# validation images path
img_val_path = '/home/jinyu/Downloads/ILSVRC2010_images_test/test'
img2resize_path = ['/home/jinyu/Downloads/imageNet_val/ILSVRC2010_images_val/val']
def resize_images(path,name):
img_ = os.path.join(path,name)
print(img_)
exit()
img = cv2.imread(img_)
img = cv2.resize(img, image_size)
cv2.imwrite(os.path.join(path , name),img)
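# Usage sketch: shrink a single image in place to image_size (directory and file name are
# illustrative):
# resize_images('/home/jinyu/Downloads/ILSVRC2010_images_test/test', 'ILSVRC2010_test_00000001.JPEG')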
def subract_one_lable():
    # Some ground-truth label files start counting at 1, not 0; convert them so labels start from 0.
ground_truth_path = 'imageNet_val/test_grond_truth.txt'
output_path = 'test_grond_truth_zero.txt'
f = open(ground_truth_path, 'r')
with open(output_path, 'w') as of:
for label in f:
of.write(str(int(label)-1) + '\n')
def load_dat():
training_labels = 'imageNet_val/ILSVRC2010_validation_ground_truth.txt'
validation_labels = 'test_grond_truth_zero.txt'
ttl2load = len(os.listdir(img_path))
ttl2load +=len(os.listdir(img_val_path))
pbar = tqdm(total=ttl2load)
# training images label path
f = open(training_labels,'r')
# validation images label path
f_val = open(validation_labels,'r')
dat_x = []
dat_y = []
for lable in f:
dat_y.append(lable)
for f_n in os.listdir(img_path):
img_ = os.path.join(img_path, f_n)
img = cv2.imread(img_)
dat_x.append(img)
pbar.update(1)
dat_x = np.array(dat_x)
dat_y = np.array(dat_y)
dat_y = dat_y.astype('int32')
val_x = []
val_y = []
for f_n in os.listdir(img_val_path):
img_ = os.path.join(img_val_path,f_n)
img = cv2.imread(img_)
val_x.append(img)
pbar.update(1)
for lable in f_val:
val_y.append(lable)
val_x = np.array(val_x)
val_y = np.array(val_y)
val_y = val_y.astype('int32')
print('done')
return dat_x ,val_x,dat_y ,val_y
def main():
ttl2resize = 0
for i in range(len(img2resize_path)):
ttl2resize += len(os.listdir(img2resize_path[i]))
pbar = tqdm(total=ttl2resize)
for i in range(len(img2resize_path)):
for f_n in os.listdir(img2resize_path[i]):
resize_images(img2resize_path[i],f_n)
pbar.update(1)
if __name__ == '__main__':
main()
# subract_one_lable()
``` |
{
"source": "jinyuKING/Paddle",
"score": 2
} |
#### File: unittests/mkldnn/test_matmul_mkldnn_op.py
```python
from __future__ import print_function
import unittest, os
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
@skip_check_grad_ci(reason="DNNL's MatMul doesn't implement grad kernel.")
class TestDnnlMatMulOp(OpTest):
def generate_data(self):
self.x = np.random.random((25, 2, 2)).astype("float32")
self.y = np.random.random((25, 2, 2)).astype("float32")
self.alpha = 1.0
self.out = self.alpha * np.matmul(self.x, self.y)
def set_attributes(self):
self.alpha = self.alpha if hasattr(self, 'alpha') else 1.0
self.attrs = {'alpha': self.alpha}
def setUp(self):
# Set max isa, otherwise fails on SKX and earlier
os.environ["DNNL_MAX_CPU_ISA"] = "AVX"
self.op_type = "matmul"
self._cpu_only = True
self.use_mkldnn = True
self.generate_data()
self.set_attributes()
self.attrs['use_mkldnn'] = True
self.inputs = {'X': self.x, 'Y': self.y}
self.outputs = {'Out': self.out}
def test_check_output(self):
self.check_output()
class TestDnnlMatMulOpAlpha(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((17, 2, 3)).astype("float32")
self.y = np.random.random((17, 3, 2)).astype("float32")
self.alpha = 2.0
self.out = self.alpha * np.matmul(self.x, self.y)
class TestDnnlMatMulOp2D(TestDnnlMatMulOp):
def print_tensor(self, name, tensor):
print(name)
print(tensor)
def generate_data(self):
self.x = np.random.random((12, 9)).astype("float32")
self.y = np.random.random((9, 12)).astype("float32")
self.out = np.matmul(self.x, self.y)
class TestDnnlMatMulOpTransposeX(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((12, 9)).astype("float32")
self.y = np.random.random((12, 9)).astype("float32")
self.out = np.matmul(np.transpose(self.x), self.y)
def set_attributes(self):
self.attrs = {'transpose_X': True}
class TestDnnlMatMulOpTransposeY(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((12, 9)).astype("float32")
self.y = np.random.random((12, 9)).astype("float32")
self.out = np.matmul(self.x, np.transpose(self.y))
def set_attributes(self):
self.attrs = {'transpose_Y': True}
class TestDnnlMatMulOpTransposeY3D(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((17, 3, 2)).astype("float32")
self.y = np.random.random((17, 3, 2)).astype("float32")
self.out = np.matmul(self.x, np.transpose(self.y, (0, 2, 1)))
def set_attributes(self):
self.attrs = {'transpose_Y': True}
class TestDnnlMatMulOpInt8NoScales(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((12, 9)).astype("int8")
self.y = np.random.random((9, 12)).astype("int8")
self.out = np.matmul(self.x, self.y)
class TestDnnlMatMulOpInt8(TestDnnlMatMulOp):
def quantize(self, tensor):
scale = 127. / np.abs(np.amax(tensor))
quantized = np.round(scale * tensor).astype("int8")
return scale, quantized
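    # Worked example of the symmetric quantization above: if the largest magnitude in the
    # tensor is 0.5, then scale = 127 / 0.5 = 254, so a value of 0.25 is stored as
    # round(254 * 0.25) = 64 in int8.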
def generate_data(self):
x_float = np.random.random((12, 9)).astype("float32")
self.x_scale, self.x = self.quantize(x_float)
y_float = np.random.random((9, 12)).astype("float32")
self.y_scale, self.y = self.quantize(y_float)
out_float = np.matmul(x_float, y_float)
self.out_scale, self.out = self.quantize(out_float)
def set_attributes(self):
self.attrs = {
'Scale_x': self.x_scale,
'Scale_y': self.y_scale,
'Scale_out': self.out_scale,
}
def test_check_output(self):
int_atol = 1
self.check_output(atol=int_atol)
class TestDnnlMatMulOpInt8ForceFP32(TestDnnlMatMulOpInt8):
def generate_data(self):
x_float = np.random.random((12, 9)).astype("float32")
self.x_scale, self.x = self.quantize(x_float)
y_float = np.random.random((9, 12)).astype("float32")
self.y_scale, self.y = self.quantize(y_float)
out_float = np.matmul(x_float, y_float)
self.out = out_float
def set_attributes(self):
self.attrs = {
'Scale_x': self.x_scale,
'Scale_y': self.y_scale,
'force_fp32_output': True
}
class TestDnnlMatMulOpInt8ForceFP32BasicScales(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.randint(0, 3, (12, 9)).astype("int8")
self.y = np.random.randint(0, 3, (9, 12)).astype("int8")
self.out = np.matmul(self.x, self.y).astype("float32")
def set_attributes(self):
self.attrs = {'force_fp32_output': True}
if __name__ == "__main__":
unittest.main()
```
#### File: tests/unittests/test_dyn_rnn.py
```python
from __future__ import print_function
import paddle.fluid as fluid
import paddle
import unittest
import numpy
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.layers.control_flow import lod_rank_table
from paddle.fluid.layers.control_flow import max_sequence_len
from paddle.fluid.layers.control_flow import lod_tensor_to_array
from paddle.fluid.layers.control_flow import array_to_lod_tensor
from paddle.fluid.layers.control_flow import shrink_memory
from fake_reader import fake_imdb_reader
class TestDynamicRNN(unittest.TestCase):
def setUp(self):
self.word_dict_len = 5147
self.BATCH_SIZE = 2
reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)
self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)
def _train(self,
main_program,
startup_program,
feed_list,
fetch_list,
is_nested=False,
max_iters=1):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
data = next(self.train_data())
for iter_id in range(max_iters):
fetch_outs = exe.run(main_program,
feed=feeder.feed(data),
fetch_list=fetch_list,
return_numpy=False)
if len(fetch_list) == 3:
rnn_in_seq = fetch_outs[0]
rnn_out_seq = fetch_outs[1]
if not is_nested:
# Check for lod set in runtime. When lod_level is 1,
# the lod of DynamicRNN's output should be the same as input.
self.assertEqual(rnn_in_seq.lod(), rnn_out_seq.lod())
loss_i = numpy.array(fetch_outs[2])
elif len(fetch_list) == 1:
loss_i = numpy.array(fetch_outs[0])
#print(loss_i)
self.assertEqual((1, ), loss_i.shape)
self.assertFalse(numpy.isnan(loss_i))
if iter_id == 0:
loss_0 = loss_i
if max_iters > 10:
# loss should be small after 10 mini-batch
self.assertLess(loss_i[0], loss_0[0])
def test_plain_while_op(self):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
sentence = fluid.layers.data(
name='word', shape=[1], dtype='int64', lod_level=1)
sent_emb = fluid.layers.embedding(
input=sentence, size=[self.word_dict_len, 32], dtype='float32')
rank_table = lod_rank_table(x=sent_emb)
sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table)
seq_len = max_sequence_len(rank_table=rank_table)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
i.stop_gradient = False
boot_mem = fluid.layers.fill_constant_batch_size_like(
input=fluid.layers.array_read(
array=sent_emb_array, i=i),
value=0,
shape=[-1, 100],
dtype='float32')
boot_mem.stop_gradient = False
mem_array = fluid.layers.array_write(x=boot_mem, i=i)
cond = fluid.layers.less_than(x=i, y=seq_len)
cond.stop_gradient = False
while_op = fluid.layers.While(cond=cond)
out = fluid.layers.create_array(dtype='float32')
with while_op.block():
mem = fluid.layers.array_read(array=mem_array, i=i)
ipt = fluid.layers.array_read(array=sent_emb_array, i=i)
mem = shrink_memory(x=mem, i=i, table=rank_table)
hidden = fluid.layers.fc(input=[mem, ipt], size=100, act='tanh')
fluid.layers.array_write(x=hidden, i=i, array=out)
fluid.layers.increment(x=i, in_place=True)
fluid.layers.array_write(x=hidden, i=i, array=mem_array)
fluid.layers.less_than(x=i, y=seq_len, cond=cond)
result_all_timesteps = array_to_lod_tensor(x=out, table=rank_table)
last = fluid.layers.sequence_last_step(input=result_all_timesteps)
logits = fluid.layers.fc(input=last, size=1, act=None)
label = fluid.layers.data(name='label', shape=[1], dtype='float32')
loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=logits, label=label)
loss = fluid.layers.mean(loss)
sgd = fluid.optimizer.SGD(1e-4)
sgd.minimize(loss=loss)
# Check for lod_level set in compile-time.
self.assertEqual(sent_emb.lod_level, result_all_timesteps.lod_level)
self._train(
main_program=main_program,
startup_program=startup_program,
feed_list=[sentence, label],
fetch_list=[sent_emb, result_all_timesteps, loss],
is_nested=False,
max_iters=1)
def test_train_dynamic_rnn(self):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
sentence = fluid.layers.data(
name='word', shape=[1], dtype='int64', lod_level=1)
sent_emb = fluid.layers.embedding(
input=sentence, size=[self.word_dict_len, 32], dtype='float32')
drnn = fluid.layers.DynamicRNN()
with drnn.block():
in_ = drnn.step_input(sent_emb)
mem = drnn.memory(shape=[100], dtype='float32')
out_ = fluid.layers.fc(input=[in_, mem], size=100, act='tanh')
drnn.update_memory(mem, out_)
drnn.output(out_)
drnn_result = drnn()
last = fluid.layers.sequence_last_step(input=drnn_result)
logits = fluid.layers.fc(input=last, size=1, act=None)
label = fluid.layers.data(name='label', shape=[1], dtype='float32')
loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=logits, label=label)
loss = fluid.layers.mean(loss)
sgd = fluid.optimizer.Adam(1e-3)
sgd.minimize(loss=loss)
# Check for lod_level set in compile-time.
self.assertEqual(sent_emb.lod_level, drnn_result.lod_level)
self._train(
main_program=main_program,
startup_program=startup_program,
feed_list=[sentence, label],
fetch_list=[sent_emb, drnn_result, loss],
is_nested=False,
max_iters=100)
def _fake_reader(self):
seq_len, label = [[2, 2]], [0, 1]
data = []
for ele in seq_len:
for j in ele:
data.append([numpy.random.randint(30) for _ in range(j)])
while True:
yield data, label
# this unit test is just used to the two layer nested dyn_rnn.
def test_train_nested_dynamic_rnn(self):
word_dict = [i for i in range(30)]
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
sentence = fluid.layers.data(
name='word', shape=[1], dtype='int64', lod_level=2)
label = fluid.layers.data(
name='label', shape=[1], dtype='float32', lod_level=1)
drnn0 = fluid.layers.DynamicRNN()
with drnn0.block():
in_0 = drnn0.step_input(sentence)
assert in_0.lod_level == 1, "the lod level of in_ should be 1"
sentence_emb = fluid.layers.embedding(
input=in_0, size=[len(word_dict), 32], dtype='float32')
out_0 = fluid.layers.fc(input=sentence_emb,
size=100,
act='tanh')
drnn1 = fluid.layers.DynamicRNN()
with drnn1.block():
in_1 = drnn1.step_input(out_0)
assert in_1.lod_level == 0, "the lod level of in_1 should be 0"
out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh')
drnn1.output(out_1)
drnn1_result = drnn1()
last_1 = fluid.layers.sequence_last_step(input=drnn1_result)
drnn0.output(last_1)
last = drnn0()
logits = fluid.layers.fc(input=last, size=1, act=None)
loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=logits, label=label)
loss = fluid.layers.mean(loss)
sgd = fluid.optimizer.SGD(1e-3)
sgd.minimize(loss=loss)
train_data_orig = self.train_data
self.train_data = paddle.batch(self._fake_reader, batch_size=2)
self._train(
main_program=main_program,
startup_program=startup_program,
feed_list=[sentence, label],
fetch_list=[loss],
is_nested=True,
max_iters=100)
self.train_data = train_data_orig
# this unit test just exercises the two-layer nested dyn_rnn.
def test_train_nested_dynamic_rnn2(self):
word_dict = [i for i in range(30)]
hidden_size = 32
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
sentence = fluid.layers.data(
name='word', shape=[1], dtype='int64', lod_level=2)
label = fluid.layers.data(
name='label', shape=[1], dtype='float32', lod_level=1)
drnn0 = fluid.layers.DynamicRNN()
with drnn0.block():
in_0 = drnn0.step_input(sentence)
sentence_emb = fluid.layers.embedding(
input=in_0,
size=[len(word_dict), hidden_size],
dtype='float32')
input_forward_proj = fluid.layers.fc(input=sentence_emb,
size=hidden_size * 4,
act=None,
bias_attr=False)
forward, _ = fluid.layers.dynamic_lstm(
input=input_forward_proj,
size=hidden_size * 4,
use_peepholes=False)
drnn1 = fluid.layers.DynamicRNN()
with drnn1.block():
in_1 = drnn1.step_input(forward)
out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh')
drnn1.output(out_1)
last = fluid.layers.sequence_last_step(input=drnn1())
drnn0.output(last)
last = drnn0()
logits = fluid.layers.fc(input=last, size=1, act=None)
loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=logits, label=label)
loss = fluid.layers.mean(loss)
sgd = fluid.optimizer.SGD(1e-3)
sgd.minimize(loss=loss)
train_data_orig = self.train_data
self.train_data = paddle.batch(self._fake_reader, batch_size=2)
self._train(
main_program=main_program,
startup_program=startup_program,
feed_list=[sentence, label],
fetch_list=[loss],
is_nested=True,
max_iters=100)
self.train_data = train_data_orig
class TestDynamicRNNErrors(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
init = fluid.layers.zeros(shape=[1], dtype='float32')
shape = 'shape'
sentence = fluid.data(
name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
# The type of Input(shape) in API(memory) must be list or tuple
def input_shape_type_of_memory():
drnn = fluid.layers.DynamicRNN()
with drnn.block():
res = drnn.memory(init, shape)
self.assertRaises(TypeError, input_shape_type_of_memory)
# The type of element of Input(*outputs) in API(output) must be Variable.
def outputs_type_of_output():
drnn = fluid.layers.DynamicRNN()
with drnn.block():
word = drnn.step_input(sentence)
memory = drnn.memory(shape=[10], dtype='float32', value=0)
hidden = fluid.layers.fc(input=[word, memory],
size=10,
act='tanh')
out = np.ones(1).astype('float32')
drnn.update_memory(ex_mem=memory, new_mem=hidden)
drnn.output(hidden, out)
self.assertRaises(TypeError, outputs_type_of_output)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JinyuSun-csu/D-GCAN",
"score": 2
} |
#### File: D-GCAN/Discussion/GNN.py
```python
import timeit
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pickle
from sklearn.metrics import roc_auc_score,roc_curve
from sklearn.metrics import confusion_matrix
import preprocess as pp
import pandas as pd
import matplotlib.pyplot as plt
class MolecularGraphNeuralNetwork(nn.Module):
def __init__(self, N_fingerprints, dim, layer_hidden, layer_output):
super(MolecularGraphNeuralNetwork, self).__init__()
self.embed_fingerprint = nn.Embedding(N_fingerprints, dim)
self.W_fingerprint = nn.ModuleList([nn.Linear(dim, dim)
for _ in range(layer_hidden)])
self.W_output = nn.ModuleList([nn.Linear(dim, dim)
for _ in range(layer_output)])
self.W_property = nn.Linear(dim, 2)
def pad(self, matrices, pad_value):
"""Pad the list of matrices
with a pad_value (e.g., 0) for batch processing.
For example, given a list of matrices [A, B, C],
we obtain a new matrix [A00, 0B0, 00C],
where 0 is the zero (i.e., pad value) matrix.
"""
shapes = [m.shape for m in matrices]
M, N = sum([s[0] for s in shapes]), sum([s[1] for s in shapes])
zeros = torch.FloatTensor(np.zeros((M, N))).to(device)
pad_matrices = pad_value + zeros
i, j = 0, 0
for k, matrix in enumerate(matrices):
m, n = shapes[k]
pad_matrices[i:i+m, j:j+n] = matrix
i += m
j += n
return pad_matrices
def update(self, matrix, vectors, layer):
hidden_vectors = torch.relu(self.W_fingerprint[layer](vectors))
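# one message-passing step: each node keeps its transformed feature and adds the
# adjacency-weighted sum of its neighbours' transformed features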
return hidden_vectors + torch.matmul(matrix, hidden_vectors)
def sum(self, vectors, axis):
sum_vectors = [torch.sum(v, 0) for v in torch.split(vectors, axis)]
return torch.stack(sum_vectors)
def gnn(self, inputs):
"""Cat or pad each input data for batch processing."""
Smiles,fingerprints, adjacencies, molecular_sizes = inputs
fingerprints = torch.cat(fingerprints)
adjacencies = self.pad(adjacencies, 0)
"""GNN layer (update the fingerprint vectors)."""
fingerprint_vectors = self.embed_fingerprint(fingerprints)
for l in range(layer_hidden):
hs = self.update(adjacencies, fingerprint_vectors, l)
fingerprint_vectors = F.normalize(hs, 2, 1) # normalize.
"""Molecular vector by sum or mean of the fingerprint vectors."""
molecular_vectors = self.sum(fingerprint_vectors, molecular_sizes)
return Smiles,molecular_vectors
def mlp(self, vectors):
"""Classifier based on multilayer perceptron给予多层感知器的分类器."""
for l in range(layer_output):
vectors = torch.relu(self.W_output[l](vectors))
outputs = torch.sigmoid(self.W_property(vectors))
return outputs
def forward_classifier(self, data_batch, train):
inputs = data_batch[:-1]
correct_labels = torch.cat(data_batch[-1])
if train:
Smiles,molecular_vectors = self.gnn(inputs)
predicted_scores = self.mlp(molecular_vectors)
loss = F.cross_entropy(predicted_scores, correct_labels.long())
predicted_scores = predicted_scores.to('cpu').data.numpy()
predicted_scores = [s[1] for s in predicted_scores]
correct_labels = correct_labels.to('cpu').data.numpy()
return loss,predicted_scores, correct_labels
else:
with torch.no_grad():
Smiles,molecular_vectors = self.gnn(inputs)
predicted_scores = self.mlp(molecular_vectors)
loss = F.cross_entropy(predicted_scores, correct_labels.long())
predicted_scores = predicted_scores.to('cpu').data.numpy()
predicted_scores = [s[1] for s in predicted_scores]
correct_labels = correct_labels.to('cpu').data.numpy()
return Smiles,loss,predicted_scores, correct_labels
class Trainer(object):
def __init__(self, model):
self.model = model
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
def train(self, dataset):
np.random.shuffle(dataset)
N = len(dataset)
loss_total = 0
P, C = [], []
for i in range(0, N, batch_train):
data_batch = list(zip(*dataset[i:i+batch_train]))
loss,predicted_scores, correct_labels= self.model.forward_classifier(data_batch, train=True)
P.append(predicted_scores)
C.append(correct_labels)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
loss_total += loss.item()
tru=np.concatenate(C)
pre=np.concatenate(P)
AUC = roc_auc_score(tru, pre)
pred = [1 if i >0.4 else 0 for i in pre]
predictions =np.stack((tru,pred,pre))
return AUC, loss_total,predictions
class Tester(object):
def __init__(self, model):
self.model = model
def test_classifier(self, dataset):
N = len(dataset)
loss_total = 0
SMILES,P, C ='', [], []
for i in range(0, N, batch_test):
data_batch = list(zip(*dataset[i:i+batch_test]))
(Smiles,loss,predicted_scores,correct_labels) = self.model.forward_classifier(
data_batch, train=False)
SMILES += ' '.join(Smiles) + ' '
loss_total += loss.item()
P.append(predicted_scores)
C.append(correct_labels)
SMILES = SMILES.strip().split()
tru=np.concatenate(C)
pre=np.concatenate(P)
AUC = roc_auc_score(tru, pre)
pred = [1 if i >0.4 else 0 for i in pre]
# Tru=map(str,np.concatenate(C))
# Pre=map(str,np.concatenate(P))
# predictions = '\n'.join(['\t'.join(x) for x in zip(SMILES, Tru, Pre)])
predictions =np.stack((tru,pred,pre))
return AUC, loss_total,predictions
def save_result(self, result, filename):
with open(filename, 'a') as f:
f.write(result + '\n')
def save_predictions(self, predictions, filename):
with open(filename, 'w') as f:
f.write('Smiles\tCorrect\tPredict\n')
f.write(predictions + '\n')
def save_model(self, model, filename):
torch.save(model.state_dict(), filename)
def split_dataset(dataset, ratio):
"""Shuffle and split a dataset."""
np.random.seed(111) # fix the seed for shuffle.
np.random.shuffle(dataset)
n = int(ratio * len(dataset))
return dataset[:n], dataset[n:]
def edit_dataset(drug,non_drug,task):
np.random.seed(111) # fix the seed for shuffle.
if task =='balance':
#np.random.shuffle(non_drug)
non_drug=non_drug[0:len(drug)]
else:
np.random.shuffle(non_drug)
np.random.shuffle(drug)
dataset_train_drug, dataset_test_drug = split_dataset(drug, 0.9)
# dataset_train_drug,dataset_dev_drug = split_dataset(dataset_train_drug, 0.9)
dataset_train_no, dataset_test_no = split_dataset(non_drug, 0.9)
# dataset_train_no,dataset_dev_no = split_dataset(dataset_train_no, 0.9)
dataset_train = dataset_train_drug+dataset_train_no
dataset_test= dataset_test_drug+dataset_test_no
# dataset_dev = dataset_dev_drug+dataset_dev_no
return dataset_train, dataset_test
def dump_dictionary(dictionary, filename):
with open(filename, 'wb') as f:
pickle.dump(dict(dictionary), f)
if __name__ == "__main__":
radius=1
dim=65
layer_hidden=0
layer_output=5
batch_train=48
batch_test=48
lr=3e-4
lr_decay=0.85
decay_interval=10  # decay interval
iteration=140
N=5000
(radius, dim, layer_hidden, layer_output,
batch_train, batch_test, decay_interval,
iteration) = map(int, [radius, dim, layer_hidden, layer_output,
batch_train, batch_test,
decay_interval, iteration])
lr, lr_decay = map(float, [lr, lr_decay])
if torch.cuda.is_available():
device = torch.device('cuda')
print('The code uses a GPU!')
else:
device = torch.device('cpu')
print('The code uses a CPU...')
print('-'*100)
# print('Preprocessing the', dataset, 'dataset.')
print('Just a moment......')
print('-'*100)
path='E:/code/drug/drugnn/'
dataname=''
dataset_train = pp.create_dataset('data_train.txt',path,dataname)
dataset_test = pp.create_dataset('data_test.txt',path,dataname)
#dataset_train, dataset_test = edit_dataset(dataset_drug, dataset_nondrug,'balance')
#dataset_train, dataset_dev = split_dataset(dataset_train, 0.9)
print('The preprocess has finished!')
print('# of training data samples:', len(dataset_train))
#print('# of development data samples:', len(dataset_dev))
print('# of test data samples:', len(dataset_test))
print('-'*100)
print('Creating a model.')
torch.manual_seed(111)
model = MolecularGraphNeuralNetwork(
N, dim, layer_hidden, layer_output).to(device)
trainer = Trainer(model)
tester = Tester(model)
print('# of model parameters:',
sum([np.prod(p.size()) for p in model.parameters()]))
print('-'*100)
file_result = path+'AUC'+'.txt'
# file_result = '../output/result--' + setting + '.txt'
result = 'Epoch\tTime(sec)\tLoss_train\tLoss_test\tAUC_train\tAUC_test'
file_test_result = path+ 'test_prediction'+ '.txt'
file_predictions = path+'train_prediction' +'.txt'
file_model = path+'model'+'.h5'
with open(file_result, 'w') as f:
f.write(result + '\n')
print('Start training.')
print('The result is saved in the output directory every epoch!')
np.random.seed(111)
start = timeit.default_timer()
for epoch in range(iteration):
epoch += 1
if epoch % decay_interval == 0:
trainer.optimizer.param_groups[0]['lr'] *= lr_decay
#['amsgrad', 'params', 'lr', 'betas', 'weight_decay', 'eps']
prediction_train,loss_train,train_res= trainer.train(dataset_train)
#prediction_dev,dev_res = tester.test_classifier(dataset_dev)
prediction_test,loss_test,test_res = tester.test_classifier(dataset_test)
time = timeit.default_timer() - start
if epoch == 1:
minutes = time * iteration / 60
hours = int(minutes / 60)
minutes = int(minutes - 60 * hours)
print('The training will finish in about',
hours, 'hours', minutes, 'minutes.')
print('-'*100)
print(result)
result = '\t'.join(map(str, [epoch, time, loss_train, loss_test,prediction_train,prediction_test]))
tester.save_result(result, file_result)
print(result)
loss = pd.read_table(file_result)
plt.plot(loss['Loss_train'], color='r',label='Loss of train set')
plt.plot(loss['Loss_test'], color='y',label='Loss of test set')
plt.plot(loss['AUC_train'], color='y',label='AUC of train set')
plt.plot(loss['AUC_test'], color='b',label='AUC of test set')
# plt.plot(loss['AUC_test'], color='y',label='AUC of test set')
plt.ylabel('Loss / AUC')
plt.xlabel('Epoch')
plt.legend()
plt.savefig(path+'loss.tif',dpi=300)
plt.show()
colors = ['#00CED1','#DC143C' ]
target_names=np.array(['druglike','not-drug'])
lw=2
res_test = test_res.T
for color,i,target_name in zip(colors,[1,0],target_names):
plt.scatter((res_test[res_test[:,0]==i,0]),(res_test[res_test[:,0]==i,2]),color = color,alpha=.8,lw=lw,label=target_name)
plt.legend(loc='best',shadow=False,scatterpoints=1)
plt.title('the results of gnn classification')
res_train = train_res.T
cn_matrix=confusion_matrix(res_train[:,0], res_train[:,1])
cn_matrix
tn1 = cn_matrix[0,0]
tp1 = cn_matrix[1,1]
fn1 = cn_matrix[1,0]
fp1 = cn_matrix[0,1]
bacc_train = ((tp1/(tp1+fn1))+(tn1/(tn1+fp1)))/2  # balanced accuracy
pre_train = tp1/(tp1+fp1)  # precision (q+)
rec_train = tp1/(tp1+fn1)  # recall (sensitivity)
sp_train=tn1/(tn1+fp1)  # specificity
q__train=tn1/(tn1+fn1)  # negative predictive value (q-)
f1_train = 2*pre_train*rec_train/(pre_train+rec_train)  # F1 score
mcc_train = ((tp1*tn1) - (fp1*fn1))/math.sqrt((tp1+fp1)*(tp1+fn1)*(tn1+fp1)*(tn1+fn1))  # Matthews correlation coefficient
acc_train=(tp1+tn1)/(tp1+fp1+fn1+tn1)  # accuracy
fpr_train, tpr_train, thresholds_train =roc_curve(res_train[:,0],res_train[:,1])
print('bacc_train:',bacc_train)
print('pre_train:',pre_train)
print('rec_train:',rec_train)
print('f1_train:',f1_train)
print('mcc_train:',mcc_train)
print('sp_train:',sp_train)
print('q__train:',q__train)
print('acc_train:',acc_train)
'''
res_dev = dev_res.T
cn_matrix=confusion_matrix(res_dev[:,0], res_dev[:,1])
cn_matrix
tn2 = cn_matrix[0,0]
tp2 = cn_matrix[1,1]
fn2 = cn_matrix[1,0]
fp2 = cn_matrix[0,1]
bacc_dev = ((tp2/(tp2+fn2))+(tn2/(tn2+fp2)))/2  # balanced accuracy
pre_dev= tp2/(tp2+fp2)  # precision (q+)
rec_dev = tp2/(tp2+fn2)  # recall (sensitivity)
sp_dev=tn2/(tn2+fp2)  # specificity
q__dev=tn2/(tn2+fn2)  # negative predictive value (q-)
f1_dev = 2*pre_dev*rec_dev/(pre_dev+rec_dev)  # F1 score
mcc_dev = ((tp2*tn2) - (fp2*fn2))/math.sqrt((tp2+fp2)*(tp2+fn2)*(tn2+fp2)*(tn2+fn2))  # Matthews correlation coefficient
acc_dev=(tp2+tn2)/(tp2+fp2+fn2+tn2)  # accuracy
fpr_dev, tpr_dev, thresholds_dev =roc_curve(res_dev[:,0],res_dev[:,1])
print('bacc_dev:',bacc_dev)
print('pre_dev:',pre_dev)
print('rec_dev:',rec_dev)
print('f1_dev:',f1_dev)
print('mcc_dev:',mcc_dev)
print('sp_dev:',sp_dev)
print('q__dev:',q__dev)
print('acc_dev:',acc_dev)
'''
cnf_matrix=confusion_matrix(res_test[:,0], res_test[:,1])
cnf_matrix
tn = cnf_matrix[0,0]
tp = cnf_matrix[1,1]
fn = cnf_matrix[1,0]
fp = cnf_matrix[0,1]
bacc = ((tp/(tp+fn))+(tn/(tn+fp)))/2  # balanced accuracy
pre = tp/(tp+fp)  # precision (q+)
rec = tp/(tp+fn)  # recall (sensitivity)
sp=tn/(tn+fp)  # specificity
q_=tn/(tn+fn)  # negative predictive value (q-)
f1 = 2*pre*rec/(pre+rec)  # F1 score
mcc = ((tp*tn) - (fp*fn))/math.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))  # Matthews correlation coefficient
acc=(tp+tn)/(tp+fp+fn+tn)  # accuracy
fpr, tpr, thresholds =roc_curve(res_test[:,0], res_test[:,1])
print('bacc:',bacc)
print('pre:',pre)
print('rec:',rec)
print('f1:',f1)
print('mcc:',mcc)
print('sp:',sp)
print('q_:',q_)
print('acc:',acc)
print('auc:',prediction_test)
``` |
{
"source": "JinyuXu/langevin_dynamics",
"score": 3
} |
#### File: langevin_dynamics/langevin_dynamics/langevin_dynamics.py
```python
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
import argparse
def getinput():
input = argparse.ArgumentParser()
input.add_argument('--initial_position', type = float, default = 0, help = 'Initial position of the particle, default = 0' )
input.add_argument('--initial_velocity', type = float, default = 0, help = 'Initial velocity of the particle, default = 0' )
input.add_argument('--temperature', type = float, default = 300, help = 'Temperature of the molecule, default = 300' )
input.add_argument('--damping_coefficient', type = float, default = 0.1, help = 'Damping Coefficient of the molecule, default = 0.1' )
input.add_argument('--time_step', type = float, default = 0.2, help = 'Time interval of the simulation, default = 0.2' )
input.add_argument('--wall_size', type = float, default = 5, help = 'Wall size of the simulation, default = 5' )
input.add_argument('--total_time', type = float, default = 1000, help = 'Total time of the simulation, default = 1000' )
inp = input.parse_args()
return inp
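# Velocity increment for one timestep: a linear drag term -gamma*v/mass plus a Gaussian
# random force with standard deviation sigma, both scaled by timestep (an Euler-style
# discretisation of the Langevin equation).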
def acceleration(gamma=0.1,velocity=0,temperature=300,timestep=0.1,mass=1):
sigma=np.sqrt(2*temperature*gamma*1*timestep)
return (-gamma*velocity/mass + np.random.normal(0,sigma))*timestep
def checkwall(position, wallsize):
if position >= wallsize or position<=0:
return True
else:
return False
def lgmotion(velocity,timestep):
return velocity*timestep
def integrate(position=0,velocity=0,temperature=300,gamma=0.1,timestep=0.1,wallsize=5,totaltime=1000,mass=1):
timepass=0
indexnum=0
index=[]
while timepass < totaltime :
indexnum +=1
index.append([indexnum,timepass,position,velocity])
timepass+=timestep
velocity += acceleration(gamma, velocity, temperature, timestep)
position += lgmotion(velocity, timestep)
if checkwall(position,wallsize):
if position >= wallsize:
position = wallsize
index.append([indexnum+1,timepass,position,velocity])
else:
position= 0
index.append([indexnum+1,timepass,position,velocity])
break
return timepass,index
def filecreation(index):
indexf=np.array(index)
timef=indexf[:,1]
positionf=indexf[:,2]
velocityf=indexf[:,3]
with open('Langevin_Motion.txt','w+') as file:
file.write('Index Time Position Velocity \n')
for i in range(len(timef)):
file.write('{} {:.3f} {:.5f} {:.5f} \n'.format(i,timef[i],positionf[i],velocityf[i]))
def histogram(arr):
plt.figure(0)
plt.hist(arr,bins=20)
plt.title('100 runs of Langevin Motion')
plt.xlabel('Time passed')
plt.ylabel('Number of runs')
plt.savefig('histogram.png')
def trajectory(x,y):
plt.figure(1)
plt.plot(x,y)
plt.title('Position vs Time')
plt.xlabel('Time passed')
plt.ylabel('Position')
plt.savefig('trajectory.png')
def main():
#get input for simulation
inp=getinput()
#run for 100 times, collecting all the relevant data
t_arr=[] #time
for i in range(100):
t,idx=integrate(position=inp.initial_position,velocity=inp.initial_velocity,temperature=inp.temperature,gamma=inp.damping_coefficient,timestep=inp.time_step,wallsize=inp.wall_size,totaltime=inp.total_time,mass=1)
t_arr.append(t)
#plot the histogram of 100 runs
histogram(t_arr)
#plot the position vs time plot of the last run
trjdata=np.array(idx)
xdata=trjdata[:,1]
ydata=trjdata[:,2]
trajectory(xdata,ydata)
#write the index of the last run into a txt file
filecreation(idx)
if __name__ == '__main__':
main()
``` |
{
"source": "jinyyy666/splendor",
"score": 3
} |
#### File: jinyyy666/splendor/model.py
```python
from enum import Enum
import csv
import random
from util import greater_than_or_equal_to
class Gem(Enum):
RED = 'r'
GREEN = 'g'
BLUE = 'b'
WHITE = 'w'
BLACK = 'k'
GOLD = 'o'
class Card(object):
def __init__(self, id, level, gem, reputation, cost):
self.id = id
self.level = level
self.gem = gem
self.reputation = reputation
self.cost = cost
class Noble(object):
def __init__(self, id, reputation, cost):
self.id = id
self.reputation = reputation
self.cost = cost
def can_attract(self, card_summary):
'''Returns whether a player can attract the noble'''
return greater_than_or_equal_to(card_summary, self.cost)
```
#### File: splendor/strategies/aggressive_strategy.py
```python
import functools, operator, math
from strategies.strategy import Strategy
from copy import deepcopy
from player import (
Action,
ActionParams,
)
from model import (
Gem
)
from util import (
greater_than_or_equal_to
)
class CardValue(object):
def __init__(self, card_id, card_rep, can_afford, value, dist, req_gems):
self.card_id = card_id
self.card_reputation = card_rep
self.can_afford = can_afford
self.value = value
self.dist = dist
self.req_gems = req_gems
'''
Aggressive Strategy:
- It looks at other players as well
- It will try to collect the most common gems
- It will try to purchase the cards from low to high if it can afford
'''
class AggressiveStrategy(Strategy):
def __init__(self, board, player):
self.steps = 0
self.player = player
#self.other_players = other_players
super().__init__(board, [player])
## just pick the top three most common gems
def recommend_gems_to_pick(self, gems_on_board, sorted_card_values):
gem_scores = {gem : 0 for gem in Gem}
for c_v in sorted_card_values:
for g, c in c_v.req_gems.items():
if g == Gem.GOLD:
import pdb; pdb.set_trace()
gem_scores[g] += math.log(1 + c_v.value) * c
sorted_v = sorted(gem_scores.items(), key=lambda kv: kv[1], reverse=True)
gems_to_pick = {}
for gem, _ in sorted_v:
if gem == Gem.GOLD:
continue
if gems_on_board[gem] > 0:
if gem == Gem.GOLD:
import pdb; pdb.set_trace()
gems_to_pick[gem] = 1
if len(gems_to_pick) == 3:
break
return gems_to_pick
def compute_distance(self, eff_gems, cost):
gems_to_pay = {}
gold_count = eff_gems.get(Gem.GOLD, 0)
dist = 0
for g, v in cost.items():
eff_gem = eff_gems.get(g, 0)
if (eff_gem < v):
dist += v - eff_gem
gems_to_pay[g] = v - eff_gem
dist = max(0, dist - gold_count)
if gold_count > 0:
for g, c in gems_to_pay.items():
if c > 0:
while (gold_count > 0) and (gems_to_pay[g] > 0):
gems_to_pay[g] -= 1
gold_count -= 1
if gold_count == 0:
break
cost_to_pay = 0
for _, c in gems_to_pay.items():
cost_to_pay += c
if dist != cost_to_pay:
import pdb; pdb.set_trace()
assert (dist == cost_to_pay)
return dist, gems_to_pay
def get_card_value(self, card_gem, card_rep, can_afford, dist, ply_card_summary):
aff_score = 2 if can_afford else 0
dist_score = math.exp(-1 * dist)
sum_c = 0
n_c = 0
for g, c in ply_card_summary.items():
if c > 0:
n_c += 1
sum_c += c
mean_c = 0 if n_c == 0 else 1.0 * sum_c / n_c
var_new = 0
var_old = 0
for g, c in ply_card_summary.items():
if c > 0:
var_old += abs(c - mean_c) ** 2
if g != card_gem:
var_new += abs(c - mean_c) ** 2
else:
var_new += abs((c + 1) - mean_c) ** 2
card_gem_score = 1 if var_old > var_new else 0
card_rep_score = math.log( 1 + card_rep )
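# overall card value: affordability bonus (2) + closeness score exp(-dist)
# + 1 if buying this gem colour evens out the owned-card colour distribution
# + log(1 + reputation)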
return aff_score + dist_score + card_gem_score + card_rep_score
def get_current_cards_summary(self, cards):
summary = []
eff_gems = self.player.card_summary_plus_current_gems()
ply_card_summary = self.player.card_summary()
for card in cards:
dist, diff = self.compute_distance(eff_gems, card.cost)
can_afford = self.player.can_afford(card)
value = self.get_card_value(card.gem, card.reputation, can_afford, dist, ply_card_summary)
summary.append(CardValue(card.id, card.reputation, can_afford, round(value, 2), dist, diff))
return summary
def next_step(self):
# print(f'In step: {self.steps}')
self.steps = self.steps + 1
cards = self.board.get_cards()
cards_list = functools.reduce(operator.iconcat, cards, [])
gems_on_board = self.board.get_gems()
# get a collective view of the current cards
# try to get the best value one
# if we cannot buy, we just fetch the gems so that we can buy it later
card_values = self.get_current_cards_summary(cards_list)
sorted_card_vals = sorted(card_values, key=lambda c: c.value, reverse=True)
# only for debugging:
# print([ (c.card_id, c.card_reputation, c.can_afford, c.value, c.req_gems, c.dist) for c in sorted_card_vals ])
# try to buy the top 5 cards
for sorted_value in sorted_card_vals[:5]:
if sorted_value.can_afford:
# print(f'buy card: {sorted_value.card_id}')
return ActionParams(self.player.id, Action.BUY_CARD, None, sorted_value.card_id)
# if we cannot afford them, let's collect gems to get closer
gems_to_pick = self.recommend_gems_to_pick(gems_on_board, sorted_card_vals[:5])
if greater_than_or_equal_to(gems_on_board, gems_to_pick) and len(gems_to_pick) > 0:
# print(f'pick three gems - 1: {gems_to_pick}')
return ActionParams(self.player.id, Action.PICK_THREE, gems_to_pick, None)
else:
# print(f'Reserve card: {cards_list[0].id}')
return ActionParams(self.player.id, Action.RESERVE_CARD, None, cards_list[0].id)
```
#### File: splendor/test/player_test.py
```python
import unittest
from copy import deepcopy
from board import Board
from player import Player
from model import (
Gem,
Card,
)
class PlayerTest(unittest.TestCase):
def _check_board_gems(self, current_gems, orginal_gems):
for gem, cnt in current_gems.items():
self.assertEqual(cnt, orginal_gems[gem])
def test_player_sanity(self):
player1 = Player(0)
player2 = Player(1)
self.assertEqual(player1.get_id(), 0)
self.assertEqual(player2.get_id(), 1)
self.assertEqual(player1.card_summary(), {})
def test_can_afford(self):
player = Player(0)
card = Card(
0, 1, Gem.GREEN, 0,
{Gem.GREEN : 1, Gem.RED : 1, Gem.BLACK : 1}
)
self.assertEqual(player.can_afford(card), False)
player.set_gems({Gem.GREEN : 1, Gem.RED: 1, Gem.BLACK: 1})
self.assertEqual(player.can_afford(card), True)
player.set_gems({Gem.GOLD : 3})
self.assertEqual(player.can_afford(card), True)
player.set_gems({Gem.GOLD : 1, Gem.RED: 1, Gem.BLACK: 1})
self.assertEqual(player.can_afford(card), True)
player.set_gems({Gem.GOLD : 1, Gem.RED: 1, Gem.BLACK: 0})
self.assertEqual(player.can_afford(card), False)
player.set_gems({Gem.GOLD : 1, Gem.RED: 1, Gem.BLUE: 2})
self.assertEqual(player.can_afford(card), False)
def test_pick_same_gems(self):
my_board = Board(2)
player1 = Player(0)
player2 = Player(1)
gems = {
Gem.RED : 2
}
player1.pick_same_gems(gems, None, my_board)
self.assertEqual(player1.get_gems()[Gem.RED], 2)
self.assertEqual(player2.get_gems()[Gem.RED], 0)
self.assertEqual(my_board.get_gems()[Gem.RED], 2)
self.assertEqual(my_board.get_gems()[Gem.GREEN], 4)
self.assertEqual(my_board.get_gems()[Gem.BLUE], 4)
self.assertEqual(my_board.get_gems()[Gem.WHITE], 4)
self.assertEqual(my_board.get_gems()[Gem.BLACK], 4)
gems = {
Gem.BLUE : 2
}
player2.pick_same_gems(gems, None, my_board)
self.assertEqual(player2.get_gems()[Gem.BLUE], 2)
self.assertEqual(player1.get_gems()[Gem.BLUE], 0)
self.assertEqual(my_board.get_gems()[Gem.BLUE], 2)
self.assertEqual(my_board.get_gems()[Gem.RED], 2)
self.assertEqual(my_board.get_gems()[Gem.GREEN], 4)
self.assertEqual(my_board.get_gems()[Gem.WHITE], 4)
self.assertEqual(my_board.get_gems()[Gem.BLACK], 4)
def test_pick_different_gems(self):
my_board = Board(2)
player1 = Player(0)
player2 = Player(1)
gems = {
Gem.RED : 1,
Gem.GREEN : 1,
Gem.BLACK : 1,
}
player1.pick_different_gems(gems, None, my_board)
self.assertEqual(player1.get_gems()[Gem.RED], 1)
self.assertEqual(player1.get_gems()[Gem.GREEN], 1)
self.assertEqual(player1.get_gems()[Gem.BLACK], 1)
self.assertEqual(my_board.get_gems()[Gem.RED], 3)
self.assertEqual(my_board.get_gems()[Gem.GREEN], 3)
self.assertEqual(my_board.get_gems()[Gem.BLACK], 3)
self.assertEqual(my_board.get_gems()[Gem.BLUE], 4)
self.assertEqual(my_board.get_gems()[Gem.WHITE], 4)
gems = {
Gem.BLUE : 1,
Gem.GREEN : 1,
Gem.BLACK : 1,
}
player2.pick_different_gems(gems, None, my_board)
self.assertEqual(player2.get_gems()[Gem.BLUE], 1)
self.assertEqual(player2.get_gems()[Gem.GREEN], 1)
self.assertEqual(player2.get_gems()[Gem.BLACK], 1)
self.assertEqual(my_board.get_gems()[Gem.BLUE], 3)
self.assertEqual(my_board.get_gems()[Gem.GREEN], 2)
self.assertEqual(my_board.get_gems()[Gem.BLACK], 2)
self.assertEqual(my_board.get_gems()[Gem.RED], 3)
self.assertEqual(my_board.get_gems()[Gem.WHITE], 4)
gems = {
Gem.BLACK: 3,
}
with self.assertRaises(ValueError):
player1.pick_same_gems(gems, None, my_board)
def test_buy_board_card(self):
my_board = Board(2)
orginal_board_gems = deepcopy(my_board.get_gems())
player1 = Player(0)
cards = my_board.get_cards()
# get level 1 card
card = cards[0][0]
expensive_card = cards[2][0]
# just let player1 pick enough gems to buy the card
player1.pick_different_gems(card.cost, None, my_board)
player1.buy_board_card(None, card, my_board)
self.assertEqual(player1.get_cards(), {card})
self.assertEqual(player1.card_summary()[card.gem], 1)
# check if board gems are back to initial value
current_gems = my_board.get_gems()
self._check_board_gems(current_gems, orginal_board_gems)
# try to buy without any gems at hand:
with self.assertRaises(ValueError):
player1.buy_board_card(None, expensive_card, my_board)
def test_reserve_card(self):
my_board = Board(2)
gold_original = my_board.get_gems()[Gem.GOLD]
player1 = Player(0)
cards = my_board.get_cards()
# get level 1 card
card = cards[0][1]
player1.reserve_card(None, card, my_board)
self.assertEqual(player1.get_gems()[Gem.GOLD], 1)
self.assertEqual(player1.get_rev_cards(), {card})
self.assertEqual(player1.reserve_count, 1)
# try to reverse more:
player1.reserve_card(None, cards[1][0], my_board)
player1.reserve_card(None, cards[1][1], my_board)
self.assertEqual(my_board.get_gems()[Gem.GOLD], gold_original - 3)
with self.assertRaises(ValueError):
player1.reserve_card(None, cards[2][0], my_board)
def test_buy_reserve_card(self):
my_board = Board(2)
orginal_board_gems = deepcopy(my_board.get_gems())
player1 = Player(0)
cards = my_board.get_cards()
# get level 1 card and reserve it
card = cards[0][0]
player1.reserve_card(None, card, my_board)
cost = deepcopy(card.cost)
# test if we can use the gold
for g, c in cost.items():
if c > 0:
cost[g] -= 1
break
player1.pick_different_gems(cost, None, my_board)
player1.buy_reserve_card(None, card, my_board)
self.assertEqual(player1.get_cards(), {card})
self.assertEqual(player1.card_summary()[card.gem], 1)
self.assertEqual(player1.reserve_count, 0)
# check if board gems are back to initial value
current_gems = my_board.get_gems()
self._check_board_gems(current_gems, orginal_board_gems)
def test_reserve_card_and_buy_card(self):
my_board = Board(2)
player1 = Player(0)
cards = my_board.get_cards()
# get level 1 card and reserve it
card1 = cards[0][0]
card2 = cards[0][1]
player1.reserve_card(None, card1, my_board)
player1.reserve_card(None, card2, my_board)
self.assertEqual(player1.reserve_count, 2)
card = cards[0][2]
cost = deepcopy(card.cost)
# test if we can use two golds to buy a simple card
times = 2
for g, c in cost.items():
if c > 0 and times > 0:
cost[g] -= 1
times -= 1
player1.pick_different_gems(cost, None, my_board)
player1.buy_board_card(None, card, my_board)
self.assertEqual(player1.get_cards(), {card})
def test_buy_card_and_use_card_gem(self):
my_board = Board(2)
orginal_board_gems = deepcopy(my_board.get_gems())
player1 = Player(0)
cards = my_board.get_cards()
# just let player1 pick enough gems and use them to buy cards
for i in range(4):
card = cards[0][i]
player1.pick_different_gems(card.cost, None, my_board)
player1.buy_board_card(None, card, my_board)
# get a new card from the refreshed board
card = my_board.get_cards()[0][0]
cost = deepcopy(card.cost)
card_summary = player1.card_summary()
# use the gems to adjust the cost
for g, c in cost.items():
cost[g] = max(0, c - card_summary.get(g, 0))
player1.pick_different_gems(cost, None, my_board)
player1.buy_board_card(None, card, my_board)
self.assertEqual(len(player1.get_cards()), 5)
# check if board gems are back to initial value
current_gems = my_board.get_gems()
self._check_board_gems(current_gems, orginal_board_gems)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jinzaizhichi/gandalf",
"score": 2
} |
#### File: gandalf/gandalf/__init__.py
```python
import os
from flask import Flask
from gandalf.api import api
__version__ = '0.3'
def create_app():
app = Flask(__name__)
token = os.getenv('GITHUB_AUTH_TOKEN')
if token is None:
raise EnvironmentError(
'Environment Variable $GITHUB_AUTH_TOKEN must be set '
'to use the github reporter.')
app.config['GITHUB_AUTH_TOKEN'] = token
app.register_blueprint(api, url_prefix='/api')
return app
``` |
{
"source": "jinzcdev/occupied-gpu",
"score": 3
} |
#### File: occupied-gpu/occupiedgpus/core.py
```python
r'''
This module occupies the free video memory on the specified gpu_id(s), e.g.:
$ python train.py --gpu-ids 0,1,2,3 --epochs 120 --options 0
$ python -m occupiedgpus.core --gpu-ids 0,1,2,3 --epochs 120 --options 0
'''
import argparse
import pynvml
import time
import torch
import torch.nn as nn
from threading import Thread
pynvml.nvmlInit()
class ComputeThread(Thread):
'''
Worker thread that repeatedly performs the tensor computation (once every `delay` seconds, 3 by default).
*name* is the thread name.
*target* is a callable object to be invoked by the `run()`.
*args* is the argument tuple for the target invocation.
'''
def __init__(self, name, *args, target=None):
super(ComputeThread, self).__init__()
self.name = name
self.target = target
self._args = args
def run(self):
print(f'starting {self.name}')
try:
self.target(*self._args)  # unpack the stored args (here just the input tensor x)
except RuntimeError as e:
print(str(e))
def get_used_free_memory(gpu_id: int):
'''
pynvml reports `used` and `free` in bytes (2^30 = 1073741824 bytes per GB).
return: the used and free memory of the specified GPU, in whole GB
'''
if gpu_id < pynvml.nvmlDeviceGetCount():
handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
return mem_info.used // 1073741824, mem_info.free // 1073741824
else:
return -1, -1
def init_args():
'''
Initialize the command-line arguments, pretending to accept some training options such as epochs.
'''
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'--gpu-ids', default='0', type=str,
help='gpu ids to be used')
parser.add_argument(
'--epochs', default=1000, type=int,
help='the number of epoch')
parser.add_argument(
'--options', default=0, type=int,
help='options: whether to occupy the free video memory forcefully'
)
args = parser.parse_args()
return args
class Compute(nn.Module):
def __init__(self, thread_id=0, delay=3):
super(Compute, self).__init__()
self.thread_id = thread_id
self.delay = delay
def forward(self, x):
i = 0
while True:
time.sleep(self.delay)
for _ in range(3):
x = x @ x @ x
i += 1
if i == 100:
print(f'Thread {self.thread_id} is running.')
i = 0
def allocate(gids, is_forced=False):
num_gpus, cnt = len(gids), 0
is_allocated = {}
while cnt != num_gpus:
for i, gid in enumerate(gids):
if not is_allocated.get(gid, False):
used, free = get_used_free_memory(gid)
# floored by integer division, so used == 0 means less than 1 GB of video memory is in use.
if used != -1 and ((is_forced and free > 1) or (not is_forced and used == 0)):
x = torch.randn(
(2 * (free-1), 512*(256-2**abs(i-num_gpus//2)), 16, 16))
x = x.to(f'cuda:{gid}')
compute = Compute(thread_id=i, delay=3)
compute = compute.to(f'cuda:{gid}')
ComputeThread(f'Thread-{i}-GPU{gid}',
x, target=compute).start()
is_allocated[gid] = True
cnt += 1
def main():
args = init_args()
try:
gids = list(map(int, args.gpu_ids.split(',')))
allocate(gids, args.options != 0)
except Exception as e:
print(str(e))
if __name__ == '__main__':
    # e.g. python -m occupiedgpus.core --gpu-ids 0,1,2,3 --epochs 120 --options 0
    main()
``` |
{
"source": "jinzh154/CertifiedReLURobustness",
"score": 3
} |
#### File: jinzh154/CertifiedReLURobustness/save_nlayer_weights.py
```python
import numpy as np
import os
import pickle
import gzip
import argparse
import urllib.request
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D
from tensorflow.contrib.keras.api.keras.models import load_model
from tensorflow.contrib.keras.api.keras import backend as K
class NLayerModel:
def __init__(self, params, restore = None, session=None, use_log=False, image_size=28, image_channel=1):
self.image_size = image_size
self.num_channels = image_channel
self.num_labels = 10
model = Sequential()
model.add(Flatten(input_shape=(image_size, image_size, image_channel)))
# list of all hidden units weights
self.U = []
for param in params:
# add each dense layer, and save a reference to list U
self.U.append(Dense(param))
model.add(self.U[-1])
# ReLU activation
model.add(Activation('relu'))
self.W = Dense(10)
model.add(self.W)
# output log probability, used for black-box attack
if use_log:
model.add(Activation('softmax'))
if restore:
model.load_weights(restore)
layer_outputs = []
for layer in model.layers:
if isinstance(layer, Conv2D) or isinstance(layer, Dense):
layer_outputs.append(K.function([model.layers[0].input], [layer.output]))
self.layer_outputs = layer_outputs
self.model = model
def predict(self, data):
return self.model(data)
if __name__ == "__main__":
import scipy.io as sio
parser = argparse.ArgumentParser(description='save n-layer MNIST and CIFAR weights')
parser.add_argument('--model',
default="mnist",
choices=["mnist", "cifar"],
help='model name')
parser.add_argument('--modelfile',
default="",
help='override the model filename, use user specied one')
parser.add_argument('layer_parameters',
nargs='+',
help='number of hidden units per layer')
args = parser.parse_args()
nlayers = len(args.layer_parameters) + 1
import tensorflow as tf
with tf.Session() as sess:
# if a model file is not specified, use a manual override
if not args.modelfile:
args.modelfile = "models/"+args.model+"_"+str(nlayers)+"layer_relu"
if args.model == "mnist":
model = NLayerModel(args.layer_parameters, args.modelfile, sess)
#model = NLayerModel(args.layer_parameters, "models/mnist_"+str(nlayers)+"layer_relu")
elif args.model == "cifar":
model = NLayerModel(args.layer_parameters, args.modelfile, sess, image_size=32, image_channel=3)
else:
raise(RuntimeError("Unknow model"))
[W, bias_W] = model.W.get_weights()
save_dict = {'W': W, 'bias_W': bias_W}
print("Output layer shape:", W.shape)
U = model.U
for i, Ui in enumerate(U):
# save hidden layer weights, layer by layer
[weight_Ui, bias_Ui] = Ui.get_weights()
print("Hidden layer {} shape: {}".format(i, weight_Ui.shape))
save_dict['U'+str(i+1)] = weight_Ui
save_dict['bias_U'+str(i+1)] = bias_Ui
save_name = args.model + "_" + str(nlayers) + "layers"
print('saving to {}.mat with matrices {}'.format(save_name, save_dict.keys()))
# results saved to mnist.mat or cifar.mat
sio.savemat(save_name, save_dict)
```
#### File: jinzh154/CertifiedReLURobustness/utils.py
```python
import numpy as np
import random
import os
import pandas as pd
from PIL import Image
random.seed(1215)
np.random.seed(1215)
def linf_dist(x, y):
return np.linalg.norm(x.flatten() - y.flatten(), ord=np.inf)
def l2_dist(x, y):
return np.linalg.norm(x.flatten() - y.flatten(), ord=2)
def l1_dist(x, y):
return np.linalg.norm(x.flatten() - y.flatten(), ord=1)
def l0_dist(x, y):
return np.linalg.norm(x.flatten() - y.flatten(), ord=0)
def show(img, name = "output.png"):
"""
Show MNIST digits in the console.
"""
np.save('img', img)
fig = np.around((img + 0.5)*255)
fig = fig.astype(np.uint8).squeeze()
pic = Image.fromarray(fig)
# pic.resize((512,512), resample=PIL.Image.BICUBIC)
pic.save(name)
remap = " .*#"+"#"*100
img = (img.flatten()+.5)*3
return  # early return: the console ASCII rendering below is never executed
if len(img) != 784: return
print("START")
for i in range(28):
print("".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))
def generate_data(data, samples, targeted=True, random_and_least_likely = False, skip_wrong_label = True, start=0, ids = None,
target_classes = None, target_type = 0b1111, predictor = None, imagenet=False, remove_background_class=False, save_inputs=False, model_name=None, save_inputs_dir=None):
"""
Generate the input data to the attack algorithm.
data: the images to attack
samples: number of samples to use
targeted: if true, construct targeted attacks, otherwise untargeted attacks
start: offset into data to use
ids: true IDs of images in the dataset, if given, will use these images
target_classes: a list of list of labels for each ids
imagenet: if targeted and imagenet, randomly sample 10 target classes instead of using all 1000
"""
inputs = []
targets = []
true_labels = []
true_ids = []
information = []
target_candidate_pool = np.eye(data.test_labels.shape[1])
target_candidate_pool_remove_background_class = np.eye(data.test_labels.shape[1] - 1)
print('generating labels...')
if ids is None:
ids = range(samples)
else:
ids = ids[start:start+samples]
if target_classes:
target_classes = target_classes[start:start+samples]
start = 0
total = 0
for i in ids:
total += 1
if targeted:
predicted_label = -1 # unknown
if random_and_least_likely:
# if there are no user-specified target classes
if target_classes is None:
original_predict = np.squeeze(predictor(np.array([data.test_data[start+i]])))
num_classes = len(original_predict)
predicted_label = np.argmax(original_predict)
least_likely_label = np.argmin(original_predict)
top2_label = np.argsort(original_predict)[-2]
start_class = 1 if (imagenet and not remove_background_class) else 0
random_class = predicted_label
new_seq = [least_likely_label, top2_label, predicted_label]
while random_class in new_seq:
random_class = random.randint(start_class, start_class + num_classes - 1)
new_seq[2] = random_class
true_label = np.argmax(data.test_labels[start+i])
seq = []
if true_label != predicted_label and skip_wrong_label:
seq = []
else:
if target_type & 0b10000:
for c in range(num_classes):
if c != predicted_label:
seq.append(c)
information.append('class'+str(c))
else:
if target_type & 0b0100:
# least
seq.append(new_seq[0])
information.append('least')
if target_type & 0b0001:
# top-2
seq.append(new_seq[1])
information.append('top2')
if target_type & 0b0010:
# random
seq.append(new_seq[2])
information.append('random')
else:
# use user specified target classes
seq = target_classes[total - 1]
information.extend(len(seq) * ['user'])
else:
if imagenet:
if remove_background_class:
seq = random.sample(range(0,1000), 10)
else:
seq = random.sample(range(1,1001), 10)
information.extend(data.test_labels.shape[1] * ['random'])
else:
seq = range(data.test_labels.shape[1])
information.extend(data.test_labels.shape[1] * ['seq'])
print("[DATAGEN][L1] no = {}, true_id = {}, true_label = {}, predicted = {}, correct = {}, seq = {}, info = {}".format(total, start + i,
np.argmax(data.test_labels[start+i]), predicted_label, np.argmax(data.test_labels[start+i]) == predicted_label, seq, [] if len(seq) == 0 else information[-len(seq):]))
for j in seq:
# skip the original image label
if (j == np.argmax(data.test_labels[start+i])):
continue
inputs.append(data.test_data[start+i])
if remove_background_class:
targets.append(target_candidate_pool_remove_background_class[j])
else:
targets.append(target_candidate_pool[j])
true_labels.append(data.test_labels[start+i])
if remove_background_class:
true_labels[-1] = true_labels[-1][1:]
true_ids.append(start+i)
else:
true_label = np.argmax(data.test_labels[start+i])
original_predict = np.squeeze(predictor(np.array([data.test_data[start+i]])))
num_classes = len(original_predict)
predicted_label = np.argmax(original_predict)
if true_label != predicted_label and skip_wrong_label:
continue
else:
inputs.append(data.test_data[start+i])
if remove_background_class:
# shift target class by 1
print(np.argmax(data.test_labels[start+i]))
print(np.argmax(data.test_labels[start+i][1:1001]))
targets.append(data.test_labels[start+i][1:1001])
else:
targets.append(data.test_labels[start+i])
true_labels.append(data.test_labels[start+i])
if remove_background_class:
true_labels[-1] = true_labels[-1][1:]
true_ids.append(start+i)
information.extend(['original'])
inputs = np.array(inputs)
targets = np.array(targets)
true_labels = np.array(true_labels)
true_ids = np.array(true_ids)
print('labels generated')
print('{} images generated in total.'.format(len(inputs)))
if save_inputs:
if not os.path.exists(save_inputs_dir):
os.makedirs(save_inputs_dir)
save_model_dir = os.path.join(save_inputs_dir,model_name)
if not os.path.exists(save_model_dir):
os.makedirs(save_model_dir)
info_set = list(set(information))
for info_type in info_set:
save_type_dir = os.path.join(save_model_dir,info_type)
if not os.path.exists(save_type_dir):
os.makedirs(save_type_dir)
counter = 0
for i in range(len(information)):
if information[i] == info_type:
df = inputs[i,:,:,0]
df = df.flatten()
np.savetxt(os.path.join(save_type_dir,'point{}.txt'.format(counter)),df,newline='\t')
counter += 1
target_labels = np.array([np.argmax(targets[i]) for i in range(len(information)) if information[i]==info_type])
np.savetxt(os.path.join(save_model_dir,model_name+'_target_'+info_type+'.txt'),target_labels,fmt='%d',delimiter='\n')
return inputs, targets, true_labels, true_ids, information
``` |
{
"source": "jinzhang21/mlflow",
"score": 2
} |
#### File: examples/pmdarima/train.py
```python
import mlflow
import json
import numpy as np
from pmdarima import auto_arima
from pmdarima.datasets import load_wineind
from pmdarima import model_selection
ARTIFACT_PATH = "model"
def calculate_cv_metrics(model, endog, metric, cv):
cv_metric = model_selection.cross_val_score(model, endog, cv=cv, scoring=metric, verbose=0)
return cv_metric[~np.isnan(cv_metric)].mean()
with mlflow.start_run():
data = load_wineind()
train, test = model_selection.train_test_split(data, train_size=150)
print("Training AutoARIMA model...")
arima = auto_arima(
train,
error_action="ignore",
trace=False,
suppress_warnings=True,
maxiter=5,
seasonal=True,
m=12,
)
print("Model trained. \nExtracting parameters...")
parameters = arima.get_params(deep=True)
metrics = {x: getattr(arima, x)() for x in ["aicc", "aic", "bic", "hqic", "oob"]}
# Cross validation backtesting
cross_validator = model_selection.RollingForecastCV(h=10, step=20, initial=60)
for x in ["smape", "mean_absolute_error", "mean_squared_error"]:
metrics[x] = calculate_cv_metrics(arima, data, x, cross_validator)
print(f"Metrics: \n{json.dumps(metrics, indent=2)}")
print(f"Parameters: \n{json.dumps(parameters, indent=2)}")
mlflow.pmdarima.log_model(pmdarima_model=arima, artifact_path=ARTIFACT_PATH)
mlflow.log_params(parameters)
mlflow.log_metrics(metrics)
model_uri = mlflow.get_artifact_uri(ARTIFACT_PATH)
print(f"Model artifact logged to: {model_uri}")
loaded_model = mlflow.pmdarima.load_model(model_uri)
forecast = loaded_model.predict(30)
print(f"Forecast: \n{forecast}")
```
#### File: mlflow/paddle/_paddle_autolog.py
```python
import paddle
import mlflow
from mlflow.utils.autologging_utils import (
ExceptionSafeAbstractClass,
BatchMetricsLogger,
MlflowAutologgingQueueingClient,
get_autologging_config,
)
class __MLflowPaddleCallback(paddle.callbacks.Callback, metaclass=ExceptionSafeAbstractClass):
"""
Callback for auto-logging metrics and parameters.
"""
def __init__(self, client, metrics_logger, run_id, log_models, log_every_n_epoch):
super().__init__()
self.early_stopping = False
self.client = client
self.metrics_logger = metrics_logger
self.run_id = run_id
self.log_models = log_models
self.log_every_n_epoch = log_every_n_epoch
self.epoch = 0
def _log_metrics(self, logs, current_epoch):
metrics = {
key: (metric[0] if isinstance(metric, list) else metric) for key, metric in logs.items()
}
self.metrics_logger.record_metrics(metrics, current_epoch)
def on_epoch_end(self, epoch, logs=None):
if self.model is not None and epoch % self.log_every_n_epoch == 0:
self._log_metrics(logs, epoch)
self.epoch = epoch
def on_train_begin(self, logs=None):
params = {
"optimizer_name": self.model._optimizer.__class__.__name__,
"learning_rate": self.model._optimizer._learning_rate,
}
self.client.log_params(self.run_id, params)
self.client.flush(synchronous=True)
def on_train_end(self, logs=None):
self.metrics_logger.flush()
self.client.flush(synchronous=True)
def on_eval_end(self, logs=None):
eval_logs = {
"eval_" + key: (metric[0] if isinstance(metric, list) else metric)
for key, metric in logs.items()
}
self._log_metrics(eval_logs, self.epoch)
def _log_early_stop_params(early_stop_callback, client, run_id):
"""
Logs early stopping configuration parameters to MLflow.
:param early_stop_callback: The early stopping callback instance used during training.
:param client: An `MlflowAutologgingQueueingClient` instance used for MLflow logging.
:param run_id: The ID of the MLflow Run to which to log configuration parameters.
"""
client.log_params(
run_id,
{
p: getattr(early_stop_callback, p)
for p in ["monitor", "patience", "min_delta", "baseline"]
if hasattr(early_stop_callback, p)
},
)
def _log_early_stop_metrics(early_stop_callback, client, run_id):
"""
Logs early stopping behavior results (e.g. stopped epoch) as metrics to MLflow.
:param early_stop_callback: The early stopping callback instance used during training.
:param client: An `MlflowAutologgingQueueingClient` instance used for MLflow logging.
:param run_id: The ID of the MLflow Run to which to log configuration parameters.
"""
if early_stop_callback.stopped_epoch == 0:
return
metrics = {
"stopped_epoch": early_stop_callback.stopped_epoch,
"best_value": early_stop_callback.best_value,
}
client.log_metrics(run_id, metrics)
def patched_fit(original, self, *args, **kwargs):
run_id = mlflow.active_run().info.run_id
tracking_uri = mlflow.get_tracking_uri()
client = MlflowAutologgingQueueingClient(tracking_uri)
metrics_logger = BatchMetricsLogger(run_id, tracking_uri)
log_models = get_autologging_config(mlflow.paddle.FLAVOR_NAME, "log_models", True)
log_every_n_epoch = get_autologging_config(mlflow.paddle.FLAVOR_NAME, "log_every_n_epoch", 1)
early_stop_callback = None
mlflow_callback = __MLflowPaddleCallback(
client, metrics_logger, run_id, log_models, log_every_n_epoch
)
if "callbacks" in kwargs:
callbacks = kwargs["callbacks"]
for callback in callbacks:
if isinstance(callback, paddle.callbacks.EarlyStopping):
early_stop_callback = callback
_log_early_stop_params(early_stop_callback, client, run_id)
break
kwargs["callbacks"].append(mlflow_callback)
else:
kwargs["callbacks"] = [mlflow_callback]
client.flush(synchronous=False)
result = original(self, *args, **kwargs)
if early_stop_callback is not None:
_log_early_stop_metrics(early_stop_callback, client, run_id)
mlflow.log_text(str(self.summary()), "model_summary.txt")
if log_models:
registered_model_name = get_autologging_config(
mlflow.paddle.FLAVOR_NAME, "registered_model_name", None
)
mlflow.paddle.log_model(
pd_model=self, artifact_path="model", registered_model_name=registered_model_name
)
client.flush(synchronous=True)
return result
```
#### File: mlflow/pyfunc/mlserver.py
```python
import os
from typing import Tuple, Dict
MLServerMLflowRuntime = "mlserver_mlflow.MLflowRuntime"
MLServerDefaultModelName = "mlflow-model"
def get_cmd(
model_uri: str, port: int = None, host: str = None, nworkers: int = None
) -> Tuple[str, Dict[str, str]]:
cmd = f"mlserver start {model_uri}"
cmd_env = os.environ.copy()
if port:
cmd_env["MLSERVER_HTTP_PORT"] = str(port)
if host:
cmd_env["MLSERVER_HOST"] = host
cmd_env["MLSERVER_MODEL_NAME"] = MLServerDefaultModelName
if nworkers:
cmd_env["MLSERVER_MODEL_PARALLEL_WORKERS"] = str(nworkers)
cmd_env["MLSERVER_MODEL_IMPLEMENTATION"] = MLServerMLflowRuntime
cmd_env["MLSERVER_MODEL_URI"] = model_uri
return cmd, cmd_env
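# Illustrative usage (the model path below is hypothetical):
# get_cmd("/models/my-model", port=8080) returns ("mlserver start /models/my-model", env),
# where env extends os.environ with MLSERVER_HTTP_PORT="8080",
# MLSERVER_MODEL_NAME="mlflow-model", MLSERVER_MODEL_IMPLEMENTATION="mlserver_mlflow.MLflowRuntime"
# and MLSERVER_MODEL_URI="/models/my-model".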
```
#### File: store/artifact/mlflow_artifacts_repo.py
```python
from urllib.parse import urlparse, urlunparse
import re
from mlflow.store.artifact.http_artifact_repo import HttpArtifactRepository
from mlflow.tracking._tracking_service.utils import get_tracking_uri
from mlflow.exceptions import MlflowException
def _check_if_host_is_numeric(hostname):
if hostname:
try:
float(hostname)
return True
except ValueError:
return False
else:
return False
def _validate_port_mapped_to_hostname(uri_parse):
# This check is to catch an mlflow-artifacts uri that has a port designated but no
# hostname specified. `urllib.parse.urlparse` will treat such a uri as a filesystem
# definition, mapping the provided port as a hostname value if this condition is not
# validated.
if uri_parse.hostname and _check_if_host_is_numeric(uri_parse.hostname) and not uri_parse.port:
raise MlflowException(
"The mlflow-artifacts uri was supplied with a port number: "
f"{uri_parse.hostname}, but no host was defined."
)
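# e.g. urlparse('mlflow-artifacts://5000/path') yields hostname '5000' with no port,
# which this check rejects rather than silently treating the intended port as a host.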
def _validate_uri_scheme(scheme):
allowable_schemes = {"http", "https"}
if scheme not in allowable_schemes:
raise MlflowException(
f"The configured tracking uri scheme: '{scheme}' is invalid for use with the proxy "
f"mlflow-artifact scheme. The allowed tracking schemes are: {allowable_schemes}"
)
class MlflowArtifactsRepository(HttpArtifactRepository):
"""Scheme wrapper around HttpArtifactRepository for mlflow-artifacts server functionality"""
def __init__(self, artifact_uri):
super().__init__(self.resolve_uri(artifact_uri, get_tracking_uri()))
@classmethod
def resolve_uri(cls, artifact_uri, tracking_uri):
base_url = "/api/2.0/mlflow-artifacts/artifacts"
track_parse = urlparse(tracking_uri)
uri_parse = urlparse(artifact_uri)
# Check to ensure that a port is present with no hostname
_validate_port_mapped_to_hostname(uri_parse)
# Check that tracking uri is http or https
_validate_uri_scheme(track_parse.scheme)
if uri_parse.path == "/": # root directory; build simple path
resolved = f"{base_url}{uri_parse.path}"
elif uri_parse.path == base_url: # for operations like list artifacts
resolved = base_url
else:
resolved = f"{track_parse.path}/{base_url}/{uri_parse.path}"
resolved = re.sub("//+", "/", resolved)
resolved_artifacts_uri = urlunparse(
(
# scheme
track_parse.scheme,
# netloc
uri_parse.netloc if uri_parse.netloc else track_parse.netloc,
# path
resolved,
# params
"",
# query
"",
# fragment
"",
)
)
return resolved_artifacts_uri.replace("///", "/").rstrip("/")
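# Illustrative resolution: resolve_uri('mlflow-artifacts:/experiments/1', 'http://localhost:5000')
# yields 'http://localhost:5000/api/2.0/mlflow-artifacts/artifacts/experiments/1'.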
```
#### File: tracking/context/databricks_command_context.py
```python
from mlflow.tracking.context.abstract_context import RunContextProvider
from mlflow.utils import databricks_utils
from mlflow.utils.mlflow_tags import MLFLOW_DATABRICKS_NOTEBOOK_COMMAND_ID
class DatabricksCommandRunContext(RunContextProvider):
def in_context(self):
return databricks_utils.get_job_group_id() is not None
def tags(self):
job_group_id = databricks_utils.get_job_group_id()
tags = {}
if job_group_id is not None:
tags[MLFLOW_DATABRICKS_NOTEBOOK_COMMAND_ID] = job_group_id
return tags
```
#### File: mlflow/utils/class_utils.py
```python
import importlib
def _get_class_from_string(fully_qualified_class_name):
module, class_name = fully_qualified_class_name.rsplit(".", maxsplit=1)
return getattr(importlib.import_module(module), class_name)
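# e.g. _get_class_from_string("collections.OrderedDict") imports `collections`
# and returns the OrderedDict class object.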
```
#### File: mlflow/utils/string_utils.py
```python
def strip_prefix(original, prefix):
if original.startswith(prefix):
return original[len(prefix) :]
return original
def strip_suffix(original, suffix):
if original.endswith(suffix) and suffix != "":
return original[: -len(suffix)]
return original
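# e.g. strip_prefix("foobar", "foo") == "bar" and strip_suffix("foobar", "bar") == "foo";
# a non-matching prefix/suffix leaves the string unchanged.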
def is_string_type(item):
return isinstance(item, str)
def truncate_str_from_middle(s, max_length):
assert max_length > 5
if len(s) <= max_length:
return s
else:
left_part_len = (max_length - 3) // 2
right_part_len = max_length - 3 - left_part_len
return f"{s[:left_part_len]}...{s[-right_part_len:]}"
```
#### File: pylint_plugins/pytest_raises_without_match/__init__.py
```python
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
class PytestRaisesWithoutMatch(BaseChecker):
__implements__ = IAstroidChecker
name = "pytest-raises-without-match"
msgs = {
"W0001": (
"`pytest.raises` must be called with `match` argument` ",
name,
"Use `pytest.raises(<exception>, match=...)`",
),
}
priority = -1
@staticmethod
def _is_pytest_raises_call(node: astroid.Call):
if not isinstance(node.func, astroid.Attribute) or not isinstance(
node.func.expr, astroid.Name
):
return False
return node.func.expr.name == "pytest" and node.func.attrname == "raises"
@staticmethod
def _called_with_match(node: astroid.Call):
# Note `match` is a keyword-only argument:
# https://docs.pytest.org/en/latest/reference/reference.html#pytest.raises
return any(k.arg == "match" for k in node.keywords)
def visit_call(self, node: astroid.Call):
if not PytestRaisesWithoutMatch._is_pytest_raises_call(node):
return
if not PytestRaisesWithoutMatch._called_with_match(node):
self.add_message(self.name, node=node)
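# For example, `pytest.raises(ValueError)` is flagged with W0001, while
# `pytest.raises(ValueError, match="bad value")` passes the check.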
```
#### File: tests/pyfunc/test_warn_dependency_requirement_mismatches.py
```python
from unittest import mock
import pytest
import cloudpickle
import sklearn
from mlflow.pyfunc import _warn_dependency_requirement_mismatches
import mlflow.utils.requirements_utils
from tests.helper_functions import AnyStringWith
@pytest.mark.large
def test_warn_dependency_requirement_mismatches(tmpdir):
req_file = tmpdir.join("requirements.txt")
req_file.write(f"cloudpickle=={cloudpickle.__version__}\nscikit-learn=={sklearn.__version__}\n")
with mock.patch("mlflow.pyfunc._logger.warning") as mock_warning:
# Test case: all packages satisfy requirements.
_warn_dependency_requirement_mismatches(model_path=tmpdir)
mock_warning.assert_not_called()
mock_warning.reset_mock()
original_get_installed_version_fn = mlflow.utils.requirements_utils._get_installed_version
def gen_mock_get_installed_version_fn(mock_versions):
def mock_get_installed_version_fn(package, module=None):
if package in mock_versions:
return mock_versions[package]
else:
return original_get_installed_version_fn(package, module)
return mock_get_installed_version_fn
# Test case: multiple mismatched packages
with mock.patch(
"mlflow.utils.requirements_utils._get_installed_version",
gen_mock_get_installed_version_fn(
{
"scikit-learn": "999.99.11",
"cloudpickle": "999.99.22",
}
),
):
_warn_dependency_requirement_mismatches(model_path=tmpdir)
mock_warning.assert_called_once_with(
"""
Detected one or more mismatches between the model's dependencies and the current Python environment:
- cloudpickle (current: 999.99.22, required: cloudpickle=={cloudpickle_version})
- scikit-learn (current: 999.99.11, required: scikit-learn=={sklearn_version})
""".strip().format(
sklearn_version=sklearn.__version__, cloudpickle_version=cloudpickle.__version__
)
)
mock_warning.reset_mock()
req_file.write("scikit-learn>=0.8,<=0.9")
# Test case: requirement with multiple version specifiers is satisfied
with mock.patch(
"mlflow.utils.requirements_utils._get_installed_version",
gen_mock_get_installed_version_fn({"scikit-learn": "0.8.1"}),
):
_warn_dependency_requirement_mismatches(model_path=tmpdir)
mock_warning.assert_not_called()
mock_warning.reset_mock()
# Test case: requirement with multiple version specifiers is not satisfied
with mock.patch(
"mlflow.utils.requirements_utils._get_installed_version",
gen_mock_get_installed_version_fn({"scikit-learn": "0.7.1"}),
):
_warn_dependency_requirement_mismatches(model_path=tmpdir)
mock_warning.assert_called_once_with(
AnyStringWith(" - scikit-learn (current: 0.7.1, required: scikit-learn>=0.8,<=0.9)")
)
mock_warning.reset_mock()
# Test case: required package is uninstalled.
req_file.write("uninstalled-pkg==1.2.3")
_warn_dependency_requirement_mismatches(model_path=tmpdir)
mock_warning.assert_called_once_with(
AnyStringWith(
" - uninstalled-pkg (current: uninstalled, required: uninstalled-pkg==1.2.3)"
)
)
mock_warning.reset_mock()
# Test case: requirement without version specifiers
req_file.write("mlflow")
_warn_dependency_requirement_mismatches(model_path=tmpdir)
mock_warning.assert_not_called()
mock_warning.reset_mock()
# Test case: an unexpected error happens while detecting mismatched packages.
with mock.patch(
"mlflow.pyfunc._check_requirement_satisfied",
side_effect=RuntimeError("check_requirement_satisfied_fn_failed"),
):
_warn_dependency_requirement_mismatches(model_path=tmpdir)
mock_warning.assert_called_once_with(
AnyStringWith(
"Encountered an unexpected error "
"(RuntimeError('check_requirement_satisfied_fn_failed')) while "
"detecting model dependency mismatches"
)
)
```
#### File: artifact/utils/test_model_utils.py
```python
import pytest
from unittest import mock
from mlflow.exceptions import MlflowException
from mlflow.store.artifact.utils.models import _parse_model_uri, get_model_name_and_version
from mlflow.tracking import MlflowClient
from mlflow.entities.model_registry import ModelVersion
@pytest.mark.parametrize(
"uri, expected_name, expected_version",
[
("models:/AdsModel1/0", "AdsModel1", 0),
("models:/Ads Model 1/12345", "Ads Model 1", 12345),
("models:/12345/67890", "12345", 67890),
("models://profile@databricks/12345/67890", "12345", 67890),
],
)
def test_parse_models_uri_with_version(uri, expected_name, expected_version):
(name, version, stage) = _parse_model_uri(uri)
assert name == expected_name
assert version == expected_version
assert stage is None
@pytest.mark.parametrize(
"uri, expected_name, expected_stage",
[
("models:/AdsModel1/Production", "AdsModel1", "Production"),
("models:/AdsModel1/production", "AdsModel1", "production"), # case insensitive
("models:/AdsModel1/pROduction", "AdsModel1", "pROduction"), # case insensitive
("models:/Ads Model 1/None", "Ads Model 1", "None"),
("models://scope:key@databricks/Ads Model 1/None", "Ads Model 1", "None"),
],
)
def test_parse_models_uri_with_stage(uri, expected_name, expected_stage):
(name, version, stage) = _parse_model_uri(uri)
assert name == expected_name
assert version is None
assert stage == expected_stage
@pytest.mark.parametrize(
"uri, expected_name",
[
("models:/AdsModel1/latest", "AdsModel1"),
("models:/AdsModel1/Latest", "AdsModel1"), # case insensitive
("models:/AdsModel1/LATEST", "AdsModel1"), # case insensitive
("models:/Ads Model 1/latest", "Ads Model 1"),
("models://scope:key@databricks/Ads Model 1/latest", "Ads Model 1"),
],
)
def test_parse_models_uri_with_latest(uri, expected_name):
(name, version, stage) = _parse_model_uri(uri)
assert name == expected_name
assert version is None
assert stage is None
@pytest.mark.parametrize(
"uri",
[
"notmodels:/NameOfModel/12345", # wrong scheme with version
"notmodels:/NameOfModel/StageName", # wrong scheme with stage
"models:/", # no model name
"models:/Name/Stage/0", # too many specifiers
"models:Name/Stage", # missing slash
"models://Name/Stage", # hostnames are ignored, path too short
],
)
def test_parse_models_uri_invalid_input(uri):
with pytest.raises(MlflowException, match="Not a proper models"):
_parse_model_uri(uri)
def test_get_model_name_and_version_with_version():
with mock.patch.object(
MlflowClient, "get_latest_versions", return_value=[]
) as mlflow_client_mock:
assert get_model_name_and_version(MlflowClient(), "models:/AdsModel1/123") == (
"AdsModel1",
"123",
)
mlflow_client_mock.assert_not_called()
def test_get_model_name_and_version_with_stage():
with mock.patch.object(
MlflowClient,
"get_latest_versions",
return_value=[
ModelVersion(
name="mv1", version="10", creation_timestamp=123, current_stage="Production"
),
ModelVersion(
name="mv2", version="15", creation_timestamp=124, current_stage="Production"
),
],
) as mlflow_client_mock:
assert get_model_name_and_version(MlflowClient(), "models:/AdsModel1/Production") == (
"AdsModel1",
"15",
)
mlflow_client_mock.assert_called_once_with("AdsModel1", ["Production"])
def test_get_model_name_and_version_with_latest():
with mock.patch.object(
MlflowClient,
"get_latest_versions",
return_value=[
ModelVersion(
name="mv1", version="10", creation_timestamp=123, current_stage="Production"
),
ModelVersion(name="mv3", version="20", creation_timestamp=125, current_stage="None"),
ModelVersion(name="mv2", version="15", creation_timestamp=124, current_stage="Staging"),
],
) as mlflow_client_mock:
assert get_model_name_and_version(MlflowClient(), "models:/AdsModel1/latest") == (
"AdsModel1",
"20",
)
mlflow_client_mock.assert_called_once_with("AdsModel1", None)
# Check that "latest" is case insensitive.
assert get_model_name_and_version(MlflowClient(), "models:/AdsModel1/lATest") == (
"AdsModel1",
"20",
)
```
#### File: mlflow/tests/test_exceptions.py
```python
import json
from mlflow.exceptions import MlflowException, RestException
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
INVALID_STATE,
ENDPOINT_NOT_FOUND,
INTERNAL_ERROR,
RESOURCE_ALREADY_EXISTS,
IO_ERROR,
)
class TestMlflowException:
def test_error_code_constructor(self):
assert (
MlflowException("test", error_code=INVALID_PARAMETER_VALUE).error_code
== "INVALID_PARAMETER_VALUE"
)
def test_default_error_code(self):
assert MlflowException("test").error_code == "INTERNAL_ERROR"
def test_serialize_to_json(self):
mlflow_exception = MlflowException("test")
deserialized = json.loads(mlflow_exception.serialize_as_json())
assert deserialized["message"] == "test"
assert deserialized["error_code"] == "INTERNAL_ERROR"
def test_get_http_status_code(self):
assert MlflowException("test default").get_http_status_code() == 500
assert MlflowException("code not in map", error_code=IO_ERROR).get_http_status_code() == 500
assert MlflowException("test", error_code=INVALID_STATE).get_http_status_code() == 500
assert MlflowException("test", error_code=ENDPOINT_NOT_FOUND).get_http_status_code() == 404
assert (
MlflowException("test", error_code=INVALID_PARAMETER_VALUE).get_http_status_code()
== 400
)
assert MlflowException("test", error_code=INTERNAL_ERROR).get_http_status_code() == 500
assert (
MlflowException("test", error_code=RESOURCE_ALREADY_EXISTS).get_http_status_code()
== 400
)
def test_rest_exception():
mlflow_exception = MlflowException("test", error_code=RESOURCE_ALREADY_EXISTS)
json_exception = mlflow_exception.serialize_as_json()
deserialized_rest_exception = RestException(json.loads(json_exception))
assert deserialized_rest_exception.error_code == "RESOURCE_ALREADY_EXISTS"
assert "test" in deserialized_rest_exception.message
```
#### File: tracking/context/test_databricks_job_context.py
```python
from unittest import mock
from mlflow.entities import SourceType
from mlflow.utils.mlflow_tags import (
MLFLOW_SOURCE_NAME,
MLFLOW_SOURCE_TYPE,
MLFLOW_DATABRICKS_JOB_ID,
MLFLOW_DATABRICKS_JOB_RUN_ID,
MLFLOW_DATABRICKS_JOB_TYPE,
MLFLOW_DATABRICKS_WEBAPP_URL,
MLFLOW_DATABRICKS_WORKSPACE_URL,
MLFLOW_DATABRICKS_WORKSPACE_ID,
)
from mlflow.tracking.context.databricks_job_context import DatabricksJobRunContext
from tests.helper_functions import multi_context
def test_databricks_job_run_context_in_context():
with mock.patch("mlflow.utils.databricks_utils.is_in_databricks_job") as in_job_mock:
assert DatabricksJobRunContext().in_context() == in_job_mock.return_value
def test_databricks_job_run_context_tags():
patch_job_id = mock.patch("mlflow.utils.databricks_utils.get_job_id")
patch_job_run_id = mock.patch("mlflow.utils.databricks_utils.get_job_run_id")
patch_job_type = mock.patch("mlflow.utils.databricks_utils.get_job_type")
patch_webapp_url = mock.patch("mlflow.utils.databricks_utils.get_webapp_url")
patch_workspace_url = mock.patch(
"mlflow.utils.databricks_utils.get_workspace_url",
return_value="https://dev.databricks.com",
)
patch_workspace_url_none = mock.patch(
"mlflow.utils.databricks_utils.get_workspace_url", return_value=None
)
patch_workspace_info = mock.patch(
"mlflow.utils.databricks_utils.get_workspace_info_from_dbutils",
return_value=("https://databricks.com", "123456"),
)
with multi_context(
patch_job_id,
patch_job_run_id,
patch_job_type,
patch_webapp_url,
patch_workspace_url,
patch_workspace_info,
) as (
job_id_mock,
job_run_id_mock,
job_type_mock,
webapp_url_mock,
workspace_url_mock,
workspace_info_mock,
):
assert DatabricksJobRunContext().tags() == {
MLFLOW_SOURCE_NAME: "jobs/{job_id}/run/{job_run_id}".format(
job_id=job_id_mock.return_value, job_run_id=job_run_id_mock.return_value
),
MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.JOB),
MLFLOW_DATABRICKS_JOB_ID: job_id_mock.return_value,
MLFLOW_DATABRICKS_JOB_RUN_ID: job_run_id_mock.return_value,
MLFLOW_DATABRICKS_JOB_TYPE: job_type_mock.return_value,
MLFLOW_DATABRICKS_WEBAPP_URL: webapp_url_mock.return_value,
MLFLOW_DATABRICKS_WORKSPACE_URL: workspace_url_mock.return_value,
MLFLOW_DATABRICKS_WORKSPACE_ID: workspace_info_mock.return_value[1],
}
with multi_context(
patch_job_id,
patch_job_run_id,
patch_job_type,
patch_webapp_url,
patch_workspace_url_none,
patch_workspace_info,
) as (
job_id_mock,
job_run_id_mock,
job_type_mock,
webapp_url_mock,
workspace_url_mock,
workspace_info_mock,
):
assert DatabricksJobRunContext().tags() == {
MLFLOW_SOURCE_NAME: "jobs/{job_id}/run/{job_run_id}".format(
job_id=job_id_mock.return_value, job_run_id=job_run_id_mock.return_value
),
MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.JOB),
MLFLOW_DATABRICKS_JOB_ID: job_id_mock.return_value,
MLFLOW_DATABRICKS_JOB_RUN_ID: job_run_id_mock.return_value,
MLFLOW_DATABRICKS_JOB_TYPE: job_type_mock.return_value,
MLFLOW_DATABRICKS_WEBAPP_URL: webapp_url_mock.return_value,
MLFLOW_DATABRICKS_WORKSPACE_URL: workspace_info_mock.return_value[0], # fallback value
MLFLOW_DATABRICKS_WORKSPACE_ID: workspace_info_mock.return_value[1],
}
def test_databricks_job_run_context_tags_nones():
patch_job_id = mock.patch("mlflow.utils.databricks_utils.get_job_id", return_value=None)
patch_job_run_id = mock.patch("mlflow.utils.databricks_utils.get_job_run_id", return_value=None)
patch_job_type = mock.patch("mlflow.utils.databricks_utils.get_job_type", return_value=None)
patch_webapp_url = mock.patch("mlflow.utils.databricks_utils.get_webapp_url", return_value=None)
patch_workspace_info = mock.patch(
"mlflow.utils.databricks_utils.get_workspace_info_from_dbutils", return_value=(None, None)
)
with patch_job_id, patch_job_run_id, patch_job_type, patch_webapp_url, patch_workspace_info:
assert DatabricksJobRunContext().tags() == {
MLFLOW_SOURCE_NAME: None,
MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.JOB),
}
```
#### File: tests/utils/test_gorilla.py
```python
import pytest
from mlflow.utils import gorilla
class Delegator:
def __init__(self, delegated_fn):
self.delegated_fn = delegated_fn
def __get__(self, instance, owner):
return self.delegated_fn
def delegate(delegated_fn):
return lambda fn: Delegator(delegated_fn)
def gen_class_A_B():
class A:
def f1(self):
pass
def f2(self):
pass
def delegated_f3(self):
pass
@delegate(delegated_f3)
def f3(self):
pass
class B(A):
def f1(self):
pass
return A, B
@pytest.fixture
def gorilla_setting():
return gorilla.Settings(allow_hit=True, store_hit=True)
def test_basic_patch_for_class(gorilla_setting):
A, B = gen_class_A_B()
original_A_f1 = A.f1
original_A_f2 = A.f2
original_B_f1 = B.f1
def patched_A_f1(self): # pylint: disable=unused-argument
pass
def patched_A_f2(self): # pylint: disable=unused-argument
pass
def patched_B_f1(self): # pylint: disable=unused-argument
pass
patch_A_f1 = gorilla.Patch(A, "f1", patched_A_f1, gorilla_setting)
patch_A_f2 = gorilla.Patch(A, "f2", patched_A_f2, gorilla_setting)
patch_B_f1 = gorilla.Patch(B, "f1", patched_B_f1, gorilla_setting)
assert gorilla.get_original_attribute(A, "f1") is original_A_f1
assert gorilla.get_original_attribute(B, "f1") is original_B_f1
assert gorilla.get_original_attribute(B, "f2") is original_A_f2
gorilla.apply(patch_A_f1)
assert A.f1 is patched_A_f1
assert gorilla.get_original_attribute(A, "f1") is original_A_f1
assert gorilla.get_original_attribute(B, "f1") is original_B_f1
gorilla.apply(patch_B_f1)
assert A.f1 is patched_A_f1
assert B.f1 is patched_B_f1
assert gorilla.get_original_attribute(A, "f1") is original_A_f1
assert gorilla.get_original_attribute(B, "f1") is original_B_f1
gorilla.apply(patch_A_f2)
assert A.f2 is patched_A_f2
assert B.f2 is patched_A_f2
assert gorilla.get_original_attribute(A, "f2") is original_A_f2
assert gorilla.get_original_attribute(B, "f2") is original_A_f2
gorilla.revert(patch_A_f2)
assert A.f2 is original_A_f2
assert B.f2 is original_A_f2
assert gorilla.get_original_attribute(A, "f2") == original_A_f2
assert gorilla.get_original_attribute(B, "f2") == original_A_f2
gorilla.revert(patch_B_f1)
assert A.f1 is patched_A_f1
assert B.f1 is original_B_f1
assert gorilla.get_original_attribute(A, "f1") == original_A_f1
assert gorilla.get_original_attribute(B, "f1") == original_B_f1
gorilla.revert(patch_A_f1)
assert A.f1 is original_A_f1
assert B.f1 is original_B_f1
assert gorilla.get_original_attribute(A, "f1") == original_A_f1
assert gorilla.get_original_attribute(B, "f1") == original_B_f1
def test_patch_for_descriptor(gorilla_setting):
A, _ = gen_class_A_B()
original_A_f3_raw = object.__getattribute__(A, "f3")
def patched_A_f3(self): # pylint: disable=unused-argument
pass
patch_A_f3 = gorilla.Patch(A, "f3", patched_A_f3, gorilla_setting)
assert gorilla.get_original_attribute(A, "f3") is A.delegated_f3
assert (
gorilla.get_original_attribute(A, "f3", bypass_descriptor_protocol=True)
is original_A_f3_raw
)
gorilla.apply(patch_A_f3)
assert A.f3 is patched_A_f3
assert gorilla.get_original_attribute(A, "f3") is A.delegated_f3
assert (
gorilla.get_original_attribute(A, "f3", bypass_descriptor_protocol=True)
is original_A_f3_raw
)
gorilla.revert(patch_A_f3)
assert A.f3 is A.delegated_f3
assert gorilla.get_original_attribute(A, "f3") is A.delegated_f3
assert (
gorilla.get_original_attribute(A, "f3", bypass_descriptor_protocol=True)
is original_A_f3_raw
)
# test patch a descriptor
@delegate(patched_A_f3)
def new_patched_A_f3(self): # pylint: disable=unused-argument
pass
new_patch_A_f3 = gorilla.Patch(A, "f3", new_patched_A_f3, gorilla_setting)
gorilla.apply(new_patch_A_f3)
assert A.f3 is patched_A_f3
assert object.__getattribute__(A, "f3") is new_patched_A_f3
assert gorilla.get_original_attribute(A, "f3") is A.delegated_f3
assert (
gorilla.get_original_attribute(A, "f3", bypass_descriptor_protocol=True)
is original_A_f3_raw
)
@pytest.mark.parametrize("store_hit", [True, False])
def test_patch_on_inherit_method(store_hit):
A, B = gen_class_A_B()
original_A_f2 = A.f2
def patched_B_f2(self): # pylint: disable=unused-argument
pass
gorilla_setting = gorilla.Settings(allow_hit=True, store_hit=store_hit)
patch_B_f2 = gorilla.Patch(B, "f2", patched_B_f2, gorilla_setting)
gorilla.apply(patch_B_f2)
assert B.f2 is patched_B_f2
assert gorilla.get_original_attribute(B, "f2") is original_A_f2
gorilla.revert(patch_B_f2)
assert B.f2 is original_A_f2
assert gorilla.get_original_attribute(B, "f2") is original_A_f2
assert "f2" not in B.__dict__ # assert no side effect after reverting
@pytest.mark.parametrize("store_hit", [True, False])
def test_patch_on_attribute_not_exist(store_hit):
A, _ = gen_class_A_B()
def patched_fx(self): # pylint: disable=unused-argument
return 101
gorilla_setting = gorilla.Settings(allow_hit=True, store_hit=store_hit)
fx_patch = gorilla.Patch(A, "fx", patched_fx, gorilla_setting)
gorilla.apply(fx_patch)
a1 = A()
assert a1.fx() == 101
gorilla.revert(fx_patch)
assert not hasattr(A, "fx")
```
#### File: tests/utils/test_validation.py
```python
import copy
import pytest
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.entities import Metric, Param, RunTag
from mlflow.protos.databricks_pb2 import ErrorCode, INVALID_PARAMETER_VALUE
from mlflow.utils.validation import (
_is_numeric,
_validate_metric_name,
_validate_param_name,
_validate_tag_name,
_validate_run_id,
_validate_batch_log_data,
_validate_batch_log_limits,
_validate_experiment_artifact_location,
_validate_db_type_string,
_validate_experiment_name,
)
GOOD_METRIC_OR_PARAM_NAMES = [
"a",
"Ab-5_",
"a/b/c",
"a.b.c",
".a",
"b.",
"a..a/._./o_O/.e.",
"a b/c d",
]
BAD_METRIC_OR_PARAM_NAMES = [
"",
".",
"/",
"..",
"//",
"a//b",
"a/./b",
"/a",
"a/",
":",
"\\",
"./",
"/./",
]
def test_is_numeric():
assert _is_numeric(0)
assert _is_numeric(0.0)
assert not _is_numeric(True)
assert not _is_numeric(False)
assert not _is_numeric("0")
assert not _is_numeric(None)
def test_validate_metric_name():
for good_name in GOOD_METRIC_OR_PARAM_NAMES:
_validate_metric_name(good_name)
for bad_name in BAD_METRIC_OR_PARAM_NAMES:
with pytest.raises(MlflowException, match="Invalid metric name") as e:
_validate_metric_name(bad_name)
assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_validate_param_name():
for good_name in GOOD_METRIC_OR_PARAM_NAMES:
_validate_param_name(good_name)
for bad_name in BAD_METRIC_OR_PARAM_NAMES:
with pytest.raises(MlflowException, match="Invalid parameter name") as e:
_validate_param_name(bad_name)
assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_validate_tag_name():
for good_name in GOOD_METRIC_OR_PARAM_NAMES:
_validate_tag_name(good_name)
for bad_name in BAD_METRIC_OR_PARAM_NAMES:
with pytest.raises(MlflowException, match="Invalid tag name") as e:
_validate_tag_name(bad_name)
assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_validate_run_id():
for good_id in [
"a" * 32,
"f0" * 16,
"abcdef0123456789" * 2,
"a" * 33,
"a" * 31,
"a" * 256,
"A" * 32,
"g" * 32,
"a_" * 32,
"abcdefghijklmnopqrstuvqxyz",
]:
_validate_run_id(good_id)
for bad_id in ["a/bc" * 8, "", "a" * 400, "*" * 5]:
with pytest.raises(MlflowException, match="Invalid run ID") as e:
_validate_run_id(bad_id)
assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_validate_batch_log_limits():
too_many_metrics = [Metric("metric-key-%s" % i, 1, 0, i * 2) for i in range(1001)]
too_many_params = [Param("param-key-%s" % i, "b") for i in range(101)]
too_many_tags = [RunTag("tag-key-%s" % i, "b") for i in range(101)]
good_kwargs = {"metrics": [], "params": [], "tags": []}
bad_kwargs = {
"metrics": [too_many_metrics],
"params": [too_many_params],
"tags": [too_many_tags],
}
match = r"A batch logging request can contain at most \d+"
for arg_name, arg_values in bad_kwargs.items():
for arg_value in arg_values:
final_kwargs = copy.deepcopy(good_kwargs)
final_kwargs[arg_name] = arg_value
with pytest.raises(MlflowException, match=match):
_validate_batch_log_limits(**final_kwargs)
# Test the case where there are too many entities in aggregate
with pytest.raises(MlflowException, match=match):
_validate_batch_log_limits(too_many_metrics[:900], too_many_params[:51], too_many_tags[:50])
# Test that we don't reject entities within the limit
_validate_batch_log_limits(too_many_metrics[:1000], [], [])
_validate_batch_log_limits([], too_many_params[:100], [])
_validate_batch_log_limits([], [], too_many_tags[:100])
def test_validate_batch_log_data():
metrics_with_bad_key = [
Metric("good-metric-key", 1.0, 0, 0),
Metric("super-long-bad-key" * 1000, 4.0, 0, 0),
]
metrics_with_bad_val = [Metric("good-metric-key", "not-a-double-val", 0, 0)]
metrics_with_bool_val = [Metric("good-metric-key", True, 0, 0)]
metrics_with_bad_ts = [Metric("good-metric-key", 1.0, "not-a-timestamp", 0)]
metrics_with_neg_ts = [Metric("good-metric-key", 1.0, -123, 0)]
metrics_with_bad_step = [Metric("good-metric-key", 1.0, 0, "not-a-step")]
params_with_bad_key = [
Param("good-param-key", "hi"),
Param("super-long-bad-key" * 1000, "but-good-val"),
]
params_with_bad_val = [
Param("good-param-key", "hi"),
Param("another-good-key", "but-bad-val" * 1000),
]
tags_with_bad_key = [
RunTag("good-tag-key", "hi"),
RunTag("super-long-bad-key" * 1000, "but-good-val"),
]
tags_with_bad_val = [
RunTag("good-tag-key", "hi"),
RunTag("another-good-key", "but-bad-val" * 1000),
]
bad_kwargs = {
"metrics": [
metrics_with_bad_key,
metrics_with_bad_val,
metrics_with_bool_val,
metrics_with_bad_ts,
metrics_with_neg_ts,
metrics_with_bad_step,
],
"params": [params_with_bad_key, params_with_bad_val],
"tags": [tags_with_bad_key, tags_with_bad_val],
}
good_kwargs = {"metrics": [], "params": [], "tags": []}
for arg_name, arg_values in bad_kwargs.items():
for arg_value in arg_values:
final_kwargs = copy.deepcopy(good_kwargs)
final_kwargs[arg_name] = arg_value
with pytest.raises(MlflowException, match=r".+"):
_validate_batch_log_data(**final_kwargs)
# Test that we don't reject entities within the limit
_validate_batch_log_data(
metrics=[Metric("metric-key", 1.0, 0, 0)],
params=[Param("param-key", "param-val")],
tags=[RunTag("tag-key", "tag-val")],
)
def test_validate_experiment_artifact_location():
_validate_experiment_artifact_location("abcde")
_validate_experiment_artifact_location(None)
with pytest.raises(MlflowException, match="Artifact location cannot be a runs:/ URI"):
_validate_experiment_artifact_location("runs:/blah/bleh/blergh")
def test_validate_experiment_name():
_validate_experiment_name("validstring")
bytestring = b"test byte string"
_validate_experiment_name(bytestring.decode("utf-8"))
for invalid_name in ["", 12, 12.7, None, {}, []]:
with pytest.raises(MlflowException, match="Invalid experiment name"):
_validate_experiment_name(invalid_name)
def test_validate_list_experiments_max_results():
client = mlflow.tracking.MlflowClient()
client.list_experiments(max_results=50)
with pytest.raises(MlflowException, match="It must be at most 50000"):
client.list_experiments(max_results=50001)
for invalid_num in [-12, 0]:
with pytest.raises(MlflowException, match="It must be at least 1"):
client.list_experiments(max_results=invalid_num)
def test_db_type():
for db_type in ["mysql", "mssql", "postgresql", "sqlite"]:
# should not raise an exception
_validate_db_type_string(db_type)
# error cases
for db_type in ["MySQL", "mongo", "cassandra", "sql", ""]:
with pytest.raises(MlflowException, match="Invalid database engine") as e:
_validate_db_type_string(db_type)
assert "Invalid database engine" in e.value.message
``` |
{
"source": "jinzhao3611/Political_Stance_Prediction",
"score": 3
} |
#### File: Political_Stance_Prediction/reader/data.py
```python
import random
from typing import (
Tuple,
Optional,
Generator,
Union,
overload,
List,
Iterable,
TypeVar,
Any,
Iterator,
NamedTuple,
Dict,
)
import attr
import pandas as pd
def _to_tokens(items: Iterable[str]) -> Tuple["Token", ...]:
return tuple(Token.from_text(item) for item in items)
class Token(NamedTuple):
text: str
is_special: bool
is_hashtag: bool
is_mention: bool
is_url: bool
@staticmethod
def from_text(text: str) -> "Token":
is_hashtag = text.startswith("#")
is_mention = text.startswith("@") or text.startswith(".@")
is_url = text.startswith("http")
is_special = any([is_hashtag, is_mention, is_url])
return Token(text, is_special, is_hashtag, is_mention, is_url,)
def __str__(self) -> str:
return self.text
def __repr__(self) -> str:
return self.text
@attr.s(frozen=True, slots=True)
class Sentence(Iterable[Tuple[Token, str]]):
id: int = attr.ib()
stance: str = attr.ib()
tokens: Tuple[Token, ...] = attr.ib()
tags: Tuple[str, ...] = attr.ib()
@staticmethod
def create(
instance_id: int, stance: str, tokens: List[str], tags: List[str]
) -> "Sentence":
return Sentence(instance_id, stance, _to_tokens(tokens), tuple(tags))
@property
def token_strs(self) -> Tuple[str, ...]:
return tuple(token.text for token in self.tokens)
def __iter__(self) -> Iterator[Tuple[Token, str]]:
yield from zip(self.tokens, self.tags)
@overload
def __getitem__(self, i: int) -> Tuple[Token, str]:
raise NotImplementedError
@overload
def __getitem__(self, i: slice) -> Tuple[Tuple[Token, ...], Tuple[str, ...]]:
raise NotImplementedError
def __getitem__(
self, i: Union[int, slice]
) -> Tuple[Union[Token, Tuple[Token, ...]], Union[str, Tuple[str, ...]]]:
return self.tokens[i], self.tags[i]
def copy_with_stance(self, stance: str) -> "Sentence":
return Sentence(self.id, stance, self.tokens, self.tags)
@attr.s(frozen=True)
class Corpus(Iterable[Sentence]):
instances: Tuple[Sentence, ...] = attr.ib()
def __len__(self) -> int:
return len(self.instances)
def __iter__(self) -> Iterator[Sentence]:
return iter(self.instances)
@overload
def __getitem__(self, i: int) -> Sentence:
raise NotImplementedError
@overload
def __getitem__(self, i: slice) -> "Corpus":
raise NotImplementedError
def __getitem__(self, i: Union[int, slice]) -> Union[Sentence, "Corpus"]:
if isinstance(i, slice):
return Corpus(self.instances[i])
else:
return self.instances[i]
@property
def stances(self) -> Generator[str, None, None]:
return (instance.stance for instance in self.instances)
def shuffled(self, seed: Optional[int]) -> "Corpus":
if seed is not None:
random.seed(seed)
        insts = list(self.instances)  # random.shuffle needs a mutable sequence, so copy into a list
        random.shuffle(insts)
        return Corpus(tuple(insts))
def load_corpus_csv(file: pd.DataFrame, bias_only, addi_mapping) -> Corpus:
return Corpus(tuple(_gen_instances(file, bias_only, None, addi_mapping)))
def load_corpus(path: str, bias_only: bool, addi_mapping: Optional[Dict]) -> Corpus:
if path.endswith(".csv"):
return load_corpus_csv(pd.read_csv(path), bias_only, addi_mapping)
else:
raise ValueError(f"File should be of CSV format!")
def _gen_instances(
file: pd.DataFrame, bias_only: bool, tokenizer: Any, addi_mapping: Dict
) -> Generator[Sentence, None, None]:
file_data = file.values
for idx, line in enumerate(file_data):
if tokenizer:
tokens = tokenizer(line[0])
else:
tokens = line[0].split()
if addi_mapping:
addi_tokens = []
for token in tokens:
token = token.lower()
for key in addi_mapping:
if key.endswith("*"):
if token.startswith(key[:-1]):
addi_tokens.extend(addi_mapping[key])
else:
if token == key:
addi_tokens.extend(addi_mapping[key])
tokens.extend(addi_tokens)
tags = ["Other"] * len(
tokens
) # in the future we might use token level annotation
stance = line[1]
if bias_only and stance == "Cannot-decide":
continue
yield Sentence.create(idx, stance, tokens, tags)
if __name__ == "__main__":
# with open("../data/moral_dict.pkl", "rb") as f:
# moral_dict_mapping = pickle.load(f)
cp = load_corpus(
"../data/gold-split/test/combined.csv", bias_only=False, addi_mapping=None
)
```
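A short usage sketch of the structures above; the CSV path mirrors the `__main__` block and should be treated as an assumption about the repository layout:
```python
# Load a corpus, inspect a few sentences, and produce a relabeled copy.
from reader.data import load_corpus

corpus = load_corpus("../data/gold-split/test/combined.csv", bias_only=True, addi_mapping=None)
for sentence in corpus[:3]:
    print(sentence.stance, sentence.token_strs)
relabeled = corpus[0].copy_with_stance("Left-leaning")  # the original Sentence is left unchanged
```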
#### File: Political_Stance_Prediction/scripts/corpus_analysis.py
```python
from collections import defaultdict, Counter
import csv
import random
import json
from nltk.metrics import agreement
# constants
HITID = "HITId"
LABEL = "Answer.political bias.label"
WORKERID = "WorkerId"
WORKTIME = "WorkTimeInSeconds"
APPROVE = "Approve"
TEXT = "Input.text"
sample_path = "amt_output_csv/abortion_batch_results.csv"
class CorpusAnalysis(object):
def __init__(self, data_path):
        self.table_titles = list()
# ['HITId', 'HITTypeId', 'Title', 'Description', 'Keywords', 'Reward', 'CreationTime', 'MaxAssignments', 'RequesterAnnotation', 'AssignmentDurationInSeconds', 'AutoApprovalDelayInSeconds', 'Expiration', 'NumberOfSimilarHITs', 'LifetimeInSeconds', 'AssignmentId', 'WorkerId', 'AssignmentStatus', 'AcceptTime', 'SubmitTime', 'AutoApprovalTime', 'ApprovalTime', 'RejectionTime', 'RequesterFeedback', 'WorkTimeInSeconds', 'LifetimeApprovalRate', 'Last30DaysApprovalRate', 'Last7DaysApprovalRate', 'Input.video_title', 'Input.policy', 'Input.media', 'Input.text', 'Answer.political bias.label', 'Approve', 'Reject']
self.full_table = list()
self.hitid_labels = defaultdict(list)
self.hitid_goldlabel = defaultdict(str)
self.hitid_majoritylabel = defaultdict(str)
self.hit_adjudicate = defaultdict(list)
with open(data_path, mode="r") as infile:
reader = csv.reader(infile)
for i, row in enumerate(reader):
if i == 0:
                    self.table_titles = row
else:
self.full_table.append(row)
self.hitid_labels[row[0]].append(row[-1])
        self.title_index = {k: v for v, k in enumerate(self.table_titles)}
self.policy = self.full_table[0][self.title_index["Input.policy"]]
def populate_hitid_goldlabel(self):
# get the majority voting as the gold label in hit_goldlabel
# me as adjudicator breaking ties manually in hit_adjudicate
for k, v in self.hitid_labels.items():
majority_label = Counter(v).most_common()[0][0]
majority_label_count = Counter(v).most_common()[0][1]
if len(v) == 3 and majority_label_count != 1:
self.hitid_goldlabel[k] = majority_label
self.hitid_majoritylabel[k] = majority_label
else:
self.hit_adjudicate[k] = v
# get Majority aggregation/ties
# print(len(self.hit_goldlabel))
# print(len(self.hit_adjudicate.keys()))
        # TODO: change this once the full data is collected and manually adjudicated
for k, v in self.hit_adjudicate.items():
self.hitid_goldlabel[k] = v[0]
# adjudicate, get the gold labels
for row in self.full_table:
hit_id = row[self.title_index[HITID]]
label = row[self.title_index[LABEL]]
if label == self.hitid_goldlabel.get(hit_id, "non-exist"):
row.append("Approved")
else:
row.append("Rejected")
# get label distribution:
# print(Counter(self.hit_goldlabel.values()))
# print("*****************************************")
def turker_accuracy(self):
# get how many turkers got it right/wrong
adjudication_list = list()
for row in self.full_table:
adjudication_list.append(row[-1])
# print("*****************************************")
# print(Counter(adjudication_list))
worker_app_rej = defaultdict(list)
for row in self.full_table:
if row[self.title_index[APPROVE]] == "Approved":
if worker_app_rej[row[self.title_index[WORKERID]]]:
worker_app_rej[row[self.title_index[WORKERID]]][0] += 1
else:
worker_app_rej[row[self.title_index[WORKERID]]].append(1)
worker_app_rej[row[self.title_index[WORKERID]]].append(0)
else:
if worker_app_rej[row[self.title_index[WORKERID]]]:
worker_app_rej[row[self.title_index[WORKERID]]][1] += 1
else:
worker_app_rej[row[self.title_index[WORKERID]]].append(0)
worker_app_rej[row[self.title_index[WORKERID]]].append(1)
worker_error_rate = {
k: [v[0] / (v[0] + v[1]), v[0] + v[1]] for k, v in worker_app_rej.items()
}
sorted_worker_error_rate = {
k: v
for k, v in sorted(
worker_error_rate.items(), key=lambda item: item[1][1], reverse=True
)
}
with open("turker_accuracy/{}.json".format(self.policy), "w") as f:
json.dump(sorted_worker_error_rate, f, indent=2)
x = sum(a[0] for a in sorted_worker_error_rate.values())
y = sum(a[1] for a in sorted_worker_error_rate.values())
length = len(sorted_worker_error_rate)
return x / length, y / length
# def get_iaa(self):
# iaa_data = list()
# prev_hitid = full_table[0][title_index[HITID]]
# for i in range(0, len(full_table), 3):
# iaa_data.append([0, full_table[i][title_index[HITID]], full_table[i][title_index[LABEL]]])
# iaa_data.append([1, full_table[i+1][title_index[HITID]], full_table[i+1][title_index[LABEL]]])
# iaa_data.append([2, full_table[i+2][title_index[HITID]], full_table[i+2][title_index[LABEL]]])
#
# task = agreement.AnnotationTask(data=iaa_data)
# print(task.kappa())
# print(task.alpha())
def get_data(self):
self.hitid_text = defaultdict(str)
for row in self.full_table:
self.hitid_text[row[self.title_index[HITID]]] = row[self.title_index[TEXT]]
text_adjudicate = set()
for id in self.hit_adjudicate:
text_adjudicate.add(self.hitid_text[id])
# print(text_adjudicate)
# with open('tied_sents/{}.txt'.format(self.policy), 'w') as f:
# f.write("\n\n".join(text_adjudicate))
def get_training_data(self):
data = [["text", "label"]]
for id, label in self.hitid_goldlabel.items():
data.append([self.hitid_text[id], label])
with open("unsplitted_data/{}.csv".format(self.policy), "w") as out:
csv_out = csv.writer(out)
for row in data:
csv_out.writerow(row)
def get_avg_accuracy(self):
agreed = 0
disagreed = 0
for id, labels_list in self.hitid_labels.items():
for label in labels_list:
if label == self.hitid_goldlabel[id]:
agreed += 1
else:
disagreed += 1
return agreed / (agreed + disagreed)
def get_wawa(self):
agreed = 0
disagreed = 0
for id, labels_list in self.hitid_labels.items():
for label in labels_list:
if label == self.hitid_majoritylabel[id]:
agreed += 1
else:
disagreed += 1
return agreed / (agreed + disagreed)
def get_random_sampling_accuracy(self, num_sample=100):
        keys = random.sample(list(self.hitid_labels.keys()), num_sample)
agreed = 0
disagreed = 0
for id in keys:
for label in self.hitid_labels[id]:
if label == self.hitid_goldlabel[id]:
agreed += 1
else:
disagreed += 1
return agreed / (agreed + disagreed)
if __name__ == "__main__":
policies = [
"healthcare",
"economic",
"immigration",
"education",
"abortion",
"LGBTQ",
"gun",
"environment",
]
data_paths = [
"amt_output_csv/{}_batch_results.csv".format(policy) for policy in policies
]
for path in data_paths:
ca = CorpusAnalysis(path)
ca.populate_hitid_goldlabel()
print(ca.turker_accuracy())
ca.get_data()
ca.get_training_data()
# print("*******************************")
# print(ca.get_avg_accuracy())
# print(ca.get_wawa())
# print(ca.get_random_sampling_accuracy())
# path = '/Users/jinzhao/Desktop/4th_semester/thesis/thesis/amt_output_csv/healthcare_batch_results.csv'
# ca = CorpusAnalysis(path)
# ca.populate_hitid_goldlabel()
# ca.turker_accuracy()
# ca.get_data()
# ca.get_training_data()
```
#### File: Political_Stance_Prediction/text_cnn/vocab.py
```python
import numpy as np
from typing import List
from reader.data import Corpus, Sentence
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
class Vocabulary(object):
def __init__(self, train_corpus: Corpus):
self.UNK = "<UNK>"
self.PAD = "<PAD>"
self.vocab_index = self._get_vocab_index(train_corpus)
@staticmethod
def _instance_tokens(instance: Sentence) -> List[str]:
return [token.text for token in instance.tokens]
def _get_vocab_index(self, corpus: Corpus):
self.vocab = set()
for ins in corpus:
self.vocab.update(self._instance_tokens(ins))
vocab_index = {v: i for i, v in enumerate(self.vocab, 2)}
vocab_index[self.UNK] = 0
vocab_index[self.PAD] = 1
return vocab_index
def add_word(self, word: str):
if word in self.vocab:
return
else:
self.vocab.add(word)
self.vocab_index[word] = len(self.vocab_index)
class DataTransformer(object):
def __init__(self, corpus: Corpus, max_len=25, vocab: Vocabulary = None):
self.max_len = max_len
self.label_encoding = {"Cannot-decide": 0, "Left-leaning": 1, "Right-leaning": 2}
if isinstance(vocab, Vocabulary):
self.vocab = vocab
else:
self.vocab = Vocabulary(corpus)
data = []
labels = []
for ins in corpus:
data.append(self._instance_tokens(ins))
labels.append(ins.stance)
self.data = self._compose(data)
self.labels = self._label2idx(labels)
@staticmethod
def _instance_tokens(instance: Sentence) -> List[str]:
return [token.text for token in instance.tokens]
def train_val_split(self, val_size=0.1, shuffle=True, random_state=521):
X_train, X_test, y_train, y_test = train_test_split(
self.data,
self.labels,
test_size=val_size,
shuffle=shuffle,
random_state=random_state,
)
return (X_train, y_train), (X_test, y_test)
def _token2idx(self, tokens: List[str]):
unk_idx = self.vocab.vocab_index[self.vocab.UNK]
token_idx = [self.vocab.vocab_index.get(t, unk_idx) for t in tokens]
return token_idx
def _label2idx(self, labels: List[str]):
label_idx = np.array([self.label_encoding[label] for label in labels])
return label_idx
def _compose(self, data: List[List[str]]):
data_lst = []
for line in data:
data_lst.append(self._token2idx(line))
data = self._padding(data_lst)
return data
    def _padding(self, idx_matrix: List[List[int]]):
pad_value = self.vocab.vocab_index[self.vocab.PAD]
padded = pad_sequences(
idx_matrix, maxlen=self.max_len, padding="post", value=pad_value
)
return padded
if __name__ == "__main__":
pass
```
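The `__main__` guard above is empty; here is a hedged sketch of how these classes are wired together, mirroring the calls in `train_CNN.py` below (the training CSV path is a placeholder):
```python
# Build padded index tensors from a Corpus; the vocabulary is shared so that
# dev/test splits reuse the indices learned from the training data.
from reader.data import load_corpus
from text_cnn import Vocabulary, DataTransformer

train_corpus = load_corpus("data/train.csv", bias_only=False, addi_mapping=None)  # placeholder path
vocab = Vocabulary(train_corpus)
train_tr = DataTransformer(train_corpus, max_len=30, vocab=vocab)
(X_train, y_train), (X_val, y_val) = train_tr.train_val_split(val_size=0.1)
print(X_train.shape, y_train.shape)  # (n, 30) padded token ids, integer labels
```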
#### File: jinzhao3611/Political_Stance_Prediction/train_CNN.py
```python
import argparse
from text_cnn import TextCNN, cross_entropy_loss, cnn_config
from text_cnn import Vocabulary, DataTransformer
from text_cnn import accuracy, evaluation, load_fasttext_text
from reader.data import load_corpus, Corpus
from reader.scoring import score_corpus
import tensorflow as tf
import os
import datetime
def get_timestamp():
return datetime.datetime.today().strftime("%Y%m%d-%H%M")
def train(
model,
train_data: DataTransformer,
dev_data: DataTransformer,
test_data: DataTransformer,
out_name,
):
train_X = train_data.data
train_y = train_data.labels
train_dataset = (
tf.data.Dataset.from_tensor_slices((train_X, train_y))
.repeat()
.shuffle(buffer_size=10000)
)
train_dataset = train_dataset.batch(batch_size=cnn_config["batch_size"]).prefetch(
buffer_size=1
)
dev_X = dev_data.data
dev_y = dev_data.labels
test_X = test_data.data
test_y = test_data.labels
optimizer = tf.optimizers.Adam(cnn_config["lr"])
def run_optimization(x, y):
# Wrap computation inside a GradientTape for automatic differentiation
with tf.GradientTape() as g:
# Forward pass
pred = model(x, use_softmax=False)
# Compute loss
loss = cross_entropy_loss(pred, y)
# Variables to update
trainable_variables = model.trainable_variables
# Compute gradients
gradients = g.gradient(loss, trainable_variables)
# Update W and b following gradients
optimizer.apply_gradients(zip(gradients, trainable_variables))
highest_val_f1 = -1
early_stop = 0
for step, (batch_x, batch_y) in enumerate(train_dataset.take(cnn_config["steps"]), 1):
# Run the optimization to update W and b values
run_optimization(batch_x, batch_y)
if step % 100 == 0:
pred = model(batch_x, use_softmax=True)
loss = cross_entropy_loss(pred, batch_y)
acc = accuracy(pred, batch_y)
dev_pred = model(dev_X, use_softmax=True)
val_pre, val_recall, val_f1 = evaluation(dev_pred, dev_y)
if step % 100 == 0:
print(
f"step: {step:03} loss: {loss:.6f}\ttrain acc: {acc:.4f}\tF1: {val_f1:.4f}\tP: {val_pre:.4f}\tR: {val_recall:.4f}"
)
if val_f1 > highest_val_f1:
highest_val_f1 = val_f1
early_stop = 0
model.save_weights(filepath=out_name)
test_pred = model(test_X, use_softmax=True)
pred_out = tf.argmax(test_pred, 1).numpy()
test_pre, test_recall, test_f1 = evaluation(test_pred, test_y)
print(
f"On Test: F1: {test_f1:.4f}\tP: {test_pre:.4f}\tR: {test_recall:.4f}"
)
else:
early_stop += 1
if early_stop > 5:
break
def predict(model, max_len, test_corpus: Corpus, vocab, ckpt_path=None) -> Corpus:
if ckpt_path is not None:
model.load_weights(ckpt_path)
label_encoding = {"Cannot-decide": 0, "Left-leaning": 1, "Right-leaning": 2}
rev_label_encoding = {v: k for k, v in label_encoding.items()}
test_data = DataTransformer(test_corpus, max_len=max_len, vocab=vocab)
pred = model(test_data.data, use_softmax=True)
pre, rec, f1 = evaluation(pred, test_data.labels)
print(f"On Test: F1: {f1:.4f}\tP: {pre:.4f}\tR: {rec:.4f}")
pred = tf.argmax(pred, 1).numpy()
return Corpus(
tuple(
instance.copy_with_stance(rev_label_encoding[pred])
for instance, pred in zip(test_corpus, pred)
)
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("train_path")
parser.add_argument("dev_path")
parser.add_argument("test_path")
parser.add_argument("embed_path")
args = parser.parse_args()
MODEL_DIR = "cnn_output"
train_corpus = load_corpus(args.train_path, bias_only=False, addi_mapping=None)
dev_corpus = load_corpus(args.dev_path, bias_only=False, addi_mapping=None)
test_corpus = load_corpus(args.test_path, bias_only=False, addi_mapping=None)
vocab = Vocabulary(train_corpus)
embed = load_fasttext_text(args.embed_path, vocab.vocab_index, dim=300)
fs = [3, 4, 5]
train_tr = DataTransformer(train_corpus, max_len=30, vocab=vocab)
dev_tr = DataTransformer(dev_corpus, max_len=30, vocab=vocab)
test_tr = DataTransformer(test_corpus, max_len=30, vocab=vocab)
senti_cnn = TextCNN(
embed=embed, filter_nums=100, filter_sizes=fs, max_sent_len=30, num_classes=3,
)
ts = get_timestamp()
train(
senti_cnn,
train_tr,
dev_tr,
test_tr,
out_name=os.path.join(MODEL_DIR, ts, f'cnn_{cnn_config["embed_dim"]}.ckpt'),
)
senti_cnn = TextCNN(
embed=embed, filter_nums=100, filter_sizes=fs, max_sent_len=30, num_classes=3
)
    pred_corpus = predict(
        senti_cnn,
        30,
        test_corpus,
        vocab,
        os.path.join(MODEL_DIR, ts, f'cnn_{cnn_config["embed_dim"]}.ckpt'),
    )
score = score_corpus(test_corpus, pred_corpus)
print(score)
if __name__ == "__main__":
main()
``` |
{
"source": "jinzhengyu1212/Clovers",
"score": 3
} |
#### File: Clovers/Python/test1.py
```python
def f(n):
ret=1
for i in range(1,n+1):
ret*=i
return ret
A=input().split()
n=int(A[0]); m=int(A[1])
print(f(n+m)//f(n)//f(m))  # integer division avoids float precision loss for large factorials
``` |
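The script computes the binomial coefficient C(n+m, n); a quick cross-check against the standard library (Python 3.8+), useful if the factorial version is ever refactored:
```python
# Cross-check the factorial formula against math.comb.
import math

def f(n):
    ret = 1
    for i in range(1, n + 1):
        ret *= i
    return ret

for n, m in [(2, 3), (10, 10), (30, 25)]:
    assert f(n + m) // f(n) // f(m) == math.comb(n + m, n)
```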
{
"source": "jinzhijie/authlib-injector-updater",
"score": 2
} |
#### File: jinzhijie/authlib-injector-updater/cliui.py
```python
from colorama import init, Fore, Back, Style
__author__ = 'SkEy'
__all__ = ['Fore', 'Back', 'Style', 'UIPrinter']
class _InternalColors:
'''
    Internal color scheme
'''
CAPTION = Style.RESET_ALL + Fore.LIGHTBLACK_EX
PRINT = Style.NORMAL
NOTE = Fore.LIGHTBLUE_EX
WAIT = Fore.LIGHTBLACK_EX
SUCC = Fore.LIGHTGREEN_EX
WARN = Fore.BLACK + Back.YELLOW
FAIL = Fore.LIGHTWHITE_EX + Back.RED
ASK = Fore.WHITE + Back.BLUE
CONFIRM = Fore.LIGHTRED_EX + Back.YELLOW
NO = Fore.LIGHTWHITE_EX + Back.LIGHTBLACK_EX
END = Style.RESET_ALL
COLORAMA_INITED = False
class UIPrinter(_InternalColors):
'''
    Basic user-interaction printing interface
'''
def __init__(self, name):
global COLORAMA_INITED
if not COLORAMA_INITED:
init()
COLORAMA_INITED = True
self.cprint = lambda prompt, msg, no_new_line = False: print(prompt + f'{self.CAPTION} {name}{self.END}:' , msg, end = '' if no_new_line else None)
    def print(self, msg):
        '''General message'''
        self.cprint(f'{self.PRINT}[ ]', msg)
    def note(self, msg):
        '''Note / hint'''
        self.cprint(f'{self.NOTE}[*]', msg)
    def wait(self, msg):
        '''Please wait'''
        self.cprint(f'{self.WAIT}[.]', msg)
    def succ(self, msg):
        '''Success'''
        self.cprint(f'{self.SUCC}[+]', msg)
    def warn(self, msg):
        '''Warning'''
        self.cprint(f'{self.WARN}[!]', msg)
    def fail(self, msg):
        '''Error'''
        self.cprint(f'{self.FAIL}[-]', msg)
    def ask(self, msg):
        '''General question'''
        self.cprint(f'{self.ASK}[?]', msg, no_new_line=True)
    def no(self, msg):
        '''Operation not completed'''
        self.cprint(f'{self.NO}[=]', msg)
    def confirm(self, msg):
        '''Confirmation prompt'''
        self.cprint(f'{self.CONFIRM}[?]', msg, no_new_line=True)
``` |
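A minimal usage sketch of the printer above (assuming the script runs from the repository root so that `cliui` is importable, and that `colorama` is installed):
```python
# Each method prints the message behind a colored tag and the printer's name.
from cliui import UIPrinter

ui = UIPrinter("updater")
ui.note("Checking for a new authlib-injector build ...")
ui.succ("Download finished")
ui.ask("Overwrite the existing jar? [y/N] ")  # ask/confirm keep the cursor on the same line
answer = input()
```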
{
"source": "jinzhuoran/CogKGE",
"score": 2
} |
#### File: cogkge/core/sampler.py
```python
import torch
class UnifNegativeSampler():
def __init__(self, triples, entity_dict_len, relation_dict_len, node_lut=None, device=torch.device('cuda:0')):
# tensor(len,3)
self.triples = triples
self.entity_dict_len = entity_dict_len
self.relation_dict_len = relation_dict_len
self.device = device
self.node_lut = node_lut
def tuple_to_dict(self, data_tuple):
data_dict = {
"h": data_tuple[0],
"r": data_tuple[1],
"t": data_tuple[2],
}
if len(data_tuple) == 3:
pass
elif len(data_tuple) == 5: # time info
data_dict.update({
"start": data_tuple[3],
"end": data_tuple[4],
})
elif len(data_tuple) == 6: # type info
data_dict.update({
"h_type": data_tuple[3],
"t_type": data_tuple[4],
"r_type": data_tuple[5],
})
elif len(data_tuple) == 7: # descriptions info
data_dict.update({
"h_token": data_tuple[3],
"t_token": data_tuple[4],
"h_mask": data_tuple[5],
"t_mask": data_tuple[6],
})
else:
raise ValueError("Length of data_tuple {} unexpected!".format(len(data_tuple)))
return data_dict
def create_negative(self, batch_pos_tuple):
batch_pos_dict = self.tuple_to_dict(batch_pos_tuple)
batch_neg_dict = self._create_negative(batch_pos_dict)
        return list(batch_neg_dict.values())
def _create_negative(self, batch_pos_dict):
# {"h":tensor(batch,),"r":tensor(batch,),"t":tensor(batch,),...}
h, r, t = batch_pos_dict["h"], batch_pos_dict["r"], batch_pos_dict["t"]
batch_pos = torch.cat((h.unsqueeze(1), r.unsqueeze(1), t.unsqueeze(1)), dim=1).to(
self.device) # tensor(batch,3)
batch_neg = batch_pos.clone().to(self.device)
entity_number = torch.randint(self.entity_dict_len, (batch_neg.size()[0],)).to(self.device)
mask = torch.rand(batch_neg.size()[0])
head_mask = (mask > 0.5).to(self.device)
tail_mask = (mask <= 0.5).to(self.device)
batch_neg[head_mask, 0] = entity_number[head_mask].to(self.device)
batch_neg[tail_mask, 2] = entity_number[tail_mask].to(self.device)
index_dict = {"h": 0, "t": 2}
batch_neg_dict = {}
for key, values in batch_pos_dict.items():
if len(key.split("_")) > 1 and key != "r_type":
index, attribute = key.split("_")
lut_values = getattr(self.node_lut, attribute)
batch_neg_dict.update({key: lut_values[batch_neg[:, index_dict[index]]]})
else:
batch_neg_dict.update({key: batch_pos_dict[key]})
batch_neg_dict.update({"h": batch_neg[:, 0],
"r": batch_neg[:, 1],
"t": batch_neg[:, 2]})
return batch_neg_dict
class BernNegativeSampler():
def __init__(self, triples, entity_dict_len, relation_dict_len, device=torch.device('cuda:0')):
# tensor(len,3)
self.triples = triples
self.entity_dict_len = entity_dict_len
self.relation_dict_len = relation_dict_len
self.device = device
h_r_uniq, t_count = torch.unique(triples[:, :-1], return_counts=True, dim=0)
r_t_uniq, h_count = torch.unique(triples[:, 1:], return_counts=True, dim=0)
self.P_remove_head = torch.zeros(self.relation_dict_len)
for r in range(self.relation_dict_len):
idx = h_r_uniq[:, 1] == r
tph = torch.mean(t_count[idx].type(torch.FloatTensor))
idx = r_t_uniq[:, 0] == r
hpt = torch.mean(h_count[idx].type(torch.FloatTensor))
self.P_remove_head[r] = tph / (tph + hpt)
def create_negative(self, batch_pos):
# tensor(batch,3)
batch_neg = batch_pos.clone()
entity_number = torch.randint(self.entity_dict_len, (batch_neg.size()[0],)).to(self.device)
relation = batch_pos[:, 1]
mask = torch.rand(batch_neg.size()[0])
head_mask = (mask < self.P_remove_head[relation]).to(self.device)
tail_mask = (mask >= self.P_remove_head[relation]).to(self.device)
batch_neg[head_mask, 0] = entity_number[head_mask].to(self.device)
batch_neg[tail_mask, 2] = entity_number[tail_mask].to(self.device)
return batch_neg
class AdversarialSampler:
    def __init__(self, triples, entity_dict_len, relation_dict_len, neg_per_pos, node_lut=None, device=torch.device('cuda:0')):
        # tensor(len,3)
        self.triples = triples
        self.entity_dict_len = entity_dict_len
        self.relation_dict_len = relation_dict_len
        self.neg_per_pos = neg_per_pos
        self.node_lut = node_lut  # needed by _create_negative when type/description features accompany the triples
        self.device = device
def create_negative(self,batch_pos_tuple):
batch_pos_dict = self.tuple_to_dict(batch_pos_tuple)
batch_neg_dict = self._create_negative(batch_pos_dict)
return list(batch_neg_dict.values())
def _create_negative(self, batch_pos_dict):
"""
batch_pos:(batch,3)
return: batch_neg(batch * neg_per_pos,3)
"""
h,r,t = batch_pos_dict["h"],batch_pos_dict["r"],batch_pos_dict["t"]
batch_pos = torch.cat((h.unsqueeze(1), r.unsqueeze(1), t.unsqueeze(1)), dim=1).to(self.device) # tensor(batch,3)
batch_neg = torch.cat([self.__create_negative(batch_pos) for i in range(self.neg_per_pos)], dim=0)
index_dict = {"h":0,"t":2}
batch_neg_dict = {}
for key,values in batch_pos_dict.items():
if len(key.split("_")) > 1 and key != "r_type":
index,attribute = key.split("_")
lut_values = getattr(self.node_lut,attribute)
batch_neg_dict.update({key:lut_values[batch_neg[:,index_dict[index]]]})
else:
batch_neg_dict.update({key:batch_pos_dict[key]})
batch_neg_dict.update({"h": batch_neg[:, 0],
"r": batch_neg[:, 1],
"t": batch_neg[:, 2]})
return batch_neg_dict
def __create_negative(self, batch_pos):
batch_neg = batch_pos.clone()
entity_number = torch.randint(self.entity_dict_len, (batch_neg.size()[0],)).to(self.device)
mask = torch.rand(batch_neg.size()[0])
head_mask = (mask > 0.5).bool().to(self.device)
tail_mask = (mask <= 0.5).bool().to(self.device)
batch_neg[head_mask, 0] = entity_number[head_mask].to(self.device)
batch_neg[tail_mask, 2] = entity_number[tail_mask].to(self.device)
return batch_neg
def tuple_to_dict(self,data_tuple):
data_dict = {
"h":data_tuple[0],
"r":data_tuple[1],
"t":data_tuple[2],
}
if len(data_tuple) == 3:
pass
elif len(data_tuple) == 5: # time info
data_dict.update({
"start":data_tuple[3],
"end":data_tuple[4],
})
elif len(data_tuple) == 6: # type info
data_dict.update({
"h_type":data_tuple[3],
"t_type":data_tuple[4],
"r_type":data_tuple[5],
})
elif len(data_tuple) == 7: # descriptions info
data_dict.update({
"h_token":data_tuple[3],
"t_token":data_tuple[4],
"h_mask":data_tuple[5],
"t_mask":data_tuple[6],
})
else:
raise ValueError("Length of data_tuple {} unexpected!".format(len(data_tuple)))
return data_dict
if __name__ == "__main__":
fake_triples = torch.tensor([[1, 0, 0],
[1, 0, 4],
[1, 0, 3],
[5, 0, 3],
[2, 1, 5],
[4, 2, 2]]).to("cuda:0")
batch_pos = torch.tensor([[4, 2, 1],
[0, 3, 4]]).to("cuda:0")
data_batch = (batch_pos[:,0],batch_pos[:,1],batch_pos[:,2])
# sampler=UnifNegativeSampler(fake_triples,5,4)
# batch_neg=sampler.create_negative(batch_pos)
# sampler = BernNegativeSampler(fake_triples, 6, 3)
# batch_neg = sampler.create_negative(batch_pos)
sampler=AdversarialSampler(fake_triples,5,4,3)
batch_neg=sampler.create_negative(data_batch)
print(batch_neg)
```
#### File: data/loader/codexmloader.py
```python
from .baseloader import BaseLoader
class CODEXMLoader(BaseLoader):
def __init__(self, dataset_path, download=False):
super().__init__(dataset_path, download,
raw_data_path="CODEXM/raw_data",
processed_data_path="CODEXM/processed_data",
train_name="train.txt",
valid_name="valid.txt",
test_name="test.txt",
data_name="CODEXM")
def download_action(self):
self.downloader.CODEXM()
```
#### File: data/loader/fb15k237loader.py
```python
from .baseloader import BaseLoader
class FB15K237Loader(BaseLoader):
def __init__(self,dataset_path,download=False):
super().__init__(dataset_path, download,
raw_data_path="FB15K237/raw_data",
processed_data_path="FB15K237/processed_data",
train_name="train.txt",
valid_name="valid.txt",
test_name="test.txt",
data_name="FB15K237"
)
def download_action(self):
self.downloader.FB15K237()
```
#### File: data/processor/codexlprocessor.py
```python
from .baseprocessor import BaseProcessor
class CODEXLProcessor(BaseProcessor):
def __init__(self, node_lut, relation_lut, reprocess):
super().__init__("CODEXL", node_lut, relation_lut, reprocess)
```
#### File: data/processor/cskgprocessor.py
```python
from .baseprocessor import BaseProcessor
class CSKGProcessor(BaseProcessor):
def __init__(self, node_lut, relation_lut, reprocess):
super().__init__("CSKG", node_lut, relation_lut, reprocess)
```
#### File: data/processor/fb15k237processor.py
```python
from .baseprocessor import BaseProcessor
class FB15K237Processor(BaseProcessor):
def __init__(self, node_lut, relation_lut, reprocess):
super().__init__("FB15K237", node_lut, relation_lut, reprocess)
```
#### File: cogkge/models/rescal.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from cogkge.models.basemodel import BaseModel
class Rescal(BaseModel):
def __init__(self,
entity_dict_len,
relation_dict_len,
embedding_dim,
penalty_weight=0.0):
super().__init__(model_name="Rescal", penalty_weight=penalty_weight)
self.entity_dict_len = entity_dict_len
self.relation_dict_len = relation_dict_len
self.embedding_dim = embedding_dim
self.entity_embedding = nn.Embedding(entity_dict_len, embedding_dim)
self.relation_embedding = nn.Embedding(relation_dict_len, embedding_dim * embedding_dim)
self._reset_param()
    def _reset_param(self):
        # Re-initialize the embedding parameters
        nn.init.xavier_uniform_(self.entity_embedding.weight.data)
        nn.init.xavier_uniform_(self.relation_embedding.weight.data)
    def get_relation_embedding(self, relation_ids):
        # Get the embeddings of the given relations
        return self.relation_embedding(relation_ids)
    def get_entity_embedding(self, entity_ids):
        # Get the embeddings of the given entities
        return self.entity_embedding(entity_ids)
    def get_triplet_embedding(self, data):
        # Get the (head, relation, tail) embeddings of a batch of triples
        h_embedding = self.entity_embedding(data[0])
        r_embedding = self.relation_embedding(data[1])
        t_embedding = self.entity_embedding(data[2])
        return h_embedding, r_embedding, t_embedding
def forward(self, data):
        batch_h, batch_r, batch_t = data[0], data[1], data[2]
A = self.entity_embedding(batch_h) # (batch,embedding)
A = F.normalize(A, p=2, dim=-1)
R = self.relation_embedding(batch_r).view(-1, self.embedding_dim,self.embedding_dim) # (batch,embedding,embedding)
A_T = self.entity_embedding(batch_t).view(-1, self.embedding_dim, 1) # (batch,embedding,1)
A_T = F.normalize(A_T, p=2, dim=1)
tr = torch.matmul(R, A_T) # (batch,embedding_dim,1)
tr = tr.view(-1, self.embedding_dim) # (batch,embedding_dim)
return -torch.sum(A * tr, dim=-1) # (batch,)
def loss(self, data):
        # Compute the loss on positive triples and sampled negative triples
pos_data = data
pos_data = self.data_to_device(pos_data)
neg_data = self.model_negative_sampler.create_negative(data)
neg_data = self.data_to_device(neg_data)
pos_score = self.forward(pos_data)
neg_score = self.forward(neg_data)
return self.model_loss(pos_score, neg_score) + self.penalty(data)
```
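A hedged sketch of scoring a batch of triples with the model above; the loss and negative-sampling wiring live in `BaseModel` (not shown here), so only the raw scores are illustrated and `forward` is called directly to avoid depending on `BaseModel` internals:
```python
# Score a batch of (head, relation, tail) index triples with RESCAL.
# forward returns -h^T R t, so lower values correspond to more plausible triples.
import torch

model = Rescal(entity_dict_len=100, relation_dict_len=20, embedding_dim=50)
h = torch.tensor([0, 1, 2])
r = torch.tensor([0, 0, 1])
t = torch.tensor([3, 4, 5])
scores = model.forward((h, r, t))
print(scores.shape)  # torch.Size([3])
```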
#### File: cogkge/utils/kr_utils.py
```python
import datetime
import os
import random
import numpy as np
import torch
# import the specified class
def import_class(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
# compute the output path from the data path
def cal_output_path(data_path, model_name):
output_path = os.path.join(*data_path.split("/")[:], "experimental_output",
model_name + str(datetime.datetime.now())[:-4]).replace(
':', '-').replace(' ', '--')
return output_path
def init_cogkge(device_id, seed):
"""
    Initialize CogKGE.
    :param device_id: to use GPUs, pass GPU ids such as "0,1" or "cuda:0,cuda:1"; to use the CPU, pass "cpu"
    :param seed: random seed
    :return: device
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
device_list = str(device_id).strip().lower().replace('cuda:', '')
cpu = device_list == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
elif device_list: # non-cpu device requested
os.environ['CUDA_VISIBLE_DEVICES'] = device_list # set environment variable
assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device_list} requested' # check availability
    device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
return device
``` |
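A short sketch of how these helpers are typically combined at the start of an experiment, assuming the package is importable as `cogkge`; the data path and model name are placeholders, not values mandated by CogKGE:
```python
# Fix the random seeds, choose a device, and derive a timestamped output directory.
from cogkge.utils.kr_utils import cal_output_path, init_cogkge

device = init_cogkge(device_id="cpu", seed=1)  # pass GPU ids such as "0,1" on a GPU machine
output_path = cal_output_path("dataset/FB15K237", model_name="Rescal")  # placeholder path
print(device, output_path)
```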
{
"source": "jinzitian/HMM",
"score": 3
} |
#### File: HMM/hmmtool/hmm.py
```python
import numpy as np
class HMM(object):
def __init__(self, state_num, observe_num, train_params = 'ste'):
self.s_n = state_num
self.o_n = observe_num
self.A = np.random.rand(self.s_n, self.s_n)
self.A = self.A/self.A.sum(axis = 1).reshape([-1,1])
self.B = np.random.rand(self.s_n, self.o_n)
self.B = self.B/self.B.sum(axis = 1).reshape([-1,1])
self.pi = np.random.rand(self.s_n)
self.pi = self.pi/sum(self.pi)
self.train_data = []
self.train_params = train_params
    # input_data format: [[o1,o2,o3,...,ox], [o1,o2,o3,...,oy]]
    # Multiple observation sequences are supported. Observations must already be mapped to integer indices via the
    # actual observation vocabulary; the decoded hidden states are also indices and must be mapped back afterwards.
def add_data(self, input_data):
self.train_data.extend(input_data)
    # Compute all forward probabilities
# [[o1,o2,o3,...,ot1], [o1,o2,o3,...,ot2]]
# [t1 * s_n, t2 * s_n]
def forward(self, o_seqs):
self.alpha = []
for seq in o_seqs:
alpha = np.zeros((len(seq),self.s_n))
for i in range(self.s_n):
alpha[0,i] = self.pi[i] * self.B[i,seq[0]]
for r in range(1,len(seq)):
for i in range(self.s_n):
alpha[r,i] = sum([alpha[r-1,j]*self.A[j,i] for j in range(self.s_n)])*self.B[i,seq[r]]
self.alpha.append(alpha)
    # Compute all backward probabilities
# [[o1,o2,o3,...,ot1], [o1,o2,o3,...,ot2]]
# [t1 * s_n, t2 * s_n]
def backward(self, o_seqs):
self.beta = []
for seq in o_seqs:
beta = np.zeros((len(seq),self.s_n))
for i in range(self.s_n):
beta[len(seq)-1,i] = 1
for r in range(len(seq)-2,-1,-1):
for i in range(self.s_n):
beta[r,i] = sum([self.A[i,j]*self.B[j,seq[r+1]]*beta[r+1,j] for j in range(self.s_n)])
self.beta.append(beta)
    #Probability of being in each state at time t, given the model parameters and an observation sequence
# t * s_n
# 多条观测序列输入 则为[t1 * s_n, t2 * s_n, ... , tk * s_n]
def gamma_matrix(self):
self.gamma = []
for i in range(len(self.alpha)):
alpha = self.alpha[i]
beta = self.beta[i]
self.gamma.append(alpha*beta/sum(alpha[len(alpha)-1]))
    #Probability of being in state i at time t and state j at time t+1, given the model parameters and an observation sequence
# t * s_n * s_n
# 多条观测序列输入 则为[t1-1 * s_n * s_n, t2-1 * s_n * s_n, ... , tk-1 * s_n * s_n]
def ksi_matrix(self):
self.ksi = []
for i in range(len(self.train_data)):
seq = self.train_data[i]
alpha = self.alpha[i]
beta = self.beta[i]
ksi = np.zeros((len(seq)-1, self.s_n, self.s_n))
for t in range(len(seq)-1):
for i in range(self.s_n):
for j in range(self.s_n):
ksi[t,i,j] = alpha[t,i]*self.A[i,j]*self.B[j,seq[t+1]]*beta[t+1,j]/sum(alpha[len(alpha)-1])
self.ksi.append(ksi)
    #Baum-Welch algorithm (EM)
def train(self, maxStep = 10, delta = 0.01):
step = 0
while step < maxStep:
print("=============== step {} ===============".format(step))
            #E-step: with the model parameters fixed, estimate the hidden-state statistics
'''
self.forward(self.train_data)
'''
            #estimate_prob already runs the forward pass, so no separate forward() call is needed
log_prob = [np.log(p) for p in self.estimate_prob(self.train_data)]
self.backward(self.train_data)
self.gamma_matrix()
self.ksi_matrix()
            #M-step: with the hidden-state statistics fixed, re-estimate the model parameters
new_pi = sum([gamma[0] for gamma in self.gamma])/len(self.gamma)
new_A = sum([ksi.sum(axis = 0) for ksi in self.ksi])/np.reshape(sum([gamma[:-1].sum(axis = 0) for gamma in self.gamma]), [-1,1])
sn_on_list = []
for i in range(len(self.train_data)):
seq = np.array(self.train_data[i])
gamma = self.gamma[i]
sn_on = []
for o in range(self.o_n):
sn_o = (np.reshape(seq == o, [-1,1]) * gamma).sum(axis = 0).reshape([-1,1])
sn_on.append(sn_o)
sn_on_list.append(np.concatenate(sn_on,axis = 1))
new_B = sum(sn_on_list)/np.reshape(sum([gamma.sum(axis = 0) for gamma in self.gamma]), [-1,1])
            #also stop early when the parameter updates are small
pi_error = np.sum(np.square(new_pi - self.pi))
A_error = np.sum(np.square(new_A - self.A))
B_error = np.sum(np.square(new_B - self.B))
print("pi_error is {}".format(pi_error))
print("A_error is {}".format(A_error))
print("B_error is {}".format(B_error))
print("log_prob is {}".format(log_prob))
if pi_error < delta and A_error < delta and B_error < delta:
if 's' in self.train_params:
self.pi = new_pi
if 't' in self.train_params:
self.A = new_A
if 'e' in self.train_params:
self.B = new_B
break
if 's' in self.train_params:
self.pi = new_pi
if 't' in self.train_params:
self.A = new_A
if 'e' in self.train_params:
self.B = new_B
step += 1
    #Viterbi algorithm
    #single input: [[o1,o2,o3,...,ot1]]
    #multiple inputs: [[o1,o2,o3,...,ot1],[o1,o2,o3,...,ot2]]
    #output: [(prob1, [s1,s2,s3,...,st1]), (prob2, [s1,s2,s3,...,st2])]
def decode(self, o_seq):
result = []
for i in range(len(o_seq)):
seq = o_seq[i]
last_max_state = [[-1]*self.s_n]
max_state_prob_now = [self.pi[s]*self.B[s,seq[0]] for s in range(self.s_n)]
for o in seq[1:]:
current_last_max_state = [0]*self.s_n
max_state_prob_new = [0]*self.s_n
for ns in range(self.s_n):
candidates = [max_state_prob_now[bs]*self.A[bs,ns]*self.B[ns,o] for bs in range(self.s_n)]
max_index = np.argmax(candidates)
current_last_max_state[ns] = max_index
max_state_prob_new[ns] = candidates[max_index]
last_max_state.append(current_last_max_state)
max_state_prob_now = max_state_prob_new
            #backtrack the most likely state sequence
hidden_state = []
current_state = np.argmax(max_state_prob_now)
max_prob = max_state_prob_now[current_state]
hidden_state.append(current_state)
for current_t in range(len(seq)-1,0,-1):
current_state = last_max_state[current_t][current_state]
hidden_state.append(current_state)
result.append((max_prob, hidden_state[::-1]))
return result
    #Compute the probability P(O|λ) of each observation sequence
    #single input: [[o1,o2,o3,...,ot1]]
    #multiple inputs: [[o1,o2,o3,...,ot1],[o1,o2,o3,...,ot2]]
    #output: [prob1, prob2]
def estimate_prob(self, o_seq):
self.forward(o_seq)
result = []
for alpha in self.alpha:
result.append(sum(alpha[len(alpha)-1]))
return result
if __name__ == '__main__':
s1 = np.random.randint(6,size = 60)
s2 = np.random.randint(6,size = 40)
s = np.concatenate([s1,s2])
sh = s.reshape([-1,1])
myhmm = HMM(3,6)
myhmm.add_data([s])
myhmm.train(maxStep=50,delta=0.001)
print(myhmm.pi)
print(myhmm.A)
print(myhmm.B)
print(myhmm.estimate_prob([s]))
from hmmlearn import hmm
model = hmm.MultinomialHMM(n_components=3, n_iter=50, tol=0.01)
model.fit(sh,lengths=[60,40])
print(model.startprob_)
print(model.transmat_)
print(model.emissionprob_)
print(np.e**model.score(sh))
'''
model = hmm.MultinomialHMM(n_components=3)
model.startprob_=myhmm.pi
model.transmat_=myhmm.A
model.emissionprob_=myhmm.B
'''
ss = np.random.randint(6,size = 14)
max_hidden_prob, hidden_state = myhmm.decode([ss])[0]
print(max_hidden_prob, hidden_state)
o_prob = myhmm.estimate_prob([ss])[0]
print(o_prob)
d = model.decode(ss.reshape([-1,1]), algorithm="viterbi")
max_hidden_prob, hidden_state = np.e**d[0], list(d[1])
print(max_hidden_prob, hidden_state)
o_prob = np.e**(model.score(ss.reshape([-1,1])))
print(o_prob)
``` |
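Since the class expects index-encoded observations (see the comments above), a typical call sequence looks like this hedged sketch (the vocabulary and observation names are made up):
```python
# map raw observations to integer indices before feeding them to the HMM
vocab = {'walk': 0, 'shop': 1, 'clean': 2}
observations = ['walk', 'shop', 'clean', 'walk']
encoded = [vocab[o] for o in observations]

model = HMM(state_num=2, observe_num=len(vocab))
model.add_data([encoded])
model.train(maxStep=20, delta=0.001)

prob, states = model.decode([encoded])[0]     # most likely hidden-state indices
likelihood = model.estimate_prob([encoded])[0]
```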
{
"source": "Jinzy/PIconnect",
"score": 2
} |
#### File: PIconnect/PIconnect/AFSDK.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import os
# pragma pylint: enable=unused-import
import sys
from builtins import (
ascii,
bytes,
chr,
dict,
filter,
hex,
input,
int,
list,
map,
next,
object,
oct,
open,
pow,
range,
round,
str,
super,
zip,
)
try:
import clr
# Get the installation directory from the environment variable or fall back
# to the Windows default installation path
PIAF_SDK = os.getenv("PIHOME", "C:\\Program Files\\PIPC")
PIAF_SDK += "\\AF\\PublicAssemblies\\4.0\\"
if not os.path.isdir(PIAF_SDK):
raise ImportError("PIAF SDK not found in %s, check installation" % PIAF_SDK)
sys.path.append(PIAF_SDK)
clr.AddReference("OSIsoft.AFSDK") # pylint: disable=no-member
from OSIsoft import AF # pylint: wrong-import-position
AF_SDK_VERSION = AF.PISystems().Version
print("OSIsoft(r) AF SDK Version: {}".format(AF_SDK_VERSION))
except ImportError:
import enum
import warnings
warnings.warn("Can't import the PI AF SDK, running in test mode", ImportWarning)
AF_SDK_VERSION = "2.7_compatible"
# pragma pylint: disable=invalid-name, unused-argument, too-few-public-methods
class AF:
"""Mock class of the AF namespace"""
class Data:
"""Mock class of the AF.Data namespace"""
class AFBoundaryType(enum.IntEnum):
"""Mock class of the AF.Data.AFBoundaryType enumeration"""
Inside = 0
Outside = 1
Interpolated = 2
class PI:
"""Mock class of the AF.PI namespace"""
class PIPoint:
"""Mock class of the AF.PI.PIPoint class"""
@staticmethod
def FindPIPoints(connection, query, source, attribute_names):
"""Stub to mock querying PIPoints"""
return []
class PIServer:
"""Mock class of the AF.PI.PIServer class"""
def __init__(self, name):
self.Name = name
def Connect(self, retry):
"""Stub for connecting to test server"""
pass
def Disconnect(self):
"""Stub for disconnecting from test server"""
pass
class PIServers:
"""Mock class of the AF.PI.PIServers class"""
DefaultPIServer = None
def __init__(self):
self._init()
def _init(self):
if not self.DefaultPIServer:
self.DefaultPIServer = AF.PI.PIServer("Testing")
def __iter__(self):
self._init()
return (x for x in [self.DefaultPIServer])
class AFElement:
"""Mock class of the AF.AFElement class"""
def __init__(self, name):
self.Name = name
class AFDatabase:
"""Mock class of the AF.AFDatabase class"""
def __init__(self, name):
self.Name = name
self.Elements = [AF.AFElement("TestElement")]
class PISystem:
"""Mock class of the AF.PISystem class"""
class InternalDatabases:
"""Mock class for the AF.PISystem.Databases property"""
def __init__(self):
self.DefaultDatabase = AF.AFDatabase("TestDatabase")
def __iter__(self):
return (x for x in [self.DefaultDatabase])
def __init__(self, name):
self.Name = name
self.Databases = AF.PISystem.InternalDatabases()
def Connect(self):
"""Stub to connect to the testing system"""
pass
def Disconnect(self):
"""Stub to disconnect from the testing system"""
pass
class PISystems:
"""Mock class of the AF.PISystems class"""
DefaultPISystem = None
Version = "0.0.0.0"
def __init__(self):
self._init()
def _init(self):
if not self.DefaultPISystem:
self.DefaultPISystem = AF.PISystem("TestingAF")
def __iter__(self):
self._init()
return (x for x in [self.DefaultPISystem])
class Time:
"""Mock class of the AF.Time namespace"""
class AFTimeRange:
"""Mock class of the AF.Time.AFTimeRange class"""
def __init__(self, start_time, end_time):
pass
class AFTimeSpan:
"""Mock class of the AF.Time.AFTimeSpan class"""
def __init__(self):
pass
@staticmethod
def Parse(interval):
"""Stub for parsing strings that should return a AFTimeSpan"""
return AF.Time.AFTimeSpan()
# pragma pylint: enable=invalid-name, unused-argument, too-few-public-methods
```
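When the real AF SDK cannot be imported, callers see the mock above; a short sketch of exercising it directly in test mode:
```python
from PIconnect.AFSDK import AF, AF_SDK_VERSION

print(AF_SDK_VERSION)                # "2.7_compatible" when running against the mock
server = list(AF.PI.PIServers())[0]
print(server.Name)                   # "Testing"
print(AF.PI.PIPoint.FindPIPoints(server, "*", None, None))  # [] -- the stub returns nothing
```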
#### File: PIconnect/PIconnect/config.py
```python
class PIConfigContainer:
_default_timezone = "UTC"
@property
def DEFAULT_TIMEZONE(self):
return self._default_timezone
@DEFAULT_TIMEZONE.setter
def DEFAULT_TIMEZONE(self, value):
import pytz
if value not in pytz.all_timezones:
raise ValueError("{v!r} not found in pytz.all_timezones".format(v=value))
self._default_timezone = value
PIConfig = PIConfigContainer()
```
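The setter above validates the value against pytz before accepting it, for example:
```python
from PIconnect.config import PIConfig

PIConfig.DEFAULT_TIMEZONE = "Europe/Amsterdam"    # accepted, present in pytz.all_timezones
try:
    PIConfig.DEFAULT_TIMEZONE = "Mars/Olympus"    # not a known zone
except ValueError as exc:
    print(exc)   # 'Mars/Olympus' not found in pytz.all_timezones
```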
#### File: PIconnect/PIconnect/_operators.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import (
bytes,
dict,
int,
list,
object,
range,
str,
ascii,
chr,
hex,
input,
next,
oct,
open,
pow,
round,
super,
filter,
map,
zip,
)
try:
from __builtin__ import str as BuiltinStr
except ImportError:
BuiltinStr = str
# pragma pylint: enable=unused-import
from collections import namedtuple
import wrapt
def operate(operator, operand):
"""Create a decorator to apply an operator to the function and a given operand.
Operand can be either a constant or a function which accepts the same arguments
as the base function to which the decorator is applied. Operator must be a
function of two arguments.
"""
@wrapt.decorator
def operate_(func, instance, args, kwargs): # pylint: disable=unused-argument
"""Decorate function to apply an operator to the function and a given operand."""
if hasattr(operand, func.__name__):
func2 = getattr(operand, func.__name__)
return operator(func(*args, **kwargs), func2(*args, **kwargs))
return operator(func(*args, **kwargs), operand)
return operate_
def decorate(decorator, base, *args, **kwargs):
"""Return function decorated with the operate decorator.
Inline replacement for @*decorator(*args, **kwargs)*
"""
return decorator(*args, **kwargs)(base)
def add_operators(operators, members, newclassname, attributes):
"""Return a class decorator to add operators which patch each of a list of members.
Keyword arguments:
operators -- a list of tuples containing the function name to be added to the class,
a definition of the operator (as a function of two arguments), and a
docstring for the new function.
members -- a list of strings with the names of the class members that must be
decorated.
newclassname -- the name of the new class that will be returned by the patched
versions of *members*.
attributes -- a list of attributes that are extracted from the original object and
passed as arguments to <newclassname>.__init__.
"""
def build_operator_method(method, operator, docstring, cls):
"""Return a method definition for a numerical operator.
Keyword arguments:
method -- name of the operator method of a subclass of *cls*, will used for
<operator>.__name__ for clean output in the class documentation.
operator -- function of two arguments that is applied to the original function
result and a given operand.
docstring -- docstring for the new operator method.
cls -- class of which the new dynamic class will be subclassed.
"""
def patch_members(self, other):
"""Return new object of class *newclassname* with patched members.
Creates a new virtual class with the members in *members* patched to apply
the given *operator* to the original function definition.
"""
newmembers = {
member: decorate(
decorator=operate,
base=getattr(self, member),
operator=operator,
operand=other,
)
for member in members
}
newclass = type(BuiltinStr(newclassname), (cls,), newmembers)
return newclass(*[getattr(self, attr) for attr in attributes])
patch_members.__name__ = BuiltinStr(method)
patch_members.__doc__ = docstring
return patch_members
def add_numops_(cls):
"""Decorate a class to add a function for each operator in a list of operators."""
for operator in operators:
setattr(
cls,
operator.method,
build_operator_method(
method=operator.method,
operator=operator.operator,
docstring=operator.docstring,
cls=cls,
),
)
return cls
return add_numops_
Operator = namedtuple("Operator", ["method", "operator", "docstring"])
OPERATORS = [
Operator("__add__", lambda x, y: x + y, """Add value(s) to PIPoint"""),
Operator(
"__radd__", lambda x, y: y + x, """Add PIPoint to value(s) (reverse order)"""
),
Operator("__sub__", lambda x, y: x - y, """Subtract value(s) from PIPoint"""),
Operator(
"__rsub__",
lambda x, y: y - x,
"""Subtract PIPoint from value(s) (reverse order)""",
),
Operator("__mul__", lambda x, y: x * y, """Multiply PIPoint by value(s)"""),
Operator(
"__rmul__",
lambda x, y: y * x,
"""Multiply value(s) by PIPoint (reverse order)""",
),
# # Removed for now, Python 3 only
# Operator('__matmul__',
# lambda x, y: x @ y,
# """Matrix multiply"""),
# # Removed for now, Python 3 only
# Operator('__rmatmul__',
# lambda x, y: y @ x,
# """Matrix multiply (reverse order)"""),
Operator("__div__", lambda x, y: x / y, """Divide PIPoint by value(s)"""),
Operator(
"__rdiv__", lambda x, y: y / x, """Divide value(s) by PIPoint (reverse order)"""
),
Operator("__truediv__", lambda x, y: x / y, """Divide PIPoint by value(s)"""),
Operator(
"__rtruediv__",
lambda x, y: y / x,
"""Divide value(s) by PIPoint (reverse order)""",
),
Operator(
"__floordiv__", lambda x, y: x // y, """Floordivide PIPoint by value(s)"""
),
Operator(
"__rfloordiv__",
lambda x, y: y // x,
"""Floordivide value(s) by PIPoint (reverse order)""",
),
Operator("__mod__", lambda x, y: x % y, """Modulo PIPoint by value(s)"""),
Operator(
"__rmod__", lambda x, y: y % x, """Modulo value(s) by PIPoint (reverse order)"""
),
Operator(
"__divmod__",
divmod, # This is already a function of x and y
"""Return divmod of PIPoint by value(s).
divmod(a, b) returns a tuple of the floordivision of a and b, a // b, and the
modulo of a and b, a % b. For integers this is faster than when the operations
are performed separately.
""",
),
Operator(
"__rdivmod__",
lambda x, y: divmod(y, x),
"""Return divmod of value(s) by PIPoint (reverse order).
divmod(a, b) returns a tuple of the floordivision of a and b, a // b, and the
modulo of a and b, a % b. For integers this is faster than when the operations
are performed separately.
""",
),
]
```
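A minimal sketch of the operate decorator in isolation (the toy functions below are not part of the library): the operator is applied to the wrapped function's result and the operand, unless the operand itself exposes a method with the same name as the wrapped function.
```python
from PIconnect._operators import operate

def reading():
    return 10.0

plus_five = operate(lambda x, y: x + y, 5)(reading)
print(plus_five())    # 15.0

class OtherReading(object):
    def reading(self):            # same name as the wrapped function
        return 2.5

plus_other = operate(lambda x, y: x + y, OtherReading())(reading)
print(plus_other())   # 12.5 -- the operand's reading() is evaluated and added
```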
#### File: PIconnect/PIconnect/PIConsts.py
```python
from enum import IntEnum
try:
from enum import IntFlag
except ImportError:
IntFlag = IntEnum
class UpdateMode(IntEnum):
"""Indicates how to treat duplicate values in the archive, when supported by the Data Reference.
Detailed information is available at :afsdk:`AF.Data.AFUpdateOption <T_OSIsoft_AF_Data_AFUpdateOption.htm>`
"""
#: Add the value to the archive.
#: If any values exist at the same time, will overwrite one of them and set its Substituted flag.
REPLACE = 0
#: Add the value to the archive. Any existing values at the same time are not overwritten.
INSERT = 1
#: Add the value to the archive only if no value exists at the same time.
#: If a value already exists for that time, the passed value is ignored.
NO_REPLACE = 2
#: Replace an existing value in the archive at the specified time.
#: If no existing value is found, the passed value is ignored.
REPLACE_ONLY = 3
#: Add the value to the archive without compression.
#: If this value is written to the snapshot, the previous snapshot value will be written to the archive,
#: without regard to compression settings.
#: Note that if a subsequent snapshot value is written without the InsertNoCompression option,
#: the value added with the InsertNoCompression option is still subject to compression.
INSERT_NO_COMPRESSION = 5
#: Remove the value from the archive if a value exists at the passed time.
REMOVE = 6
class BufferMode(IntEnum):
"""Indicates buffering option in updating values, when supported by the Data Reference.
Detailed information is available at :afsdk:`AF.Data.AFBufferOption <T_OSIsoft_AF_Data_AFBufferOption.htm>`
"""
#: Updating data reference values without buffer.
DO_NOT_BUFFER = 0
#: Try updating data reference values with buffer.
#: If fails (e.g. data reference AFDataMethods does not support Buffering, or its Buffering system is not available),
#: then try updating directly without buffer.
BUFFER_IF_POSSIBLE = 1
# Updating data reference values with buffer.
BUFFER = 2
class AuthenticationMode(IntEnum):
"""AuthenticationMode indicates how a user authenticates to a PI Server
Detailed information is available at :afsdk:`AF.PI.PIAuthenticationMode <T_OSIsoft_AF_PI_PIAuthenticationMode.htm>`.
"""
#: Use Windows authentication when making a connection
WINDOWS_AUTHENTICATION = 0
#: Use the PI User authentication mode when making a connection
PI_USER_AUTHENTICATION = 1
class CalculationBasis(IntEnum):
"""CalculationBasis indicates how values should be weighted over a time range
Detailed information is available at :afsdk:`AF.Data.AFCalculationBasis <T_OSIsoft_AF_Data_AFCalculationBasis.htm>`.
"""
#: Each event is weighted according to the time over which it applies.
TIME_WEIGHTED = 0
#: Each event is weighted equally.
EVENT_WEIGHTED = 1
    #: Each event is time weighted, but interpolation is always done as if it is continuous data.
TIME_WEIGHTED_CONTINUOUS = 2
#: Each event is time weighted, but interpolation is always done as if it is discrete, stepped, data.
TIME_WEIGHTED_DISCRETE = 3
#: Each event is weighted equally, except data at the end of the interval is excluded.
EVENT_WEIGHTED_EXCLUDE_MOST_RECENT = 4
#: Each event is weighted equally, except data at the beginning of the interval is excluded.
EVENT_WEIGHTED_EXCLUDE_EARLIEST = 5
#: Each event is weighted equally, data at both boundaries of the interval are explicitly included.
EVENT_WEIGHTED_INCLUDE_BOTH_ENDS = 6
class ExpressionSampleType(IntEnum):
"""ExpressionSampleType indicates how expressions are evaluated over a time range.
Detailed information is available at :afsdk:`AF.Data.AFSampleType <T_OSIsoft_AF_Data_AFSampleType.htm>`.
"""
#: The expression is evaluated at each archive event.
EXPRESSION_RECORDED_VALUES = 0
#: The expression is evaluated at a sampling interval, passed as a separate argument.
INTERVAL = 1
class RetrievalMode(IntEnum):
"""RetrievalMode indicates which recorded value should be returned
Detailed information is available at :afsdk:`AF.Data.AFRetrievalMode <T_OSIsoft_AF_Data_AFRetrievalMode.htm>`.
"""
    #: Automatic detection
AUTO = 0
#: At the exact time if available, else the first before the requested time
AT_OR_BEFORE = 1
#: The first before the requested time
BEFORE = 6
#: At the exact time if available, else the first after the requested time
AT_OR_AFTER = 2
#: The first after the requested time
AFTER = 7
#: At the exact time if available, else return an error
EXACT = 4
class SummaryType(IntFlag):
"""SummaryType indicates which types of summary should be calculated.
Based on :class:`enum.IntEnum` in Python 3.5 or earlier. `SummaryType`'s can
be or'ed together. Python 3.6 or higher returns a new `IntFlag`, while in
    previous versions it will be cast down to `int`.
>>> SummaryType.MINIMUM | SummaryType.MAXIMUM # Returns minimum and maximum
<SummaryType.MAXIMUM|MINIMUM: 12> # On Python 3.6+
12 # On previous versions
Detailed information is available at :afsdk:`AF.Data.AFSummaryTypes <T_OSIsoft_AF_Data_AFSummaryTypes.htm>`.
"""
#: No summary data
NONE = 0
#: A total over the time span
TOTAL = 1
#: Average value over the time span
AVERAGE = 2
#: The minimum value in the time span
MINIMUM = 4
#: The maximum value in the time span
MAXIMUM = 8
#: The range of the values (max-min) in the time span
RANGE = 16
#: The sample standard deviation of the values over the time span
STD_DEV = 32
#: The population standard deviation of the values over the time span
POP_STD_DEV = 64
#: The sum of the event count (when the calculation is event weighted). The sum of the event time duration (when the calculation is time weighted.)
COUNT = 128
    #: The percentage of the data with a good value over the time range. Based on time for time weighted calculations, based on event count for event weighted calculations.
PERCENT_GOOD = 8192
#: The total over the time span, with the unit of measurement that's associated with the input (or no units if not defined for the input).
TOTAL_WITH_UOM = 16384
#: A convenience to retrieve all summary types
ALL = 24831
#: A convenience to retrieve all summary types for non-numeric data
ALL_FOR_NON_NUMERIC = 8320
class TimestampCalculation(IntEnum):
"""
TimestampCalculation defines the timestamp returned for a given summary calculation
Detailed information is available at :afsdk:`AF.Data.AFTimeStampCalculation <T_OSIsoft_AF_Data_AFTimestampCalculation.htm>`.
"""
#: The timestamp is the event time of the minimum or maximum for those summaries or the beginning of the interval otherwise.
AUTO = 0
#: The timestamp is always the beginning of the interval.
EARLIEST_TIME = 1
#: The timestamp is always the end of the interval.
MOST_RECENT_TIME = 2
class EventFrameSearchMode(IntEnum):
"""EventFrameSearchMode
EventFrameSearchMode defines the interpretation and direction from the start time
when searching for event frames.
Detailed information is available at https://techsupport.osisoft.com/Documentation/PI-AF-SDK/html/T_OSIsoft_AF_EventFrame_AFEventFrameSearchMode.htm,
including a graphical display of event frames that are returned for a given search
mode.
"""
#: Uninitialized
NONE = 0
#: Backward from start time, also known as starting before
BACKWARD_FROM_START_TIME = 1
STARTING_BEFORE = 1
#: Forward from start time, also known as starting after
FORWARD_FROM_START_TIME = 2
STARTING_AFTER = 2
#: Backward from end time, also known as ending before
BACKWARD_FROM_END_TIME = 3
ENDING_BEFORE = 3
#: Forward from end time, also known as ending after
FORWARD_FROM_END_TIME = 4
ENDING_AFTER = 4
#: Backward in progress, also known as starting before and in progress
BACKWARD_IN_PROGRESS = 5
STARTING_BEFORE_IN_PROGRESS = 5
#: Forward in progress, also known as starting after and in progress
FORWARD_IN_PROGRESS = 6
STARTING_AFTER_IN_PROGRESS = 6
def get_enumerated_value(enumeration, value, default):
if not value:
return default
return enumeration(value)
```
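A few worked values for the enumerations and the helper above:
```python
from PIconnect.PIConsts import RetrievalMode, SummaryType, get_enumerated_value

both = SummaryType.MINIMUM | SummaryType.MAXIMUM
print(int(both))    # 12

print(get_enumerated_value(RetrievalMode, 4, RetrievalMode.AUTO))     # RetrievalMode.EXACT
print(get_enumerated_value(RetrievalMode, None, RetrievalMode.AUTO))  # RetrievalMode.AUTO
# note: any falsy value (including 0) falls back to the default
```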
#### File: PIconnect/PIconnect/PI.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import (
ascii,
bytes,
chr,
dict,
filter,
hex,
input,
int,
list,
map,
next,
object,
oct,
open,
pow,
range,
round,
str,
super,
zip,
)
try:
from __builtin__ import str as BuiltinStr
except ImportError:
BuiltinStr = str
# pragma pylint: enable=unused-import, redefined-builtin
from warnings import warn
from PIconnect._operators import OPERATORS, add_operators
from PIconnect._utils import classproperty
from PIconnect.AFSDK import AF
from PIconnect.PIConsts import AuthenticationMode
from PIconnect.PIData import PISeriesContainer
from PIconnect.time import timestamp_to_index
_NOTHING = object()
class PIServer(object): # pylint: disable=useless-object-inheritance
"""PIServer is a connection to an OSIsoft PI Server
Args:
server (str, optional): Name of the server to connect to, defaults to None
        username (str, optional): username, must be specified together with password
        password (str, optional): password, must be specified together with username
        todo: document domain and authentication_mode
timeout (int, optional): the maximum seconds an operation can take
.. note::
If the specified `server` is unknown a warning is thrown and the connection
is redirected to the default server, as if no server was passed. The list
of known servers is available in the `PIServer.servers` dictionary.
"""
version = "0.2.2"
#: Dictionary of known servers, as reported by the SDK
_servers = _NOTHING
#: Default server, as reported by the SDK
_default_server = _NOTHING
def __init__(
self,
server=None,
username=None,
        password=None,  # default assumed; the original value was redacted in this dump
domain=None,
authentication_mode=AuthenticationMode.PI_USER_AUTHENTICATION,
timeout=None,
):
if server and server not in self.servers:
message = 'Server "{server}" not found, using the default server.'
warn(message=message.format(server=server), category=UserWarning)
if bool(username) != bool(password):
raise ValueError(
"When passing credentials both the username and password must be specified."
)
if domain and not username:
raise ValueError(
"A domain can only specified together with a username and password."
)
if username:
from System.Net import NetworkCredential
from System.Security import SecureString
secure_pass = SecureString()
for c in password:
secure_pass.AppendChar(c)
cred = [username, secure_pass] + ([domain] if domain else [])
self._credentials = (NetworkCredential(*cred), int(authentication_mode))
else:
self._credentials = None
self.connection = self.servers.get(server, self.default_server)
if timeout:
from System import TimeSpan
# System.TimeSpan(hours, minutes, seconds)
self.connection.ConnectionInfo.OperationTimeOut = TimeSpan(0, 0, timeout)
@classproperty
def servers(self):
if self._servers is _NOTHING:
i, failures = 0, 0
self._servers = {}
from System import Exception as dotNetException # type: ignore
for i, server in enumerate(AF.PI.PIServers(), start=1):
try:
self._servers[server.Name] = server
except Exception:
failures += 1
except dotNetException:
failures += 1
if failures:
warn(
"Could not load {} PI Server(s) out of {}".format(failures, i),
ResourceWarning,
)
return self._servers
@classproperty
def default_server(self):
if self._default_server is _NOTHING:
self._default_server = None
try:
self._default_server = AF.PI.PIServers().DefaultPIServer
except Exception:
warn("Could not load the default PI Server", ResourceWarning)
return self._default_server
def __enter__(self):
if self._credentials:
self.connection.Connect(*self._credentials)
else:
# Don't force to retry connecting if previous attempt failed
force_connection = False
self.connection.Connect(force_connection)
return self
def __exit__(self, *args):
self.connection.Disconnect()
def __repr__(self):
return "%s(\\\\%s)" % (self.__class__.__name__, self.server_name)
@property
def server_name(self):
"""server_name
Name of the connected server
"""
return self.connection.Name
def search(self, query, source=None):
"""search
Search PIPoints on the PIServer
Args:
query (str or [str]): String or list of strings with queries
source (str, optional): Defaults to None. Point source to limit the results
Returns:
list: A list of :class:`PIPoint` objects as a result of the query
.. todo::
Reject searches while not connected
"""
if isinstance(query, list):
return [y for x in query for y in self.search(x, source)]
# elif not isinstance(query, str):
# raise TypeError('Argument query must be either a string or a list of strings,' +
# 'got type ' + str(type(query)))
return [
PIPoint(pi_point)
for pi_point in AF.PI.PIPoint.FindPIPoints(
self.connection, BuiltinStr(query), source, None
)
]
@add_operators(
operators=OPERATORS,
members=["_current_value", "interpolated_values"],
newclassname="VirtualPIPoint",
attributes=["pi_point"],
)
class PIPoint(PISeriesContainer):
"""PIPoint
Reference to a PI Point to get data and corresponding metadata from the server.
Args:
pi_point (AF.PI.PIPoint): Reference to a PIPoint as returned by the SDK
"""
version = "0.3.0"
def __init__(self, pi_point):
super().__init__()
self.pi_point = pi_point
self.tag = pi_point.Name
self.__attributes_loaded = False
self.__raw_attributes = {}
def __repr__(self):
return "%s(%s, %s; Current Value: %s %s)" % (
self.__class__.__name__,
self.tag,
self.description,
self.current_value,
self.units_of_measurement,
)
@property
def last_update(self):
"""Return the time at which the last value for this PI Point was recorded."""
return timestamp_to_index(self.pi_point.CurrentValue().Timestamp.UtcTime)
@property
def raw_attributes(self):
"""Return a dictionary of the raw attributes of the PI Point."""
self.__load_attributes()
return self.__raw_attributes
@property
def units_of_measurement(self):
"""Return the units of measument in which values for this PI Point are reported."""
return self.raw_attributes["engunits"]
@property
def description(self):
"""Return the description of the PI Point.
.. todo::
Add setter to alter displayed description
"""
return self.raw_attributes["descriptor"]
@property
def created(self):
"""Return the creation datetime of a point."""
return timestamp_to_index(self.raw_attributes["creationdate"])
def __load_attributes(self):
"""Load the raw attributes of the PI Point from the server"""
if not self.__attributes_loaded:
self.pi_point.LoadAttributes([])
self.__attributes_loaded = True
self.__raw_attributes = {
att.Key: att.Value for att in self.pi_point.GetAttributes([])
}
@property
def name(self):
return self.tag
def _current_value(self):
"""Return the last recorded value for this PI Point (internal use only)."""
return self.pi_point.CurrentValue().Value
def _interpolated_value(self, time):
"""Return a single value for this PI Point"""
return self.pi_point.InterpolatedValue(time)
def _recorded_value(self, time, retrieval_mode):
"""Return a single value for this PI Point"""
return self.pi_point.RecordedValue(time, int(retrieval_mode))
def _update_value(self, value, update_mode, buffer_mode):
return self.pi_point.UpdateValue(value, update_mode, buffer_mode)
def _recorded_values(self, time_range, boundary_type, filter_expression):
include_filtered_values = False
return self.pi_point.RecordedValues(
time_range, boundary_type, filter_expression, include_filtered_values
)
def _interpolated_values(self, time_range, interval, filter_expression):
"""Internal function to actually query the pi point"""
include_filtered_values = False
return self.pi_point.InterpolatedValues(
time_range, interval, filter_expression, include_filtered_values
)
def _summary(self, time_range, summary_types, calculation_basis, time_type):
return self.pi_point.Summary(
time_range, summary_types, calculation_basis, time_type
)
def _summaries(
self, time_range, interval, summary_types, calculation_basis, time_type
):
return self.pi_point.Summaries(
time_range, interval, summary_types, calculation_basis, time_type
)
def _filtered_summaries(
self,
time_range,
interval,
filter_expression,
summary_types,
calculation_basis,
filter_evaluation,
filter_interval,
time_type,
):
return self.pi_point.FilteredSummaries(
time_range,
interval,
filter_expression,
summary_types,
calculation_basis,
filter_evaluation,
filter_interval,
time_type,
)
def _normalize_filter_expression(self, filter_expression):
return filter_expression.replace("%tag%", self.tag)
```
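A hedged usage sketch of the two classes above (the tag mask is an assumption; against the AF mock the search simply returns an empty list):
```python
from PIconnect.PI import PIServer

with PIServer() as server:                   # default server, no credentials
    print(server.server_name)
    for point in server.search("SINUSOID*"):
        print(point.name, point.description, point.units_of_measurement)
```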
#### File: PIconnect/PIconnect/time.py
```python
from datetime import datetime
import pytz
from PIconnect.AFSDK import AF
from PIconnect.config import PIConfig
def to_af_time_range(start_time, end_time):
"""Convert a combination of start and end time to a time range.
Both `start_time` and `end_time` can be either a :any:`datetime.datetime` object or a string.
`datetime` objects are first converted to a string, before being passed to
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`. It is also
possible to specify either end as a `datetime` object, and then specify the other
boundary as a relative string.
Args:
start_time (str | datetime): Start time of the time range.
end_time (str | datetime): End time of the time range.
Returns:
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`: Time range covered by the start and end time.
"""
if isinstance(start_time, datetime):
start_time = start_time.isoformat()
if isinstance(end_time, datetime):
end_time = end_time.isoformat()
return AF.Time.AFTimeRange(start_time, end_time)
def to_af_time(time):
"""Convert a time to a AFTime value.
Args:
time (str | datetime): Time to convert to AFTime.
Returns:
:afsdk:`AF.Time.AFTime <M_OSIsoft_AF_Time_AFTime__ctor_7.htm>`: Time range covered by the start and end time.
"""
if isinstance(time, datetime):
time = time.isoformat()
return AF.Time.AFTime(time)
def timestamp_to_index(timestamp):
"""Convert AFTime object to datetime in local timezone.
Args:
timestamp (`System.DateTime`): Timestamp in .NET format to convert to `datetime`.
Returns:
`datetime`: Datetime with the timezone info from :data:`PIConfig.DEFAULT_TIMEZONE <PIconnect.config.PIConfigContainer.DEFAULT_TIMEZONE>`.
"""
local_tz = pytz.timezone(PIConfig.DEFAULT_TIMEZONE)
return (
datetime(
timestamp.Year,
timestamp.Month,
timestamp.Day,
timestamp.Hour,
timestamp.Minute,
timestamp.Second,
timestamp.Millisecond * 1000,
)
.replace(tzinfo=pytz.utc)
.astimezone(local_tz)
)
``` |
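A short sketch of to_af_time_range above, mixing a datetime start with a relative end (the "*" shorthand for "now" is standard PI time syntax and an assumption here):
```python
from datetime import datetime
from PIconnect.time import to_af_time_range

time_range = to_af_time_range(datetime(2020, 1, 1), "*")
```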
{
"source": "jiobu1/CodeSignal_Arcade",
"score": 4
} |
#### File: Intro/Level 5/areEquallyStrong.py
```python
# Straightforward comparison: each of your arms must match one of your friend's arms.
# Note this returns True for cases like (10, 10) vs (10, 5), where the totals differ;
# the set-based version below handles that edge case correctly.
def areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight):
    if ((yourLeft == friendsLeft) or (yourLeft == friendsRight)) and ((yourRight == friendsLeft) or (yourRight == friendsRight)):
        return True
    return False
# Set-based comparison: {10, 15} == {15, 10}, so order does not matter.
# This redefinition shadows the version above.
def areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight):
    return {yourLeft, yourRight} == {friendsLeft, friendsRight}
``` |
{
"source": "jiobu1/CPI-Calculator",
"score": 4
} |
#### File: CPI-Calculator/cpi/calculator_class.py
```python
class Year:
"""
    This is a Year class.
    Params:
        This takes a year value and then looks up the consumer price index (CPI) for that year.
    Returns:
        The CPI for the selected year, stored in the `inflation_rate` attribute.
"""
def __init__(self, value):
self.year = value
self.inflation_rate = self.get_inflation_rate()
def get_inflation_rate(self):
cpi_dictionary = {1992: 140.3,
1993: 144.5,
1994: 148.2,
1995: 152.4,
1996: 156.9,
1997: 160.5,
1998: 163.0,
1999: 166.6,
2000: 172.2,
2001: 177.1,
2002: 179.9,
2003: 184.0,
2004: 188.9,
2005: 195.3,
2006: 201.6,
2007: 207.3,
2008: 215.303,
2009: 214.537,
2010: 218.056,
2011: 224.939,
2012: 229.594,
2013: 232.957,
2014: 236.736,
2015: 237.017,
2016: 240.007,
2017: 245.120,
2018: 251.107,
2019: 255.657
}
return cpi_dictionary[self.year]
class Conversion_Year(Year):
"""
    This is the conversion year class. It inherits from the Year class.
    Params:
        This uses the Year class's CPI lookup and takes a dollar amount.
    Returns:
        A string describing the dollar amount converted between years using the CPI values.
"""
def __init__(self, value, amount):
super().__init__(value)
self.amount = amount
def get_dollars(self, year_instance):
output = self.amount * \
(year_instance.inflation_rate / self.inflation_rate)
return (f' If you had ${self.amount} in {self.year}, you would have ${output:.2f} in {year_instance.year} dollars.')
amount = int(input("Please choose a number (e.g. 10540) "))
starting_year = int(input("[Start] Choose a year between 1992 and 2019 "))
conversion_year = int(input("[Conversion] Choose a year between 1992 and 2019 "))
conversion_year_instance = Year(conversion_year)
year_instance =Conversion_Year(starting_year, amount)
dollars = year_instance.get_dollars(conversion_year_instance)
print(dollars)
``` |
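A concrete check of the conversion arithmetic above (amount × target-year CPI ÷ start-year CPI), using the classes defined in this file (the interactive prompts at the bottom of the file would run on import, so treat this as a sketch):
```python
target = Year(2019)                        # conversion target
converter = Conversion_Year(2000, 100)     # $100 in year-2000 dollars
print(converter.get_dollars(target))
# If you had $100 in 2000, you would have $148.47 in 2019 dollars.
```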
{
"source": "jiobu1/CS_Build_Week_1",
"score": 3
} |
#### File: KNN/Workspace/work_place.py
```python
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
def euclidean_distance(point1, point2, length):
"""
"""
distance = 0
for x in range(length):
distance += (point1[x]-point2[x])**2
return np.sqrt(distance)
def get_distances(X_test, X_train):
"""
"""
length = X_test.shape[1]
# Initialize empty distance array
distances = []
for idx in range(len(X_test)):
distances.append([ X_test[idx], [] ])
# Loop through each row in x_train
for row in X_train:
#find the euclidean distance and append to distance list
dist = euclidean_distance(row, X_test[idx], length)
distances[idx][1].append(dist)
return distances
def get_labels(distances, y_train, k):
labels = []
for row in range(len(distances)):
# sort distances
distance = distances[row]
y_indices = np.argsort(distance[1])[:k] #sort distances and record up to k values
#find the classes that correspond with nearest neighbors
k_nearest_classes = [y_train[i%len(y_train)] for i in y_indices]
# make a predication based on the mode of the classes
y_pred = [stats.mode(k_nearest_classes)][0][0][0]
labels.append(y_pred)
return labels
X_train = np.array([[0,3,0],[2,0,0],[9,4,2],[1,7,4],[8,12,3]])
# X_train = pd.DataFrame(X_train)
X_test = np.array([[9,4,2], [0,3,0]])
# X_test = pd.DataFrame(X_test)
y_train = ['a','a','l', 'a','l']
y_train = np.array(y_train)
# # Load Data
# iris = load_iris()
# # Separate into target from features
# #Scale features
# X = scale(iris.data)
# y = iris.target # classes
# # Train/Test split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=42) # for reproducible results
distances = get_distances(X_test, X_train)
print("Distances: ", distances)
labels = get_labels(distances, y_train, 3)
print("Labels: ", labels)
``` |
{
"source": "jiobu1/labspt15-cityspire-g-ds",
"score": 3
} |
#### File: app/tests/test_db.py
```python
import os
import requests
import json
from jsonschema import validate
from jsonschema import Draft6Validator
#All Cities Test
def test_all_cities_check_status_code_equals_200():
response = requests.get("http://127.0.0.1:8000/all_cities")
assert response.status_code == 200
all_cities_schema = {
"$schema": "https://json-schema.org/schema#",
"city": "string",
"state": "string"
}
def test_all_cities_validates_json_response_schema():
response = requests.get("http://127.0.0.1:8000/all_cities")
# Validate response headers and body contents, e.g. status code.
assert response.status_code == 200
# Validate response content type header
assert response.headers["Content-Type"] == "application/json"
resp_body = response.json()
    # validate() raises an exception if the response body does not
    # match the schema.
validate(instance=resp_body, schema=all_cities_schema)
```
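The schema above only lists the expected keys; assuming the endpoint returns a list of {city, state} objects, a stricter Draft-6 schema might look like this sketch:
```python
all_cities_strict_schema = {
    "$schema": "https://json-schema.org/schema#",
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "city": {"type": "string"},
            "state": {"type": "string"},
        },
        "required": ["city", "state"],
    },
}
```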
#### File: app/tests/test_functions.py
```python
import unittest
def parse_grades(grades_string):
GRADES = ['PK', 'K', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', 'Ungraded']
# Remove & for grades list
grades_string = grades_string.replace(' &', ',')
    # Accumulator for the individual grades parsed from the string
grades = []
# split strings based on ','
string_list = grades_string.split(',')
# look for sections of list with '-'
dash = "-"
for i in range(len(string_list)):
clean_string = string_list[i].strip()
if dash in clean_string:
# split using '-', loop and add to grades variable
start_grade, end_grade = clean_string.split(dash)
grades += GRADES[GRADES.index(start_grade) : GRADES.index(end_grade)+ 1]
else:
# add string to grades
grades.append(clean_string)
return grades
unique_grades_combination = ['9-12', '6-8', 'K-4', '5-8', '4-5', 'K-5', '4-6', '7-12', 'K-6',
'4-8', 'K-8', '1-6', 'PK-3', '6-12', 'K-3', 'PK-K', 'PK', 'PK-8',
'PK-6', '4-12', 'PK-6 & Ungraded', '1-8', 'K', 'PK-5', 'PK-12',
'7-11', '3-6', 'K-12', '3-8', '2-10', 'K-1, 5-8', 'PK-4',
'Ungraded', '1-12', '2-5', '3-5', '10-12', 'PK-1 & Ungraded',
'K-11', 'K-2', 'K-1', '9-10', 'K-7', '1-5', 'PK-1', 'PK-K, 2',
'PK-2', '7-8', 'PK-11', '9', 'K-9', '2-11', '2-12', '2-9', '8-12',
'K-10', 'PK & Ungraded', '7-9', '6', '5-6', '2, 5-6, 8-9, 11-12',
'11-12', '3-12', 'K-1, 3-4, 6-7, 9, 11', '1-11', '5-12', '6-10',
'11', '3-7', '7-10', 'PK-10', 'PK-12 & Ungraded', 'PK-9', '6-9',
'4-9', '9-11', '6-7', '5-12 & Ungraded', '8-11', '2-8',
'3, 5, 7-11', 'PK, 8', 'PK-7', '6, 9-12', '1-3', 'K-3, 5-10, 12',
'PK-8 & Ungraded', '5', '12', 'K-8, 10-12', '1-9', '1-5, 7-8',
'9, 12', '5-7', '8', '3-10', '1-12 & Ungraded', '5, 7-8, 10-12',
'PK, 1-4', '1-4', '3, 6-7, 10, 12', '3-4, 6, 9-12', '1-2', '8-9',
'4', 'PK-3, 5-7', 'K-1, 4, 6, 8', '5-10', '6-12 & Ungraded',
'6-11', 'K, 5-12', 'K-6, 8', 'PK-4 & Ungraded', 'PK-1, 3-5',
'PK-1, 3', '10-12 & Ungraded', 'K-3, 5-9, 11-12', 'PK-K, 2-4, 6-8',
'PK, 3-4', '1-7', '1-10', 'PK, 2', '2, 4, 7, 9-10, 12', 'PK, 1',
'1-3, 5, 8', '1-3, 5-6, 8', 'K, 3, 5-12', '4-11', 'K-4, 7-8',
'7-8, 10-12', 'PK, 1, 3-12', '7, 9-12', 'PK-2, 6, 8', '2-6',
'PK-8, 10-11', '2-3', '3-4', 'PK, 3-5', '3-9', 'PK-4, 6',
'PK, 1-6', 'PK-4, 8', 'PK-K, 9-12', '3-11', 'PK-3 & Ungraded',
'PK-3, 5-6', '2-7', '2-12 & Ungraded', 'PK-K, 2-7', '8-10', '2-4',
'PK-4, 6-8', 'PK-K, 3-5, 7', '7, 11', 'K-5, 7, 9-10, 12',
'PK-K, 2-4, 6 & Ungraded', '2', 'PK-7, 11', 'K, 2, 4-9, 12',
'PK-1, 4, 6', '9-12 & Ungraded', '4-6, 8', '2-4, 6, 9-12', '4-7',
'PK-3, 5-8', '1-2, 4-8', 'PK, 12', '10', '3-5, 9-10, 12', '5-11',
'2-3, 5, 10-12', '3-10, 12', 'K-12 & Ungraded', '4-10',
'PK-1, 3, 5', 'PK-1, 5, 7, 9', 'PK-K, 2-5', 'K, 2-7, 9',
'K-7, 9-12', '2, 6-7', '6-10, 12', 'K-5, 7-9, 11',
'2, 4, 6, 8, 10, 12', 'K-2, 4-12', 'PK, 7-12', '1, 3-4, 6-12',
'K-6, 8-11', '2-5, 7-12', '1, 4-10, 12', 'PK-2, 4, 6, 8',
'K-10, 12', '9 & Ungraded', 'PK-5 & Ungraded', 'PK-3, 5',
'1, 3-5, 7', 'PK, 2, 4-7, 9-10, 12', 'K, 2, 5-12', 'PK-4, 6-12',
'3', '4, 10', '2, 5-6, 8-12', 'K-6, 8-12', 'PK, 3-6, 8, 10, 12',
'K-3, 6-8', '3-9, 11-12', 'PK-5, 7-12', 'K, 2-4, 7-12', '1-4, 6-8',
'PK-3, 5-12', '1-3, 5-9', '1, 3-8', '9-11 & Ungraded', '5-9',
'K, 4-5, 7-8', 'PK, 1-5', '3-12 & Ungraded', 'K-8 & Ungraded',
'2-3, 5-8', '1-6, 9-12', 'PK-8, 10-12', 'PK-K, 4-8', 'K-1, 7-12',
'K-4, 6-7, 9-12', '9-10, 12', 'K-5, 7', 'K, 2-8', '1, 4, 7, 10-11',
'K, 2-3, 5-6', 'K-5, 7, 11-12', '7', 'PK-4, 7-12', 'K-9, 11-12',
'K-4, 6-8, 10', '1, 3-5, 7-12', 'K-2, 5-7', 'K, 3, 6-8, 10, 12',
'K, 3, 7-8, 12', 'PK, 9-12', 'K-2, 4-10, 12', '12 & Ungraded',
'PK-6, 8-10', 'PK-1, 3-8', '1, 3', 'K-8, 12', '3-4, 6-9, 12',
'5, 7-8', 'PK-K, 2-3', 'K-1, 3-12', '1, 5-9, 12', '5, 7-8, 10-11',
'PK-5, 7-8', '4, 7, 10-12', 'PK-1, 3-4', 'PK-2, 4-6', 'K-8, 10',
'10-11', '6, 9', 'PK-2, 4', 'K-4, 6', 'K-2, 6-7, 12', 'PK-6, 8',
'3-4, 7, 10-12', '1, 3-7', 'K-5, 8', '2, 4-5, 7, 9-11',
'1-3, 5, 7-8', '1-2, 9-12', 'K-1, 3-5, 7-8', 'K, 2-8, 10',
'PK, 1-3, 6', '7-8, 10-11', '7, 10-12', '3, 5, 8-9, 11-12',
'1-3, 5', 'K-5, 7-12', '1-7, 9-10, 12', '2, 4-8', 'K-3, 6-12',
'PK, 5-8', '6-8, 10', 'PK-5, 7', 'PK, 1, 5', 'PK, 1-4, 6',
'3, 5, 10, 12', 'PK-6, 8-12', '4-5, 7-12', 'PK-1, 6-7, 9-11',
'2-7, 9-11', 'K-1, 6, 8, 11', '7, 9, 11-12', '1-3, 5-10',
'K-5, 7-9', '1', 'K-5, 7-11', '7-12 & Ungraded', '1-6 & Ungraded',
'3, 12', 'PK-8, 11', 'K-5, 7-9, 12', 'K-1, 4, 7-9, 11',
'1-2, 4-12', 'PK-9, 11-12']
class TestParseGrades(unittest.TestCase):
def test_join_string_grades_success(self):
actual = parse_grades('2, 4, 6, 8, 10, 12')
expected = ['2','4','6','8','10','12']
self.assertEqual(actual, expected)
def test_parse_grades_success(self):
actual = parse_grades('K-4')
expected = ['K', '1', '2', '3', '4']
self.assertEqual(actual, expected)
def test_multiple_separators(self):
actual = parse_grades('2-4, 6, 9-12')
expected = ['2', '3', '4', '6', '9', '10', '11', '12']
self.assertEqual(actual, expected)
def test_grades_list_with_ampersand(self):
actual = parse_grades('6-12 & Ungraded')
expected = ['6', '7', '8', '9', '10', '11', '12', 'Ungraded']
self.assertEqual(actual, expected)
def test_small_lists(self):
actual = parse_grades('PK-K')
expected = ['PK', 'K']
self.assertEqual(actual, expected)
def test_multiple_separators2(self):
actual = parse_grades('PK-K, 2-4, 6 & Ungraded')
expected = ['PK', 'K', '2', '3', '4', '6', 'Ungraded']
self.assertEqual(actual, expected)
def test_multiple_separators3(self):
actual = parse_grades('5-10')
expected = ['5', '6', '7', '8', '9', '10']
self.assertEqual(actual, expected)
def test_multiple_separators4(self):
actual = parse_grades('K, 5-12')
expected = ['K', '5', '6', '7', '8', '9', '10', '11', '12']
self.assertEqual(actual, expected)
def test_multiple_separators5(self):
actual = parse_grades( 'PK-3, 5-7')
expected = ['PK', 'K', '1', '2', '3', '5', '6', '7']
self.assertEqual(actual, expected)
def test_complete_dataset(self):
# create a loop that goes thru dataset and invoke parse_grades with each element
# used this to create dictionary of grade combo and parsed_grades
separated_grades_list = []
for i in unique_grades_combination:
separated_grades_list.append(parse_grades(i))
return separated_grades_list
``` |
{
"source": "jiocloudDSS/python-sdk",
"score": 2
} |
#### File: jcsclient/dss_api/dss_bucket_ops.py
```python
from dss_op import *
from dss_auth import *
from jcsclient import utils
import os
import sys
import time
import hmac
import json
import base64
import requests
import exceptions
from email.utils import formatdate
import xml.sax
class BucketOp(DSSOp):
def __init__(self):
DSSOp.__init__(self)
def parse_args(self, options):
self.dss_op_path = '/' + self.bucket_name
def validate_args(self):
pass
def execute(self):
resp = self.make_request()
return resp
def process_result(self, result, response_json=None):
if result is not None:
status = result.status_code
if status != 200 and status != 204:
response_json = {"headers": result.headers, "status_code": result.status_code,
"status_message": result.reason, "error_message": result.text}
else:
response_json = {"headers": result.headers, "status_code": result.status_code,
"status_message": result.reason, "content": result.content}
else:
response_json = {"status_code": "500", "error_message": "Connection not established"}
return response_json
class ListBucketsOp(BucketOp):
def __init__(self):
BucketOp.__init__(self)
self.dss_op_path = '/'
self.http_method = 'GET'
def parse_args(self, options):
pass
class CreateBucketOp(BucketOp):
def __init__(self, name):
BucketOp.__init__(self)
self.http_method = 'PUT'
self.bucket_name = name
class DeleteBucketOp(BucketOp):
def __init__(self, name):
BucketOp.__init__(self)
self.bucket_name = name
self.http_method = 'DELETE'
class HeadBucketOp(BucketOp):
def __init__(self, name):
DSSOp.__init__(self)
self.http_method = 'HEAD'
self.bucket_name = name
class ListObjectsOp(BucketOp):
def __init__(self, name):
DSSOp.__init__(self)
self.http_method = 'GET'
self.bucket_name = name
def parse_args(self, args_dict):
params = {}
is_query_params_set = False
self.dss_query_str = ''
self.dss_op_path = '/' + self.bucket_name
if (args_dict is None):
return
if(args_dict['prefix'] is not None):
self.dss_query_str = 'prefix=' + args_dict['prefix']
is_query_params_set = True
if(args_dict['marker'] is not None):
if(not is_query_params_set):
self.dss_query_str += 'marker=' + args_dict['marker']
is_query_params_set = True
else:
self.dss_query_str += '&marker=' + args_dict['marker']
if(args_dict['max-keys'] is not None):
if(not is_query_params_set):
self.dss_query_str += 'max-keys=' + args_dict['max-keys']
is_query_params_set = True
else:
self.dss_query_str += '&max-keys=' + args_dict['max-keys']
if(args_dict['delimiter'] is not None):
if(not is_query_params_set):
self.dss_query_str += 'delimiter=' + args_dict['delimiter']
is_query_params_set = True
else:
self.dss_query_str += '&delimiter=' + args_dict['delimiter']
if(self.dss_query_str == ''):
self.dss_query_str = None
class ListMPUploadsOp(BucketOp):
def __init__(self, buckname):
BucketOp.__init__(self)
self.http_method = 'GET'
self.dss_query_str_for_signature = 'uploads'
self.dss_query_str = 'uploads'
self.bucket_name = buckname
```
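A hedged sketch of how ListObjectsOp assembles its query string, assuming the DSS client configuration has been initialized; note that parse_args indexes every key, so all four must be present (None is allowed):
```python
op = ListObjectsOp("my-bucket")
op.parse_args({"prefix": "logs/", "marker": None,
               "max-keys": "100", "delimiter": "/"})
print(op.dss_query_str)    # prefix=logs/&max-keys=100&delimiter=/
```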
#### File: src/jcsclient/dss_connection.py
```python
from jcsclient.dss_api.dss_bucket_ops import *
from jcsclient.dss_api.dss_object_ops import *
from jcsclient.config import *
class DSSConnection(object):
"""DSS main class, each cli command is processed here
Object is created from inside the dss Controller
"""
def __init__(self, url, access_key, secret_key, secure, debug):
setup_config_handler(url, access_key, secret_key, secure, debug)
def operate(self, op, options=None):
op.parse_args(options)
op.validate_args()
result = op.execute()
processed_result = op.process_result(result)
return processed_result
def main(self):
pass
def create_bucket(self, bucketName):
op = CreateBucketOp(bucketName)
result = self.operate(op)
return result
def delete_bucket(self, bucketName):
op = DeleteBucketOp(bucketName)
result = self.operate(op)
return result
def head_bucket(self, bucketName):
op = HeadBucketOp(bucketName)
result = self.operate(op)
return result
def list_buckets(self):
op = ListBucketsOp()
result = self.operate(op)
return result
def delete_object(self, buckName, objName):
op = DeleteObjectOp(buckName, objName)
result = self.operate(op, options=None)
return result
def get_object(self, buckName, objName, path=None):
op = GetObjectOp(buckName, objName, path)
result = self.operate(op, options=None)
return result
def list_objects(self, buckName, options=None):
op = ListObjectsOp(buckName)
result = self.operate(op, options)
return result
def head_object(self, buckName, objName):
op = HeadObjectOp(buckName, objName)
result = self.operate(op, options=None)
return result
def put_object(self, buckName, objName, path, encryption=False):
op = PutObjectOp(buckName, objName, path, encryption)
result = self.operate(op)
return result
def init_multipart_upload(self, bucketname, keyname):
op = InitMPUploadOp(bucketname, keyname)
result = self.operate(op)
return result
def upload_multipart_parts(self, buckname, keyname, args_dict, data_path, size):
op = UploadPartOp(buckname=buckname, objname=keyname)
op.parse_args(args_dict)
res = op.execute(fp=data_path, size=size)
result = op.process_result(res)
return result
def complete_multipart_upload(self, bucketname, keyname, args_dict):
op = CompleteMPUploadOp(bucketname, keyname)
result = self.operate(op, args_dict)
return result
def cancel_multipart_upload(self, bucketname, keyname, uploadId):
op = CancelMPUploadOp(bucketname, keyname, uploadId)
result = self.operate(op, options=None)
return result
def list_multipart_parts(self, bucketname, keyname, upload_id, outfile=None):
op = ListPartsOp(bucketname, keyname, upload_id, outfile)
op.parse_args(args_dict=None)
op.validate_args()
result = op.execute()
processed_result = op.process_result(result, outfile)
return processed_result
def list_multipart_uploads(self, buckname):
op = ListMPUploadsOp(buckname)
result = self.operate(op)
return result
def copy_object(self, buckname, keyname, sourceName):
op = CopyObjectOp(buckname=buckname, objname=keyname, copysource=sourceName)
result = self.operate(op, options=None)
return result
def get_presigned_url(self, buckname, objname, expiry):
op = GetPresignedURLOp(buckname=buckname, objname=objname, expiry=expiry)
result = self.operate(op, options=None)
return result
def rename_object(self, buckname, objname, newName):
op = RenameObjectOp(buckname=buckname, objname=objname, newName=newName)
result = self.operate(op, options=None)
return result
``` |
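A hedged usage sketch of the DSSConnection facade above (the endpoint and credentials are placeholders):
```python
conn = DSSConnection(url="https://dss.example.com",
                     access_key="ACCESS", secret_key="SECRET",
                     secure=True, debug=False)

print(conn.create_bucket("my-bucket"))
print(conn.put_object("my-bucket", "hello.txt", "/tmp/hello.txt"))
print(conn.list_objects("my-bucket"))
print(conn.delete_object("my-bucket", "hello.txt"))
print(conn.delete_bucket("my-bucket"))
```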
{
"source": "JioCloudVPC/compute-ec2-api-vagrant",
"score": 2
} |
#### File: ec2api/api/clients.py
```python
from novaclient import client as novaclient
from novaclient import exceptions as nova_exception
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from ec2api import context as ec2_context
from ec2api.i18n import _, _LW
logger = logging.getLogger(__name__)
ec2_opts = [
cfg.StrOpt('cinder_service_type',
default='volumev2',
help='Service type of Cinder Volume API, registered in Keystone catalog.'),
cfg.StrOpt('volume_api_version',
default='2',
help='volume api version'),
]
service_catalog_opts = [
cfg.StrOpt('compute',
default='http://localhost:8774/v2/TENANT_ID',
help='Endpoint for Compute Service '),
cfg.StrOpt('image',
default='http://localhost:9292',
help='Endpoint for Image Service '),
cfg.StrOpt('volumev2',
default='http://localhost:8776/v2/TENANT_ID',
help='Endpoint for Volume Service '),
cfg.StrOpt('network',
default='http://localhost:9696',
help='Endpoint for Network Service '),
]
SERVICE_CATALOG_GROUP = 'service_catalog'
CONF = cfg.CONF
CONF.register_opts(service_catalog_opts, SERVICE_CATALOG_GROUP)
CONF.register_opts(ec2_opts)
try:
from neutronclient.v2_0 import client as neutronclient
except ImportError:
neutronclient = None
logger.info(_('neutronclient not available'))
try:
from cinderclient import client as cinderclient
except ImportError:
cinderclient = None
logger.info(_('cinderclient not available'))
try:
from glanceclient import client as glanceclient
except ImportError:
glanceclient = None
logger.info(_('glanceclient not available'))
# Nova API's 2.3 microversion provides additional EC2 compliant instance
# properties
_novaclient_vertion = '2.3'
_nova_service_type = 'compute'
def nova(context):
args = {
'auth_url': CONF.keystone_url,
'auth_token': context.auth_token,
        # NOTE(ft): These parameters are not used for authentication,
# but are required by novaclient < v2.18 which may be installed in
# Icehouse deployment
'username': None,
'api_key': None,
'project_id': None,
'insecure': CONF.ssl_insecure,
'cacert': CONF.ssl_ca_file
}
global _novaclient_vertion, _nova_service_type
bypass_url = _url_for(context, service_type=_nova_service_type)
if not bypass_url and _nova_service_type == 'computev21':
# NOTE(ft): partial compatibility with pre Kilo OS releases:
# if computev21 isn't provided by Nova, use compute instead
logger.warning(_LW("Nova server doesn't support v2.1, use v2 instead. "
"A lot of useful EC2 compliant instance properties "
"will be unavailable."))
_nova_service_type = 'compute'
return nova(context)
try:
return novaclient.Client(_novaclient_vertion, bypass_url=bypass_url,
**args)
except nova_exception.UnsupportedVersion:
if _novaclient_vertion == '2':
raise
# NOTE(ft): partial compatibility with Nova client w/o microversion
# support
logger.warning(_LW("Nova client doesn't support v2.3, use v2 instead. "
"A lot of useful EC2 compliant instance properties "
"will be unavailable."))
_novaclient_vertion = '2'
return nova(context)
def neutron(context):
if neutronclient is None:
return None
args = {
'auth_url': CONF.keystone_url,
'service_type': 'network',
'token': context.auth_token,
'endpoint_url': _url_for(context, service_type='network'),
'insecure': CONF.ssl_insecure,
'cacert': CONF.ssl_ca_file
}
return neutronclient.Client(**args)
def glance(context):
if glanceclient is None:
return None
args = {
'auth_url': CONF.keystone_url,
'service_type': 'image',
'token': context.auth_token,
'insecure': CONF.ssl_insecure,
'cacert': CONF.ssl_ca_file
}
return glanceclient.Client(
"1", endpoint=_url_for(context, service_type='image'), **args)
def cinder(context):
if cinderclient is None:
return nova(context, CONF.cinder_service_type)
args = {
'service_type': CONF.cinder_service_type,
'auth_url': CONF.keystone_url,
'username': None,
'api_key': None,
'insecure': CONF.ssl_insecure,
'cacert': CONF.ssl_ca_file
}
_cinder = cinderclient.Client(CONF.volume_api_version, **args)
management_url = _url_for(context, service_type=CONF.cinder_service_type)
_cinder.client.auth_token = context.auth_token
_cinder.client.management_url = management_url
return _cinder
def keystone(context):
keystone_client_class = ec2_context.get_keystone_client_class()
return keystone_client_class(
token=context.auth_token,
project_id=context.project_id,
tenant_id=context.project_id,
auth_url=CONF.keystone_url,
insecure=CONF.ssl_insecure,
cacert=CONF.ssl_ca_file)
def nova_cert(context):
_cert_api = _rpcapi_CertAPI(context)
return _cert_api
def _url_for(context, **kwargs):
service_type = kwargs['service_type']
if service_type == 'compute':
url = CONF.service_catalog.compute
url = url.replace('TENANT_ID', context.project_id)
elif service_type == 'image':
url = CONF.service_catalog.image
elif service_type == 'network':
url = CONF.service_catalog.network
elif service_type == 'volumev2':
url = CONF.service_catalog.volumev2
url = url.replace('TENANT_ID', context.project_id)
else:
logger.warning(_LW("Unknown service type in JCS Layer."))
return url
class _rpcapi_CertAPI(object):
'''Client side of the cert rpc API.'''
def __init__(self, context):
super(_rpcapi_CertAPI, self).__init__()
target = messaging.Target(topic=CONF.cert_topic, version='2.0')
self.client = _rpc_get_client(target)
self.context = context
def decrypt_text(self, text):
cctxt = self.client.prepare()
return cctxt.call(self.context, 'decrypt_text',
project_id=self.context.project_id,
text=text)
_rpc_TRANSPORT = None
def _rpc_init(conf):
global _rpc_TRANSPORT
# NOTE(ft): set control_exchange parameter to use Nova cert topic
messaging.set_transport_defaults('nova')
_rpc_TRANSPORT = messaging.get_transport(conf)
def _rpc_get_client(target):
if not _rpc_TRANSPORT:
_rpc_init(CONF)
assert _rpc_TRANSPORT is not None
serializer = _rpc_RequestContextSerializer()
return messaging.RPCClient(_rpc_TRANSPORT,
target,
serializer=serializer)
class _rpc_RequestContextSerializer(messaging.NoOpSerializer):
def serialize_context(self, context):
return context.to_dict()
def deserialize_context(self, context):
return ec2_context.RequestContext.from_dict(context)
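# NOTE: the serializer converts the custom RequestContext to a plain dict on
# the way out and rebuilds it on the way in, so the context can travel over
# the oslo.messaging bus with the cert RPC calls.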
```
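The `nova()` factory above copes with older deployments by downgrading a module-level default and calling itself again. The snippet below is a minimal, self-contained sketch of that downgrade-and-retry pattern; `FakeClient`, `UnsupportedVersion`, `_client_version` and `get_client` are illustrative stand-ins invented for this example, not real novaclient objects.
```python
class UnsupportedVersion(Exception):
    """Stand-in for novaclient.exceptions.UnsupportedVersion."""


class FakeClient(object):
    """Pretends that only API version '2' is available."""

    def __init__(self, version):
        if version != '2':
            raise UnsupportedVersion(version)
        self.version = version


_client_version = '2.3'


def get_client():
    """Build a client, downgrading to the base API version at most once."""
    global _client_version
    try:
        return FakeClient(_client_version)
    except UnsupportedVersion:
        if _client_version == '2':
            raise
        _client_version = '2'  # downgrade first, then retry exactly once
        return get_client()


print(get_client().version)  # -> '2', after falling back from '2.3'
```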
#### File: ec2api/api/cloud.py
```python
import collections
import itertools
from oslo_config import cfg
from oslo_log import log as logging
from ec2api.api import address
from ec2api.api import availability_zone
from ec2api.api import dhcp_options
from ec2api.api import image
from ec2api.api import instance
from ec2api.api import internet_gateway
from ec2api.api import key_pair
from ec2api.api import network_interface
from ec2api.api import route_table
from ec2api.api import security_group
from ec2api.api import snapshot
from ec2api.api import subnet
from ec2api.api import tag
from ec2api.api import volume
from ec2api.api import vpc
from ec2api import exception
from metricgenerator import publish as metricPublish
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
metriclog_opt = cfg.StrOpt('monitoring_config',
default='/tmp/config.cfg',
help='Config for details on emitting metrics')
CONF.register_opt(metriclog_opt)
metric_publisher = metricPublish.Publish("jcs-api", CONF.monitoring_config)
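# NOTE: metric_publisher.ReportLatency wraps every public API method below so
# per-operation latency is emitted; the publisher reads its settings from the
# file pointed to by the monitoring_config option.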
def module_and_param_types(module, *args, **kwargs):
"""Decorator to check types and call function."""
param_types = args
def wrapped(func):
def func_wrapped(*args, **kwargs):
impl_func = getattr(module, func.func_name)
context = args[1]
params = collections.OrderedDict(itertools.izip(
func.func_code.co_varnames[2:], param_types))
param_num = 0
mandatory_params_num = (func.func_code.co_argcount - 2 -
len(func.func_defaults or []))
for param_name, param_type in params.items():
param_value = kwargs.get(param_name)
if param_value is not None:
validator = module.Validator(param_name, func.func_name,
params)
validation_func = getattr(validator, param_type)
validation_func(param_value)
param_num += 1
elif param_num < mandatory_params_num:
raise exception.MissingParameter(param=param_name)
return impl_func(context, **kwargs)
return func_wrapped
return wrapped
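# NOTE: the CloudController/LegacyCloudController methods below contain only a
# docstring; this decorator discards the method body, validates every supplied
# keyword argument against its declared parameter type via module.Validator,
# treats parameters without defaults as mandatory, and forwards the call to the
# function of the same name in the referenced api module.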
class CloudController(object):
"""Cloud Controller
Provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
pass
def __str__(self):
return 'CloudController'
@metric_publisher.ReportLatency("run_instances-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(instance, 'jmi_id', 'int',
'str255', 'sg_ids',
'str', 'dummy',
'subnet_id', 'ip')
def run_instances(self, context, image_id, instance_count=1,
key_name=None, security_group_id=None,
instance_type_id=None, block_device_mapping=None,
subnet_id=None, private_ip_address=None):
"""Launches the specified number of instances using an AMI.
Args:
context (RequestContext): The request context.
image_id (str): The ID of the AMI.
instance_count (int): The number of instances to launch.
key_name (str): The name of the key pair.
security_group_id (list of str): One or more security group IDs.
instance_type_id (str): The instance type.
block_device_mapping (list of dict): Dict can contain:
device_name (str): The device name exposed to the instance
(for example, /dev/sdh or xvdh).
snapshot_id (str): The ID of the snapshot.
volume_size (str): The size of the volume, in GiBs.
delete_on_termination (bool): Indicates whether to delete
the volume on instance termination.
subnet_id (str): The ID of the subnet to launch the instance into.
private_ip_address (str): The primary IP address.
You must specify a value from the IP address range
of the subnet.
Returns:
The instance set that was created.
If you don't specify a security group when launching an instance, EC2
uses the default security group.
"""
@metric_publisher.ReportLatency("describe_instance_types-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(instance, 'dummy')
def describe_instance_types(self, context, instance_type_id=None):
"""Describes one or more of instance types.
Args:
            context (RequestContext): The request context.
            instance_type_id (list of str): One or more instance type IDs.
        Returns:
            A list of instance types.
"""
@metric_publisher.ReportLatency("describe_instances-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(instance, 'i_ids', 'filter',
'int', 'str')
def describe_instances(self, context, instance_id=None, filter=None,
max_results=None, next_token=None):
"""Describes one or more of your instances.
Args:
context (RequestContext): The request context.
instance_id (list of str): One or more instance IDs.
filter (list of filter dict): You can specify filters so that the
response includes information for only certain instances.
max_results (int): The maximum number of items to return.
Not used now.
next_token (str): The token for the next set of items to return.
Not used now.
Returns:
A list of reservations.
If you specify one or more instance IDs, Amazon EC2 returns information
for those instances. If you do not specify instance IDs, you receive
information for all relevant instances. If you specify an invalid
instance ID, you receive an error. If you specify an instance that you
don't own, we don't include it in the results.
"""
@metric_publisher.ReportLatency("terminate_instances-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(instance, 'i_ids')
def terminate_instances(self, context, instance_id):
"""Shuts down one or more instances.
Args:
context (RequestContext): The request context.
instance_id (list of str): One or more instance IDs.
Returns:
A list of instance state changes.
This operation is idempotent; if you terminate an instance more than
once, each call succeeds.
"""
@metric_publisher.ReportLatency("reboot_instances-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(instance, 'i_ids')
def reboot_instances(self, context, instance_id):
"""Requests a reboot of one or more instances.
Args:
context (RequestContext): The request context.
instance_id (list of str): One or more instance IDs.
Returns:
true if the request succeeds.
"""
@metric_publisher.ReportLatency("stop_instances-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(instance, 'i_ids', 'bool')
def stop_instances(self, context, instance_id, force=False):
"""Stops one or more instances.
Args:
context (RequestContext): The request context.
instance_id (list of str): One or more instance IDs.
force (boolean): Forces the instances to stop. The instances do not
have an opportunity to flush file system caches or file system
metadata.
Not used now. Equivalent value is True.
Returns:
true if the request succeeds.
"""
@metric_publisher.ReportLatency("start_instances-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(instance, 'i_ids')
def start_instances(self, context, instance_id):
"""Starts one or more instances.
Args:
context (RequestContext): The request context.
instance_id (list of str): One or more instance IDs.
Returns:
true if the request succeeds.
"""
@metric_publisher.ReportLatency("describe_key_pairs-compute", listOfKeys = '{"1":["request_id"]}')
    # AK - Comment out filters and key names from describe_key_pairs for now
#@module_and_param_types(key_pair, 'str255s', 'filter')
#def describe_key_pairs(self, context, key_name=None, filter=None):
@module_and_param_types(key_pair)
def describe_key_pairs(self, context):
"""Describes one or more of your key pairs.
Args:
context (RequestContext): The request context.
            key_name (list of str): One or more keypair names.
            filter (list of filter dict): One or more filters.
Returns:
Specified keypairs.
"""
@metric_publisher.ReportLatency("create_key_pair-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(key_pair, 'str255')
def create_key_pair(self, context, key_name):
"""Creates a 2048-bit RSA key pair with the specified name.
Args:
context (RequestContext): The request context.
key_name (str): A unique name for the key pair.
Returns:
Created keypair.
"""
@metric_publisher.ReportLatency("delete_key_pair-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(key_pair, 'str255')
def delete_key_pair(self, context, key_name):
"""Deletes the specified key pair.
Args:
context (RequestContext): The request context.
key_name (str): Name of the keypair.
Returns:
Returns true if the request succeeds.
"""
@metric_publisher.ReportLatency("import_key_pair-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(key_pair, 'str255', 'str')
def import_key_pair(self, context, key_name, public_key_material):
"""Imports the public key from an existing RSA key pair.
Args:
context (RequestContext): The request context.
key_name (str): A unique name for the key pair.
public_key_material (str): The public key. You must base64 encode
the public key material before sending it.
Returns:
Imported keypair.
"""
@metric_publisher.ReportLatency("describe_account_attributes-compute",\
listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(availability_zone, 'strs')
def describe_account_attributes(self, context, attribute_name=None):
"""Describes attributes of your EC2 account.
Args:
context (RequestContext): The request context.
attribute_name (list of str): One or more account attribute names.
The following are the supported account attributes:
supported-platforms | default-vpc | max-instances |
vpc-max-security-groups-per-interface (unsupported now) |
max-elastic-ips (unsupported now) |
vpc-max-elastic-ips (unsupported now)
Returns:
Information about one or more account attributes.
"""
@metric_publisher.ReportLatency("show_delete_on_termination_flag-compute",\
listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(volume, 'dummy')
def show_delete_on_termination_flag(self, context, volume_id):
"""Get the delete on termination flag on the volume id
to the given value.
Args:
volume_id: The ID of the volume to update
Returns:
The dict showing the volume id, attached instance id, and
delete on termination flag
"""
@metric_publisher.ReportLatency("update_delete_on_termination_flag-compute",\
listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(volume, 'dummy', 'bool')
def update_delete_on_termination_flag(self, context, volume_id,
delete_on_termination):
"""Update the delete on termination flag on the volume id
to the given value.
Args:
volume_id: The ID of the volume to update
delete_on_termination: Bool to set the flag
Returns:
The dict showing the volume id, attached instance id, and
delete on termination flag
"""
@metric_publisher.ReportLatency("describe_images-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(image, 'jmijrijki_ids')
def describe_images(self, context, image_id=None):
"""Describes one or more of the images available to you.
Args:
context (RequestContext): The request context.
image_id (list of str): One or more image IDs.
Returns:
A list of images.
"""
@metric_publisher.ReportLatency("attach_volume-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(volume, 'dummy', 'i_id', 'str')
def attach_volume(self, context, volume_id, instance_id, device):
"""Attaches an EBS volume to a running or stopped instance.
Args:
context (RequestContext): The request context.
volume_id (str): The ID of the volume.
instance_id (str): The ID of the instance.
            device (str): The device name to expose to the instance.
Returns:
Information about the attachment.
The instance and volume must be in the same Availability Zone.
"""
@metric_publisher.ReportLatency("detach_volume-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(volume, 'dummy', 'i_id', 'str')
def detach_volume(self, context, volume_id, instance_id=None, device=None,
force=None):
"""Detaches an EBS volume from an instance.
Args:
context (RequestContext): The request context.
volume_id (str): The ID of the volume.
instance_id (str): The ID of the instance.
Not used now.
device (str): The device name.
Not used now.
force (boolean): Forces detachment.
Not used now.
Returns:
Information about the detachment.
"""
@metric_publisher.ReportLatency("get_password_data-compute", listOfKeys = '{"1":["request_id"]}')
@module_and_param_types(instance, 'i_id')
def get_password_data(self, context, instance_id):
"""Retrieves the encrypted administrator password for Windows instance.
Args:
context (RequestContext): The request context.
instance_id (str): ID of the Windows instance
Returns:
The password of the instance, timestamp and instance id.
The password is encrypted using the key pair that you specified when
you launched the instance.
"""
"""
LegacyCloudController contains the APIs that are not supported in the first
phase of launch. They can be enabled using the legacy_support flag.
"""
class LegacyCloudController(CloudController):
"""VPC Cloud Controller
Adds full VPC functionality which requires Neutron to work.
"""
@module_and_param_types(vpc, 'vpc_cidr', 'str255')
def create_vpc(self, context, cidr_block, instance_tenancy='default'):
"""Creates a VPC with the specified CIDR block.
Args:
context (RequestContext): The request context.
cidr_block (str): The CIDR block for the VPC
(for example, 10.0.0.0/16).
instance_tenancy (str): The supported tenancy options for
instances launched into the VPC.
Valid values: default | dedicated
Not used now.
Returns:
Information about the VPC.
The smallest VPC you can create uses a /28 netmask (16 IP addresses),
and the largest uses a /16 netmask.
"""
@module_and_param_types(vpc, 'vpc_id')
def delete_vpc(self, context, vpc_id):
"""Deletes the specified VPC.
Args:
context (RequestContext): The request context.
vpc_id (str): The ID of the VPC.
Returns:
true if the request succeeds.
You must detach or delete all gateways and resources that are
associated with the VPC before you can delete it. For example, you must
terminate all instances running in the VPC, delete all security groups
associated with the VPC (except the default one), delete all route
tables associated with the VPC (except the default one), and so on.
"""
return vpc.delete_vpc(context, vpc_id)
@module_and_param_types(vpc, 'vpc_ids', 'filter')
def describe_vpcs(self, context, vpc_id=None, filter=None):
"""Describes one or more of your VPCs.
Args:
context (RequestContext): The request context.
vpc_id (list of str): One or more VPC IDs.
Default: Describes all your VPCs.
filter (list of filter dict): You can specify filters so that
the response includes information for only certain VPCs.
Returns:
A list of VPCs.
"""
return vpc.describe_vpcs(context, vpc_id, filter)
@module_and_param_types(internet_gateway)
def create_internet_gateway(self, context):
"""Creates an Internet gateway for use with a VPC.
Args:
context (RequestContext): The request context.
Returns:
Information about the Internet gateway.
"""
@module_and_param_types(internet_gateway, 'igw_id', 'vpc_id')
def attach_internet_gateway(self, context, internet_gateway_id, vpc_id):
"""Attaches an Internet gateway to a VPC.
Args:
context (RequestContext): The request context.
internet_gateway_id (str): The ID of the Internet gateway.
vpc_id (str): The ID of the VPC.
Returns:
Returns true if the request succeeds.
Attaches an Internet gateway to a VPC, enabling connectivity between
the Internet and the VPC.
"""
@module_and_param_types(internet_gateway, 'igw_id', 'vpc_id')
def detach_internet_gateway(self, context, internet_gateway_id, vpc_id):
"""Detaches an Internet gateway from a VPC.
Args:
context (RequestContext): The request context.
internet_gateway_id (str): The ID of the Internet gateway.
vpc_id (str): The ID of the VPC.
Returns:
Returns true if the request succeeds.
Detaches an Internet gateway from a VPC, disabling connectivity between
the Internet and the VPC. The VPC must not contain any running
instances with Elastic IP addresses.
"""
@module_and_param_types(internet_gateway, 'igw_id')
def delete_internet_gateway(self, context, internet_gateway_id):
"""Deletes the specified Internet gateway.
Args:
context (RequestContext): The request context.
internet_gateway_id (str): The ID of the Internet gateway.
Returns:
Returns true if the request succeeds.
You must detach the Internet gateway from the VPC before you can
delete it.
"""
@module_and_param_types(internet_gateway, 'igw_ids',
'filter')
def describe_internet_gateways(self, context, internet_gateway_id=None,
filter=None):
"""Describes one or more of your Internet gateways.
Args:
context (RequestContext): The request context.
internet_gateway_id (list of str): One or more Internet gateway
IDs.
Default: Describes all your Internet gateways.
filter (list of filter dict): You can specify filters so that the
response includes information for only certain Internet
gateways.
Returns:
A list of Internet gateways.
"""
@module_and_param_types(subnet, 'vpc_id', 'subnet_cidr',
'str255')
def create_subnet(self, context, vpc_id, cidr_block,
availability_zone=None):
"""Creates a subnet in an existing VPC.
Args:
context (RequestContext): The request context.
vpc_id (str): The ID of the VPC.
cidr_block (str): The CIDR block for the subnet.
For example, 10.0.0.0/24.
availability_zone (str): The Availability Zone for the subnet.
If None or empty EC2 selects one for you.
Returns:
Information about the subnet.
The subnet's CIDR block can be the same as the VPC's CIDR block,
or a subset of the VPC's CIDR block. If you create more than one subnet
in a VPC, the subnets' CIDR blocks must not overlap. The smallest
subnet you can create uses a /28 netmask (16 IP addresses),
and the largest uses a /16 netmask.
EC2 reserves both the first four and the last IP address
in each subnet's CIDR block. They're not available for use.
If you add more than one subnet to a VPC, they're set up
in a star topology with a logical router in the middle.
"""
@module_and_param_types(subnet, 'subnet_id')
def delete_subnet(self, context, subnet_id):
"""Deletes the specified subnet.
Args:
context (RequestContext): The request context.
subnet_id (str): The ID of the subnet.
Returns:
true if the request succeeds.
You must terminate all running instances in the subnet before
you can delete the subnet.
"""
@module_and_param_types(subnet, 'subnet_ids', 'filter')
def describe_subnets(self, context, subnet_id=None, filter=None):
"""Describes one or more of your subnets.
Args:
context (RequestContext): The request context.
subnet_id (list of str): One or more subnet IDs.
Default: Describes all your subnets.
filter (list of filter dict): You can specify filters so that
the response includes information for only certain subnets.
Returns:
A list of subnets.
"""
@module_and_param_types(route_table, 'vpc_id')
def create_route_table(self, context, vpc_id):
"""Creates a route table for the specified VPC.
Args:
context (RequestContext): The request context.
vpc_id (str): The ID of the VPC.
Returns:
Information about the route table.
After you create a route table, you can add routes and associate the
table with a subnet.
"""
@module_and_param_types(route_table, 'rtb_id', 'cidr',
'igw_id', 'i_id',
'eni_id',
'dummy')
def create_route(self, context, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None,
network_interface_id=None,
vpc_peering_connection_id=None):
"""Creates a route in a route table within a VPC.
Args:
context (RequestContext): The request context.
route_table_id (str): The ID of the route table for the route.
destination_cidr_block (str): The CIDR address block used for the
destination match. Routing decisions are based on the most
specific match.
gateway_id (str): The ID of an Internet gateway or virtual private
gateway attached to your VPC.
instance_id (str): The ID of a NAT instance in your VPC.
The operation fails if you specify an instance ID unless
exactly one network interface is attached.
network_interface_id (str): The ID of a network interface.
vpc_peering_connection_id (str): The ID of a VPC peering
connection.
Returns:
            true if the request succeeds.
The route's target can be an Internet gateway or virtual private
gateway attached to the VPC, a VPC peering connection, or a NAT
instance in the VPC.
"""
@module_and_param_types(route_table, 'rtb_id', 'cidr',
'igw_id', 'i_id',
'eni_id',
'dummy')
def replace_route(self, context, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None,
network_interface_id=None,
vpc_peering_connection_id=None):
"""Replaces an existing route within a route table in a VPC.
Args:
context (RequestContext): The request context.
route_table_id (str): The ID of the route table for the route.
destination_cidr_block (str): The CIDR address block used for the
destination match. Routing decisions are based on the most
specific match.
gateway_id (str): The ID of an Internet gateway or virtual private
gateway attached to your VPC.
instance_id (str): The ID of a NAT instance in your VPC.
The operation fails if you specify an instance ID unless
exactly one network interface is attached.
network_interface_id (str): The ID of a network interface.
vpc_peering_connection_id (str): The ID of a VPC peering
connection.
Returns:
            true if the request succeeds.
"""
@module_and_param_types(route_table, 'rtb_id', 'cidr')
def delete_route(self, context, route_table_id, destination_cidr_block):
"""Deletes the specified route from the specified route table.
Args:
context (RequestContext): The request context.
route_table_id (str): The ID of the route table.
destination_cidr_block (str): The CIDR range for the route.
The value you specify must match the CIDR for the route
exactly.
Returns:
            true if the request succeeds.
"""
@module_and_param_types(route_table, 'rtb_id', 'subnet_id')
def associate_route_table(self, context, route_table_id, subnet_id):
"""Associates a subnet with a route table.
Args:
context (RequestContext): The request context.
route_table_id (str): The ID of the route table.
subnet_id (str): The ID of the subnet.
Returns:
The route table association ID
The subnet and route table must be in the same VPC. This association
causes traffic originating from the subnet to be routed according to
the routes in the route table. The action returns an association ID,
which you need in order to disassociate the route table from the subnet
later. A route table can be associated with multiple subnets.
"""
@module_and_param_types(route_table, 'rtbassoc_id',
'rtb_id')
def replace_route_table_association(self, context, association_id,
route_table_id):
"""Changes the route table associated with a given subnet in a VPC.
Args:
context (RequestContext): The request context.
association_id (str): The association ID.
route_table_id (str): The ID of the new route table to associate
with the subnet.
Returns:
The ID of the new association.
After the operation completes, the subnet uses the routes in the new
route table it's associated with.
You can also use this action to change which table is the main route
table in the VPC.
"""
@module_and_param_types(route_table, 'rtbassoc_id')
def disassociate_route_table(self, context, association_id):
"""Disassociates a subnet from a route table.
Args:
context (RequestContext): The request context.
association_id (str): The association ID.
Returns:
            true if the request succeeds.
After you perform this action, the subnet no longer uses the routes in
the route table. Instead, it uses the routes in the VPC's main route
table.
"""
@module_and_param_types(route_table, 'rtb_id')
def delete_route_table(self, context, route_table_id):
"""Deletes the specified route table.
Args:
context (RequestContext): The request context.
route_table_id (str): The ID of the route table.
You must disassociate the route table from any subnets before you can
delete it. You can't delete the main route table.
Returns:
            true if the request succeeds.
"""
@module_and_param_types(route_table, 'rtb_ids', 'filter')
def describe_route_tables(self, context, route_table_id=None, filter=None):
"""Describes one or more of your route tables.
Args:
context (RequestContext): The request context.
route_table_id (str): One or more route table IDs.
filter (list of filter dict): You can specify filters so that the
response includes information for only certain tables.
Returns:
A list of route tables
"""
@module_and_param_types(dhcp_options, 'key_value_dict_list')
def create_dhcp_options(self, context, dhcp_configuration):
"""Creates a set of DHCP options for your VPC.
Args:
context (RequestContext): The request context.
dhcp_configuration (list of dict): Dict can contain
'key' (str) and
'value' (str) for each option.
You can specify the following options:
- domain-name-servers: up to 4 DNS servers,
IPs are in value separated by commas
- domain-name: domain name
- ntp-servers: up to 4 NTP servers
- netbios-name-servers: up to 4 NetBIOS name servers
- netbios-node-type: the NetBIOS node type (1,2,4 or 8)
Returns:
A set of DHCP options
"""
@module_and_param_types(dhcp_options, 'dopt_ids',
'filter')
def describe_dhcp_options(self, context, dhcp_options_id=None,
filter=None):
"""Describes the specified DHCP options.
Args:
context (RequestContext): The request context.
dhcp_options_id (list of str): DHCP options id.
filter (list of filter dict): You can specify filters so that
the response includes information for only certain DHCP
options.
Returns:
DHCP options.
"""
@module_and_param_types(dhcp_options, 'dopt_id')
def delete_dhcp_options(self, context, dhcp_options_id):
"""Deletes the specified set of DHCP options
Args:
context (RequestContext): The request context.
dhcp_options_id (str): DHCP options id
Returns:
true if the request succeeds
You must disassociate the set of DHCP options before you can delete it.
You can disassociate the set of DHCP options by associating either a
new set of options or the default set of options with the VPC.
"""
@module_and_param_types(dhcp_options, 'dopt_id_or_default', 'vpc_id')
def associate_dhcp_options(self, context, dhcp_options_id, vpc_id):
"""Associates a set of DHCP options with the specified VPC.
Args:
context (RequestContext): The request context.
dhcp_options_id (str): DHCP options id or "default" to associate no
DHCP options with the VPC
Returns:
true if the request succeeds
"""
@module_and_param_types(network_interface, 'subnet_id',
'ip',
'dummy',
'int',
'str',
'sg_ids')
def create_network_interface(self, context, subnet_id,
private_ip_address=None,
private_ip_addresses=None,
secondary_private_ip_address_count=None,
description=None,
security_group_id=None):
"""Creates a network interface in the specified subnet.
Args:
subnet_id (str): The ID of the subnet to associate with the
network interface.
private_ip_address (str): The primary private IP address of the
network interface. If you don't specify an IP address,
EC2 selects one for you from the subnet range.
private_ip_addresses (list of dict): Dict can contain
'private_ip_address' (str) and
'primary' (boolean) for each address.
The private IP addresses of the specified network interface and
indicators which one is primary. Only one private IP address
can be designated as primary.
You can't specify this parameter when
private_ip_addresses['primary'] is true if you specify
private_ip_address.
secondary_private_ip_address_count (integer): The number of
secondary private IP addresses to assign to a network
interface. EC2 selects these IP addresses within the subnet
range. For a single network interface, you can't specify this
option and specify more than one private IP address using
private_ip_address and/or private_ip_addresses.
description (str): A description for the network interface.
security_group_id (list of str): The list of security group IDs
for the network interface.
Returns:
The network interface that was created.
"""
@module_and_param_types(network_interface, 'eni_id')
def delete_network_interface(self, context, network_interface_id):
"""Deletes the specified network interface.
Args:
context (RequestContext): The request context.
network_interface_id (str): The ID of the network interface.
Returns:
true if the request succeeds.
You must detach the network interface before you can delete it.
"""
@module_and_param_types(network_interface, 'eni_ids',
'filter')
def describe_network_interfaces(self, context, network_interface_id=None,
filter=None):
"""Describes one or more of your network interfaces.
Args:
context (RequestContext): The request context.
network_interface_id (list of str): One or more network interface
IDs.
Default: Describes all your network interfaces.
filter (list of filter dict): You can specify filters so that
the response includes information for only certain interfaces.
Returns:
A list of network interfaces.
"""
return network_interface.describe_network_interfaces(
context, network_interface_id, filter)
@module_and_param_types(network_interface, 'eni_id',
'str')
def describe_network_interface_attribute(self, context,
network_interface_id,
attribute):
"""Describes the specified attribute of the specified network interface.
Args:
context (RequestContext): The request context.
network_interface_id: Network interface ID.
attribute: The attribute of the network interface.
Returns:
Specified attribute.
You can specify only one attribute at a time.
"""
return network_interface.describe_network_interface_attribute(
context, network_interface_id, attribute)
@module_and_param_types(network_interface, 'eni_id',
'str',
'bool',
'sg_ids',
'dummy')
def modify_network_interface_attribute(self, context,
network_interface_id,
description=None,
source_dest_check=None,
security_group_id=None,
attachment=None):
"""Modifies the specified attribute of the specified network interface.
Args:
context (RequestContext): The request context.
network_interface_id: Network interface ID.
description: New description.
source_dest_check: Indicates whether source/destination checking is
enabled. A value of true means checking is enabled, and false
means checking is disabled.
This value must be false for a NAT instance to perform NAT.
            security_group_id (list of str): List of security groups to attach.
attachment: Information about the interface attachment. If
modifying the 'delete on termination' attribute, you must
specify the ID of the interface attachment.
Returns:
true if the request succeeds.
You can specify only one attribute at a time.
"""
@module_and_param_types(network_interface, 'eni_id',
'str')
def reset_network_interface_attribute(self, context,
network_interface_id,
attribute):
"""Resets the specified attribute of the specified network interface.
Args:
context (RequestContext): The request context.
network_interface_id: Network interface ID.
attribute: The attribute to reset. Valid values "SourceDestCheck"
(reset to True)
Returns:
true if the request succeeds.
"""
@module_and_param_types(network_interface, 'eni_id',
'i_id', 'int')
def attach_network_interface(self, context, network_interface_id,
instance_id, device_index):
"""Attach a network interface to an instance.
Args:
context (RequestContext): The request context.
network_interface_id (str): The ID of the network interface.
instance_id (str): The ID of the instance.
device_index (int): The index of the device for the network
interface attachment.
Returns:
Attachment Id
"""
@module_and_param_types(network_interface, 'eni_attach_id',
'bool')
def detach_network_interface(self, context, attachment_id,
force=None):
"""Detach a network interface from an instance.
Args:
context (RequestContext): The request context.
attachment_id (str): The ID of the attachment.
force (boolean): Specifies whether to force a detachment
Returns:
true if the request succeeds.
"""
@module_and_param_types(network_interface, 'eni_id',
'ips',
'int',
'bool')
def assign_private_ip_addresses(self, context, network_interface_id,
private_ip_address=None,
secondary_private_ip_address_count=None,
allow_reassignment=False):
"""Assigns secondary private IP addresses to the network interface.
Args:
network_interface_id (str): The ID of the network interface.
private_ip_address (list of str): List of IP addresses to assign.
secondary_private_ip_address_count (integer): The number of
secondary private IP addresses to assign. EC2 selects these
IP addresses within the subnet range.
Returns:
true if the request succeeds.
"""
@module_and_param_types(network_interface, 'eni_id',
'ips')
def unassign_private_ip_addresses(self, context, network_interface_id,
private_ip_address=None):
"""Unassigns secondary IP addresses from the network interface.
Args:
network_interface_id (str): The ID of the network interface.
private_ip_address (list of str): List of secondary private IP
addresses to unassign.
Returns:
true if the request succeeds.
"""
@module_and_param_types(address, 'str255')
def allocate_address(self, context, domain=None):
"""Acquires an Elastic IP address.
Args:
context (RequestContext): The request context.
domain (str): Set to vpc to allocate the address for use with
instances in a VPC.
Default: The address is for use in EC2-Classic.
Valid values: vpc
Returns:
The Elastic IP address information.
An Elastic IP address is for use either in the EC2-Classic platform
or in a VPC.
"""
@module_and_param_types(address, 'ip', 'i_id',
'eipalloc_id', 'eni_id',
'ip', 'bool')
def associate_address(self, context, public_ip=None, instance_id=None,
allocation_id=None, network_interface_id=None,
private_ip_address=None, allow_reassociation=False):
"""Associates an Elastic IP with an instance or a network interface.
Args:
context (RequestContext): The request context.
public_ip (str): The Elastic IP address.
Required for Elastic IP addresses for use with instances
in EC2-Classic.
instance_id (str): The ID of the instance.
The operation fails if you specify an instance ID unless
exactly one network interface is attached.
Required for EC2-Classic.
allocation_id (str): The allocation ID.
Required for EC2-VPC.
network_interface_id (str): The ID of the network interface.
private_ip_address (str): The primary or secondary private IP.
allow_reassociation (boolean): Allows an Elastic IP address that is
already associated to be re-associated.
Otherwise, the operation fails.
Returns:
true if the request succeeds.
[EC2-VPC] The ID that represents the association of the Elastic IP.
For a VPC, you can specify either instance_id or network_interface_id,
but not both.
If the instance has more than one network interface, you must specify
a network interface ID.
If no private IP address is specified, the Elastic IP address
is associated with the primary private IP address.
[EC2-Classic, default VPC] If the Elastic IP address is already
associated with a different instance, it is disassociated from that
instance and associated with the specified instance.
This is an idempotent operation.
"""
@module_and_param_types(address, 'ip',
'eipassoc_id')
def disassociate_address(self, context, public_ip=None,
association_id=None):
"""Disassociates an Elastic IP address.
Args:
context (RequestContext): The request context.
public_ip (str): The Elastic IP address.
Required for EC2-Classic.
            association_id (str): The association ID.
Required for EC2-VPC
Returns:
true if the request succeeds.
Disassociates an Elastic IP address from the instance or network
interface it's associated with.
This is an idempotent action.
"""
@module_and_param_types(address, 'ip',
'eipalloc_id')
def release_address(self, context, public_ip=None, allocation_id=None):
"""Releases the specified Elastic IP address.
Args:
context (RequestContext): The request context.
public_ip (str): The Elastic IP address.
allocation_id (str): The allocation ID.
Returns:
            true if the request succeeds.
If you attempt to release an Elastic IP address that you already
released, you'll get an AuthFailure error if the address is already
allocated to another AWS account.
[EC2-Classic, default VPC] Releasing an Elastic IP address
automatically disassociates it from any instance that it's associated
with.
[Nondefault VPC] You must use DisassociateAddress to disassociate the
Elastic IP address before you try to release it.
"""
@module_and_param_types(address, 'ips', 'eipalloc_ids',
'filter')
def describe_addresses(self, context, public_ip=None, allocation_id=None,
filter=None):
"""Describes one or more of your Elastic IP addresses.
Args:
context (RequestContext): The request context.
public_ip (list of str): One or more Elastic IP addresses.
allocation_id (list of str): One or more allocation IDs.
filter (list of filter dict): You can specify filters so that the
response includes information for only certain Elastic IP
addresses.
Returns:
A list of Elastic IP addresses.
"""
@module_and_param_types(security_group, 'security_group_strs',
'sg_ids', 'filter')
def describe_security_groups(self, context, group_name=None,
group_id=None, filter=None):
"""Describes one or more of your security groups.
Args:
context (RequestContext): The request context.
group_name (list of str): One or more security group names.
group_id (list of str): One or more security group IDs.
filter (list of filter dict): You can specify filters so that the
response includes information for only certain security groups.
Returns:
A list of security groups.
"""
@module_and_param_types(security_group, 'security_group_str',
'security_group_str', 'vpc_id')
def create_security_group(self, context, group_name,
group_description, vpc_id=None):
"""Creates a security group.
Args:
context (RequestContext): The request context.
group_name (str): The name of the security group.
group_description (str): A description for the security group.
vpc_id (str): [EC2-VPC] The ID of the VPC.
Returns:
            true if the request succeeds.
The ID of the security group.
You can have a security group for use in EC2-Classic with the same name
as a security group for use in a VPC. However, you can't have two
security groups for use in EC2-Classic with the same name or two
security groups for use in a VPC with the same name.
You have a default security group for use in EC2-Classic and a default
security group for use in your VPC. If you don't specify a security
group when you launch an instance, the instance is launched into the
appropriate default security group. A default security group includes
a default rule that grants instances unrestricted network access to
each other.
group_name and group_description restrictions:
up to 255 characters in length,
EC2-Classic: ASCII characters,
EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*
"""
@module_and_param_types(security_group, 'security_group_str', 'sg_id')
def delete_security_group(self, context, group_name=None, group_id=None):
"""Deletes a security group.
Args:
context (RequestContext): The request context.
group_name (str): The name of the security group.
group_id (str): The ID of the security group.
Returns:
            true if the request succeeds.
[EC2-Classic, default VPC] You can specify either GroupName or GroupId
If you attempt to delete a security group that is associated with an
instance, or is referenced by another security group, the operation
fails.
"""
@module_and_param_types(security_group, 'sg_id',
'security_group_str', 'dummy')
def authorize_security_group_ingress(self, context, group_id=None,
group_name=None, ip_permissions=None):
"""Adds one or more ingress rules to a security group.
Args:
context (RequestContext): The request context.
group_id (str): The ID of the security group.
group_name (str): [EC2-Classic, default VPC] The name of the
security group.
ip_permissions (list of dicts): Dict can contain:
ip_protocol (str): The IP protocol name or number.
Use -1 to specify all.
For EC2-Classic, security groups can have rules only for
TCP, UDP, and ICMP.
from_port (str): The start of port range for the TCP and UDP
protocols, or an ICMP type number. For the ICMP type
number, you can use -1 to specify all ICMP types.
to_port (str): The end of port range for the TCP and UDP
protocols, or an ICMP code number. For the ICMP code
number, you can use -1 to specify all ICMP codes for the
ICMP type.
groups (list of dicts): Dict can contain:
group_id (str): The ID of the source security group. You
can't specify a source security group and a CIDR IP
address range.
user_id (str): [EC2-Classic] The ID of the AWS account that
owns the source security group, if it's not the current
AWS account.
cidr_ip (str): The CIDR IP address range. You can't specify
this parameter when specifying a source security group.
Returns:
            true if the request succeeds.
"""
@module_and_param_types(security_group, 'sg_id',
'security_group_str', 'dummy')
def revoke_security_group_ingress(self, context, group_id=None,
group_name=None, ip_permissions=None):
"""Removes one or more ingress rules from a security group.
Args:
context (RequestContext): The request context.
group_id (str): The ID of the security group.
group_name (str): [EC2-Classic, default VPC] The name of the
security group.
ip_permissions (list of dicts): See
authorize_security_group_ingress
Returns:
            true if the request succeeds.
The values that you specify in the revoke request (for example, ports)
must match the existing rule's values for the rule to be removed.
"""
@module_and_param_types(security_group, 'sg_id', 'dummy')
def authorize_security_group_egress(self, context, group_id,
ip_permissions=None):
"""Adds one or more egress rules to a security group for use with a VPC.
Args:
context (RequestContext): The request context.
group_id (str): The ID of the security group.
ip_permissions (list of dicts): See
authorize_security_group_ingress
Returns:
            true if the request succeeds.
This action doesn't apply to security groups for use in EC2-Classic.
"""
@module_and_param_types(security_group, 'sg_id', 'dummy')
def revoke_security_group_egress(self, context, group_id,
ip_permissions=None):
"""Removes one or more egress rules from a security group for EC2-VPC.
Args:
context (RequestContext): The request context.
group_id (str): The ID of the security group.
ip_permissions (list of dicts): See
authorize_security_group_ingress
Returns:
            true if the request succeeds.
The values that you specify in the revoke request (for example, ports)
must match the existing rule's values for the rule to be revoked.
This action doesn't apply to security groups for use in EC2-Classic.
"""
@module_and_param_types(instance, 'i_id', 'str255')
def describe_instance_attribute(self, context, instance_id, attribute):
"""Describes the specified attribute of the specified instance.
Args:
context (RequestContext): The request context.
instance_id (str): The ID of the instance.
attribute (str): The instance attribute.
Valid values: blockDeviceMapping | disableApiTermination |
ebsOptimized (unsupported now) | groupSet |
instanceInitiatedShutdownBehavior | instanceType | kernel |
productCodes (unsupported now) | ramdisk | rootDeviceName |
sourceDestCheck (unsupported now) |
sriovNetSupport (unsupported now) | userData
Returns:
Specified attribute.
"""
@module_and_param_types(availability_zone, 'strs', 'filter')
def describe_availability_zones(self, context, zone_name=None,
filter=None):
"""Describes one or more of the available Availability Zones.
Args:
context (RequestContext): The request context.
            zone_name (list of str): One or more zone names.
            filter (list of filter dict): One or more filters.
Returns:
Specified availability zones.
"""
@module_and_param_types(availability_zone, 'strs', 'filter')
def describe_regions(self, context, region_name=None, filter=None):
"""Describes one or more regions that are currently available to you.
Args:
context (RequestContext): The request context.
            region_name (list of str): One or more region names.
            filter (list of filter dict): One or more filters.
Returns:
Specified regions.
"""
@module_and_param_types(instance, 'i_id_or_ids')
def get_console_output(self, context, instance_id):
"""Gets the console output for the specified instance.
Args:
context (RequestContext): The request context.
instance_id (str): ID of the instance
Returns:
The console output of the instance, timestamp and instance id.
"""
@module_and_param_types(image, 'i_id', 'str', 'str',
'bool', 'dummy')
def create_image(self, context, instance_id, name=None, description=None,
no_reboot=False, block_device_mapping=None):
"""Creates an EBS-backed AMI from an EBS-backed instance.
Args:
context (RequestContext): The request context.
instance_id (str): The ID of the instance.
name (str): A name for the new image.
It's required by AWS but optional for legacy Nova EC2 API.
description (str): A description for the new image.
Not used now.
no_reboot (boolean): When the parameter is set to false, EC2
attempts to shut down the instance cleanly before image
creation and then reboots the instance.
block_device_mapping (list of dict): Dict can contain:
device_name (str): The device name exposed to the instance
(for example, /dev/sdh or xvdh).
virtual_name (str): The virtual device name (ephemeral[0..3]).
ebs (dict): Dict can contain:
volume_id (str): The ID of the volume (Nova extension).
snapshot_id (str): The ID of the snapshot.
volume_size (str): The size of the volume, in GiBs.
volume_type (str): The volume type.
Not used now.
delete_on_termination (bool): Indicates whether to delete
the volume on instance termination.
                    iops (int): The number of IOPS to provision for the volume.
Not used now.
encrypted (boolean): Whether the volume is encrypted.
Not used now.
no_device (str): Suppresses the device mapping.
Returns:
The ID of the new AMI.
"""
return image.create_image(context, instance_id, name, description,
no_reboot, block_device_mapping)
@module_and_param_types(image, 'str', 'str',
'str', 'str',
'str', 'dummy',
'str', 'aki_id',
'ari_id', 'str')
def register_image(self, context, name=None, image_location=None,
description=None, architecture=None,
root_device_name=None, block_device_mapping=None,
virtualization_type=None, kernel_id=None,
ramdisk_id=None, sriov_net_support=None):
"""Registers an AMI.
Args:
context (RequestContext): The request context.
name (str): A name for your AMI.
It's required by AWS but optional for legacy Nova EC2 API.
image_location (str): The full path to AMI manifest in S3 storage.
description (str): A description for your AMI.
Not used now.
architecture (str): The architecture of the AMI.
Not used now.
root_device_name (str): The name of the root device
block_device_mapping (list of dict): Dict can contain:
device_name (str): The device name exposed to the instance
(for example, /dev/sdh or xvdh).
virtual_name (str): The virtual device name (ephemeral[0..3]).
ebs (dict): Dict can contain:
volume_id (str): The ID of the volume (Nova extension).
snapshot_id (str): The ID of the snapshot.
volume_size (str): The size of the volume, in GiBs.
volume_type (str): The volume type.
Not used now.
delete_on_termination (bool): Indicates whether to delete
the volume on instance termination.
                    iops (int): The number of IOPS to provision for the volume.
Not used now.
encrypted (boolean): Whether the volume is encrypted.
Not used now.
no_device (str): Suppresses the device mapping.
virtualization_type (str): The type of virtualization.
Not used now.
kernel_id (str): The ID of the kernel.
Not used now.
ramdisk_id (str): The ID of the RAM disk.
Not used now.
sriov_net_support (str): SR-IOV mode for networking.
Not used now.
Returns:
The ID of the new AMI.
"""
@module_and_param_types(image, 'jmijrijki_id')
def deregister_image(self, context, image_id):
"""Deregisters the specified AMI.
Args:
context (RequestContext): The request context.
image_id (str): The ID of the AMI.
Returns:
true if the request succeeds.
"""
@module_and_param_types(image, 'jmijrijki_id', 'str')
def describe_image_attribute(self, context, image_id, attribute):
"""Describes the specified attribute of the specified AMI.
Args:
context (RequestContext): The request context.
image_id (str): The ID of the image.
attribute (str): The attribute of the network interface.
Valid values: description (unsupported now)| kernel | ramdisk |
launchPermission | productCodes (unsupported now)|
blockDeviceMapping | rootDeviceName (Nova EC2 extension)
Returns:
Specified attribute.
"""
return image.describe_image_attribute(context, image_id, attribute)
@module_and_param_types(image, 'jmijrijki_id', 'str',
'strs', 'str',
'str', 'dummy',
'dummy', 'dummy', 'str')
def modify_image_attribute(self, context, image_id, attribute=None,
user_group=None, operation_type=None,
description=None, launch_permission=None,
product_code=None, user_id=None, value=None):
"""Modifies the specified attribute of the specified AMI.
Args:
context (RequestContext): The request context.
image_id (str): The ID of the image.
attribute (str): The name of the attribute to modify.
user_group (list of str): One or more user groups.
Only 'all' group is supported now.
operation_type (str): The operation type.
Only 'add' and 'remove' operation types are supported now.
description: A description for the AMI.
            launch_permission: A launch permission modification.
            product_code: Not supported now.
            user_id: Not supported now.
            value: The value of the attribute being modified.
This is only valid when modifying the description attribute.
Returns:
true if the request succeeds.
"""
@module_and_param_types(image, 'jmijrijki_id', 'str')
def reset_image_attribute(self, context, image_id, attribute):
"""Resets an attribute of an AMI to its default value.
Args:
context (RequestContext): The request context.
image_id (str): The ID of the image.
attribute (str): The attribute to reset (currently you can only
reset the launch permission attribute).
Returns:
true if the request succeeds.
"""
@module_and_param_types(tag, 'ec2_ids', 'key_value_dict_list')
def create_tags(self, context, resource_id, tag):
"""Adds or overwrites one or more tags for the specified resources.
Args:
context (RequestContext): The request context.
resource_id (list of str): The IDs of one or more resources to tag.
tag (list of dict): Dict can contain:
key (str): The key of the tag.
value (str): The value of the tag.
Returns:
true if the request succeeds.
"""
@module_and_param_types(tag, 'ec2_ids', 'dummy')
def delete_tags(self, context, resource_id, tag=None):
"""Deletes the specified tags from the specified resources.
Args:
context (RequestContext): The request context.
resource_id (list of str): The IDs of one or more resources to tag.
tag (list of dict): One or more tags to delete.
Dict can contain:
key (str): The key of the tag.
value (str): The value of the tag.
Returns:
true if the request succeeds.
If you omit the value in tag parameter, we delete the tag regardless of
its value. If you specify this parameter with an empty string as the
value, we delete the key only if its value is an empty string.
"""
@module_and_param_types(tag, 'filter', 'int',
'str')
def describe_tags(self, context, filter=None, max_results=None,
next_token=None):
"""Describes one or more of the tags for your EC2 resources.
Args:
context (RequestContext): The request context.
filter (list of filter dict): You can specify filters so that the
response includes information for only certain tags.
max_results (int): The maximum number of items to return.
Not used now.
next_token (str): The token for the next set of items to return.
Not used now.
Returns:
A list of tags.
"""
@module_and_param_types(volume, 'str', 'int',
'snap_id', 'str', 'int',
'bool', 'str')
def create_volume(self, context, availability_zone=None, size=None,
snapshot_id=None, volume_type=None, iops=None,
encrypted=None, kms_key_id=None):
"""Creates an EBS volume.
Args:
context (RequestContext): The request context.
availability_zone (str): The Availability Zone in which to create
the volume.
It's required by AWS but optional for legacy Nova EC2 API.
            size (int): The size of the volume, in GiBs.
Valid values: 1-1024
If you're creating the volume from a snapshot and don't specify
a volume size, the default is the snapshot size.
snapshot_id (str): The snapshot from which to create the volume.
Required if you are creating a volume from a snapshot.
volume_type (str): The volume type. One of volume types created
in used Block Storage.
iops (int): The number of IOPS to provision for the volume.
Valid values: Range is 100 to 4,000.
Not used now.
encrypted (boolean): Whether the volume should be encrypted.
Not used now.
kms_key_id (str): The full ARN of AWS KMS master key to use when
creating the encrypted volume.
Not used now.
Returns:
Information about the volume.
You can create a new empty volume or restore a volume from an EBS
snapshot.
"""
@module_and_param_types(volume, 'dummy')
def delete_volume(self, context, volume_id):
"""Deletes the specified EBS volume.
Args:
context (RequestContext): The request context.
volume_id (str): The ID of the volume.
Returns:
Returns true if the request succeeds.
The volume must be in the available state.
"""
@module_and_param_types(volume, 'dummy', 'filter',
'int', 'str')
def describe_volumes(self, context, volume_id=None, filter=None,
max_results=None, next_token=None):
"""Describes the specified EBS volumes.
Args:
context (RequestContext): The request context.
volume_id (list of str): One or more volume IDs.
filter (list of filter dict): You can specify filters so that the
response includes information for only certain volumes.
max_results (int): The maximum number of items to return.
Not used now.
next_token (str): The token for the next set of items to return.
Not used now.
Returns:
A list of volumes.
"""
@module_and_param_types(snapshot, 'vol_id', 'str')
def create_snapshot(self, context, volume_id, description=None):
"""Creates a snapshot of an EBS volume.
Args:
context (RequestContext): The request context.
volume_id (str): The ID of the volume.
description (str): A description for the snapshot.
Returns:
Information about the snapshot.
"""
@module_and_param_types(snapshot, 'snap_id')
def delete_snapshot(self, context, snapshot_id):
"""Deletes the specified snapshot.
Args:
context (RequestContext): The request context.
snapshot_id (str): The ID of the snapshot.
Returns:
Returns true if the request succeeds.
"""
@module_and_param_types(snapshot, 'snap_ids', 'strs',
'strs', 'filter')
def describe_snapshots(self, context, snapshot_id=None, owner=None,
restorable_by=None, filter=None):
"""Describes one or more of the snapshots available to you.
Args:
context (RequestContext): The request context.
snapshot_id (list of str): One or more snapshot IDs.
owner (list of str): Returns the snapshots owned by the specified
owner.
Not used now.
restorable_by (list of str): One or more accounts IDs that can
create volumes from the snapshot.
Not used now.
filter (list of filter dict): You can specify filters so that the
response includes information for only certain snapshots.
Returns:
A list of snapshots.
"""
```
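Every public method on `CloudController` and `LegacyCloudController` above carries only a docstring; the `module_and_param_types` decorator near the top of cloud.py validates the keyword arguments and dispatches to the function of the same name in the referenced api module. The sketch below is an assumed, self-contained re-creation of that dispatch for illustration only: `FakeInstanceModule`, `MiniController` and the simplified decorator are not part of ec2api, the mandatory-parameter check is omitted, and Python 3 spellings (`__name__`, `__code__`, `zip`) replace the Python 2 ones used in the original.
```python
import collections


class FakeInstanceModule(object):
    """Stand-in for an ec2api.api.* module such as ec2api.api.instance."""

    class Validator(object):
        def __init__(self, param_name, func_name, params):
            self.param_name = param_name

        def i_ids(self, value):
            if not all(v.startswith('i-') for v in value):
                raise ValueError('%s must be instance ids' % self.param_name)

    @staticmethod
    def terminate_instances(context, instance_id):
        return {'context': context, 'terminated': instance_id}


def module_and_param_types(module, *param_types):
    """Simplified dispatch: validate kwargs, then call the module function."""
    def wrapped(func):
        def func_wrapped(self, context, **kwargs):
            impl_func = getattr(module, func.__name__)
            arg_names = func.__code__.co_varnames[2:func.__code__.co_argcount]
            params = collections.OrderedDict(zip(arg_names, param_types))
            for name, param_type in params.items():
                value = kwargs.get(name)
                if value is not None:
                    validator = module.Validator(name, func.__name__, params)
                    getattr(validator, param_type)(value)
            return impl_func(context, **kwargs)
        return func_wrapped
    return wrapped


class MiniController(object):
    @module_and_param_types(FakeInstanceModule, 'i_ids')
    def terminate_instances(self, context, instance_id):
        """The body is never executed; the decorator forwards the call."""


print(MiniController().terminate_instances('ctx', instance_id=['i-1', 'i-2']))
# -> {'context': 'ctx', 'terminated': ['i-1', 'i-2']}
```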
#### File: tests/unit/base.py
```python
import copy
import itertools
from cinderclient import client as cinderclient
from glanceclient import client as glanceclient
import mock
from novaclient import client as novaclient
from oslo_config import fixture as config_fixture
from oslotest import base as test_base
import ec2api.api.apirequest
from ec2api.api import ec2utils
import ec2api.db.sqlalchemy.api
from ec2api.tests.unit import fakes
from ec2api.tests.unit import matchers
from ec2api.tests.unit import tools
import ec2api.wsgi
def skip_not_implemented(test_item):
def decorator(test_item):
test_item.skip('The feature is not yet implemented')
return decorator
class ApiTestCase(test_base.BaseTestCase):
ANY_EXECUTE_ERROR = object()
NOVACLIENT_SPEC_OBJ = novaclient.Client('2')
def setUp(self):
super(ApiTestCase, self).setUp()
neutron_patcher = mock.patch('neutronclient.v2_0.client.Client',
autospec=True)
self.neutron = neutron_patcher.start().return_value
self.addCleanup(neutron_patcher.stop)
nova_patcher = mock.patch('novaclient.client.Client')
self.nova = mock.create_autospec(self.NOVACLIENT_SPEC_OBJ)
self.novaclient_getter = nova_patcher.start()
self.novaclient_getter.return_value = self.nova
self.addCleanup(nova_patcher.stop)
glance_patcher = mock.patch('glanceclient.client.Client')
self.glance = mock.create_autospec(
glanceclient.Client(endpoint='/v1'))
glance_patcher.start().return_value = self.glance
self.addCleanup(glance_patcher.stop)
cinder_patcher = mock.patch('cinderclient.client.Client')
self.cinder = mock.create_autospec(cinderclient.Client('1'))
cinder_patcher.start().return_value = self.cinder
self.addCleanup(cinder_patcher.stop)
db_api_patcher = mock.patch('ec2api.db.api.IMPL',
autospec=ec2api.db.sqlalchemy.api)
self.db_api = db_api_patcher.start()
self.addCleanup(db_api_patcher.stop)
isotime_patcher = mock.patch('oslo_utils.timeutils.isotime')
self.isotime = isotime_patcher.start()
self.addCleanup(isotime_patcher.stop)
self._conf = self.useFixture(config_fixture.Config())
self.configure(fatal_exception_format_errors=True)
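        # NOTE: every external dependency (the neutron, nova, glance and cinder
        # clients, the DB layer and timeutils.isotime) is replaced with an
        # autospec'd mock above, so these tests exercise only the EC2 API layer.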
def execute(self, action, args):
status_code, response = self._execute(action, args)
self.assertEqual(200, status_code,
self._format_error_message(status_code, response))
return response
def assert_execution_error(self, error_code, action, args):
status_code, response = self._execute(action, args)
if error_code == self.ANY_EXECUTE_ERROR:
self.assertLessEqual(400, status_code)
else:
self.assertEqual(400, status_code)
self.assertEqual(error_code, response['Error']['Code'],
self._format_error_message(status_code, response))
def assert_any_call(self, func, *args, **kwargs):
calls = func.mock_calls
for call in calls:
call_args = call[1]
if matchers.ListMatches(call_args, args, orderless_lists=True):
return
self.assertEqual(False, True)
def set_mock_db_items(self, *items):
self._db_items = copy.copy(items)
self.db_api.get_items.side_effect = (
tools.get_db_api_get_items(*self._db_items))
self.db_api.get_item_by_id.side_effect = (
tools.get_db_api_get_item_by_id(*self._db_items))
self.db_api.get_items_by_ids.side_effect = (
tools.get_db_api_get_items_by_ids(*self._db_items))
self.db_api.get_items_ids.side_effect = (
tools.get_db_api_get_items_ids(*self._db_items))
def add_mock_db_items(self, *items):
merged_items = items + tuple(item for item in self._db_items
if all(i['id'] != item['id']
for i in items))
self.set_mock_db_items(*merged_items)
def configure(self, **kwargs):
self._conf.config(**kwargs)
def check_filtering(self, operation, resultset_key, filters):
for name, value in filters:
resp = self.execute(operation,
{'Filter.1.Name': name,
'Filter.1.Value.1': str(value)})
self.assertTrue(resp[resultset_key] is not None and
len(resp[resultset_key]) > 0,
'Filter by %s does not work' % name)
resp = self.execute(operation,
{'Filter.1.Name': name,
'Filter.1.Value.1': 'dummy filter value'})
self.assertTrue(resp[resultset_key] is None or
len(resp[resultset_key]) == 0)
def check_tag_support(self, operation, resultset_key, sample_item_id,
id_key, item_kinds=[]):
self.db_api.get_tags = tools.CopyingMock(
return_value=[{'item_id': sample_item_id,
'key': 'fake_key',
'value': 'fake_value'}])
ec2_tags = [{'key': 'fake_key',
'value': 'fake_value'}]
resp = self.execute(operation, {})
tag_found = False
if type(resultset_key) is list:
resp_items = itertools.chain(*(r[resultset_key[1]]
for r in resp[resultset_key[0]]))
else:
resp_items = resp[resultset_key]
resultset_key = [resultset_key]
for resp_item in resp_items:
if resp_item.get(id_key) == sample_item_id:
self.assertIn('tagSet', resp_item)
self.assertThat(resp_item['tagSet'],
matchers.ListMatches(ec2_tags))
tag_found = True
else:
self.assertTrue('tagSet' not in resp_item or
resp_item['tagSet'] == [])
self.assertTrue(tag_found)
if not item_kinds:
item_kinds = (ec2utils.get_ec2_id_kind(sample_item_id),)
self.assertTrue(self.db_api.get_tags.call_count == 1 and
(self.db_api.get_tags.mock_calls[0] in
(mock.call(mock.ANY, item_kinds, set()),
mock.call(mock.ANY, item_kinds, None))))
self.db_api.reset_mock()
id_param = '%s%s.1' % (id_key[0].capitalize(), id_key[1:])
resp = self.execute(operation, {id_param: sample_item_id})
self.assertTrue(
self.db_api.get_tags.call_count == 1 and
(self.db_api.get_tags.mock_calls[0] in
(mock.call(mock.ANY, item_kinds, set([sample_item_id])),
mock.call(mock.ANY, item_kinds, [sample_item_id]))))
self.check_filtering(
operation, resultset_key[0],
[('tag-key', 'fake_key'),
('tag-value', 'fake_value'),
('tag:fake_key', 'fake_value')])
def _create_context(self, auth_token=None):
return ec2api.context.RequestContext(
fakes.ID_OS_USER, fakes.ID_OS_PROJECT,
auth_token=auth_token,
service_catalog=[{'type': 'network',
'endpoints': [{'publicUrl': 'fake_url'}]}])
def _execute(self, action, args):
ec2_request = ec2api.api.apirequest.APIRequest(action, 'fake_v1', args)
ec2_context = self._create_context()
environ = {'REQUEST_METHOD': 'FAKE',
'ec2.request': ec2_request,
'ec2api.context': ec2_context}
request = ec2api.wsgi.Request(environ)
response = request.send(ec2api.api.Executor())
return (response.status_code,
self._check_and_transform_response(response, action))
def _check_and_transform_response(self, response, action):
body = tools.parse_xml(response.body)
if response.status_code == 200:
action_tag = '%sResponse' % action
self.assertIn(action_tag, body)
body = body.pop(action_tag)
self.assertIn('requestId', body)
body.pop('requestId')
else:
self.assertIn('Response', body)
body = body.pop('Response')
self.assertIn('RequestID', body)
body.pop('RequestID')
self.assertEqual(1, len(body))
self.assertIn('Errors', body)
body = body.pop('Errors')
self.assertEqual(1, len(body))
self.assertIn('Error', body)
self.assertEqual(2, len(body['Error']))
return body
def _format_error_message(self, status_code, response):
if status_code >= 400:
return '%s: %s' % (response['Error']['Code'],
response['Error']['Message'])
else:
return ''
```
#### File: rally-scenarios/plugins/ec2api_plugin.py
```python
import functools
from rally.benchmark.scenarios import base
from rally.common import log as logging
from ec2api.tests.functional import botocoreclient
LOG = logging.getLogger(__name__)
class AtomicActionWithoutFirst(base.AtomicAction):
def __init__(self, scenario_instance, name):
super(AtomicActionWithoutFirst, self).__init__(scenario_instance, name)
self.scenario_instance = scenario_instance
self.name = name
def __exit__(self, type, value, tb):
args = self.scenario_instance.context['user']['ec2args']
if self.name in args:
super(AtomicActionWithoutFirst, self).__exit__(type, value, tb)
else:
args[self.name] = True
class EC2APIPlugin(base.Scenario):
def _get_client(self, is_nova):
args = self.context['user']['ec2args']
url = args['nova_url'] if is_nova else args['url']
client = botocoreclient.APIClientEC2(
url, args['region'], args['access'], args['secret'])
return client
def _run_both(self, base_name, func):
client = self._get_client(True)
with AtomicActionWithoutFirst(self, base_name + '_nova'):
func(self, client)
client = self._get_client(False)
with AtomicActionWithoutFirst(self, base_name + '_ec2api'):
func(self, client)
def _run_ec2(self, base_name, func):
client = self._get_client(False)
with AtomicActionWithoutFirst(self, base_name + '_ec2api'):
func(self, client)
def _runner(run_func):
def wrap(func):
@functools.wraps(func)
def runner(self, *args, **kwargs):
run_func(self, func.__name__, func)
return runner
return wrap
@base.scenario()
@_runner(_run_both)
def describe_instances(self, client):
resp, data = client.DescribeInstances()
assert 200 == resp.status_code
@base.scenario()
@_runner(_run_both)
def describe_addresses(self, client):
resp, data = client.DescribeAddresses()
assert 200 == resp.status_code
@base.scenario()
@_runner(_run_both)
def describe_security_groups(self, client):
resp, data = client.DescribeSecurityGroups()
assert 200 == resp.status_code
@base.scenario()
@_runner(_run_both)
def describe_regions(self, client):
resp, data = client.DescribeRegions()
assert 200 == resp.status_code
@base.scenario()
@_runner(_run_both)
def describe_images(self, client):
resp, data = client.DescribeImages()
assert 200 == resp.status_code
@base.scenario()
@_runner(_run_ec2)
def describe_vpcs(self, client):
resp, data = client.DescribeVpcs()
assert 200 == resp.status_code
@base.scenario()
@_runner(_run_ec2)
def describe_subnets(self, client):
resp, data = client.DescribeSubnets()
assert 200 == resp.status_code
@base.scenario()
@_runner(_run_ec2)
def describe_network_interfaces(self, client):
resp, data = client.DescribeNetworkInterfaces()
assert 200 == resp.status_code
@base.scenario()
@_runner(_run_ec2)
def describe_route_tables(self, client):
resp, data = client.DescribeRouteTables()
assert 200 == resp.status_code
_instance_id_by_client = dict()
@base.scenario()
@_runner(_run_both)
def describe_one_instance(self, client):
client_id = client.get_url()
instance_id = self._instance_id_by_client.get(client_id)
if not instance_id:
resp, data = client.DescribeInstances()
assert 200 == resp.status_code
instances = data['Reservations'][0]['Instances']
index = len(instances) / 3
instance_id = instances[index]['InstanceId']
self._instance_id_by_client[client_id] = instance_id
LOG.info("found instance = %s for client %s"
% (instance_id, client_id))
resp, data = client.DescribeInstances(InstanceIds=[instance_id])
assert 200 == resp.status_code
@base.scenario()
def describe_all_in_one(self):
self.describe_addresses()
self.describe_instances()
self.describe_security_groups()
self.describe_one_instance()
self.describe_vpcs()
self.describe_subnets()
self.describe_network_interfaces()
self.describe_route_tables()
@base.scenario()
def describe_networks(self):
self.describe_vpcs()
self.describe_subnets()
self.describe_network_interfaces()
self.describe_route_tables()
``` |
{
"source": "jiohyoo/gamelanpy",
"score": 2
} |
#### File: gamelanpy/gamelanpy/coreset.py
```python
import logging
import numpy as np
import scipy.linalg
import scipy.cluster
import scipy.spatial
import time
def get_coreset(data, num_clusters, coreset_size, delta=0.1):
'''
Parameters
----------
data: array-like, (num_frames, num_vars)
num_clusters: int, number of clusters
coreset_size: int, number of coreset samples
delta: float, default=0.1
Return
------
samples: coreset samples
weights: coreset weights
'''
logger = logging.getLogger()
if len(data.shape) == 1:
logger.debug('Input data is 1-D, converting it to 2-D')
data = data[:, np.newaxis]
num_frames, num_vars = data.shape
if coreset_size < num_clusters:
raise ValueError("coreset_size %d is less than num_mixtures %d" % (coreset_size, num_clusters))
if num_frames < coreset_size:
raise ValueError("num_frames %d is less than coreset_size %d" % (num_frames, num_clusters))
data_remain = data.copy()
samples = np.zeros((0, num_vars))
    # First, do the subsampling: pick random core samples and remove the
    # points closest to them.
logger.debug('Before Coreset random sampling')
num_iters = 0
num_single_samples = int(1.0 * num_vars * num_clusters * np.log(1.0 / delta))
logger.debug('num_single_samples: %d', num_single_samples)
while data_remain.shape[0] > num_single_samples:
cur_time = time.time()
logger.debug('Starting iteration %d', num_iters)
num_frames_remain = data_remain.shape[0]
idx = np.random.permutation(num_frames_remain)[:num_single_samples]
single_samples = data_remain[idx, :]
prev_time = cur_time
cur_time = time.time()
logger.debug('After random sampling (took %.3f sec)', cur_time - prev_time)
# Here we define similarity matrix, based on some measure of
# similarity or kernel. Feel free to change
dists = scipy.spatial.distance.cdist(data_remain, single_samples)
prev_time = cur_time
cur_time = time.time()
logger.debug('After evaluating cdist (took %.3f sec)', cur_time - prev_time)
# minimum distance from random samples
min_dists = np.min(dists, axis=1)
# median distance
v = np.median(min_dists)
# remove rows with distance <= median distance
remove_idx = np.where(min_dists <= v)[0]
# remove rows of remove_idx
data_remain = np.delete(data_remain, remove_idx, 0)
samples = np.vstack((samples, single_samples))
        logger.debug('Shape of the coreset samples so far (%d, %d)', *samples.shape)
        logger.debug('Shape of the remaining samples (%d, %d)', *data_remain.shape)
prev_time = cur_time
cur_time = time.time()
        logger.debug('End of iteration %d (took %.3f sec)', num_iters, cur_time - prev_time)
num_iters += 1
# end of while loop
    logger.debug('Shape of the final remaining samples (%d, %d)', *data_remain.shape)
samples = np.vstack((samples, data_remain))
    logger.debug('Shape of the final coreset samples (%d, %d)', *samples.shape)
# now compute the weights of all the points, according to how close they
# are to the closest core-sample.
db_size = np.zeros(samples.shape[0])
min_dists = np.zeros(num_frames)
    closest_sample_idx = np.zeros(num_frames, dtype=int)
for i in xrange(num_frames):
dists = scipy.spatial.distance.cdist(data[i:i+1, :], samples)
min_dist = np.min(dists)
min_idx = np.argmin(dists)
min_dists[i] = min_dist
closest_sample_idx[i] = min_idx
for i in xrange(num_frames):
# for each datapoint, Ix[i] is the index of the coreset point
# it is assigned to.
db_size[closest_sample_idx[i]] += 1
sq_sum_min_dists = (min_dists ** 2).sum()
m = np.zeros(num_frames)
for i in xrange(num_frames):
m[i] = np.ceil(5.0 / db_size[closest_sample_idx[i]] + (min_dists[i] ** 2) / sq_sum_min_dists)
m_sum = m.sum()
cdf = (1.0 * m / m_sum).cumsum()
samples = np.zeros((coreset_size, num_vars))
weights = np.zeros(coreset_size)
    # Now, sample from the weighted points to generate the final coreset
# and the corresponding weights
for i in xrange(coreset_size):
r = np.random.rand()
idx = (cdf <= r).sum()
samples[i, :] = data[idx, :]
weights[i] = m_sum / (coreset_size * m[idx])
return samples, weights
```
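A hypothetical usage sketch (not from the repository): compress synthetic 2-D Gaussian data into a weighted coreset. The import path is assumed from the file layout, and since the module above is Python 2 flavoured (`xrange`), the sketch assumes the same environment.
```python
import numpy as np
from gamelanpy.coreset import get_coreset   # assumed import path

np.random.seed(0)
data = np.random.randn(5000, 2)             # (num_frames, num_vars)

samples, weights = get_coreset(data, num_clusters=3, coreset_size=100)
print(samples.shape)    # (100, 2)
print(weights.shape)    # (100,)
```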
#### File: gamelanpy/gamelanpy/json_util.py
```python
import base64
import json
import numpy as np
from sparse_gmm import SparseGMM
from nonparanormal import NPNTransformer
class GamelanPyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, SparseGMM):
dct = obj.__dict__
dct['__sparse_gmm__'] = True
return dct
if isinstance(obj, NPNTransformer):
print 'hello'
dct = obj.__dict__
dct['__npn_transformer__'] = True
return dct
if isinstance(obj, np.ndarray):
data_base64 = base64.b64encode(obj.data)
return dict(__ndarray__=data_base64, dtype=str(obj.dtype), shape=obj.shape)
        return json.JSONEncoder.default(self, obj)
def gamelan_json_obj_hook(dct):
if isinstance(dct, dict) and '__ndarray__' in dct:
data = base64.b64decode(dct['__ndarray__'])
return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])
if isinstance(dct, dict) and '__sparse_gmm__' in dct:
sgmm = SparseGMM()
sgmm.__dict__ = dct
return sgmm
if isinstance(dct, dict) and '__npn_transformer__' in dct:
npn = NPNTransformer()
npn.__dict__ = dct
return npn
return dct
``` |
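A hypothetical round-trip sketch (not from the repository): serialize a NumPy array with GamelanPyEncoder and restore it with gamelan_json_obj_hook. The import path is assumed, and a Python 2 environment matching the module above is assumed.
```python
import json
import numpy as np
from gamelanpy.json_util import GamelanPyEncoder, gamelan_json_obj_hook  # assumed path

arr = np.arange(6, dtype='float64').reshape(2, 3)
payload = json.dumps({'weights': arr}, cls=GamelanPyEncoder)
restored = json.loads(payload, object_hook=gamelan_json_obj_hook)

assert np.array_equal(restored['weights'], arr)
```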
{
"source": "JiongDong/Insect-classification-based-on-images",
"score": 3
} |
#### File: JiongDong/Insect-classification-based-on-images/Classify.py
```python
from LoadImage import LoadImage
from FeatureExtractor import FeatureExtractor
from BOW import BOW
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import cv2
import numpy as np
import os
class Classify:
def __init__(self):
pass
def algoSVM(self,trainingPath, testPath, featureExtraType):
'''
        Use the Support Vector Machine algorithm to classify insect images
        :param trainingPath: the path of training images
        :param testPath: the path of testing images
        :param featureExtraType: the feature type: sift or surf
:return:
'''
loadImage = LoadImage()
featureExtra = FeatureExtractor()
bow = BOW()
#get the species,the name of all insects, the path of all insect images
insectSpecies, names, trainingPaths = loadImage.loadTrainImage(trainingPath)
print insectSpecies
print "Le bombre d'espece :", len(insectSpecies)
dictionarySize = len(insectSpecies)
insect = {}
num = 1
for name in insectSpecies:
insect[name] = num
num += 1
#get the descriptors of all training images
descriptors = bow.getDescriptors(trainingPaths, featureExtraType)
#get Bag of Words dictionary
bowDictionary = bow.getBowDictionary(dictionarySize, descriptors, featureExtraType)
print "bow dictionary"
#train data
trainDesc = []
#train response
trainLabels = []
i = 0
#initialize train datas and train responses
for p in trainingPaths:
trainDesc.extend(featureExtra.getSingleFeature(p, bowDictionary, featureExtraType))
trainLabels.append(insect[names[i]])
i = i + 1
svm = cv2.SVM()
#training
svm.train(np.array(trainDesc), np.array(trainLabels))
testInsectNames = os.listdir(testPath)
# Initialize a zero matrix to save the classification results
result = np.zeros((dictionarySize, dictionarySize))
print "result zero"
count = 0
        # classify all the test images
for test in testInsectNames:
testingImage = os.listdir(testPath + "\\" + test)
for p in testingImage:
#get feature from a test image
feature = featureExtra.getSingleFeature(testPath + "\\" + test + "\\" + p, bowDictionary, featureExtraType)
#predict
p = svm.predict(feature)
#save the result in the result matrix
result[count, p - 1] += 1
count += 1
return result
def algoANN(self, trainingPath, testPath, featureExtraType, epochs):
'''
        Use the Artificial Neural Network algorithm to classify insect images
        :param trainingPath: the path of training images
        :param testPath: the path of testing images
        :param featureExtraType: the feature type: sift or surf
        :param epochs: the number of training epochs for the neural network
:return: the classification results
'''
loadImage = LoadImage()
featureExtra = FeatureExtractor()
bow = BOW()
# get the species,the name of all insects, the path of all insect images
insectNames, names, trainingPaths = loadImage.loadTrainImage(trainingPath)
insectSpecies = len(insectNames)
print "insect species:", insectSpecies
        # get all descriptors of the training images
trainDescriptors = bow.getDescriptors(trainingPaths, featureExtraType)
        # get the BoW dictionary of the training images
trainBowDictionary = bow.getBowDictionary(insectSpecies, trainDescriptors, featureExtraType)
#initialize a Neural Network
net = buildNetwork(insectSpecies, 100, 100, insectSpecies)
#initialize a data set
ds = SupervisedDataSet(insectSpecies, insectSpecies)
species = 0
#add all datas in data set
for p in insectNames:
trainingPaths = os.listdir(trainingPath + "\\" + p)
for j in trainingPaths:
#add data
ds.addSample(featureExtra.getSingleFeature(trainingPath + "\\" + p + "\\" + j,
trainBowDictionary, featureExtraType)[0], (species,))
species += 1
#initialize a trainer
trainer = BackpropTrainer(net, ds, learningrate=0.01, momentum=0.1, weightdecay=0.01)
#training
for i in range(1, epochs):
traError = trainer.train()
print 'after %d epochs,train error:%f' % (i, traError)
testInsectNames, testNames, testingPaths = loadImage.loadTrainImage(testPath)
testDescriptors = bow.getDescriptors(testingPaths, featureExtraType)
testBowDictionary = bow.getBowDictionary(insectSpecies, testDescriptors, featureExtraType)
# Initializes a zero matrix to save the classification results
result = np.zeros((insectSpecies, insectSpecies))
count = 0
        # classify all the test images
for m in testInsectNames:
testPaths = os.listdir(testPath + "\\" + m)
for n in testPaths:
test = net.activate(featureExtra.getSingleFeature(testPath + "\\" + m + "\\" + n,
testBowDictionary, featureExtraType)[0])
target = map(lambda x: (x), test) # numpy.array to list
result[count, target.index(max(target))] += 1
count += 1
return result
```
#### File: JiongDong/Insect-classification-based-on-images/LoadImage.py
```python
import os
class LoadImage:
def __init__(self):
pass
def loadTrainImage(self,path):
'''
load all train images from the path
:param path: the images' path
:return: the type and the name of all insects, the path of all insect images
'''
# get all species' names
insectSpecies = os.listdir(path)
trainingPaths = []
names = []
# get full list of all training images
for p in insectSpecies:
paths = os.listdir(path + "\\" + p)
for j in paths:
trainingPaths.append(path + "\\" + p + "\\" + j)
names.append(p)
return insectSpecies,names,trainingPaths
``` |
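A hypothetical driver sketch (not part of the repository). It assumes a Python 2 / OpenCV 2.4 environment (for `cv2.SVM`), PyBrain installed, and training/test folders laid out as `<root>\<species name>\<image files>`, which is what LoadImage expects; the paths below are placeholders.
```python
from Classify import Classify

classifier = Classify()

# SVM with SIFT features; the result is a species-by-species count matrix.
svm_result = classifier.algoSVM("D:\\insects\\train", "D:\\insects\\test", "sift")
print svm_result

# ANN with SURF features, trained for 50 epochs.
ann_result = classifier.algoANN("D:\\insects\\train", "D:\\insects\\test", "surf", 50)
print ann_result
```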
{
"source": "Jiongqi/RectAngle",
"score": 3
} |
#### File: rectangle/utils/io.py
```python
import torch
from torch import nn
import numpy as np
import random
def train_val_test(file, ratio=(0.6, 0.2, 0.2)):
""" Generate list of keys for file based on index values
Input arguments:
file : h5py File object
Loaded using h5py.File(path : string)
ratio : tuple(int), default = (0.6,0.2,0.2)
Ratios of train/val/test for splitting data in h5py File.
"""
keys = [key.split('_') for key in file.keys()]
num_subjects = int(keys[-1][1])
assert 1.0 * sum(ratio) == 1.0
sum_ratio = list(ratio)
if sum_ratio[1] > 0:
sum_ratio[1] += sum_ratio[0]
if sum_ratio[2] > 0:
sum_ratio[2] += sum_ratio[1]
scaled_ratio = [int(round(val * num_subjects)) for val in sum_ratio]
ix = np.linspace(0, num_subjects, num_subjects+1, dtype=int)
train_ix = ix[:scaled_ratio[0]]
if scaled_ratio[1] > 0:
val_ix = ix[scaled_ratio[0]:scaled_ratio[1]]
else:
val_ix = 0
if scaled_ratio[2] > 0:
test_ix = ix[scaled_ratio[1]:scaled_ratio[2]]
else:
test_ix = 0
return train_ix, val_ix, test_ix
def key_gen(file, ix):
""" Generate list of keys for file based on index values
Input arguments:
file : h5py File object
Loaded using h5py.File(path : string)
ix : list[int]
List of index values to find file keys for.
"""
keys = list(file.keys())
split_keys = [key.split('_') for key in keys]
new_keys = []
for i, key in enumerate(split_keys):
if int(key[1]) in ix:
new_keys.append(keys[i])
return new_keys
class H5DataLoader(torch.utils.data.Dataset):
def __init__(self, file, keys=None, label='random'):
""" Dataloader for hdf5 files.
Input arguments:
file : h5py File object
Loaded using h5py.File(path : string)
keys : list, default = None
Keys from h5py file to use. Useful for train-val-test split.
If None, keys generated from entire file.
label : string, default = 'random'
Method for loading segmentation labels.
Options:
* 'random' - randomly select one of the available labels
* 'vote' - calculate pixel-wise majority vote from available labels
"""
super().__init__()
self.file = file
if not keys:
keys = list(file.keys())
split_keys = [key.split('_') for key in keys]
start_subj = int(split_keys[0][1])
last_subj = int(split_keys[-1][1])
self.num_subjects = last_subj - start_subj
self.subjects = np.linspace(start_subj, last_subj,
self.num_subjects+1, dtype=int)
self.label = label
        # Keep the original label string so that __getitem__ can check for the
        # 'combine_<percent>' mode even after self.label is switched per item.
        self.label_loop = label
def __len__(self):
return self.num_subjects
def __getitem__(self, index):
subj_ix = self.subjects[index]
label_ix = random.randint(0, 2)
image = torch.unsqueeze(torch.tensor(
self.file['frame_%05d' % (subj_ix,
)][()].astype('float32')), dim=0)
if self.label_loop.split('_')[0] == 'combine':
label_percent = int(self.label_loop.split('_')[1])
if index < int(self.num_subjects*label_percent/100):
self.label = 'vote'
else:
self.label = 'random'
if self.label == 'random':
label = torch.unsqueeze(torch.tensor(
self.file['label_%05d_%02d' % (subj_ix,
label_ix
)][()].astype(int)), dim=0)
elif self.label == 'vote':
label_batch = torch.cat([torch.unsqueeze(torch.tensor(
self.file['label_%05d_%02d' % (subj_ix, label_ix
)][()].astype('float32')), dim=0) for label_ix in range(3)])
label_mean = torch.unsqueeze(torch.mean(label_batch, dim=0), dim=0)
label = torch.round(label_mean).int()
elif self.label == 'mean':
label_batch = torch.cat([torch.unsqueeze(torch.tensor(
self.file['label_%05d_%02d' % (subj_ix, label_ix
)][()].astype('float32')), dim=0) for label_ix in range(3)])
label = torch.unsqueeze(torch.mean(label_batch, dim=0), dim=0)
return(image, label)
class ClassifyDataLoader(torch.utils.data.Dataset):
def __init__(self, file, keys=None):
""" Dataloader for hdf5 files, with labels converted to classifier labels
Input arguments:
file : h5py File object
Loaded using h5py.File(path : string)
keys : list, default = None
Keys from h5py file to use. Useful for train-val-test split.
If None, keys generated from entire file.
"""
super().__init__()
self.file = file
if not keys:
keys = list(file.keys())
split_keys = [key.split('_') for key in keys]
start_subj = int(split_keys[0][1])
last_subj = int(split_keys[-1][1])
self.num_subjects = last_subj - start_subj
self.subjects = np.linspace(start_subj, last_subj,
self.num_subjects+1, dtype=int)
num_frames = []
for subj in range(start_subj, last_subj):
subj_string = str(subj).zfill(4)
frames = [key[2] for key in split_keys if key[1] == subj_string]
num_frames.append(int(frames[-1]))
self.num_frames = num_frames
def __len__(self):
return self.num_subjects
def __getitem__(self, index):
subj_ix = self.subjects[index]
frame_ix = random.randint(0, self.num_frames[index])
image = torch.unsqueeze(torch.tensor(
self.file['frame_%04d_%03d' % (subj_ix,
frame_ix
)][()].astype('float32')), dim=0)
label_batch = torch.cat([torch.unsqueeze(torch.tensor(
self.file['label_%04d_%03d_%02d' % (subj_ix, frame_ix, label_ix
)][()].astype('float32')), dim=0) for label_ix in range(3)])
label_vote = torch.sum(label_batch, dim=(1,2))
sum_vote = torch.sum(label_vote != 0)
if sum_vote >= 2:
label = torch.tensor([1.0])
else:
label = torch.tensor([0.0])
return(image, label)
class TestPlotLoader(torch.utils.data.Dataset):
def __init__(self, file, keys=None, label='vote'):
""" Dataloader for hdf5 files, always fixed to first frame to compare plots
Input arguments:
file : h5py File object
Loaded using h5py.File(path : string)
keys : list, default = None
Keys from h5py file to use. Useful for train-val-test split.
If None, keys generated from entire file.
label : string, default = 'random'
Method for loading segmentation labels.
Options:
* 'random' - randomly select one of the available labels
* 'vote' - calculate pixel-wise majority vote from available labels
"""
super().__init__()
self.file = file
if not keys:
keys = list(file.keys())
split_keys = [key.split('_') for key in keys]
start_subj = int(split_keys[0][1])
last_subj = int(split_keys[-1][1])
self.num_subjects = last_subj - start_subj
self.subjects = np.linspace(start_subj, last_subj,
self.num_subjects+1, dtype=int)
num_frames = []
for subj in range(start_subj, last_subj):
subj_string = str(subj).zfill(4)
frames = [key[2] for key in split_keys if key[1] == subj_string]
num_frames.append(int(frames[-1]))
self.num_frames = num_frames
self.label = label
def __len__(self):
return self.num_subjects
def __getitem__(self, index):
subj_ix = self.subjects[index]
frame_ix = 0
label_ix = random.randint(0, 2)
image = torch.unsqueeze(torch.tensor(
self.file['frame_%04d_%03d' % (subj_ix,
frame_ix
)][()].astype('float32')), dim=0)
if self.label == 'random':
label = torch.unsqueeze(torch.tensor(
self.file['label_%04d_%03d_%02d' % (subj_ix,
frame_ix,
label_ix
)][()].astype(int)), dim=0)
elif self.label == 'vote':
label_batch = torch.cat([torch.unsqueeze(torch.tensor(
self.file['label_%04d_%03d_%02d' % (subj_ix, frame_ix, label_ix
)][()].astype('float32')), dim=0) for label_ix in range(3)])
label_mean = torch.unsqueeze(torch.mean(label_batch, dim=0), dim=0)
label = torch.round(label_mean).int()
elif self.label == 'mean':
label_batch = torch.cat([torch.unsqueeze(torch.tensor(
self.file['label_%04d_%03d_%02d' % (subj_ix, frame_ix, label_ix
)][()].astype('float32')), dim=0) for label_ix in range(3)])
label = torch.unsqueeze(torch.mean(label_batch, dim=0), dim=0)
return(image, label)
class PreScreenLoader(torch.utils.data.Dataset):
def __init__(self, model, file, keys=None, label='random', threshold=0.5):
""" Dataloader for hdf5 files.
Input arguments:
model : Torch nn.Module object
file : h5py File object
Loaded using h5py.File(path : string)
keys : list, default = None
Keys from h5py file to use. Useful for train-val-test split.
If None, keys generated from entire file.
label : string, default = 'random'
Method for loading segmentation labels.
Options:
* 'random' - randomly select one of the available labels
* 'vote' - calculate pixel-wise majority vote from available labels
"""
super().__init__()
self.file = file
if not keys:
keys = list(file.keys())
split_keys = [key.split('_') for key in keys]
start_subj = int(split_keys[0][1])
last_subj = int(split_keys[-1][1])
self.num_subjects = last_subj - start_subj
self.subjects = np.linspace(start_subj, last_subj,
self.num_subjects+1, dtype=int)
num_frames = []
for subj in range(start_subj, last_subj):
subj_string = str(subj).zfill(4)
frames = [key[2] for key in split_keys if key[1] == subj_string]
num_frames.append(int(frames[-1]))
self.num_frames = num_frames
self.label = label
self.model = model
self.threshold = threshold
def __len__(self):
return self.num_subjects
def __getitem__(self, index):
subj_ix = self.subjects[index]
label_ix = random.randint(0, 2)
prostate = 'no'
while prostate == 'no':
frame_ix = random.randint(0, self.num_frames[index])
image = torch.unsqueeze(torch.tensor(
self.file['frame_%04d_%03d' % (subj_ix,
frame_ix
)][()].astype('float32')), dim=0)
image_screen = torch.unsqueeze(image, dim=0)
device = next(self.model.parameters()).device
image_screen = image_screen.to(device)
with torch.no_grad():
self.model.eval()
screen_pred = self.model(image_screen)
if screen_pred.item() > self.threshold:
prostate = 'yes'
if self.label == 'random':
label = torch.unsqueeze(torch.tensor(
self.file['label_%04d_%03d_%02d' % (subj_ix,
frame_ix,
label_ix
)][()].astype(int)), dim=0)
elif self.label == 'vote':
label_batch = torch.cat([torch.unsqueeze(torch.tensor(
self.file['label_%04d_%03d_%02d' % (subj_ix, frame_ix, label_ix
)][()].astype('float32')), dim=0) for label_ix in range(3)])
label_mean = torch.unsqueeze(torch.mean(label_batch, dim=0), dim=0)
label = torch.round(label_mean).int()
elif self.label == 'mean':
label_batch = torch.cat([torch.unsqueeze(torch.tensor(
self.file['label_%04d_%03d_%02d' % (subj_ix, frame_ix, label_ix
)][()].astype('float32')), dim=0) for label_ix in range(3)])
label = torch.unsqueeze(torch.mean(label_batch, dim=0), dim=0)
return(image, label)
``` |
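A hypothetical training-setup sketch (not from the repository): split an HDF5 file by subject, build per-split key lists, and wrap the random-label loader in a PyTorch DataLoader. The import path, file name and batch size are placeholders, and the file is assumed to use the `frame_*`/`label_*` key scheme that H5DataLoader expects.
```python
import h5py
import torch
from rectangle.utils.io import train_val_test, key_gen, H5DataLoader  # assumed path

data_file = h5py.File('frames_and_labels.h5', 'r')

train_ix, val_ix, test_ix = train_val_test(data_file, ratio=(0.6, 0.2, 0.2))
train_keys = key_gen(data_file, train_ix)

train_data = H5DataLoader(data_file, keys=train_keys, label='random')
train_loader = torch.utils.data.DataLoader(train_data, batch_size=8, shuffle=True)

for image, label in train_loader:
    print(image.shape, label.shape)   # one-channel image and label batches
    break
```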
{
"source": "jionie/CPM-Huggingface",
"score": 3
} |
#### File: jionie/CPM-Huggingface/test.py
```python
import torch
from transformers import GPT2LMHeadModel
from data_utils.tokenization import GPT2Tokenizer
def main():
print("loading tokenizer")
tokenizer = GPT2Tokenizer('bpe_3w_new/vocab.json', 'bpe_3w_new/chinese_vocab.model')
print("loading tokenizer finished")
src = "您好"
input_ids = torch.tensor([tokenizer.encode(src)]).cuda()
print("loading model")
model = GPT2LMHeadModel.from_pretrained("model/CPM/")
model.cuda()
print("loading model finished")
# generate text until the output length (which includes the context length) reaches 50
print("testing greedy")
greedy_output = model.generate(input_ids, max_length=50)
print("Output:\n" + 100 * '-')
print(tokenizer.decode(greedy_output[0].tolist()))
print("testing greedy finished")
# set no_repeat_ngram_size to 2
print("testing beam search")
beam_output = model.generate(
input_ids,
max_length=50,
num_beams=5,
no_repeat_ngram_size=2,
early_stopping=True
)
print("Output:\n" + 100 * '-')
print(tokenizer.decode(beam_output[0].tolist()))
print("testing beam search finished")
# set top_k = 50 and set top_p = 0.95 and num_return_sequences = 3
print("testing sampling")
sample_outputs = model.generate(
input_ids,
do_sample=True,
max_length=50,
top_k=50,
top_p=0.95,
num_return_sequences=3
)
print("Output:\n" + 100 * '-')
for i, sample_output in enumerate(sample_outputs):
print("{}: {}".format(i, tokenizer.decode(sample_output.tolist())))
print("testing sampling finished")
return
if __name__ == "__main__":
main()
``` |
{
"source": "jionie/Django-website",
"score": 2
} |
#### File: testsite/ShowResults/views.py
```python
from __future__ import unicode_literals
from imageUpload.models import Img
from django import forms
from django.shortcuts import render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.template import RequestContext
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
import os
from random_choose import choose
from django.contrib import messages
from ShowResults.models import Result_Img
# Create your views here.
@csrf_exempt
def analyze(request):
request.session.set_expiry(0)
#fo = open("form.txt", "a")
if request.method == "POST":
check_box_list = request.POST.getlist("check_box_list")
list0 = request.session.get('list0')
if not list0:
queryset = []
for i in range(len(list0)):
img0 = Img.objects.get(img_name__exact=list0[i])
queryset.append(img0)
messages.error(request, "Please upload an image.")
return render(request, 'list.html', {'queryset': queryset, 'check_box_list': check_box_list})
if check_box_list:
# for i in range(len(list0)):
# fo.write(str(list0[i]))
list = ','.join(check_box_list)
print list
#fo.write("OK")
url_pro = choose(list0[0])
Result_images = []
for i in range(len(url_pro)):
img = Result_Img()
r = []
for j in range(2, len(url_pro[i])):
r.append(url_pro[i][j])
img.Result_probability = url_pro[i][0]
img.Result_name = url_pro[i][1]
img.Result_path= r
Result_images.append(img)
queryset = []
for i in range(len(list0)):
img0 = Img.objects.get(img_name__exact=list0[i])
queryset.append(img0)
return render(request, 'conclusion.html', {'Result_images': Result_images, 'queryset': queryset, 'list':list})
else:
# for i in range(len(list0)):
# fo.write(str(list0[i]))
#fo.write("NO")
messages.warning(request, "Please choose a measure.")
queryset = []
for i in range(len(list0)):
img0 = Img.objects.get(img_name__exact=list0[i])
queryset.append(img0)
return render(request, 'list.html', {'queryset': queryset, 'check_box_list': check_box_list})
else:
check_box_list = []
list0 = request.session.get('list0')
queryset = []
for i in range(len(list0)):
img0 = Img.objects.get(img_name__exact=list0[i])
queryset.append(img0)
return render_to_response('list.html', {'queryset': queryset, 'check_box_list': check_box_list})
@csrf_exempt
def continue_img(request):
request.session.set_expiry(0)
list0 = request.session.get('list0')
#fo = open("form.txt", "a")
#fo.write("3")
return HttpResponseRedirect('/image')
@csrf_exempt
def delete_image(request, id):
request.session.set_expiry(0)
id = int(id)
list0 = request.session.get('list0')
img0 = Img.objects.get(img_name__exact=list0[id])
os.remove(settings.MEDIA_ROOT + '/' + str(img0.img))
Img.objects.get(img_name__exact=list0[id]).delete()
list0.remove(list0[id])
request.session['list0'] = list0
return HttpResponseRedirect('/result')
@csrf_exempt
def refresh(request):
request.session.set_expiry(0)
list0 = request.session.get('list0')
for i in range(len(list0)):
img0 = Img.objects.get(img_name__exact = list0[i])
os.remove(settings.MEDIA_ROOT + '/' + str(img0.img))
Img.objects.get(img_name__exact = list0[i]).delete()
queryset = []
request.session['list0'] = []
check_box_list = []
return render_to_response('list.html', {'queryset': queryset, 'check_box_list': check_box_list})
@csrf_exempt
def logout(request):
list0 = request.session.get('list0')
for i in range(len(list0)):
img0 = Img.objects.get(img_name__exact = list0[i])
os.remove(settings.MEDIA_ROOT + '/' + str(img0.img))
Img.objects.get(img_name__exact = list0[i]).delete()
    del request.session['account']  # delete the session
del request.session['list0']
return HttpResponseRedirect('/login')
``` |
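A hypothetical URL configuration sketch (not part of the repository) wiring the views above in the Django 1.x style the project uses; every route pattern here is an assumption inferred from the redirects inside the views.
```python
from django.conf.urls import url

from ShowResults import views

urlpatterns = [
    url(r'^result/$', views.analyze),                          # assumed route
    url(r'^result/continue/$', views.continue_img),            # assumed route
    url(r'^result/delete/(?P<id>\d+)/$', views.delete_image),  # assumed route
    url(r'^result/refresh/$', views.refresh),                  # assumed route
    url(r'^logout/$', views.logout),                           # assumed route
]
```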
{
"source": "jion/python-scraper",
"score": 3
} |
#### File: jion/python-scraper/main_example.py
```python
import sys
sys.path.append('./src')
from scraper.operations import CountNumberOfElements, ListOcurrences
from scraper.resultprinters import ConsolePrinter
from scraper.scraper import defaultUrlScraper as Scraper
url = "http://ordergroove.com/company"
def main():
"""
    This runs an example request through the scraper,
    asking for the number of HTML elements and the
    5 most used tags on the page whose URL is stored in
    the global variable "url".
    The Scraper was made extendable through Operations, and the output
    was decoupled using an abstract printer (only console output is
    implemented for now).
"""
# Setting up & Running the Scraper
scraper = Scraper(url)
scraper.addOperation(CountNumberOfElements())
scraper.addOperation(ListOcurrences(limit=5))
scraper.run()
# Printing results
consolePrinter = ConsolePrinter()
scraper.printResults(consolePrinter)
if __name__ == '__main__':
main()
```
#### File: src/helpers/observerpattern.py
```python
class Observable(object):
"""
This class implements the observer design pattern.
You just need to inherit from this class to add
the ability to the class of be observable.
"""
observers= {}
def subscribe(self, action, observer):
if action not in self.observers:
self.observers[action] = []
self.observers[action].append( observer )
def _trigger(self, action, eventData=None):
if action not in self.observers:
return # No suscribers for this event
e = { 'action': action, 'data': eventData }
for observer in self.observers[action]:
observer.notify(e)
class Observer(object):
def notify(self, eventData):
pass
```
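A minimal usage sketch (not part of the repository) showing the intended subscribe/notify flow; `PrintingObserver` and `Downloader` are illustrative classes, and the sys.path line mirrors main_example.py. Note that `observers` is defined as a class attribute, so subscriptions end up shared across every Observable instance.
```python
import sys
sys.path.append('./src')

from helpers.observerpattern import Observable, Observer

class PrintingObserver(Observer):
    def notify(self, eventData):
        print("event %s with data %s" % (eventData['action'], eventData['data']))

class Downloader(Observable):
    def finish(self):
        self._trigger("DownloadFinished", {"bytes": 1024})

downloader = Downloader()
downloader.subscribe("DownloadFinished", PrintingObserver())
downloader.finish()   # -> event DownloadFinished with data {'bytes': 1024}
```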
#### File: src/scraper/dombuilder.py
```python
from helpers.observerpattern import Observable
from HTMLParser import HTMLParser
class DomBuilder(Observable, HTMLParser):
"""
This class is on charge of parse the plainHTML provided via a Reader and construct
a dom representation with it. DOM structure is decoupled from this class and need
to be passed at the time of construction.
"""
# Some elements don't have a closing tag ( https://www.w3.org/TR/html51/syntax.html#void-elements )
voidTags = ["area", "base", "br", "col", "embed", "hr", "img", "input", "keygen",
"link", "menuitem", "meta", "param", "source", "track", "wbr"] # const
def __init__(self, dom):
HTMLParser.__init__(self)
self.dom = dom
self.actualParent = [None,]
def _finishParsing(self):
self._trigger("ParsingFinished", { 'dom': self.dom })
def handle_starttag(self, tag, attrs):
element = (tag, attrs, self.actualParent[-1])
nodeIndex = self.dom.addNode( element )
if tag not in self.voidTags:
self.actualParent.append( nodeIndex )
def handle_endtag(self, tag):
if tag in self.voidTags:
return # We already did the job
actualParent = self.actualParent.pop()
if self.dom.getNode( actualParent )[0] != tag:
raise Exception("DomBuilder - Closing tag is missing") # TODO: Custom error object. (ParseEror ?)
if self.actualParent[-1] == None:
self._finishParsing()
```
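For illustration, a hedged sketch of the contract DomBuilder expects from its `dom` argument: an object exposing `addNode(element)` that returns an index and `getNode(index)` that returns the stored tuple. The real project supplies SimpleDOM from scraper.dom; the `ListDOM` stub and the sample HTML below are made up for the example.
```python
import sys
sys.path.append('./src')

from scraper.dombuilder import DomBuilder

class ListDOM(object):
    """Toy stand-in for SimpleDOM: stores (tag, attrs, parent_index) tuples."""
    def __init__(self):
        self.nodes = []
    def addNode(self, element):
        self.nodes.append(element)
        return len(self.nodes) - 1
    def getNode(self, index):
        return self.nodes[index]

builder = DomBuilder(ListDOM())
builder.feed("<html><body><p>hi</p><br></body></html>")
print(len(builder.dom.nodes))   # 4 nodes: html, body, p, br
```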
#### File: src/scraper/scraper.py
```python
from dombuilder import DomBuilder
from dom import SimpleDOM
from urllib2 import urlopen, URLError, HTTPError
# Scraper responsibility:
# orchestrate the steps of the scraping process:
# Construction phase:
# 1. create the proper reader (input)
# 2. construct the HTML parser
# 3. customize the HTML parser adding operations
# 4. construct the Printer
#
# Running phase:
# 1. obtain an html input
# 2. pass the input to de HTML parser
# 3. Run the parser.
# 4. Print the results
def simpleUrlReader(url, consumer):
"""
    This function handles reading from a URL and passing the data to a
    consumer (via a feed method on the consumer).
    Ideally this would be done asynchronously, but for now it reads the whole
    HTML at once and only then passes it to the consumer.
    The function is decoupled from the Scraper class in order to be able
    to support other types of feeding (e.g. an HTML file).
"""
try:
handler = urlopen(url)
html = handler.read()
except HTTPError, e:
raise Exception("There was a problem getting the specified url - HTTP code %s" % e.code)
except URLError, e:
raise Exception("There was a problem getting the specified url: %s" % str(e))
else:
#####################
consumer.feed(html) #
#####################
def defaultUrlScraper(url):
reader = simpleUrlReader
dom = SimpleDOM()
domBuilder = DomBuilder(dom)
scraper = Scraper(reader, domBuilder)
scraper.setUrl(url)
return scraper
class Scraper(object):
"""
    This class orchestrates the whole process of scraping the specified HTML.
    You can customize the process by adding Operations that will be executed
    on the fly while the DOM object is created from parsing the HTML page.
    When results are ready, they are output in a decoupled fashion by passing
    the desired printer implementation to the printResults function.
    The initializer receives a reader callable and a DomBuilder instance.
"""
url = None
def __init__(self, reader, domBuilder):
self.operations = []
self.domBuilder = domBuilder
self.reader = reader
def setUrl(self, url):
self.url = url
def addOperation(self, operation):
operation.attachTo(self.domBuilder.dom)
self.operations.append(operation)
return self
def _feed_parser(self):
"""
        This is the core method of the whole scraper application.
        It feeds the builder with the HTML data, which will build
        a DOM representation and run the analysis on the fly.
"""
self.reader(self.url, self.domBuilder)
def run(self):
if self.url == None:
raise Exception("Scraper Error - URL missing")
self._feed_parser()
def printResults(self, printer):
for operation in self.operations:
printer.printResults( operation )
``` |
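As a hedged sketch of how the reader abstraction could be reused, the snippet below swaps simpleUrlReader for a file-based reader; it relies only on the `reader(url, consumer)` contract shown above, and `page.html` is a placeholder path.
```python
import sys
sys.path.append('./src')

from scraper.scraper import Scraper
from scraper.dombuilder import DomBuilder
from scraper.dom import SimpleDOM
from scraper.operations import CountNumberOfElements
from scraper.resultprinters import ConsolePrinter

def fileReader(path, consumer):
    """Reader with the same (url, consumer) signature, but reading a local file."""
    with open(path) as handle:
        consumer.feed(handle.read())

scraper = Scraper(fileReader, DomBuilder(SimpleDOM()))
scraper.setUrl("page.html")   # the "url" is simply handed to the reader
scraper.addOperation(CountNumberOfElements())
scraper.run()
scraper.printResults(ConsolePrinter())
```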
{
"source": "jiosec/tdameritrade",
"score": 3
} |
#### File: tdameritrade/orders/order_builder.py
```python
from .constants import (ComplexOrderStrategyType, Duration, Instruction,
OrderStrategyType, OrderType, Session)
from .leg_builder import create_equity_order_leg, create_option_order_leg
from .models.orders import Order
def build_buy_market_stock_order(symbol, quantity):
"""Build Buy Market Stock Order
    Buy {quantity} shares of {symbol} at the Market good for the Day.
Simple sample from https://developer.tdameritrade.com/content/place-order-samples
Args:
symbol: symbol you want to trade
quantity: How much of the stock to trade
"""
# Constants
order_type = OrderType.MARKET
session = Session.NORMAL
duration = Duration.DAY
order_strategy_type = OrderStrategyType.SINGLE
# Can be changed to handle Buy and Sell for Equities
_instruction = Instruction.BUY
_order_leg = create_equity_order_leg(
instruction=_instruction, quantity=quantity, symbol=symbol,
)
order_leg_collection = [_order_leg]
return Order(
orderType=order_type,
session=session,
duration=duration,
orderStrategyType=order_strategy_type,
orderLegCollection=order_leg_collection,
)
def build_market_stock_order(symbol, quantity):
"""Build Buy Market Stock Order
Buy {quantity} shares of {smybol} at the Market good for the Day.
Simple sample from https://developer.tdameritrade.com/content/place-order-samples
Args:
symbol: symbol you want to trade
quantity: How much of the stock to trade
"""
# Constants
order_type = OrderType.MARKET
session = Session.NORMAL
duration = Duration.DAY
order_strategy_type = OrderStrategyType.SINGLE
# Can be changed to handle Buy and Sell for Equities
_instruction = Instruction.BUY
if quantity < 0:
_instruction = Instruction.SELL
quantity = - quantity
_order_leg = create_equity_order_leg(
instruction=_instruction, quantity=quantity, symbol=symbol,
)
order_leg_collection = [_order_leg]
return Order(
orderType=order_type,
session=session,
duration=duration,
orderStrategyType=order_strategy_type,
orderLegCollection=order_leg_collection,
)
def build_buy_limit_option_order(symbol, quantity, price):
"""Build Buy Limit: Single Option
    Buy to open {quantity} contracts of the {symbol with date and option info}
at a Limit of {price} good for the Day.
Args:
symbol: symbol you want to trade. Includes date e.g., XYZ_032015C49
quantity: Amount you want to buy
price: Amount you want to buy the option for
Note:
May want to add expiry date and type of option (CALL, PUT) and
create completed symbol
"""
# Constants
complex_order_strategy_type = ComplexOrderStrategyType.NONE
order_type = OrderType.LIMIT
session = Session.NORMAL
duration = Duration.DAY
order_strategy_type = OrderStrategyType.SINGLE
_instruction = Instruction.BUY_TO_OPEN
_order_leg = create_option_order_leg(
instruction=_instruction, quantity=quantity, symbol=symbol,
)
order_leg_collection = [_order_leg]
return Order(
complexOrderStrategyType=complex_order_strategy_type,
orderType=order_type,
session=session,
price=price,
duration=duration,
orderStrategyType=order_strategy_type,
orderLegCollection=order_leg_collection,
)
def build_buy_limit_vertical_call_spread_order(
buy_symbol, sell_symbol, quantity, price
):
"""Buy Limit: Vertical Call Spread
Buy to open 10 contracts of the XYZ Jan 15, 2016 $40 Call and
Sell to open 10 contracts of the XYZ Jan 15, 2016 $42.5 Call
for a Net Debit of $1.20 good for the Day.
Args:
buy_symbol: symbol you want to buy. Includes date e.g., XYZ_011516C40
sell_symbol: symbol you want to buy. Includes date e.g., XYZ_011516C42.5
quantity: Amount you want to buy.
price: Amount you want to buy the spread for
"""
# Constants
order_type = OrderType.NET_DEBIT
session = Session.NORMAL
duration = Duration.DAY
order_strategy_type = OrderStrategyType.SINGLE
_buy_order_leg = create_option_order_leg(
instruction=Instruction.BUY_TO_OPEN, quantity=quantity, symbol=buy_symbol,
)
_sell_order_leg = create_option_order_leg(
instruction=Instruction.SELL_TO_OPEN, quantity=quantity, symbol=sell_symbol,
)
order_leg_collection = [
_buy_order_leg,
_sell_order_leg,
]
return Order(
orderType=order_type,
session=session,
price=price,
duration=duration,
orderStrategyType=order_strategy_type,
orderLegCollection=order_leg_collection,
)
def build_custom_option_spread_order(
buy_symbol, sell_symbol, buy_quantity, sell_quantity
):
"""Custom Option Spread
Buy to open 2 contracts of the XYZ Jan 17, 2020 $43 Put and Sell to open
1 contracts of the XYZ Jan 18, 2019 $45 Put at the Market good for the Day.
"""
# Constants
order_strategy_type = OrderStrategyType.SINGLE
order_type = OrderType.MARKET
session = Session.NORMAL
duration = Duration.DAY
complex_order_strategy_type = ComplexOrderStrategyType.CUSTOM
_buy_order_leg = create_option_order_leg(
instruction=Instruction.BUY_TO_OPEN, quantity=buy_quantity, symbol=buy_symbol,
)
_sell_order_leg = create_option_order_leg(
instruction=Instruction.SELL_TO_OPEN,
quantity=sell_quantity,
symbol=sell_symbol,
)
order_leg_collection = [
_sell_order_leg,
_buy_order_leg,
]
return Order(
orderStrategyType=order_strategy_type,
orderType=order_type,
orderLegCollection=order_leg_collection,
complexOrderStrategyType=complex_order_strategy_type,
duration=duration,
session=session,
)
```
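A hypothetical usage sketch (not from the repository): build a simple market order and inspect its payload. Order is assumed to inherit the json() helper that BaseOrder exposes (see the test below), and the symbol and quantity are placeholders.
```python
from tdameritrade.orders.order_builder import build_buy_market_stock_order

order = build_buy_market_stock_order(symbol="AAPL", quantity=10)
print(order.json())   # e.g. {"orderType": "MARKET", "session": "NORMAL", ...}
```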
#### File: orders/models/test_base.py
```python
from tdameritrade.orders.models import base
def test_base_order_json():
base_order = base.BaseOrder()
base_order.some_field = 123
assert base_order.json() == "{}"
``` |
{
"source": "jiosue/PythonGames",
"score": 4
} |
#### File: src/game/board.py
```python
from words import random_words
class Board:
""" Deal with the words on the board """
def __init__(self):
""" initialize 25 random words and empty covers """
self._words, self._cover = random_words(), [""] * 25
self._words_dict = {
self.get_word(r, c): (r, c) for r in range(5) for c in range(5)
}
def _get_index(self, row, col):
if row not in range(5) or col not in range(5):
raise ValueError("Row or Column out of range")
return 5*row + col
def get_word(self, row, col):
return self._words[self._get_index(row, col)]
def word_location(self, word):
return self._words_dict.get(word, (-1, -1))
def get_cover(self, row, col):
return self._cover[self._get_index(row, col)]
def correct_guess(self, row, col):
self._cover[self._get_index(row, col)] = "g"
def p1_incorrect_guess(self, row, col):
self._cover[self._get_index(row, col)] = "1"
def p2_incorrect_guess(self, row, col):
self._cover[self._get_index(row, col)] = "2"
def black_guess(self, row, col):
self._cover[self._get_index(row, col)] = "b"
def get_words(self):
return self._words
def get_covers(self):
return self._cover
def __str__(self):
return "words: " + str(self._words) + "\ncovers: " + str(self._cover)
```
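A minimal interactive sketch (not from the repository) of the Board API: look up a word, record a correct guess, and read back the cover state. The import path assumes the package layout under src/.
```python
from game.board import Board   # assumed import path

board = Board()
word = board.get_word(0, 0)            # word in the top-left cell
row, col = board.word_location(word)   # -> (0, 0)
board.correct_guess(row, col)
print(board.get_cover(row, col))       # 'g'
```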
#### File: src/game/gameplay.py
```python
from .board import Board
from .grid import Grid, TOTAL_NUM_GREENS
from random import randint
_NUM_TURNS = 9
class Game:
def __init__(self, rand_turn=False):
self._board, self._grid = Board(), Grid()
self._greens_remaining = TOTAL_NUM_GREENS
self._turn = 1 # player two always gives the clue first
self._turns_remaining = _NUM_TURNS
self._black_guessed = False
if rand_turn: self._turn = randint(1, 2)
def guess(self, word):
if self._game_state() != "ongoing": return
r, c = self._board.word_location(word)
if r == -1 and c == -1: return
# see if it's already been picked
co = self._board.get_cover(r, c)
if co and co in "bg" + str(self._turn): return
if self._turn == 1: color = self._grid.get_p2_color(r, c)
else: color = self._grid.get_p1_color(r, c)
if color == "g":
self._board.correct_guess(r, c)
self._greens_remaining -= 1
elif color == "b":
self._board.black_guess(r, c)
self._black_guessed = True
self.turn_complete()
else:
if self._turn == 1: self._board.p1_incorrect_guess(r, c)
else: self._board.p2_incorrect_guess(r, c)
self.turn_complete()
def get_turn(self):
return self._turn
def turn_complete(self):
self._turns_remaining -= 1
old_turn = self._turn
self._turn = 1 if old_turn == 2 else 2
# for r in range(5):
# for c in range(5):
# cg = self._grid.get_color(r, c, old_turn)
# cb = self._board.get_cover(r, c)
# if cg == "g" and cb != "g": return
#
# self._turn = old_turn
def _game_state(self):
if self._greens_remaining == 0: return "victory"
elif self._black_guessed or not self._turns_remaining: return "defeat"
else: return "ongoing"
def get_update_info(self):
return {
"turns_remaining": self._turns_remaining,
"greens_remaining": self._greens_remaining,
"turn": self._turn,
"covers": self._board.get_covers(),
"game_state": self._game_state()
}
def get_game_info(self, player):
d = self.get_update_info()
d.update({
"words": self._board.get_words(),
"grid": self._grid.display_p1() if player == 1
else self._grid.display_p2(),
"player": player
})
return d
def new_game(self):
self.__init__(True)
```
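A hedged sketch (not from the repository) of one guess cycle with Game; the dictionary keys used below are exactly those returned by get_game_info and get_update_info, while the import path is assumed.
```python
from game.gameplay import Game   # assumed import path

game = Game(rand_turn=True)
info = game.get_game_info(player=1)
print(info["turn"], info["greens_remaining"])

# The guessing player picks the first word on the board.
game.guess(info["words"][0])
print(game.get_update_info())
```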
#### File: CodenamesDuet/src/player.py
```python
import socket
from threading import Thread
import tkinter as tk
from ui import Display
def recv(clientsocket, display):
while True:
msg = clientsocket.recv(8192).decode()
display.recv(msg)
def create_clientsocket():
hostname = input("Enter hostname/IP to connect to: ")
port = int(input("Enter port: "))
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((hostname, port))
return clientsocket
def main():
clientsocket = create_clientsocket()
root = tk.Tk()
display = Display(clientsocket, root)
display.pack()
thread_recv = Thread(target=recv, args=(clientsocket, display))
thread_recv.start()
root.mainloop()
if __name__ == "__main__":
main()
```
#### File: PythonGames/PenguinDrop/PenguinDrop.py
```python
### Adjust window ###
SCREEN = 1200, 700 #Canvas size in pixels.
### Adjust difficulty ###
MAX_PENGUINS = 10 #Max number of penguins before ice breaks.
### Adjust controls ###
GAME_CONTROLS = LEFT, RIGHT, SHOOT = "<Left>", "<Right>", "<space>"
RESTART = "<Return>"
### Adjust movement of objects ###
FALLING_SPEED = 1.2 #Speed that penguins fall.
G = 0.1 #Acceleration of cannonball down due to gravity.
START_VELOCITY = 23 #Magnitude of velocity of cannonball when first shot.
### Adjust relative sizes of objects ###
PERCENT_ICE = 0.15 #Ice height in terms of percent of screen.
PERCENT_PENGUIN = 0.05 #Penguin height in terms of percent of screen.
PERCENT_PARACHUTE = 0.07 #Parachute height in terms of percent of screen.
PERCENT_CANNONBALL = 0.007 #Cannonball radius in terms of percent of screen.
PERCENT_CANNON = 0.08 #Cannon height in terms of percent of screen.
## Penguin, parachute, and cannon images are scaled to keep their aspect ratios.
## Ice image is set at size = (SCREEN[0], PERCENT_ICE*SCREEN[1])
### Adjust frame rate ###
SPEED = 20 #Milliseconds between screen redraw.
###############################################################################
import os, math, random
from PIL import Image, ImageTk
if os.sys.version_info.major > 2:
xrange = range
import tkinter as tk
else:
import Tkinter as tk
#### Image filenames ####
FAVICON = "images/favicon.ico"
ICE = "images/ice.png"
PENGUIN = "images/penguin.png"
PARACHUTE = "images/parachute.png"
## There are multiple cannon images, each named with an
## integer that represents the angle that they are offset.
CANNON_DIRECTORY = "images/cannon/"
CANNON = CANNON_DIRECTORY + "%d.png" #%d is for the angle.
## i.e. a cannon image at 100 degrees has filename = CANNON % 100
#### Colors ####
SKY_COLOR = "#9fd7fb"
CANNONBALL_COLOR = "white" #Snowball!
#### Method for creating and sizing PIL.Images ####
def create_image(filename, width=0, height=0):
"""
Returns a PIL.Image object from filename - sized
according to width and height parameters.
filename: str.
width: int, desired image width.
height: int, desired image height.
1) If neither width nor height is given, image will be returned as is.
2) If both width and height are given, image will resized accordingly.
3) If only width or only height is given, image will be scaled so specified
parameter is satisfied while keeping image's original aspect ratio the same.
"""
#Create a PIL image from the file.
img = Image.open(filename, mode="r")
#Resize if necessary.
if not width and not height:
return img
elif width and height:
return img.resize((int(width), int(height)), Image.ANTIALIAS)
else: #Keep aspect ratio.
w, h = img.size
scale = width/float(w) if width else height/float(h)
return img.resize((int(w*scale), int(h*scale)), Image.ANTIALIAS)
#### Create cannon PIL images and sort them based on angle ####
def create_cannons():
"""
returns tuple of length 2:
first argument: list of sorted angles (in radians).
second argument: list of PIL images that map to their list of angles.
Each cannon image has an integer name that represents
the angle (in degrees) that it is offset.
"""
angles = []
for filename in os.listdir(CANNON_DIRECTORY):
try:
if os.path.isfile(CANNON_DIRECTORY+filename):
angles.append(int(filename.split(".")[0]))
except ValueError: pass
angles.sort()
images = []
for a in angles:
images.append(create_image(CANNON % a, height=PERCENT_CANNON*SCREEN[1]))
return [math.radians(a) for a in angles], images
#### On screen objects ####
class Parachute(object):
"""
Deals with the animation of a parachute on canvas.
canvas: tk.Canvas object to draw on.
initial_position: tuple, initial position of center of image (x, y).
"""
#Create image just once upon class creation. Use same image throughout.
image = create_image(PARACHUTE, height=PERCENT_PARACHUTE*SCREEN[1])
def __init__(self, canvas, initial_position):
self.x, self.y = initial_position
self.can = canvas
#Must be created AFTER main window has been opened.
photo = ImageTk.PhotoImage(image=Parachute.image)
#So the photo doesn't disappear!!
self.label = tk.Label(image=photo)
self.label.image = photo
#Draw parachute on canvas.
self.parachute = self.can.create_image((self.x, self.y), image=photo)
def destroy(self):
""" Destroy the parachute on canvas and the label remembering it. """
if self.parachute:
self.can.delete(self.parachute)
self.label.destroy()
self.parachute, self.label = False, False
def update(self):
""" Update the parachutes position and redraw it. """
self.y += FALLING_SPEED
self.can.coords(self.parachute, self.x, self.y)
class Penguin(object):
"""
Deals with the animation of the penguin on canvas.
canvas: tk.Canvas object to draw on.
"""
#Create image just once upon class creation. Use same image throughout.
image = create_image(PENGUIN, height=PERCENT_PENGUIN*SCREEN[1])
def __init__(self, canvas):
self.can = canvas
self.w, self.h = Penguin.image.size
#Must be created AFTER main window has been opened.
photo = ImageTk.PhotoImage(image=Penguin.image)
#So the photo doesn't disappear!!
self.label = tk.Label(image=photo)
self.label.image = photo
#Initial position: above screen at random x value.
self.x, self.y = random.randint(0, SCREEN[0]), -self.h
#Draw on canvas.
self.penguin = self.can.create_image((self.x, self.y), image=photo)
#Create the parachute animation that goes with the penguin.
self.parachute = Parachute(self.can, (self.x, self.y-self.h))
def destroy_parachute(self):
""" Destroy the parachute """
self.parachute.destroy()
self.parachute = False
def update(self):
""" Update the penguins position, redraw penguin and parachute. """
self.y += FALLING_SPEED
self.can.coords(self.penguin, self.x, self.y)
if self.parachute: self.parachute.update()
def destroy(self):
""" Destroy both the parachute and the penguin. """
self.can.delete(self.penguin)
self.label.destroy()
self.destroy_parachute()
def get_overlapping(self):
""" Return list of canvas objects that are overlapping the penguin. """
x0, y0 = self.x - self.w/2, self.y - self.h/2
x1, y1 = self.x + self.w/2, self.y + self.h/2
return self.can.find_overlapping(x0, y0, x1, y1)
def off_screen(self):
""" Returns True if penguin has fallen below the screen, else False. """
#Add a little cushion; make sure it's completely off screen.
return self.y - self.h - PERCENT_PARACHUTE*SCREEN[1] > SCREEN[1]
class Ice(object):
"""
Deals with the ice.
canvas: tk.Canvas object to draw on.
"""
#Create image just once upon class creation. Use same image throughout.
image = create_image(ICE, width=SCREEN[0], height=PERCENT_ICE*SCREEN[1])
def __init__(self, canvas):
self.can = canvas
#Must be created AFTER main window has been opened.
photo = ImageTk.PhotoImage(image=Ice.image)
#So the photo doesn't disappear!!
label = tk.Label(image=photo)
label.image = photo
height = Ice.image.size[1]
self.x, self.y = SCREEN[0]/2, SCREEN[1]-height/2
self.ice = self.can.create_image((self.x, self.y), image=photo)
#Keeps track of penguins standing on the ice.
self.penguins = []
def fall(self):
"""
When too many penguins are on the ice, the ice breaks
and all the penguins fall. If the ice and penguins have
fallen below screen already, do nothing.
"""
## Should be faster than
## if not all(p.off_screen() for p in self.penguins):
if any(not p.off_screen() for p in self.penguins):
self.y += FALLING_SPEED
self.can.coords(self.ice, self.x, self.y)
for p in self.penguins:
p.update()
def get_ice(self):
""" Returns tk.Canvas.create_image object. """
return self.ice
def add_penguin(self, penguin):
""" Add another penguin standing on the ice. """
self.penguins.append(penguin)
def get_num_penguins(self):
""" Returns number of penguins standing on the ice. """
return len(self.penguins)
class Cannonball(object):
"""
Deals with the animation of the cannonballs.
canvas: tk.Canvas object to draw on.
initial_position: tuple, (x, y).
initial_velocity: tuple, (vx, vy).
"""
def __init__(self, canvas, initial_position, initial_velocity):
self.can = canvas
self.x, self.y = initial_position
self.vx, self.vy = initial_velocity
self.w = PERCENT_CANNONBALL*min(SCREEN)
self.cannonball = self.can.create_oval(self.x-self.w, self.y-self.w,
self.x+self.w, self.y+self.w,
fill=CANNONBALL_COLOR)
def destroy(self):
""" Destroys the cannonball from the canvas. """
self.can.delete(self.cannonball)
def update(self):
""" Updates position on screen. """
self.x += self.vx
self.vy += G
self.y += self.vy
self.can.coords(self.cannonball, self.x-self.w, self.y-self.w,
self.x+self.w, self.y+self.w)
def off_screen(self):
""" Returns True if the cannonball is off the screen; else False. """
return (self.x - self.w > SCREEN[0] or self.x + self.w < 0 or
self.y - self.w > SCREEN[1] or self.y + self.w < 0)
def get_cannonball(self):
""" Returns a tk.Canvas.create_oval object. """
return self.cannonball
class Home(object):
"""
Deals with all the penguins and cannonballs.
Adds new penguins at increasing frequency.
canvas: tk.Canvas object to draw on.
ice: Ice object.
"""
def __init__(self, canvas, ice):
self.can, self.ice = canvas, ice
#Starting probability of new penguins and how much to increase it by.
self.prob, self.increment = 0.02, 0.005
#Starting count and at what count to increment the probability.
self.count, self.max_count = 0, 10000/SPEED
        self.score = 0 #Every time a cannonball hits a penguin, score + 1.
self.penguins = [Penguin(self.can)] #list of Penguin objects.
self.cannonballs = [] #list of Cannonball objects.
def update_cannonballs(self):
""" Update cannonball positions, destroy if off screen. """
i = 0
while i < len(self.cannonballs):
b = self.cannonballs[i]
b.update()
if b.off_screen(): #if it's off screen, destroy.
b.destroy()
self.cannonballs.pop(i)
else:
i += 1
def update_penguins(self):
"""
Update positions of penguins and check if hit by cannonball.
Destroys both penguin and cannonball when hit.
"""
i = 0
while i < len(self.penguins):
p = self.penguins[i]
p.update()
overlapping = p.get_overlapping()
#if it's on the ice, add it to self.ice.
if self.ice.get_ice() in overlapping:
p.destroy_parachute()
self.ice.add_penguin(p)
self.penguins.pop(i)
i -= 1
elif p.off_screen():
p.destroy()
self.penguins.pop(i)
i -= 1
else: #Check if any cannonballs are hitting the penguin.
for n in xrange(len(self.cannonballs)):
if self.cannonballs[n].get_cannonball() in overlapping:
self.score += 1
self.cannonballs[n].destroy()
self.cannonballs.pop(n)
p.destroy()
self.penguins.pop(i)
i -= 1
break #Only one cannonball is destroyed per penguin.
i += 1
def update(self):
""" Update cannonballs, penguins, game. """
self.update_cannonballs()
self.update_penguins()
## Update game difficulty and/or add penguins.
self.count += 1
if random.random() <= self.prob:
self.penguins.append(Penguin(self.can))
if self.count >= self.max_count:
if self.prob + self.increment < 1:
self.prob += self.increment
else:
self.prob = 1
self.count = 0
def add_cannonball(self, cannonball):
""" cannonball: Cannonball object. """
self.cannonballs.append(cannonball)
def get_score(self):
""" Score: int, how many penguins have been killed. """
return self.score
class Cannon(object):
"""
    Deals with the cannon animation on the screen.
    When fired, adds cannonball to Home class.
    canvas: tk.Canvas object.
home: Home object.
"""
#Create images just once upon class creation. Use same images throughout.
#angles (in radians) is sorted and maps directly to its image in images.
angles, images = create_cannons()
def __init__(self, canvas, home):
self.can, self.home = canvas, home
self.position = (SCREEN[0]/2, #x = middle of screen.
SCREEN[1]-(SCREEN[1]*PERCENT_ICE)) #y = ontop of ice.
#Cannon.angles and self.photos indices map accordingly.
self.photos = [ImageTk.PhotoImage(image=img) for img in Cannon.images]
### For example: Cannon.angles[i] == angle of self.photos[i] ###
        #self.index tracks which photo and angle are current.
self.index = len(Cannon.angles)//2 #start in the middle.
self.current_image = False
self.draw()
def destroy(self):
""" Deletes any cannon image from the canvas. """
if self.current_image:
self.can.delete(self.current_image)
def draw(self):
self.destroy()
self.current_image = self.can.create_image(
self.position, image=self.photos[self.index]
)
def rotate_ccw(self):
""" Changes current cannon to next one ccw """
if self.index + 1 < len(self.angles):
self.index += 1
self.draw()
def rotate_cw(self):
""" Changes current cannon to next one ccw """
if self.index > 0:
self.index -= 1
self.draw()
def shoot_cannonball(self):
"""
Computes the initial position and initial velocity of the cannonball
based on the current cannon position and angle. With that, creates a
Cannonball object and adds it to self.home to be integrated into game.
"""
angle = Cannon.angles[self.index]
vx = START_VELOCITY * math.cos(angle)
vy = -START_VELOCITY * math.sin(angle)
#Get approximate length of barrel.
l = SCREEN[1]*PERCENT_CANNON/2.0
x = self.position[0] + (math.cos(angle)*l)
y = self.position[1] - (math.sin(angle)*l/2.0)
self.home.add_cannonball(Cannonball(self.can, (x, y), (vx, vy)))
class Game(object):
"""
Creates a tk.Frame and a tk.Canvas to draw on.
Initializes Ice, Home, and Cannon objects.
Binds game keys.
Updates the game every SPEED milliseconds.
master: tk.Tk window.
"""
def __init__(self, master):
self.frame = tk.Frame(master)
self.frame.pack()
self.can = tk.Canvas(self.frame, width=SCREEN[0],
height=SCREEN[1], bg=SKY_COLOR)
self.can.pack()
self.ice = Ice(self.can)
self.home = Home(self.can, self.ice)
self.cannon = Cannon(self.can, self.home)
#Binding game keys to rotate and fire cannon.
master.bind(SHOOT, lambda event: self.cannon.shoot_cannonball())
master.bind(LEFT, lambda event: self.cannon.rotate_ccw())
master.bind(RIGHT, lambda event: self.cannon.rotate_cw())
self.master, self.playing = master, False
def restart(self):
""" Destroys the current frame, makes a new one, begins game. """
self.frame.destroy()
self.__init__(self.master)
self.start()
def update(self):
"""
Update self.home which updates all the penguins and cannonballs.
Check if game is lost - if so, destroy the cannon image and unbind
cannon operators (GAME_CONTROLS). Start making the ice fall.
"""
self.home.update()
self.master.title("Penguin Drop - %d" % self.home.get_score())
if self.ice.get_num_penguins() > MAX_PENGUINS: #If game is lost.
self.ice.fall()
#If it's just been lost, destroy the cannon and take away controls.
if self.playing:
self.playing = False
self.cannon.destroy()
for key in GAME_CONTROLS:
self.master.unbind(key)
self.frame.after(SPEED, self.update)
def start(self):
""" Begin Game """
self.playing = True
self.update()
def main():
""" Open a window - Play Game """
root = tk.Tk()
root.wm_iconbitmap(FAVICON)
root.resizable(0, 0)
game = Game(root)
#Bind RESTART key to restart game at any time.
root.bind(RESTART, lambda event: game.restart())
#Place window in center of screen.
#root.eval('tk::PlaceWindow %s center'%root.winfo_pathname(root.winfo_id()))
game.start()
root.mainloop()
if __name__ == "__main__":
main()
```
#### File: PythonGames/Roomba/Rumba.py
```python
from collections import deque
import random
import os
if os.sys.version_info.major > 2:
xrange = range
import tkinter as tk
else:
import Tkinter as tk
#### METHODS ####
def scale_vector(vector, velocity):
"""
Create unit vector. Multiply each component of unit vector
by the magnitude of the desired vector (velocity).
"""
try:
x = float(vector[0])/((vector[0]**2+vector[1]**2)**.5)
y = float(vector[1])/((vector[0]**2+vector[1]**2)**.5)
return int(x*velocity), int(y*velocity)
except ZeroDivisionError:
return None, None
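# Illustrative example of the scaling: scale_vector((3, 4), 10) builds the unit
# vector (0.6, 0.8) and scales it to the requested magnitude, returning (6, 8);
# a zero vector such as (0, 0) hits the ZeroDivisionError branch and returns (None, None).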
def get_random_velocity(velocity):
"""
Create random direction vector.
Scale direction vector with scale_vector method.
"""
vx, vy = None, None
while vx == None and vy == None:
vector = (random.random()*random.choice([-1, 1]),
random.random()*random.choice([-1, 1]))
vx, vy = scale_vector(vector, velocity)
return vx, vy
def make_grid(furniture, dimension):
"""
Scale actual (x, y) positions down to a grid (dictionary)
with keys (Nx*1, Ny*1) where Nx and Ny range from 1 to dimension[0]
and 1 to dimension[1] respectively.
The keys are mapped to a boolean indicating whether that tile
is occupied with furniture (True) or not (False).
    furniture: list with pixel locations. Each element ~ (x, y, x+dx, y+dy).
dimension: tuple, x by y dimensions (x, y).
returns: grid = {(1, 1): False, (2, 1): True, ...}
"""
#dx, dy are width and height of tiles.
dx = furniture[0][2] - furniture[0][0]
dy = furniture[0][3] - furniture[0][1]
w, h = dx*dimension[0], dy*dimension[1]
grid = {}
for y in xrange(1, dimension[1]+1):
for x in xrange(1, dimension[0]+1):
grid[(x, y)] = False
y_grid = 0
for y in xrange(dy//2, h, dy):
y_grid += 1
x_grid = 0
for x in xrange(dx//2, w, dx):
x_grid += 1
for element in furniture:
if x >= element[0] and x <= element[2] \
and y >= element[1] and y <= element[3]:
grid[(x_grid, y_grid)] = True
break
return grid
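# Worked example (illustrative): with furniture = [(0, 0, 10, 10)] and
# dimension = (2, 2) the tiles are 10x10, so make_grid returns
# {(1, 1): True, (2, 1): False, (1, 2): False, (2, 2): False} --
# only the top-left tile is occupied.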
def get_neighbors(position):
"""
Generator. Yields positions to the left, to the right,
above, and below the current position.
"""
deltas = [(1, 0), (-1, 0), (0, 1), (0, -1)]
for d in deltas:
yield position[0]+d[0], position[1]+d[1]
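# For example, list(get_neighbors((2, 3))) yields [(3, 3), (1, 3), (2, 4), (2, 2)].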
#def find_accessable_tiles_RECURSIVE(grid, position, l=set()):
# """
# Finds all non-furniture locations that are accessable
# when starting at position 'position'.
# *** Mutates l ***
# Assumes position is not at a point such that grid[position] == True.
# In other words, the initial positions is valid and is not occupied.
# grid: dict mapping a Grid to booleans (tiles with/without furniture).
# i.e. grid = {(1, 1): False, (2, 1): True, ...}
# position: tuple (x, y)
# l: list
# """
# l.add(position)
# for n in get_neighbors(position):
# if n in grid and n not in l and not grid[n]:
# find_accessable_tiles(grid, n, l)
# return l
def find_accessable_tiles(grid, position):
"""
    Finds all tiles that are accessible from the starting position.
    Returns a set of all accessible tiles.
"""
accessable = set()
accessable.add(position)
tile_queue = deque() #imported from collections
tile_queue.append(position)
while tile_queue:
current = tile_queue.popleft()
for n in get_neighbors(current):
if n in grid and n not in accessable and not grid[n]:
accessable.add(n)
tile_queue.append(n)
return accessable
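# Note: this is a breadth-first flood fill over the tile grid, so each reachable
# tile is visited exactly once; on an empty 2x2 grid starting from (1, 1) it
# returns {(1, 1), (2, 1), (1, 2), (2, 2)}.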
def is_furniture_valid(furniture, dimension):
"""
Checks to see if all non-furniture tiles can be accessed
when starting initially at position (1, 1).
furniture: list of (x, y, x+dx, y+dy).
dimension: tuple, x by y dimensions (x, y).
"""
if not furniture: #Rooms with no furniture are valid.
return True
grid = make_grid(furniture, dimension)
#Start position is (1, 1).
accessable_tiles = find_accessable_tiles(grid, (1, 1))
#Compare accessable tiles to all non-furniture tiles.
for element in grid:
#if a tile doesn't have furniture AND is not accessible - not valid.
if not grid[element] and element not in accessable_tiles:
return False
return True
#### OBJECT DEFINITIONS ####
class Rumba(object):
"""
Dealing with the actual Rumba robot on the screen - red square.
canvas: tk.Canvas object.
position: tuple (x, y).
width: int width of square.
"""
def __init__(self, canvas, position, width):
self.can, self.width = canvas, width
self.Draw(position)
def Draw(self, position):
x, y = position
x1, y1 = x + self.width, y + self.width
x2, y2 = x + self.width, y - self.width
x3, y3 = x - self.width, y - self.width
x4, y4 = x - self.width, y + self.width
self.vacuum = self.can.create_polygon(x1, y1, x2, y2, x3, y3, x4, y4, fill="red")
self.line1 = self.can.create_line(x1, y1, x2, y2, fill="black")
self.line2 = self.can.create_line(x2, y2, x3, y3, fill="black")
self.line3 = self.can.create_line(x3, y3, x4, y4, fill="black")
self.line4 = self.can.create_line(x1, y1, x4, y4, fill="black")
def update_position(self, new_position):
x, y = new_position
x1, y1 = x + self.width, y + self.width
x2, y2 = x + self.width, y - self.width
x3, y3 = x - self.width, y - self.width
x4, y4 = x - self.width, y + self.width
self.can.coords(self.vacuum, x1, y1, x2, y2, x3, y3, x4, y4)
self.can.coords(self.line1, x1, y1, x2, y2)
self.can.coords(self.line2, x2, y2, x3, y3)
self.can.coords(self.line3, x3, y3, x4, y4)
self.can.coords(self.line4, x1, y1, x4, y4)
class Grid(object):
"""
The grid that the vacuum will clean.
canvas: tk.Canvas object.
dimension: tuple of number of tiles (x, y).
screen: tuple of size of canvas (w, h).
furniture: boolean - if room will have furniture.
"""
def __init__(self, canvas, dimension, screen, furniture=True):
self.can, self.dimension = canvas, dimension
self.w, self.h = screen
self.create_tiles(furniture)
def create_tiles(self, furniture):
"""
Finds a valid configuration of furniture and tiles.
Then, calls self.draw_tiles to draw configuration.
"""
#dx, dy are width and height of tiles.
dx, dy = self.w//self.dimension[0], self.h//self.dimension[1]
        #adjust screen size for discrepancies in forcing int division.
self.w, self.h = self.dimension[0]*dx, self.dimension[1]*dy
self.can.config(width=self.w, height=self.h)
valid = False
while not valid:
tiles, furniture_tiles = [], []
for y in xrange(0, self.h, dy):
for x in xrange(0, self.w, dx):
#(0, 0) is always a non-furniture tile.
if not furniture or random.random() <= 0.8 or (x, y) == (0, 0):
tiles.append((x, y, x+dx, y+dy))
else:
furniture_tiles.append((x, y, x+dx, y+dy))
valid = is_furniture_valid(furniture_tiles, self.dimension)
self.draw_tiles(tiles, furniture_tiles)
def draw_tiles(self, tiles, furniture_tiles):
"""
Draws a configuration of furniture and tiles.
tiles: list of position tuples, (x, y, x+dx, y+dy).
furniture_tiles: same as tiles but only for furniture.
"""
self.furniture = furniture_tiles
for element in self.furniture:
x, y = element[0], element[1]
dx, dy = element[2] - x, element[3] - y
self.can.create_rectangle(x, y, x+dx, y+dy, fill="green")
self.tiles = {}
for element in tiles:
x, y = element[0], element[1]
dx, dy = element[2] - x, element[3] - y
self.tiles[element] = [4,
self.can.create_rectangle(x, y, x+dx, y+dy, fill="black")]
def get_tile(self, position):
x, y = position
for element in self.tiles:
if (x >= element[0] and x <= element[2]
and y >= element[1] and y <= element[3]):
return element
def clean_tile(self, position):
"""
        Takes 4 passes to clean a tile.
        Usually, the vacuum will clean 2 at a time though.
*** On some screens, 'dark grey' is lighter than 'grey'. ***
"""
tile = self.get_tile(position)
self.tiles[tile][0] -= 1
if self.tiles[tile][0] == 0:
self.can.itemconfig(self.tiles[tile][1], fill="white")
elif self.tiles[tile][0] == 1:
self.can.itemconfig(self.tiles[tile][1], fill="light grey")
elif self.tiles[tile][0] == 2:
self.can.itemconfig(self.tiles[tile][1], fill="grey")
elif self.tiles[tile][0] == 3:
self.can.itemconfig(self.tiles[tile][1], fill="dark grey")
def is_grid_cleaned(self):
for element in self.tiles.values():
if element[0] > 0:
return False
return True
def get_dimension(self):
return self.dimension
def get_grid_size(self):
return (self.w, self.h)
def get_furniture(self):
return self.furniture
class Robot(object):
"""
Completes the numerical simulation.
grid: a Grid object.
canvas: a tk.Canvas object.
v: int speed of robot.
"""
def __init__(self, grid, canvas, v):
self.grid = grid
self.w, self.h = self.grid.get_grid_size()
self.furniture = self.grid.get_furniture()
self.v = v
self.set_random_velocity()
average_size = sum(self.grid.get_grid_size())/2
average_dimension = sum(self.grid.get_dimension())/2
self.robot_width = int((average_size/average_dimension)*0.3)
#initial position
self.x, self.y = self.robot_width, self.robot_width
self.rumba = Rumba(canvas, (self.x, self.y), self.robot_width)
def is_valid_position(self, position):
x, y = position
if x + self.robot_width >= self.w or x - self.robot_width <= 0:
return False
elif y + self.robot_width >= self.h or y - self.robot_width <= 0:
return False
for element in self.furniture:
#element is of the form (x, y, x+dx, y+dy)
if x >= element[0] and x <= element[2]:
if y >= element[1] and y <= element[3]:
return False
elif y + self.robot_width >= element[1] and y + self.robot_width <= element[3]:
return False
elif y - self.robot_width >= element[1] and y - self.robot_width <= element[3]:
return False
elif x + self.robot_width >= element[0] and x + self.robot_width <= element[2]:
if y >= element[1] and y <= element[3]:
return False
elif y + self.robot_width >= element[1] and y + self.robot_width <= element[3]:
return False
elif y - self.robot_width >= element[1] and y - self.robot_width <= element[3]:
return False
elif x - self.robot_width >= element[0] and x - self.robot_width <= element[2]:
if y >= element[1] and y <= element[3]:
return False
elif y + self.robot_width >= element[1] and y + self.robot_width <= element[3]:
return False
elif y - self.robot_width >= element[1] and y - self.robot_width <= element[3]:
return False
return True
def set_random_velocity(self):
self.vx, self.vy = get_random_velocity(self.v)
def update(self):
"""
Checks to see if current direction is valid.
If it is, continues, if not, picks new,
random directions until it finds a valid direction.
"""
x, y = self.x+self.vx, self.y+self.vy
while (x, y) == (self.x, self.y) or not self.is_valid_position((x, y)):
self.set_random_velocity()
x, y = self.x+self.vx, self.y+self.vy
self.x, self.y = x, y
self.rumba.update_position((self.x, self.y))
self.grid.clean_tile((self.x, self.y))
#### OBJECTS MANAGER ####
class Home(object):
"""
Manages Simulation.
master: tk.Tk object.
screen: tuple (width, height).
dimension: tuple, dimension of the grid.
"""
def __init__(self, master, screen, dimension):
frame = tk.Frame(master)
frame.pack()
v = sum(screen)//(2*sum(dimension))
canvas = tk.Canvas(frame, width=screen[0], height=screen[1])
canvas.pack()
grid = Grid(canvas, dimension, screen)
robot = Robot(grid, canvas, v)
master.title("Roomba Robot - Steps: 0")
master.bind('<Return>', self.restart)
master.bind('<Up>', self.fast)
master.bind('<Down>', self.slow)
#initialize class variables.
self.master, self.frame = master, frame
self.screen, self.dimension = screen, dimension
self.robot, self.grid = robot, grid
#self.speed adjusts frame rate. Can be manipulated with arrow keys.
#self.count keeps track of steps.
self.speed, self.count = 100, 0
self.update()
def restart(self, callback=False):
""" Enter/Return Key """
self.frame.destroy()
self.__init__(self.master, self.screen, self.dimension)
def fast(self, callback=False):
""" Up arrow key """
if self.speed > 5:
self.speed -= 5
else:
self.speed = 1
def slow(self, callback=False):
""" Down arrow key """
self.speed += 5
def update(self):
self.robot.update()
self.count += 1
self.master.title("Rumba Robot - Steps: %d" % self.count)
if not self.grid.is_grid_cleaned():
self.frame.after(self.speed, self.update)
else:
self.frame.bell()
#### SIMULATION ####
def simulate(screen, dimension):
"""
    screen: tuple, screen size in pixels: (width, height).
dimension: tuple, dimension of grid: (x, y).
"""
root = tk.Tk()
root.resizable(0, 0)
try: root.wm_iconbitmap("ploticon.ico")
except: pass
Home(root, screen, dimension)
#Center window on screen.
#root.eval('tk::PlaceWindow %s center' % root.winfo_pathname(root.winfo_id()))
root.mainloop()
if __name__ == "__main__":
"""
Tip: Up/Down arrow keys will speed/slow the simulation.
Enter/Return will restart with the same screen and dimension attributes.
*** Large dimensions may take a few minutes to generate ***
"""
screen = 1000, 700
dimension = 30, 20
simulate(screen, dimension)
``` |
{
"source": "jiosue/QAOAPython",
"score": 3
} |
#### File: QAOAPython/qc/cirq_helper.py
```python
import cirq
from numpy import pi, eye
# define pi so that in string gates we can have pi as an angle.
# Because we use eval for string gates. For example, gate = "rz(pi/2, 1)".
#TODO: add all.
PARAMETER_FREE_GATES = {"h", "cnot", "x", "y", "z", "cz", "ccx", "ccz"}
def get_gate_info(gate):
"""
gate: str, string gate. ie H(0), or "cx(1, 0)".
returns: tuple, (gate_name (str), gate_args (tuple)).
"""
g = gate.strip().lower()
i = g.index("(")
gate_name, gate_args = g[:i], eval(g[i:])
try: len(gate_args)
except TypeError: gate_args = gate_args,
if gate_name == "cx": gate_name = "cnot"
return gate_name, gate_args
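# Examples (illustrative):
# get_gate_info("CX(0, 1)") -> ("cnot", (0, 1))
# get_gate_info("rz(pi/2, 1)") -> ("rz", (1.5707963267948966, 1))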
def get_num_qubits(algorithm):
"""
Determine the max qubit value used in the algorithm.
algorithm: iterable, each element must be a string gate, as in
apply_string_gate above.
ie, algorithm = ["h(0)", "cx(0, 1)", "rx(pi/4, 1)",..]
returns: int, max qubit value in algorithm.
"""
#TODO: support more of the cirq gate set
m = 0
for gate in algorithm:
gate_name, gate_args = get_gate_info(gate)
if gate_name in PARAMETER_FREE_GATES:
m = max((m,) + gate_args)
elif gate_name in ("rx", "ry", "rz"):
_, qubit = gate_args
m = max((m, qubit))
elif gate_name == "measure":
qubit = gate_args[0]
m = max((m, qubit))
else:
raise NotImplementedError("%s gate not supported" % gate_name)
return m + 1
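# For example, get_num_qubits(["h(0)", "cx(0, 2)"]) returns 3, since qubit
# index 2 is the highest one referenced by the algorithm.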
def make_gate(gate, qubits):
"""
Convert str gate to cirq gate.
gate: str, gate of the form "cx(0, 1)" for example.
qubits: list of ints, qubits that algorithm is running on.
returns: cirq.Gate object applied on the correct qubits to be appended
to a cirq.Circuit object.
"""
#TODO: support more of the cirq gate set
gate_name, gate_args = get_gate_info(gate)
if gate_name in PARAMETER_FREE_GATES:
args = "(%s)" % ", ".join("qubits[%d]" for _ in range(len(gate_args)))
return eval("cirq." + gate_name.upper() + args % gate_args)
elif gate_name in ("rx", "ry", "rz"):
angle, qubit = gate_args
r = eval(
"cirq.%sPowGate(exponent=%g)" % (gate_name[1].upper(), angle/pi)
)
return r(qubits[qubit])
elif gate_name == "measure":
return cirq.measure(qubits[gate_args[0]], key=str(gate_args[1]))
else:
raise NotImplementedError("%s gate not supported" % gate_name)
def make_circuit(algorithm, num_qubits):
"""
Make cirq.Circuit object from algorithm. If measure is not in the algorithm
then by default all qubits will be measured.
algorithm: list of strs, gates to apply.
num_qubits: int, number of qubits to run the algorithm on.
returns: cirq.Circuit object.
"""
qs = [cirq.GridQubit(i, 0) for i in range(num_qubits)]
circuit = cirq.Circuit()
measure = False
for gate in algorithm:
circuit.append(make_gate(gate, qs),
strategy=cirq.InsertStrategy.EARLIEST)
if "measure" in gate.lower(): measure = True
if not measure:
for i in range(len(qs)):
circuit.append(make_gate("measure(%d, %d)" % (i, i), qs),
strategy=cirq.InsertStrategy.EARLIEST)
return circuit
class Result(dict):
""" Just a dictionary that automatically gives default values = 0.0 """
def __getitem__(self, key):
""" Return 0.0 if key not in result dictionary """
return self.get(key, 0.0)
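# Example (illustrative): Result({"00": 0.5, "11": 0.5})["01"] returns 0.0
# instead of raising a KeyError, which simplifies averaging over states.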
def cirq_output_to_Result(cirq_output):
"""
Take the output of cirq.simulator.run and convert it to Result dictionary.
For example, if the cirq_output is
res.measurements = {"0": [True, False], "1": [True, True]}
This means that the 0th qubit was 1 then 0, and the 1st qubit was 1 then 1.
So this function should return
Result({"11": 0.5, "01": 0.5})
"""
qubits = sorted(int(x) for x in cirq_output.measurements.keys())
counts = {}
for i in range(cirq_output.repetitions):
state = ""
for j in qubits:
state += "1" if cirq_output.measurements[str(j)][i] else "0"
if state in counts: counts[state] += 1
else: counts[state] = 1
for state in counts: counts[state] /= cirq_output.repetitions
return Result(counts)
def run(algorithm, num_qubits=None, num_samples=8000):
"""
Create a quantum circuit, run the algorithm, return the resulting
probability distribution.
algorithm: algorithm (list of strings) or list of algorithms,
each string is a gate, ie "cx(0, 1)" or rz(pi/2, 0)
num_qubits: int, number of qubits to run each algorithm on. Can be None,
in which case the algorithm will be run on the minimum
number of qubits required.
    num_samples: int, number of samples to take from the quantum computer
in order to determine the probabilities for each state.
returns: dict (common.Result), keys are states, values are probabilities
found to be in that state.
"""
multiple = bool(algorithm and isinstance(algorithm[0], list))
if not multiple: algorithm = [algorithm]
if num_qubits is None:
num_qubits = max(get_num_qubits(a) for a in algorithm)
circuits = [make_circuit(a, num_qubits) for a in algorithm]
sim = cirq.Simulator()
results = [sim.run(c, repetitions=num_samples) for c in circuits]
if multiple: return [cirq_output_to_Result(r) for r in results]
else: return cirq_output_to_Result(results[0])
def algorithm_unitary(algorithm, num_qubits=None):
"""
Find the unitary corresponding to the algorithm.
algorithm: list of strings, each string is a gate in GATE_ARGUMENTS.keys()
with whatever arguments required to define the
gate.
num_qubits: int, number of qubits to run the algorithm on.
returns: np.array, unitary matrix corresponding to the algorithm.
"""
if num_qubits is None: num_qubits = get_num_qubits(algorithm)
if not algorithm: return eye(2**num_qubits)
circuit = make_circuit(algorithm, num_qubits)
return circuit.to_unitary_matrix()
if __name__ == "__main__":
## Examples
# `run` returns a dictionary mapping states to probabilities, ie
# run(["h(0)", "cx(0, 1)"]) should return {"00":0.5, "11": 0.5}.
# if no "measure" is included in alg, then by default everything will
# be measured.
alg = ["H(0)", "CX(0, 1)"]
print(run(alg, 3, num_samples=10000))
# since a measure is included, only that register will be measured.
alg = ["H(0)", "CX(0, 1)", "measure(0, 0)"]
# print(run(alg, 3, num_samples=1000, backend="ibmqx4"))
# run multiple circuits at once
alg0 = ["h(0)", "cx(0, 1)", "measure(1, 0)"]
alg1 = ["x(0)", "H(1)", "ccx(0, 1, 2)", "rz(pi/2, 2)"]
print(run([alg0, alg1]))
    # convert alg to its unitary representation.
alg = ["h(0)", "cx(0, 1)", "rx(0, 1)", "rz(pi, 0)"]
print(algorithm_unitary(alg, 2))
```
#### File: QAOAPython/qc/qiskit_helper.py
```python
import qiskit, time
from numpy import pi
# define pi so that in string gates we can have pi as an angle.
# Because we use eval for string gates. For example, gate = "rz(pi/2, 1)".
name = "IBM"
simulators = simulator, unitary_simulator, state_simulator = (
"qasm_simulator", "unitary_simulator",
"statevector_simulator"
)
quantum_computer = "ibmqx4"
def apply_credentials():
print("\nApplying credentials...\n")
# with open("qSonify/qc/APItoken.txt") as f: APItoken = f.read().strip()
try:
# qiskit.IBMQ.enable_account(APItoken)
qiskit.IBMQ.load_accounts()
print('Available backends:')
print(qiskit.IBMQ.backends())
print(qiskit.Aer.backends())
print("\nCredientials applied\n")
except:
print('Something went wrong.\nDid you enter a correct token?')
#### String algorithm methods ####
# With this, we can write an algorithm as a list with any of the keys in
# GATE_ARGUMENTS. So, for example,
# alg = ["H(0)", "RX(pi/2, 1)", "CX(1, 2)", "u3(pi/2, pi/4, .2, 0)"]
# then apply it to a qiskit.QuantumCircuit and qiskit.QuantumRegister qc and r
# respectively by calling
# apply_string_algorithm(alg, r, qc).
p = lambda x: ("reg[%d]",)*x
a = lambda x: ("%g",)*x
b = lambda x, y: "(" + ", ".join(a(x)+p(y)) + ")"
GATE_PARAMS = { ## The first number is the number of parameters,
## The second number is the number of qubit arguments.
"ccx": (0, 3), "ch": (0, 2), "crz": (1, 2), "cswap": (0, 3), "cu1": (1, 2),
"cu3": (3, 2), "cx": (0, 2), "cx_base": (0, 2), "cy": (0, 2), "cz": (0, 2),
"h": (0, 1), "iden": (0, 1), "rx": (1, 1), "ry": (1, 1), "rz": (1, 1),
"rzz": (1, 2), "s": (0, 1), "sdg": (0, 1), "swap": (0, 2), "t": (0, 1),
"tdg": (0, 1), "u0": (1, 1), "u1": (1, 1), "u2": (2, 1), "u3": (3, 1),
"u_base": (3, 1), "x": (0, 1), "y": (0, 1), "z": (0, 1),
}
GATE_ARGUMENTS = {gate: b(*args) for gate, args in GATE_PARAMS.items()}
GATE_ARGUMENTS["measure"] = "(reg[%d], c_reg[%d])"
def get_gate_info(gate):
"""
gate: str, string gate. ie H(0), or "cx(1, 0)".
returns: tuple, (gate_name (str), gate_args (tuple)).
"""
gate = gate.strip().lower().replace("cnot", "cx")
i = gate.index("(")
gate_name, gate_args = gate[:i], eval(gate[i:])
try: len(gate_args)
except TypeError: gate_args = gate_args,
return gate_name, gate_args
def get_num_qubits(algorithm):
"""
Determine the max qubit value used in the algorithm.
algorithm: iterable, each element must be a string gate, as in
apply_string_gate above.
ie, algorithm = ["h(0)", "cx(0, 1)", "rx(pi/4, 1)",..]
returns: int, max qubit value in algorithm.
"""
n = -1
for gate in algorithm:
gate_name, gate_args = get_gate_info(gate)
if gate_name == "measure": m = gate_args[0]
# elif sum(GATE_PARAMS[gate_name]) == 1: m = gate_args
else: m = max(gate_args[GATE_PARAMS[gate_name][0]:])
n = max(n, m)
return n + 1
def apply_string_gate(gate, reg, cir, c_reg=None):
"""
gate: str, one of the elements in GATE_ARGUMENTS.keys() + a tuple of
arguments. ie, for a rx rotation by pi/2 radians on qubit 0,
gate = "rx(pi/2, 0)".
reg: qiskit.QuantumRegister, register to apply gate to.
cir: qiskit.QuantumCircuit, circuit to add gate to.
c_reg: qiskit.ClassicalRegister, must be supplied if gate is a measurement.
Classical register to measure to.
returns: int, if gate is a measure gate, then return the integer
corresponding to the classical register to measure to,
otherwise returns -1.
"""
gate_name, gate_args = get_gate_info(gate)
# apply gate
eval("cir." + gate_name + GATE_ARGUMENTS[gate_name] % gate_args)
# value of the classical register to measure to
if "measure" in gate: return gate_args[-1]
else: return -1
def apply_string_algorithm(algorithm, reg, cir, c_reg=None):
"""
algorithm: iterable, each element must be a string gate, as in
apply_string_gate above.
ie, algorithm = ["h(0)", "cx(0, 1)", "rx(pi/4, 1)",..]
reg: qiskit.QuantumRegister, register to apply algorithm to.
cir: qiskit.QuantumCircuit, circuit to add gates in algorithm to.
c_reg: qiskit.ClassicalRegister, must be supplied if gate is a measurement.
Classical register to measure to.
returns: int, if the algorithm has any measure gates, then returns the
integer corresponding to the largest index of the classical
register that is measured to, otherwise returns -1.
"""
if not algorithm: return -1
return max(apply_string_gate(gate, reg, cir, c_reg) for gate in algorithm)
def _make_job(qc, backend, num_samples):
"""
Begin the execution of the circuit qc on the backend with shots=num_samples
qc: qiskit.QuantumCircuit or list of circuits, circuits to run.
backend: str, IBM backend to run circuit on. Can be 'ibmqx4', 'ibmqx5',
'local_qasm_simulator', 'local_unitary_simulator', etc.
    num_samples: int, number of samples to take from the quantum computer
in order to determine the probabilities for each state.
returns: qiskit Job object from qiskit.backends.
"""
if backend in simulators: f = qiskit.Aer
else: f = qiskit.IBMQ
try:
return qiskit.execute(qc, backend=f.get_backend(backend),
shots=num_samples, max_credits=3)
except LookupError:
apply_credentials()
return qiskit.execute(qc, backend=f.get_backend(backend),
shots=num_samples, max_credits=3)
class Result(dict):
""" Just a dictionary that automatically gives default values = 0.0 """
def __getitem__(self, key):
""" Return 0.0 if key not in result dictionary """
return self.get(key, 0.0)
def run(algorithm, num_qubits=None, num_samples=8000, backend=simulator):
"""
Create a quantum circuit, run the algorithm, return the resulting
probability distribution.
algorithm: algorithm (list of strings) or list of algorithms,
each string is a gate in GATE_ARGUMENTS.keys() with whatever
arguments required to define the gate.
num_qubits: int, number of qubits to run each algorithm on. Can be None,
in which case the algorithm will be run on the minimum
number of qubits required.
    num_samples: int, number of samples to take from the quantum computer
in order to determine the probabilities for each state.
backend: str, IBM backend to run the algorithm on. If backend is not
a local simulator then credentials must have already
been applied.
returns: dict (common.Result), keys are states, values are probabilities
found to be in that state.
"""
multiple = bool(algorithm and isinstance(algorithm[0], list))
if not multiple: algorithm = [algorithm]
n = len(algorithm)
if num_qubits is None:
num_qubits = max(get_num_qubits(a) for a in algorithm)
q = qiskit.QuantumRegister(num_qubits)
c = [qiskit.ClassicalRegister(num_qubits) for _ in range(n)]
qc = [qiskit.QuantumCircuit(q, c[j]) for j in range(n)]
for j in range(n):
i = apply_string_algorithm(algorithm[j], q, qc[j], c[j])
if i == -1: qc[j].measure(q, c[j])
else: c[j].size = i + 1
job_exp = _make_job(qc, backend, num_samples)
# Often there are random queue errors that have happened to
# me that cause the job to never complete. Two things I have
# encountered: I lose connection or something, and I get an
# error, or for some reason their server tells me that the
    # job is running indefinitely, ie it just gets stuck running.
# So if either of those things happen, we reset and
# reinitialize our job(s) into the queue.
if backend not in simulators:
lapse, interval = 0, 30
done = False
while not done:
str_status = str(job_exp.status())
queue_position = job_exp.queue_position()
error = job_exp.error_message()
print('\nStatus @ %d seconds' % (interval * lapse))
print("queue position =", queue_position)
print(str_status)
done = queue_position is not None and queue_position < 1
if error:
print("\nEncountered an error")
print(error)
print("reentering job into queue\n")
job_exp.cancel()
job_exp = _make_job(qc, backend, num_samples)
lapse = 0
lapse += 1
time.sleep(interval)
res = job_exp.result()
    ## qiskit orders their bits opposite to Cirq and ProjectQ, and in my
## opinion in a much less intuitive way. So I flip the order of the bits
## here.
if multiple:
return [
Result(
{k[::-1]: v/num_samples
for k, v in res.get_counts(cir).items()}
) for cir in qc
]
else:
return Result(
{k[::-1]: v/num_samples for k, v in res.get_counts(qc[0]).items()}
)
def algorithm_unitary(algorithm, num_qubits=None):
"""
Find the unitary corresponding to the algorithm.
algorithm: list of strings, each string is a gate in GATE_ARGUMENTS.keys()
with whatever arguments required to define the
gate.
num_qubits: int, number of qubits to run the algorithm on.
returns: np.array, unitary matrix corresponding to the algorithm.
"""
if num_qubits is None: num_qubits = get_num_qubits(algorithm)
if not algorithm: algorithm = ["iden(0)"]
    ## qiskit orders their bits opposite to Cirq and ProjectQ, and in my
## opinion in a much less intuitive way. So I flip the order of the bits
## here.
a = []
for gate in algorithm:
gate_name, gate_args = get_gate_info(gate)
i = GATE_PARAMS[gate_name][0]
params = gate_args[:i]
qubits = gate_args[i:]
qubits = tuple(num_qubits-q-1 for q in qubits)
a.append(gate_name + str(params + qubits))
q = qiskit.QuantumRegister(num_qubits)
qc = qiskit.QuantumCircuit(q)
apply_string_algorithm(a, q, qc)
return qiskit.execute(
qc, backend=qiskit.Aer.get_backend(unitary_simulator)
).result().get_data(qc)["unitary"]
def prepare_state(state):
"""
state: string, string of 0's and 1's.
returns: algorithm (list of strings), algorithm to prepare the state.
"""
return ["x(%d)" % i for i in range(len(state)) if state[i] == "1"]
def sample(algorithm, num_qubits=None,
num_samples=1, backend=simulator):
"""
Get a list of all the outputs from an algorithm. Differs from `run` because
`run` returns the determined probabilities of each state, but `sample`
returns a list of the outputs.
algorithm: algorithm (list of strings), NOT a list of algorithms,
each string is a gate in GATE_ARGUMENTS.keys() with whatever
arguments required to define the gate.
num_qubits: int, number of qubits to run each algorithm on.
num_samples: int, number of samples to take from the quantum computer.
backend: str, IBM backend to run the algorithm on. If backend is not
a local simulator then credentials must have already
been applied.
returns: list, each element is the measured state.
"""
d = run([algorithm]*num_samples,
num_qubits=num_qubits,
num_samples=1, backend=backend)
return [list(x.keys())[0] for x in d]
def single_sample(algorithm, num_qubits=None, backend=simulator):
"""
Same as `sample` with one sample, but returns a state instead of a list of
one state.
"""
return sample(algorithm, num_qubits, 1, backend)[0]
def markovian_sample(algorithm, num_qubits=None,
num_samples=1, backend=simulator):
"""
Get a list of all the outputs from an algorithm, where the previous output
is prepared as the starting point for the next algorithm; ie the
measurement of the algorithm is input to run the algorithm again.
algorithm: algorithm (list of strings), NOT a list of algorithms,
each string is a gate in GATE_ARGUMENTS.keys() with whatever
arguments required to define the gate.
num_qubits: int, number of qubits to run each algorithm on.
num_samples: int, number of samples to take from the quantum computer.
backend: str, IBM backend to run the algorithm on. If backend is not
a local simulator then credentials must have already
been applied.
returns: list, each element is the measured state.
"""
if num_samples < 1: raise ValueError("Must have >= 1 sample")
if num_qubits is None: num_qubits = get_num_qubits(algorithm)
args = num_qubits, backend
res = [single_sample(algorithm, *args)]
for _ in range(num_samples-1):
res.append(single_sample(prepare_state(res[-1])+algorithm, *args))
return res
if __name__ == "__main__":
## Examples
# `run` returns a dictionary mapping states to probabilities, ie
# run(["h(0)", "cx(0, 1)"]) should return {"00":0.5, "11": 0.5}.
# if no "measure" is included in alg, then by default everything will
# be measured.
alg = ["H(0)", "CX(0, 1)", "u3(pi, pi/2, pi/4, 0)"]
print(run(alg, 3, num_samples=10000))
# since a measure is included, only that register will be measured.
alg = ["H(0)", "CX(0, 1)", "u3(pi, pi/2, pi/4, 0)", "measure(0, 0)"]
# print(run(alg, 3, num_samples=1000, backend="ibmqx4"))
# run multiple circuits at once
alg0 = ["h(0)", "cx(0, 1)", "measure(0, 0)", "measure(1, 1)"]
alg1 = ["x(0)", "H(1)", "ccx(0, 1, 2)"]
print(run([alg0, alg1]))
    # convert alg to its unitary representation.
alg = ["h(0)", "cx(0, 1)"]
print(algorithm_unitary(alg, 2))
``` |
{
"source": "jiosue/qSonify",
"score": 4
} |
#### File: qSonify/maps/frequencymapping.py
```python
from qSonify.sonify import Song, freq_to_note
def frequencymapping(low_freq=300, base=2):
""" map output of algorithm to two tracks """
def f(res, name, tempo):
"""
res: list, list of output states of the qc.
name: str, name of song.
tempo: int, tempo of song.
return: Song object.
"""
s = Song(name=name, tempo=tempo)
for x in res:
note = freq_to_note(int(x, base=base) + low_freq)
s.addNote(note, duration=.5)
return s
return f
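# Usage sketch (assumes freq_to_note maps a frequency in Hz to the nearest note name):
# song = frequencymapping(low_freq=300)(["0101", "1100"], "demo", 120)
# Each bitstring is read as a base-2 integer, e.g. int("0101", 2) + 300 = 305 Hz.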
```
#### File: qSonify/sonify/song.py
```python
from midiutil.MidiFile import MIDIFile
import os
def _create_midi_mapping():
""" Create a dictionary that maps note name to midi note integer """
middle_c = 60
notes = "c", "c#", "d", "d#", "e", "f", "f#", "g", "g#", "a", "a#", "b"
equiv = (("c#", "db"), ("d#", "eb"),
("f#", "gb"), ("g#", "ab"), ("a#", "bb"))
m = {}
j, o = len(notes)-1, 3
for v in range(middle_c-1, -1, -1):
for e in equiv: m[notes[j].replace(*e) + str(o)] = v
if j == 0: o -= 1
j = (j - 1) % len(notes)
j, o = 0, 4
for v in range(middle_c, 128):
for e in equiv: m[notes[j].replace(*e) + str(o)] = v
j = (j + 1) % len(notes)
if j == 0: o += 1
return m
_midi_mapping = _create_midi_mapping()
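# Sanity checks on the mapping: _midi_mapping["c4"] == 60 (middle C),
# _midi_mapping["a4"] == 69, and enharmonic spellings coincide, e.g.
# _midi_mapping["c#4"] == _midi_mapping["db4"] == 61.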
class Song(MIDIFile):
_valid = tuple, list, type(x for x in range(1))
def __init__(self, name="test", tempo=100, num_tracks=1):
"""
Intialize Song object.
name: str, name of song/file.
tempo: int, bpm of song.
num_tracks: int, number of tracks for the midi file to have.
"""
super().__init__(num_tracks)
self.name, self.tempo, self.volume = name, tempo, 100
self.filename = "%s.mid" % name
self.path = ""
track, self.channel = 0, 0
self.time = [0]*num_tracks # start each track at the beginning
        self.addTempo(track, self.time[0], self.tempo)
        self.need_to_write = True
def addNote(self, notes, duration=4, track=0):
"""
Overrides MIDIFile's addNote method, but uses it as a subroutine. Adds
a note or notes with a duration to the specified track, then increments
the time by that duration.
notes: str or tuple of strs, notes to add at the current location of
of the track.
duration: float, number of beats for the note/chord.
track: int, which track to add to.
"""
if not isinstance(notes, Song._valid): notes = notes,
for note in notes:
note = note.lower()
if note in _midi_mapping: pitch = _midi_mapping[note]
elif note+"4" in _midi_mapping: pitch = _midi_mapping[note+"4"]
else: raise ValueError("Note not valid:", note)
super().addNote(track, self.channel, pitch,
self.time[track], duration, self.volume)
self.time[track] += duration
self.need_to_write = True
def addRest(self, duration=1, track=0):
"""
Add a rest to the track, just corresponds to adjusting the time.
duration: float, number of beats the rest lasts.
track: int, which track to add the rest to.
"""
self.time[track] += duration
self.need_to_write = True
def addText(self, text, track=0):
"""
Add text to a track at the current time. For it to be visible, there
must be a note at the current time on this track.
text: str, text to add.
track: int, which track to add the text to.
"""
super().addText(track, self.time[track], str(text))
self.need_to_write = True
def writeFile(self, path=""):
"""
Write the current midi track to a file
path: str, path to write the file to. Must end with a "/"!
"""
if not self.need_to_write: return
try:
with open(path+self.filename, "wb") as f: super().writeFile(f)
except FileNotFoundError:
os.mkdir(path)
with open(path+self.filename, "wb") as f: super().writeFile(f)
self.need_to_write = False
self.path = path
def play(self, path=""):
"""
Write the midi file, then call on the system's default midi player. On
Windows, this is probably Windows Media Player. THIS ONLY WORKS ON
WINDOWS, IF YOU WANT TO USE IT YOU MUST CHANGE THE SYSTEM CALL.
path: str, where to save the file to. Must end with a "/"!
"""
if not path and self.path: path = self.path
self.writeFile(path)
os.system("start %s" % (self.path+self.filename))
def __str__(self):
""" Return the string name of the song """
return self.filename
if __name__ == "__main__":
    s = Song(name="helloworld", tempo=110)
s.addNote("c")
s.addNote("d")
s.addNote(("c", "d", "e"))
    s.play()
``` |
{
"source": "jiosue/Quantum-Computer-Simulator-with-Algorithms",
"score": 3
} |
#### File: PythonImplementation/QuantumSimulator/gates.py
```python
import numpy as np
exp, PI, cos, sin = np.exp, np.pi, np.cos, np.sin
sigma_x = [[0, 1], [1, 0]]
sigma_y = [[0, -1j], [1j, 0]]
sigma_z = [[1, 0], [0, -1]]
class Gate:
def __init__(self, unitary, qubits):
"""
unitary is list of list representing unitary matrix
qubits is tuple in order of qubits that unitary acts on
"""
self.unitary, self.qubits = np.array(unitary), qubits
self.dimension, self.num_qubits = len(unitary), len(qubits)
def __getitem__(self, item):
""" Gate[i][j] gets the (i, j) element of the unitary matrix """
return self.unitary[item]
def __call__(self, register):
"""
Apply gate to register.
:param register: Register object to apply the gate to
:return: the register, so that we can string gate function calls.
ie, gate1(gate2(gate3(register)))
"""
register.apply_gate(self)
return register
def full_unitary(self, num_qubits):
"""
Find the full unitary matrix of the gate on the full Hilbert space of
dimension 2^num_qubits. ONLY WORKS FOR SINGLE QUBIT GATES RIGHT NOW
"""
unitary = np.kron(np.eye(1 << self.qubits[0]), self.unitary)
unitary = np.kron(unitary, np.eye(1 << (num_qubits-self.qubits[0]-1)))
return unitary
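    # Example (single-qubit case): for H(1) on a 2-qubit register,
    # full_unitary(2) is kron(I_2, H.unitary), a 4x4 matrix acting on qubit 1.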
def __pow__(self, power):
return Gate([list(x) for x in np.array(self.unitary)**power],
self.qubits)
# def __mul__(self, other):
# """ self * other """
class H(Gate):
c = 1.0/2.0**0.5 + 0.0j
unitary = np.array([
[c, c],
[c, -c]
])
def __init__(self, qubit):
super().__init__(H.unitary, (qubit,))
def __str__(self):
return "H(%d)" % self.qubits[0]
class CX(Gate):
unitary = np.array([
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0]
])
def __init__(self, control_qubit, target_qubit):
# qubits should be tuple (control, target)
super().__init__(CX.unitary, (control_qubit, target_qubit))
def __str__(self):
return "CX" + str(self.qubits)
class X(Gate):
unitary = np.array(sigma_x)
def __init__(self, qubit):
super().__init__(X.unitary, (qubit,))
def __str__(self):
return "X%d" % self.qubits[0]
class Y(Gate):
unitary = np.array(sigma_y)
def __init__(self, qubit):
super().__init__(Y.unitary, (qubit,))
def __str__(self):
return "Y%d" % self.qubits[0]
class Z(Gate):
unitary = np.array(sigma_z)
def __init__(self, qubit):
super().__init__(Z.unitary, (qubit,))
def __str__(self):
return "Z%d" % self.qubits[0]
class T(Gate):
unitary = [[0.0]*8 for _ in range(8)]
for i in range(6): unitary[i][i] = 1.0
unitary[6][7] = 1.0
unitary[7][6] = 1.0
unitary = np.array(unitary)
def __init__(self, *qubits):
""" qubits should be a tuple of length 3 """
super().__init__(T.unitary, qubits)
def __str__(self):
return "T" + str(self.qubits)
class S(Gate):
unitary = np.array([
[1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0]
])
def __init__(self, *qubits):
""" swap two qubits. qubits should be tuple of length 2 """
super().__init__(S.unitary, qubits)
def __str__(self):
return "S" + str(self.qubits)
class P(Gate):
""" Phase shift P = |0><0| + exp(i theta) |1><1| """
unitary = lambda angle: np.array([
[1.0, 0.0],
[0.0, exp(1.0j*angle)]
])
def __init__(self, angle, qubit):
super().__init__(P.unitary(angle), (qubit,))
self.angle = angle
def __str__(self):
return "P" + str((self.angle,) + self.qubits)
class CP(Gate):
unitary = lambda angle: np.array([
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, exp(1.0j*angle)]
])
def __init__(self, angle, control_qubit, target_qubit):
qubits = (control_qubit, target_qubit)
super().__init__(CP.unitary(angle), qubits)
self.angle = angle
def __str__(self):
return "CP" + str((self.angle,) + self.qubits)
class RX(Gate):
unitary = lambda angle: np.array([
[cos(angle/2), -1.0j*sin(angle/2)],
[-1.0j*sin(angle/2), cos(angle/2)]
])
def __init__(self, angle, qubit):
""" rotate the qubit around the x axis by an angle """
super().__init__(RX.unitary(angle), (qubit,))
self.angle = angle
def __str__(self):
return "RX" + str((self.angle,) + self.qubits)
class RY(Gate):
unitary = lambda angle: np.array([
[cos(angle/2), -sin(angle/2)],
[sin(angle/2), cos(angle/2)]
])
def __init__(self, angle, qubit):
""" rotate the qubit around the y axis by an angle """
super().__init__(RY.unitary(angle), (qubit,))
self.angle = angle
def __str__(self):
return "RY" + str((self.angle,) + self.qubits)
class RZ(Gate):
unitary = lambda angle: np.array([
[exp(-1.0j*angle/2), 0.0],
[0.0, exp(1.0j*angle/2)]
])
def __init__(self, angle, qubit):
""" rotate the qubit around the z axis by an angle """
super().__init__(RZ.unitary(angle), (qubit,))
self.angle = angle
def __str__(self):
return "RZ" + str((self.angle,) + self.qubits)
class U3(Gate):
""" u3(th, phi, lam) = Rz(phi)Ry(th)Rz(lam), see arxiv:1707.03429 """
unitary = lambda theta, phi, lam: np.array([
[exp(-1j*(phi+lam)/2)*cos(theta/2),
-exp(-1j*(phi-lam)/2)*sin(theta/2)],
[exp(1j*(phi-lam)/2)*sin(theta/2),
exp(1j*(phi+lam)/2)*cos(theta/2)]
])
def __init__(self, theta, phi, lam, qubit):
super().__init__(U3.unitary(theta, phi, lam), (qubit,))
self.params = theta, phi, lam
def __str__(self):
return "U3" + str(self.params + self.qubits)
def string_to_gate(string):
return eval(string.upper())
def apply_gate(string, register):
""" apply the gate represented by string to the register """
string_to_gate(string)(register)
def apply_algorithm(algorithm, register):
for gate in algorithm: apply_gate(gate, register)
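# Usage sketch (assumes a Register object exposing apply_gate, as used by
# Gate.__call__ above):
# apply_algorithm(["H(0)", "CX(0, 1)"], register)  # prepares a Bell state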
``` |
{
"source": "jiosue/testing_actions",
"score": 2
} |
#### File: jiosue/testing_actions/setup.py
```python
import setuptools
from setuptools.command.build_ext import build_ext
try:
from Cython.Build import cythonize
USE_CYTHON, ext = True, '.pyx'
except ImportError:
def cythonize(e): return e
USE_CYTHON, ext = False, '.c'
with open('README.rst') as f:
README = f.read()
with open("requirements.txt") as f:
REQUIREMENTS = [line.strip() for line in f if line.strip()]
# get __version__, __author__, etc.
with open("testing_actions/_version.py") as f:
exec(f.read())
extensions = cythonize([
setuptools.Extension(
name='testing_actions._c_extension',
sources=['./testing_actions/_c_extension' + ext,
'./testing_actions/src/c_extension.c'],
include_dirs=['./testing_actions/src/'],
language='c'
)
])
setuptools.setup(
name="testing_actions",
version=__version__,
author=__author__,
author_email=__authoremail__,
description=__description__,
long_description=README,
long_description_content_type='text/x-rst',
url=__sourceurl__,
license=__license__,
packages=setuptools.find_packages(exclude=("tests", "docs")),
ext_modules=extensions,
test_suite="tests",
install_requires=REQUIREMENTS,
zip_safe=False,
cmdclass=dict(build_ext=build_ext),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
project_urls={
"Source": __sourceurl__
}
)
``` |
{
"source": "JiovaniLedesma/holographicM",
"score": 3
} |
#### File: holographicM/holographicMicroscope/cunwrap.py
```python
import numpy as np
import math as m
def cunwrap(Psi, options):
a = Psi.shape
if len(a) > 2:
print('CUNWRAP: input Psi must be 2D-array')
else:
ny, nx = Psi.shape[0], Psi.shape[1]
if nx < 2 || ny < 2:
print('CUNWRAP: size of Psi must be langer than 2')
else:
if 'roundk' in options:
roundk = false
if 'verbose' in options:
verbose = true
w1 = np.ones((ny, 1))
w1[0], w1[-1] = 0.5, 0.5
w2 = np.ones((1, nx))
w2[0], w2[-1] = 0.5, 0,5
weight = w1*w2
if 'weight' in options:
weight = weight
if 'maxblocksize' in options:
blocksize = 125
if 'overlap' in options:
p = 0.25
p = max(min(p,1),0)
def splitidx(blocksize, n, p):
if blocksize >= n:
ilist = [1, n]
blocksize = n
else:
q = 1-p
# Number of blocks
k = (n/blocksize-p)/q
k = np.ceil(k)
# Readjust the block size, float
blocksize = n/((k*q) + p)
# first index
firstidx = np.round(np.linspace(1, n - np.ceil(blocksize)+1, k))
lastidx = np.round(firstidx+blocksize-1)
lastidx[-1] = n
# Make sure they are overlapped
lastidx[0:-1] = max(lastidx[0:-1], firstidx[1:])
# Put the indexes of k blocks into cell array
ilist = np.ceil(1, len(firstidx))
for k in range(0,len(ilist)):
ilist[k] = firstidx[k]:lastidx[k]
blocksize = np.round(blocksize)
return ilist, blocksize
def mydisplay(verbose, arg):
if verbose == "verbose":
print(arg)
```
#### File: holographicM/holographicMicroscope/thorcam.py
```python
import numpy as np
import os.path
import time
from ctypes import *
class CameraOpenError(Exception):
def __init__(self, mesg):
self.mesg = mesg
def __str__(self):
return self.mesg
class Camera(object):
def __init__(self):
uc480_file = 'C:\\Program Files\\Thorlabs\\Scientific Imaging\\ThorCam\\uc480_64.dll'
if os.path.isfile(uc480_file):
self.bit_depth = None
self.roi_shape = None
self.camera = None
self.handle = None
self.meminfo = None
self.exposure = None
self.roi_pos = None
self.frametime = None
self.uc480 = windll.LoadLibrary(uc480_file)
else:
raise CameraOpenError("ThorCam drivers not available.")
def open(self, bit_depth=8, roi_shape=(1024, 1024), roi_pos=(0,0), camera="ThorCam FS", exposure = 0.01, frametime = 10.0):
self.bit_depth = bit_depth
self.roi_shape = roi_shape
self.camera = camera
self.roi_pos = roi_pos
is_InitCamera = self.uc480.is_InitCamera
is_InitCamera.argtypes = [POINTER(c_int)]
self.handle = c_int(0)
i = is_InitCamera(byref(self.handle))
if i == 0:
print("ThorCam opened successfully.")
pixelclock = c_uint(43) #set pixel clock to 43 MHz (fastest)
is_PixelClock = self.uc480.is_PixelClock
is_PixelClock.argtypes = [c_int, c_uint, POINTER(c_uint), c_uint]
is_PixelClock(self.handle, 6 , byref(pixelclock), sizeof(pixelclock)) #6 for setting pixel clock
self.uc480.is_SetColorMode(self.handle, 6) # 6 is for monochrome 8 bit. See uc480.h for definitions
self.set_roi_shape(self.roi_shape)
self.set_roi_pos(self.roi_pos)
self.set_frametime(frametime)
self.set_exposure(exposure)
else:
raise CameraOpenError("Opening the ThorCam failed with error code "+str(i))
def close(self):
if self.handle != None:
self.stop_live_capture()
i = self.uc480.is_ExitCamera(self.handle)
if i == 0:
print("ThorCam closed successfully.")
else:
print("Closing ThorCam failed with error code "+str(i))
else:
return
def get_image(self, buffer_number=None):
#buffer number not yet used
#if buffer_number is None:
# buffer_number = self.epix.pxd_capturedBuffer(1)
im = np.frombuffer(self.meminfo[0], c_ubyte).reshape(self.roi_shape[1], self.roi_shape[0])
return im
def get_frame_number(self):
#not implemented for thorcam_fs
#return self.epix.pxd_capturedBuffer(0x1,1)-1
return 1
def finished_live_sequence(self):
#not implemented for thorcam_fs
#return self.epix.pxd_goneLive(0x1) == 0
return 0
def start_continuous_capture(self, buffersize = None):
'''
buffersize: number of frames to keep in rolling buffer
'''
self.uc480.is_CaptureVideo(self.handle, 1)
def start_sequence_capture(self, n_frames):
#not implemented for thorcam_fs
        print('sequence capture started')
#self.epix.pxd_goLiveSeq(0x1,1,n_frames,1,n_frames,1)
    def stop_live_capture(self):
        print('unlive now')
#self.epix.pxd_goUnLive(0x1)
self.uc480.is_StopLiveVideo(self.handle, 1)
def initialize_memory(self):
if self.meminfo != None:
self.uc480.is_FreeImageMem(self.handle, self.meminfo[0], self.meminfo[1])
xdim = self.roi_shape[0]
ydim = self.roi_shape[1]
imagesize = xdim*ydim
memid = c_int(0)
c_buf = (c_ubyte * imagesize)(0)
self.uc480.is_SetAllocatedImageMem(self.handle, xdim, ydim, 8, c_buf, byref(memid))
self.uc480.is_SetImageMem(self.handle, c_buf, memid)
self.meminfo = [c_buf, memid]
def set_bit_depth(self, set_bit_depth = 8):
if set_bit_depth != 8:
print("only 8-bit images supported")
def set_roi_shape(self, set_roi_shape):
class IS_SIZE_2D(Structure):
_fields_ = [('s32Width', c_int), ('s32Height', c_int)]
AOI_size = IS_SIZE_2D(set_roi_shape[0], set_roi_shape[1]) #Width and Height
is_AOI = self.uc480.is_AOI
is_AOI.argtypes = [c_int, c_uint, POINTER(IS_SIZE_2D), c_uint]
i = is_AOI(self.handle, 5, byref(AOI_size), 8 )#5 for setting size, 3 for setting position
is_AOI(self.handle, 6, byref(AOI_size), 8 )#6 for getting size, 4 for getting position
self.roi_shape = [AOI_size.s32Width, AOI_size.s32Height]
if i == 0:
print("ThorCam ROI size set successfully.")
self.initialize_memory()
else:
print("Set ThorCam ROI size failed with error code "+str(i))
def set_roi_pos(self, set_roi_pos):
class IS_POINT_2D(Structure):
_fields_ = [('s32X', c_int), ('s32Y', c_int)]
AOI_pos = IS_POINT_2D(set_roi_pos[0], set_roi_pos[1]) #Width and Height
is_AOI = self.uc480.is_AOI
is_AOI.argtypes = [c_int, c_uint, POINTER(IS_POINT_2D), c_uint]
i = is_AOI(self.handle, 3, byref(AOI_pos), 8 )#5 for setting size, 3 for setting position
is_AOI(self.handle, 4, byref(AOI_pos), 8 )#6 for getting size, 4 for getting position
self.roi_pos = [AOI_pos.s32X, AOI_pos.s32Y]
if i == 0:
print("ThorCam ROI position set successfully.")
else:
print("Set ThorCam ROI size failed with error code "+str(i))
def set_exposure(self, exposure):
#exposure should be given in ms
exposure_c = c_double(exposure)
is_Exposure = self.uc480.is_Exposure
is_Exposure.argtypes = [c_int, c_uint, POINTER(c_double), c_uint]
is_Exposure(self.handle, 12 , exposure_c, 8) #12 is for setting exposure
self.exposure = exposure_c.value
def set_frametime(self, frametime):
#must reset exposure after setting framerate
#frametime should be givin in ms. Framerate = 1/frametime
is_SetFrameRate = self.uc480.is_SetFrameRate
if frametime == 0: frametime = 0.001
set_framerate = c_double(0)
is_SetFrameRate.argtypes = [c_int, c_double, POINTER(c_double)]
is_SetFrameRate(self.handle, 1.0/(frametime/1000.0), byref(set_framerate))
self.frametime = (1.0/set_framerate.value*1000.0)
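# Illustrative usage (requires the Thorlabs uc480 driver DLL and attached hardware):
# cam = Camera()
# cam.open(roi_shape=(1024, 1024), exposure=0.01, frametime=10.0)
# cam.start_continuous_capture()
# frame = cam.get_image()  # numpy array with shape (roi_shape[1], roi_shape[0])
# cam.close()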
``` |
{
"source": "JipengSun/NLP_Project_2",
"score": 3
} |
#### File: JipengSun/NLP_Project_2/transformation_cuisine.py
```python
import pandas as pd
import nltk
#nltk.download('punkt')
#nltk.download('averaged_perceptron_tagger')
import itertools
import spacy
import get_recipe_json
import tools_methods
import tools_methods_1
nlp=spacy.load('en_core_web_sm')
def get_indian_recipe(url):
#data = get_recipe_json.get_recipe_json("https://www.allrecipes.com/recipe/23600/worlds-best-lasagna/")
#data = get_recipe_json.get_recipe_json("https://www.allrecipes.com/recipe/276206/stuffed-turkey-meatloaf/")
data = get_recipe_json.get_recipe_json(url)
print("The Original Recipe is: ")
print(data)
tools_methods_1.get_tools_recipe(url)
tools_methods.get_methods_recipe(url)
text_recipe_name = data['name']
dict_ = {
"pork sausage":"tofu",
"chicken sausage":"tofu",
"pork ribs":"chicken",
"clams":"fish",
"pecans":"almonds",
"grits":"corn",
"extra virgin olive oil":"vegetable oil",
"extra-virgin olive oil":"vegetable oil",
"virgin olive oil":"vegetable oil",
"balsamic vinegar":"lemon juice",
"pasta":"rice",
"pasta sauce":"garlic chutney",
"oregano":"marva leaves",
"italian-seasoned":"garam masala",
"romano cheese":"amul cheese",
"spaghetti":"rice",
"prosciutto":"chicken",
"capers":"peas",
"porcini mushrooms":"mushrooms",
"basil":"mint",
"italian cheese":"amul cheese",
"rabbit" : "eggs",
"rosemary leaves":"marva leaves",
"dipping sauce":"garlic chutney",
"chipotle peppers":"green peppers",
"pancetta":"chicken",
"celery":"green onion",
"tarragon":"mint",
"italian":"indian",
"spanish":"indian",
"manicotti pasta":"rice",
"marsala wine":"lemon juice",
"sherry":"garlic chutney",
"wine sherry":"lemon juice",
"cayenne pepper":"green pepper",
"fillets cod fillets": "fish",
"brandy":"tomato puree",
"bottle dry red wine":"lemon juice",
"white wine":"lemon juice",
"sofrito":"onions",
"jamón ibérico":"chicken",
"jamon iberico":"chicken",
"sherry vinegar":"lemon juice",
"olives":"peas",
"cod fish":"fish",
"red wine":"lemon juice",
"fettuccine pasta":"rice",
"tortillas":"rotis",
"parmesan cheese":"amul cheese",
"shrimp":"fish",
"kale":"cabbage",
"whipping cream":"curd",
"noodles":"rice",
"mozzarella cheese":"amul cheese",
"chorizo":"chicken",
"ricotta cheese":"amul cheese",
"marinara sauce":"garlic chutney",
"rice vinegar":"lemon juice",
"mirin":"honey",
"miso paste":"garlic chutney",
"sesame oil":"vegetable oil",
"soba noodles":"rice",
"monosodium glutamate":"maggie bhuna masala",
"aji-no-moto":"maggie bhuna masala",
"tenkasu":"bread crumps",
"sake":"lemon juice",
"sea bass":"fish",
"canola oil":"vegetable oil",
"nori seaweed":"cabbage",
"avocado":"almond",
"crabmeat":"chicken",
"sriracha hot sauce":"chilly garlic paste",
"sriracha":"chilly garlic paste",
"sriracha sauce":"chilly garlic paste",
"nori":"cabbage",
"dashi kombu":"mushrooms",
"dashi granules":"mushrooms",
"wakame":"spinach",
"wakame seaweed":"spinach",
"bonito flakes":"mushrooms",
"shichimi togarashi":"maggie bhuna masala",
"dark soy sauce":"chilly garlic paste",
"shaoxing wine":"lemon juice",
"oyster sauce":"mushroom sauce",
"ground white pepper":"pepper",
"red caviar":"peas",
"wasabi paste":"ginger paste",
"veal":"chicken",
"pesto":"mint chutney",
"salsa":"garlic chutney",
"mussels":"prawns",
"baby squid":"fish",
"asturian fabada":"chickpeas",
"lima beans":"chickpeas",
"nutmeg":"almonds",
"cheese tortellini":"cheese cubes",
"broccoli":"cauliflower",
"asparagus":"spinach",
"italian herb seasoning":"garam masala",
"italian-style":"indian style",
"collard greens":"spinach",
"pumpkin":"potatoes",
"turkey breast":"chicken",
"shallot":"onions",
"brussels sprouts":"peas",
"prime rib roast":"wheat bread",
"margarine":"butter",
"soy sauce":"vegetable curry",
"ground beef":"chicken",
"beef":"chicken",
"italian seasoning":"masala",
"italian seasoned":"masala",
"parsley":"corriander",
"meat sauce":"schezwan sauce",
"szechuan peppercorns":"corriander seeds",
"bacon":"chicken",
"turkey":"chicken",
"worcestershire sauce":"schezwan sauce",
"dijon":"mustard paste",
"brown sugar":"sugar",
"olive oil":"vegetable oil",
"thai basil leaves":"mint",
"thai":"indian",
"black bean sauce":"schezwan sauce",
"white soy sauce":"schezwan sauce",
"gochujang":"chilly garlic paste",
"chives":"onions",
"dumpling wrappers":"paani poori",
"beans":"chickpeas",
"limes":"lemon juice",
"jalapenos":"red chillies",
"pumpkin seeds":"sesame seeds",
"adobo sauce":"schezwan sauce",
"enchilada sauce":"schezwan sauce",
"hominy":"chickpeas",
"chipotle sauce":"schezwan sauce",
"masa harina":"wheat flour",
"taco seasoning mix":"maggie bhuna masala",
"chipotle chiles":"green chillies",
"ancho chiles":"green chillies",
"guajillo chiles":"green chillies",
"sour cream":"curd",
"cheddar cheese":"amul cheese",
"fajita seasoning":"maggie bhuna masala",
"monterey jack cheese":"amul cheese",
"fish sauce":"schezwan sauce",
"palm sugar":"sugar",
"makham piak":"tamarind juice"
}
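    # Added note: the substitutions below are applied in the dict's insertion order, so a
    # broad key that appears earlier (e.g. "pasta") also rewrites longer phrases before
    # their own rules are reached ("pasta sauce" becomes "rice sauce" rather than
    # "garlic chutney").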
ingredients_l=[]
mapping = dict_
for d in mapping:
text_recipe_name = text_recipe_name.lower().replace(d,mapping[d])
for i,ingredient in enumerate(data['ingredients']):
#print("NAME",ingredient['name'])
if d in ingredient['name'].lower():
data['ingredients'][i]['name'] = ingredient['name'].lower().replace(d,mapping[d])
for i,ingredient in enumerate(data['ingredients']):
ingredients_l.append(str(data['ingredients'][i]['quantity'])+" "+str(data['ingredients'][i]['unit'])+" "+data['ingredients'][i]['name'])
print("\nThe transformed Recipe (Indian Cuisine) is:")
print(text_recipe_name)
#print(data['ingredients'])
print("\nThe transformed ingredients are: ")
print(ingredients_l)
text=data["steps"]
list_stepwise_ing=[]
#INDIVIDUAL TRANSFORMED STEPS:
original_rec=text
#OVERALL TRANSFORMED STEPS
new_steps = []
for ing in mapping:
text = [w.lower().replace(ing,mapping[ing]) for w in text]
for ing in mapping:
for i1,t in enumerate(text):
#t_list = t.split()
S1 = t.split(", ")
S1 = ' '.join(S1)
S1 = S1.split(".")
S1 = ' '.join(S1)
S1 = S1.split()
if mapping[ing] in S1:
print("The list of Transformed Ingredients are:")
print({ing:mapping[ing]})
print("\nThe transformed recipe steps are: ")
print(text)
for i1,t in enumerate(text):
list_in = []
dict_ = {}
for i in data['ingredients']:
#split_=i['name'].split()
S1 = set(i['name'].split())
S2 = set(t.split(", "))
S2 = ' '.join(S2)
S2 = S2.split(" ")
#print(len(S1.intersection(S2)))
if len(S1.intersection(S2))>0:
list_in.append(i['name'])
dict_["original_step"]=original_rec[i1]
dict_["transformed_step"]=t
dict_["ingredients"]=list_in
list_stepwise_ing.append(dict_)
print("\nThe step-wise transformation of the recipe is ")
print(list_stepwise_ing)
# get_indian_recipe("https://www.allrecipes.com/recipe/276206/stuffed-turkey-meatloaf/")
``` |
{
"source": "jiper/easydo",
"score": 2
} |
#### File: Algorithm/M1809/crawling_finance_table_v1_7.py
```python
'''
Class: crawling_finance_table_v1.5
Author: 徐抒田
Date: 2018-1-10
Description:
1. Change the BeautifulSoup parsing expression to -> soup.select('table#BalanceSheetNewTable0 tbody tr td')
2. Remove some unused code
3. Add some comments
Version: V1.5
'''
'''
Class: crawling_finance_table_v1.6
Author: 徐抒田
Date: 2018-1-11
Description:
Wrapped the crawler into a class
Version: V1.6
'''
'''
Class: crawling_finance_table_v1.7
Author: 徐抒田
Date: 2018-1-15
Description:
Fix crawling failures for some stock codes
Add a stock code field
Version: V1.7
'''
# =============================================================================
# import re
# import json
# import numpy as np
# =============================================================================
import pandas as pd
import requests
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
class crawling_finance:
    '''
    A small helper for fetching listed-company financial statement data
    '''
def __init__(self,work_path, stock_code):
self.work_path = work_path
self.stock_code = stock_code
def get_one_page(self,url):
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36'}
try:
response = requests.get(url,headers = headers)
response.encoding = 'GB2312' #解决中文乱码
if response.status_code == 200: #判断是否爬取网页成功
return response.text
return None
except RequestException:
return None
# =============================================================================
# def parse_one_page_zhengze(html):
# try:
# pattern = re.compile('>(.*?)".*?>(.*?)</td><td',re.S)
# items = re.findall(pattern,html)
# except:
# pass
# print(items)
# =============================================================================
def parse_one_page_zichanfuzhai(self,html):
try:
soup = BeautifulSoup(html,'html5lib')
lls = soup.select('table#BalanceSheetNewTable0 tbody tr td')
stock_raw_data = [] #记得初始化,否则append会一直叠加
stock_data=[]
data = []
senson_one=[]
senson_two=[]
senson_three=[]
senson_four=[]
for l in lls:
if (l.get_text().strip()) != '流动资产' and (l.get_text().strip()) != '非流动资产' and (l.get_text().strip()) != '流动负债' and (l.get_text().strip()) != '非流动负债' and (l.get_text().strip()) != '所有者权益':
stock_raw_data.append(l.get_text().strip())
# print(len(stock_raw_data)) #调试使用
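            # Illustrative note (added): at this point stock_raw_data is a flat, interleaved
            # list shaped roughly like (values invented):
            #   ['报表日期', '2017-12-31', '货币资金', '123.0', '应收账款', '456.0', ...]
            # so the branches below split off the header and date cell(s) and then use
            # strided slices to separate the feature names from each quarter's values.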
if len(stock_raw_data) > 0 and len(stock_raw_data) < 160: #一个季度
stock_data = stock_raw_data[2:] #原数据中去除日期
dates = stock_raw_data[1:2] #原数据中选取日期
features = stock_data[::2] #选取所有字段名
senson_one = stock_data[1::2] #选取第一季度所有数据
data.append(senson_one)
if len(stock_raw_data) > 160 and len(stock_raw_data) < 240: #两个季度
stock_data = stock_raw_data[3:] #原数据中去除日期
dates = stock_raw_data[1:3] #原数据中选取日期
features = stock_data[::3] #选取所有字段名
senson_one = stock_data[1::3] #选取第一季度所有数据
senson_two = stock_data[2::3] #选取第二季度所有数据
data.append(senson_one)
data.append(senson_two)
            if len(stock_raw_data) > 240 and len(stock_raw_data) < 320: #三个季度
stock_data = stock_raw_data[4:] #原数据中去除日期
dates = stock_raw_data[1:4] #原数据中选取日期
features = stock_data[::4] #选取所有字段名
senson_one = stock_data[1::4] #选取第一季度所有数据
senson_two = stock_data[2::4] #选取第二季度所有数据
senson_three = stock_data[3::4] #选取第三季度所有数据
data.append(senson_one)
data.append(senson_two)
data.append(senson_three)
if len(stock_raw_data) > 320 and len(stock_raw_data) < 400:
stock_data = stock_raw_data[5:] #原数据中去除日期
dates = stock_raw_data[1:5] #原数据中选取日期
features = stock_data[::5] #选取所有字段名
senson_one = stock_data[1::5] #选取第一季度所有数据
senson_two = stock_data[2::5] #选取第二季度所有数据
senson_three = stock_data[3::5] #选取第三季度所有数据
senson_four = stock_data[4::5] #选取第四季度所有数据
data.append(senson_one)
data.append(senson_two)
data.append(senson_three)
data.append(senson_four)
df = pd.DataFrame(data, index=dates, columns= features) #转为dataframe结构
return df
except:
pass
def parse_one_page_xianjinliuliang(self,html):
try:
soup = BeautifulSoup(html,'html5lib')
lls = soup.select('table#ProfitStatementNewTable0 tbody tr td')
stock_raw_data = [] #记得初始化,否则append会一直叠加
stock_data=[]
data = []
senson_one=[]
senson_two=[]
senson_three=[]
senson_four=[]
for l in lls:
if (l.get_text().strip()) != '一、经营活动产生的现金流量' and (l.get_text().strip()) != '二、投资活动产生的现金流量' and (l.get_text().strip()) != '三、筹资活动产生的现金流量' and (l.get_text().strip()) != '附注':
stock_raw_data.append(l.get_text().strip())
# print(len(stock_raw_data)) #调试使用
if len(stock_raw_data) > 0 and len(stock_raw_data) < 150: #一个季度
stock_data = stock_raw_data[2:] #原数据中去除日期
dates = stock_raw_data[1:2] #原数据中选取日期
features = stock_data[::2] #选取所有字段名
senson_one = stock_data[1::2] #选取第一季度所有数据
data.append(senson_one)
if len(stock_raw_data) > 150 and len(stock_raw_data) < 220: #两个季度
stock_data = stock_raw_data[3:] #原数据中去除日期
dates = stock_raw_data[1:3] #原数据中选取日期
features = stock_data[::3] #选取所有字段名
senson_one = stock_data[1::3] #选取第一季度所有数据
senson_two = stock_data[2::3] #选取第二季度所有数据
data.append(senson_one)
data.append(senson_two)
            if len(stock_raw_data) > 220 and len(stock_raw_data) < 300: #三个季度
stock_data = stock_raw_data[4:] #原数据中去除日期
dates = stock_raw_data[1:4] #原数据中选取日期
features = stock_data[::4] #选取所有字段名
senson_one = stock_data[1::4] #选取第一季度所有数据
senson_two = stock_data[2::4] #选取第二季度所有数据
senson_three = stock_data[3::4] #选取第三季度所有数据
data.append(senson_one)
data.append(senson_two)
data.append(senson_three)
if len(stock_raw_data) > 300 and len(stock_raw_data) < 370:
stock_data = stock_raw_data[5:] #原数据中去除日期
dates = stock_raw_data[1:5] #原数据中选取日期
features = stock_data[::5] #选取所有字段名
senson_one = stock_data[1::5] #选取第一季度所有数据
senson_two = stock_data[2::5] #选取第二季度所有数据
senson_three = stock_data[3::5] #选取第三季度所有数据
senson_four = stock_data[4::5] #选取第四季度所有数据
data.append(senson_one)
data.append(senson_two)
data.append(senson_three)
data.append(senson_four)
df = pd.DataFrame(data, index=dates, columns= features)
return df
except:
pass
def parse_one_page_lirunbiao(self,html):
try:
soup = BeautifulSoup(html,'html5lib')
lls = soup.select('table#ProfitStatementNewTable0 tbody tr td')
stock_raw_data = [] #记得初始化,否则append会一直叠加
stock_data=[]
data = []
senson_one=[]
senson_two=[]
senson_three=[]
senson_four=[]
for l in lls:
if (l.get_text().strip()) != '六、每股收益':
stock_raw_data.append(l.get_text().strip())
# print(len(stock_raw_data)) #调试使用
if len(stock_raw_data) > 0 and len(stock_raw_data) < 60: #一个季度
stock_data = stock_raw_data[2:] #原数据中去除日期
dates = stock_raw_data[1:2] #原数据中选取日期
features = stock_data[::2] #选取所有字段名
senson_one = stock_data[1::2] #选取第一季度所有数据
data.append(senson_one)
if len(stock_raw_data) > 60 and len(stock_raw_data) < 90: #两个季度
stock_data = stock_raw_data[3:] #原数据中去除日期
dates = stock_raw_data[1:3] #原数据中选取日期
features = stock_data[::3] #选取所有字段名
senson_one = stock_data[1::3] #选取第一季度所有数据
senson_two = stock_data[2::3] #选取第二季度所有数据
data.append(senson_one)
data.append(senson_two)
            if len(stock_raw_data) > 90 and len(stock_raw_data) < 120: #三个季度
stock_data = stock_raw_data[4:] #原数据中去除日期
dates = stock_raw_data[1:4] #原数据中选取日期
features = stock_data[::4] #选取所有字段名
senson_one = stock_data[1::4] #选取第一季度所有数据
senson_two = stock_data[2::4] #选取第二季度所有数据
senson_three = stock_data[3::4] #选取第三季度所有数据
data.append(senson_one)
data.append(senson_two)
data.append(senson_three)
if len(stock_raw_data) > 120 and len(stock_raw_data) < 150:
stock_data = stock_raw_data[5:] #原数据中去除日期
dates = stock_raw_data[1:5] #原数据中选取日期
features = stock_data[::5] #选取所有字段名
senson_one = stock_data[1::5] #选取第一季度所有数据
senson_two = stock_data[2::5] #选取第二季度所有数据
senson_three = stock_data[3::5] #选取第三季度所有数据
senson_four = stock_data[4::5] #选取第四季度所有数据
data.append(senson_one)
data.append(senson_two)
data.append(senson_three)
data.append(senson_four)
df = pd.DataFrame(data, index=dates, columns= features)
return df
except:
pass
def crawling_update(self):
date=[]
html = self.get_one_page('http://money.finance.sina.com.cn/corp/go.php/vFD_BalanceSheet/stockid/'+self.stock_code+'/ctrl/2017/displaytype/4.phtml')
soup = BeautifulSoup(html,'html5lib')
lls = soup.select('div#con02-1 table tbody tr td a')
for l in lls:
item = l.get_text().strip()
try:
item = int(item)
date.append(item)
except:
pass
zichanfuzhai = pd.DataFrame()
for i in range(date[len(date)-1],date[0]+1):
url = 'http://money.finance.sina.com.cn/corp/go.php/vFD_BalanceSheet/stockid/'+self.stock_code+'/ctrl/'+str(i)+'/displaytype/4.phtml'
html = self.get_one_page(url)
df = self.parse_one_page_zichanfuzhai(html)
df['stock_code'] = self.stock_code
# print(df)
zichanfuzhai = pd.concat([zichanfuzhai,df]) #两个dataframe做连接,类似数据库的union all
# print(zichanfuzhai)
zichanfuzhai.to_csv(self.work_path + '/'+self.stock_code +'_'+'balance_sheet.csv', encoding='gbk')
xianjinliuliang = pd.DataFrame()
for i in range(date[len(date)-1],date[0]+1):
url = 'http://money.finance.sina.com.cn/corp/go.php/vFD_CashFlow/stockid/'+self.stock_code+'/ctrl/'+str(i)+'/displaytype/4.phtml'
html = self.get_one_page(url)
df = self.parse_one_page_xianjinliuliang(html)
df['stock_code'] = self.stock_code
# print(df)
xianjinliuliang = pd.concat([xianjinliuliang,df]) #两个dataframe做连接,类似数据库的union all
# print(xianjinliuliang)
xianjinliuliang.to_csv(self.work_path + '/'+self.stock_code + '_'+'cash_flow.csv', encoding='gbk')
lirunbiao = pd.DataFrame()
for i in range(date[len(date)-1],date[0]+1):
url = 'http://money.finance.sina.com.cn/corp/go.php/vFD_ProfitStatement/stockid/'+self.stock_code+'/ctrl/'+str(i)+'/displaytype/4.phtml'
html = self.get_one_page(url)
df = self.parse_one_page_lirunbiao(html)
df['stock_code'] = self.stock_code
# print(df)
lirunbiao = pd.concat([lirunbiao,df]) #两个dataframe做连接,类似数据库的union all
# print(lirunbiao)
lirunbiao.to_csv(self.work_path+'/'+self.stock_code + '_'+'profit.csv', encoding='gbk')
print('finish update', self.stock_code)
if __name__ == '__main__':
path = '../history_data/'
cbfx = crawling_finance(path,'600660')
cbfx.crawling_update()
```
#### File: Algorithm/M1809/get_dividends_history.py
```python
import pandas as pd
import requests
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36'}
def get_one_page(url):
try:
response = requests.get(url,headers = headers)
response.encoding = 'GB2312'
if response.status_code == 200:
return response.text
return None
except RequestException:
return None
def parse(html):
raw_data = []
try:
year_raw = []
year = []
bonus_share = []
bonus_convert = []
profit_send = []
ex_rights = []
register_day = []
soup = BeautifulSoup(html,'html5lib')
l = soup.select('table#sharebonus_1')
ls = l[0].tbody
lls = ls.select('td')
for l in lls:
if (l.get_text().strip()) != '预案' and \
(l.get_text().strip()) != '实施' and \
(l.get_text().strip()) != '不分配' and \
(l.get_text().strip()) != '查看':
raw_data.append(l.get_text().strip())
year_raw = raw_data[::7]
# print(raw_data) #出错的话请检查此处的输出
# print(year_raw) #出错的话请检查此处的输出
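        # Added note: the scraped dates are the payout announcement dates, which fall in
        # the year after the fiscal year they belong to, so 1 is subtracted below
        # (e.g. a payout dated 2018 is recorded as the 2017 dividend).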
for item in year_raw:
a = pd.to_datetime(item).year - 1
year.append(a)
bonus_share = raw_data[1::7]
bonus_convert = raw_data[2::7]
profit_send = raw_data[3::7]
ex_rights = raw_data[4::7]
register_day = raw_data[5::7]
# print(register_day)
data = {'年度':year,
'送股':bonus_share,
'转股':bonus_convert,
'派息':profit_send,
'除权日':ex_rights,
'登记日':register_day
}
frame = pd.DataFrame(data)
return frame
except:
print('cannot parse this page')
def parse_single_year(html,Year):
raw_data = []
try:
year_raw = []
year = []
bonus_share = []
bonus_convert = []
profit_send = []
ex_rights = []
register_day = []
# print('it is ',Year)
soup = BeautifulSoup(html,'html5lib')
l = soup.select('table#sharebonus_1')
ls = l[0].tbody
lls = ls.select('td')
for l in lls:
if (l.get_text().strip()) != '预案' and \
(l.get_text().strip()) != '实施' and \
(l.get_text().strip()) != '不分配' and \
(l.get_text().strip()) != '查看':
raw_data.append(l.get_text().strip())
year_raw = raw_data[::7]
# print(raw_data) #出错的话请检查此处的输出
# print(year_raw) #出错的话请检查此处的输出
for item in year_raw:
a = pd.to_datetime(item).year - 1
year.append(a)
bonus_share = raw_data[1::7]
bonus_convert = raw_data[2::7]
profit_send = raw_data[3::7]
ex_rights = raw_data[4::7]
register_day = raw_data[5::7]
# print(register_day)
data = {'年度':year,
'送股':bonus_share,
'转股':bonus_convert,
'派息':profit_send,
'除权日':ex_rights,
'登记日':register_day
}
frame = pd.DataFrame(data)
Len=len(frame)
for i in range(Len):
s=int(frame.iloc[i]['年度'])
date2=frame.iloc[i]['登记日']
# print(s,date2)
if s == Year:
px=float(frame.iloc[i]['派息'])
date2=date2[:4]+date2[5:7]+date2[8:]
# print(s,'px money is ',px,date2)
return px,date2
return (-1,-1)
except:
print('cannot parse this page')
def test(html,Year):
raw_data = []
year_raw = []
year = []
bonus_share = []
bonus_convert = []
profit_send = []
ex_rights = []
register_day = []
# print('it is ',Year)
soup = BeautifulSoup(html,'html5lib')
l = soup.select('table#sharebonus_1')
ls = l[0].tbody
lls = ls.select('td')
for l in lls:
if (l.get_text().strip()) != '预案' and \
(l.get_text().strip()) != '实施' and \
(l.get_text().strip()) != '不分配' and \
(l.get_text().strip()) != '查看':
raw_data.append(l.get_text().strip())
year_raw = raw_data[::7]
# print(raw_data) #出错的话请检查此处的输出
# print(year_raw) #出错的话请检查此处的输出
for item in year_raw:
a = pd.to_datetime(item).year - 1
year.append(a)
bonus_share = raw_data[1::7]
bonus_convert = raw_data[2::7]
profit_send = raw_data[3::7]
ex_rights = raw_data[4::7]
register_day = raw_data[5::7]
# print(register_day)
data = {'年度':year,
'送股':bonus_share,
'转股':bonus_convert,
'派息':profit_send,
'除权日':ex_rights,
'登记日':register_day
}
frame = pd.DataFrame(data)
# print (frame)
Len=len(frame)
for i in range(Len):
s=int(frame.iloc[i]['年度'])
# Date=frame.iloc[i,[2]]
# print (Date)
# return
# date2=Date.loc[u'登记日']
date2=frame.iloc[i]['登记日']
# print(s,date2)
# return
if s == Year:
px=float(frame.iloc[i]['派息'])
date2=date2[:4]+date2[5:7]+date2[8:]
# print(s,'px money is ',px,date2)
return px,date2
return -1
#获取每10股派现金,及股权登记日
def get_px_single_year(id,Year):
url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vISSUE_ShareBonus/stockid/'
url += str(id)
url += '.phtml'
html = get_one_page(url)
return parse_single_year(html,Year)
# 提供给用户的函数,输入ID,解析出历史分红列表
def get_bonus_table(id):
url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vISSUE_ShareBonus/stockid/'
url += str(id)
url += '.phtml'
html = get_one_page(url)
return parse(html)
###############################################################################
###############################################################################
if __name__ =='__main__':
# APP示例代码,用完了请关闭 600066
s = get_bonus_table('601012')
print(s)
#2017年的派息实际是在2018派发,所以登记日时间上是2018年
#测试股息率
PX,Date = get_px_single_year('601012',2016)
print(PX,Date)
```
#### File: Algorithm/M1809/m1809_API.py
```python
import os
from datetime import datetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from crawling_finance_table_v1_7 import crawling_finance
from CoreAnalyse import CoreAnalyse
from getData import GetData
class M1809:
def __init__(self, company_id_list, DataSource='SQL', LocalStore='ON'):
self.company_id_list = company_id_list
self.DataSource = DataSource
self.LocalStore = LocalStore
self.BasePath = '.\\easydo\\Algorithm\\M1809'
self.HstPath = os.path.join(self.BasePath, "history_data") # 历史数据路径
self.OutPath = os.path.join(self.BasePath, "output") # 输出文档路径
if (DataSource != "SQL" and DataSource != 'sql'):
self.DataSource = "CSV" # 从CSV文件中读取数据#从数据库中读取数据
if (os.path.exists(self.HstPath)):
pass
else:
os.mkdir(self.HstPath)
else:
self.DataSource = "SQL" # 从数据库中读取数据
if (LocalStore != 'OFF' and LocalStore != 'off'):
self.LocalStore = 'ON' # getdata数据输出到文本
if (os.path.exists(self.OutPath)):
pass
else:
os.mkdir(self.OutPath)
else:
self.LocalStore = 'OFF' # 不输出到文本
print("History Data save in:" + self.HstPath)
print("Outcome Data save in:" + self.OutPath)
def M1809_Init(self):
'''
本地模式配置
只需要提供感兴趣的对比公司即可,如果只有一个,说明只进行自主分析
'''
global cur
global parameter
print('please wait, start init...')
if len(self.company_id_list) < 2:
print('最少需要输入2个id作为对比')
return
# 此处增加id合法性检查
if self.DataSource == "CSV": # 从CSV文件中读取数据
HisPath = self.BasePath + "history_data"
if (os.path.exists(HisPath)):
print("Folder creation failed!")
return
for item in self.company_id_list:
try:
file_name = os.path.join(self.HstPath,
item + '_profit.csv')
# print (file_name)
with open(file_name, 'r') as fh:
content = fh.readlines()
s = content[-1].split(',')
latest_record = parse(s[0]) # 获取最新时间
current_day = datetime.now() - relativedelta(
months=+12)
if latest_record > current_day:
pass
else:
cbfx = crawling_finance(self.HstPath, item)
cbfx.crawling_update()
except Exception:
cbfx = crawling_finance(self.HstPath, item)
cbfx.crawling_update()
else:
# test = mysql.sql()
# BASE_DIR = os.path.dirname(
# os.path.dirname(os.path.abspath(__file__)))
# SQL_DIR = BASE_DIR + r'\Mysql'
# s = test.init_by_cfg_file(SQL_DIR + r'\sql_config.json')
# M1809_Update(cur, company_list)
pass
print('finish init!')
def M1809_GetData(self):
# self_result = self.AnalyseObj.Compare2Themself(self.company_id_list[0],
# self.DataSource) # 自身对比
GetDataObj = GetData(self.DataSource, self.HstPath)
self_result = GetDataObj.Compare2Themself(self.company_id_list[0])
b1 = GetDataObj.Compare2Industry(self.company_id_list) #同行业对比
compare_result = GetDataObj.data_normalize(b1) #归一化的同行业对比
if self.LocalStore == 'ON':
SelfResultPath = os.path.join(self.OutPath+'\\compare_self.csv')
ComparePath = os.path.join(self.OutPath+'\\compare_industry.csv')
NomalizePath = os.path.join(self.OutPath+'\\normalize.csv')
self_result.to_csv(SelfResultPath, encoding='gbk')
b1.to_csv(ComparePath, encoding='gbk')
compare_result.to_csv(NomalizePath, encoding='gbk')
return self_result, compare_result
def M1809_Analyse(self):
'''
对比分析,并输出
1. ../output/文件夹下会生成诊断报告
2. 控制台输出对比图像(之后可以考虑保存图片)
'''
AnalyseObj = CoreAnalyse()
self_result, compare_result = self.M1809_GetData()
AnalyseObj.Analyse(self_result, compare_result,self.company_id_list[0],self.OutPath)
AnalyseObj.PlotAnalyse(self_result)
def M1809_Run(self):
self.M1809_Init()
self.M1809_Analyse()
if __name__ == '__main__':
company_id_list = ['000651', '000333']
DataSource = "CSV"
AObject = M1809(company_id_list, DataSource)
AObject.M1809_Run()
```
#### File: Algorithm/M1809/M1809_finance_weight.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import metrics
import lightgbm as lgb
'''
修改成从数据库中读取文件
'''
result_yinli = pd.read_csv('D:/999github/anack/M1809/xst/result_yinli.csv')
result_yingyun = pd.read_csv('D:/999github/anack/M1809/xst/result_yingyun.csv')
result_chengzhang = pd.read_csv('D:/999github/anack/M1809/xst/result_chengzhang.csv')
result_changzhai = pd.read_csv('D:/999github/anack/M1809/xst/result_changzhai.csv')
result_xianjin = pd.read_csv('D:/999github/anack/M1809/xst/result_xianjin.csv')
df_final = pd.read_csv('D:/999github/anack/M1809/xst/target.csv')
df_final = df_final[(df_final.firstincrase > 0.1) & (df_final.secondincrase > 0.1)]
df_final = pd.DataFrame({'code' : df_final['code'],
'label' : 1,
})
data = result_yinli
data = pd.merge(data, result_yingyun, on=['code','name'])
data = pd.merge(data, result_chengzhang, on=['code','name'])
data = pd.merge(data, result_changzhai, on=['code','name'])
data = pd.merge(data, result_xianjin, on=['code','name'])
data = pd.merge(data, df_final, on='code',how = 'left')
# =============================================================================
# null_counts = data.isnull().sum()
# print(null_counts)
# =============================================================================
data = data.fillna(0)
data = data.dropna(axis=0)
orig_columns = data.columns
drop_columns = []
for col in orig_columns:
col_series = data[col].dropna().unique()
if len(col_series) == 1:
drop_columns.append(col)
data = data.drop(drop_columns, axis=1)
print(drop_columns)
target = data['label']
code = data['code']
name = data['name']
features = data.drop(['code','name','label'],axis=1)
# Clean up the '--' placeholders in the ratio columns. As in the original column-by-column
# assignments, boolean row indexing zeroes the *whole row* for any sample whose ratio is
# '--', not just the offending cell.
ratio_cols = [
    'currentratio20161', 'quickratio20161', 'cashratio20161', 'icratio20161', 'sheqratio20161', 'adratio20161',
    'currentratio20162', 'quickratio20162', 'cashratio20162', 'icratio20162', 'sheqratio20162', 'adratio20162',
    'currentratio20163', 'quickratio20163', 'cashratio20163', 'icratio20163', 'sheqratio20163', 'adratio20163',
    'currentratio20164', 'quickratio20164', 'cashratio20164', 'icratio20164',
    'currentratio20171', 'quickratio20171', 'cashratio20171', 'icratio20171', 'sheqratio20171', 'adratio20171',
    'currentratio20172', 'quickratio20172', 'cashratio20172', 'icratio20172',
    'currentratio20173', 'quickratio20173', 'cashratio20173', 'icratio20173',
    'currentratio20174', 'quickratio20174', 'cashratio20174', 'icratio20174',
    'currentratio20181', 'quickratio20181', 'cashratio20181', 'icratio20181',
]
for col in ratio_cols:
    features[features[col] == '--'] = 0
features = features.astype('float64')
##基于树的方法不用做标准化、归一化处理
'''
资产负债比,营业税增长率,营业现金增长率,现金增长净额,期末现金
'''
features = features[['targ20174','nav20174','gross_profit_rate20174','cashflowratio20174','net_profit_ratio20174','mbrg20174','currentratio20174','currentasset_turnover20174','inventory_days20174']]
def aucfun(act,pred):
fpr,tpr,thresholds = metrics.roc_curve(act,pred)
plt.plot(fpr, tpr, color='darkorange',lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# =============================================================================
# print(fpr)
# print(tpr)
# print(thresholds)
# =============================================================================
return metrics.auc(fpr,tpr)
def ml_for_weight(features,target):
min_max_scaler = preprocessing.MinMaxScaler()
features_new = min_max_scaler.fit_transform(features)
features = pd.DataFrame(features_new, columns=features.columns)
X_train,X_test,y_train,y_test = train_test_split(features,target,test_size=0.25,random_state=42)
'''
调参
'''
clf = lgb.LGBMClassifier(
boosting_type='gbdt', num_leaves=31, reg_alpha=0, reg_lambda=1,
max_depth=-1, n_estimators=800, objective='binary',
subsample=0.7, colsample_bytree=0.7, subsample_freq=2,
learning_rate=0.05, min_child_weight=20, random_state=2018, n_jobs=-1,class_weight = 'balanced'
)
clf = clf.fit(X_train, y_train, eval_set=[(X_train, y_train),(X_test, y_test)], eval_names = ['train','test'],eval_metric='auc',early_stopping_rounds=100)
y_pre = clf.predict(X_test)
y_pre_pro = clf.predict_proba(X_test)[:, 1]
# =============================================================================
# print(y_pre_pro)
# =============================================================================
print(classification_report(y_test,y_pre))
print(metrics.roc_auc_score(y_test,y_pre_pro)) #预测Y值得分
aucfun(y_test,y_pre_pro)
importances = clf.feature_importances_
indices = np.argsort(importances)[::-1]
print("Feature ranking:")
for f in range(features.shape[1]):
print("%d. feature %d (%f): %s" % (f + 1, indices[f], importances[indices[f]] , features.columns[indices[f]] ))
return features.columns,importances
a,b = ml_for_weight(features,target)
# =============================================================================
# y_pre_pro_f = clf.predict_proba(features)[:, 1]
#
# y_pre_pro_f = pd.DataFrame({'code' : code,
# 'name' : name,
# 'gailv' : y_pre_pro_f
# })
#
# y_pre_pro_f.to_csv('D:/999github/anack/M1809/y_pre_pro_f.csv',index =False)
# =============================================================================
```
#### File: easydo/Data/SinaApp.py
```python
import requests
import json
import pandas as pd
from datetime import datetime
from datetime import timedelta
import os,sys
BASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
import Miscellaneous.TimeConverter as TimeConverter
class SinaApp:
'''
@类名:SinaApp
@描述:从新浪财经获取的数据
@已提供API列表:
>>UpdateKday:更新指定个股的k线数据,如果没有则自动创建,结果持久化到本地或者数据库
>>GetInfo:获取指定个股某一天的交易数据(close,high,low,open,volume)
>>get_k_day:获取指定个股指定天数的K线数据,以DataFrame格式返回
>>ClosePrice:获取指定个股某一天的收盘价
'''
def __init__(self):
'''
@用户配置:
data_path:csv文件保存的默认地址
compatible:兼容模式,开启后使用更方便(效率显著降低)
兼容模式下:自动检查更新,自动检查输入数据类型并自动切换
'''
self.data_path = BASE_DIR + '\\Data\\raw_data\\' #默认文件保存地址
if os.path.exists(self.data_path) == False:
os.makedirs(self.data_path)
self.compatible = True #默认打开,方便使用,想要禁止,请设置为False
self.log = True #调试语句和显示语句使能开关
def UpdateKday(self, mid, mode = 'CSV'):
'''
@用户接口API:
@描述:输入一个id列表或者单个id,自动获取其所有的k线历史并自动存档
@
'''
if isinstance(mid, str):
self.update_one(mid,mode)
elif isinstance(mid, list):
for item in mid:
self.update_one(item,mode)
def update_one(self, id_str,mode = 'CSV'):
'''
@描述:检查指定路径下是否包含最新的资源,如果不包含,则更新
1. 检查是否有该文件
2. 检查是否为最新记录,不是则更新
3. 根据mode指定是保存到本地CSV还是到数据库
@输入:id_str(str)->更新标的,必须是单个股票,且格式固定为: 'sh600660'
mode->更新方式, CSV:存本地, SQL:存数据库
@输出:./Data/raw_data/文件夹下自动建立id_str.csv文件
'''
if self.log == True:
print('checking ...')
file_name = self.data_path + id_str + '.csv'
try:
content = pd.read_csv(file_name)
latest_record = content.iloc[-1]
get_today_data = self.get_k_day(id_str,1) #获取最新网络数据
today_str = get_today_data.iloc[-1]['day'] #今天
today = TimeConverter.str2dtime(today_str)
record_str = latest_record['day'] #最新记录的日期
latest = TimeConverter.str2dtime(record_str)
if latest == today: #已经是最新的
if self.log == True:
print('%s already the latest' % id_str)
else: #append方式更新
if self.log == True:
print('update %s,please wait...' % id_str)
update_day = abs(today-latest)
update_day = update_day.days #更新时间
b = self.get_k_day(id_str, update_day)
for i in range(len(b)):
day = b.iloc[i]['day']
day = TimeConverter.str2dtime(day)
if day > latest:
index = i
break
append_item = b[b.index >= index] #最终要更新的内容
append_item.to_csv(file_name,mode='a', header=False, index = False)
except:
print('no record yet, creating %s.csv,please wait...' % id_str)
a = self.get_k_day(id_str)
a.to_csv(file_name,index = False)
if self.log == True:
print('finished')
def GetInfo(self, id_str, date):
'''
@描述:读取csv文件并获取某一天的参数(价格/成交量等)
@输入:id_str(str)->想要查找的id, 格式如下:'sh600660'
date(str): 非兼容模式下必须为str类型且为'xxxx-xx-xx'格式,兼容模式下随意
item(str):获取什么信息,默认获取收盘价
item(str) = close/open/high/low/volume
@输出:
对应的查询信息,类型为float或者int
'''
if self.compatible == True:
if isinstance(date, datetime):
date = TimeConverter.dtime2str(date,'-')
elif isinstance(date, str):
date = TimeConverter.str2dtime(date)
date = TimeConverter.dtime2str(date,'-')
self.UpdateKday(id_str) #自动更新
file_name = self.data_path + id_str + '.csv'
content = pd.read_csv(file_name)
result = content[content.day == date]
return result
def ClosePrice(self, id_str, date):
'''
@描述:获取指定一天的收盘价
@输入:id_str(str)->‘sh600660’
date(datetime, str)都可以
@输出:price(float), 返回0代表没有数据
'''
info = self.GetInfo(id_str, date)
try:
price = info.iloc[-1]['close']
except:
price = 0
return price
def get_k_day(self, id_str,day=9999):
'''
@描述:获取指定ID的K线数据,并以DateFrame形式返回(此为辅助函数,用户禁止直接调用)
@输入: id_str-> str类型的id,尤其注意,必须带sh/sz前缀,如:"sh600660"
day->返回的天数,默认是最大值,返回全部历史记录
@输出: DateFrame形式的K线数据,全部为str类型
close day high low open volume
0 22.090 2018-12-14 22.290 21.900 22.090 7427246
1 22.190 2018-12-17 22.310 21.810 22.050 5286448
'''
import re
prefix = 'http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/CN_MarketData.getKLineData?symbol='
tail = '&scale=240&ma=no&datalen='
if isinstance(day,int):
day = str(day)
url = prefix + id_str + tail + day
response = requests.get(url)
if response.status_code == 200:
html = response.content
html_str = html.decode()
pattern = r'(?<=[{,])(\w+)' #可以找到
content = re.sub(pattern, lambda i: '\"'+i.group(1)+'\"', html_str)
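            # Added note: the Sina endpoint returns JavaScript-style objects whose keys are
            # not quoted, e.g. [{day:"2018-12-14",open:"22.050",...}]; the substitution above
            # wraps each key in double quotes so that json.loads() can parse the payload.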
result = json.loads(content)
detail_column = ['close', 'day', 'high', 'low', 'open', 'volume']
data = pd.DataFrame(columns=detail_column)
for item in result:
data = data.append(item, ignore_index=True)
return data
else:
return 0
def add_black_price(self, id_str,day_formate = '%Y-%m-%d'):
'''
@描述:给定一个k线csv文件,填充空白的时间,便于快速查找价格
@备注:此为辅助函数,用户禁止调用
@输入: file_name->文件全名
day_formate->时间格式转换的参数,默认为'xx-xx-xx',可以改为'xx/xx/xx'
@输出:out.csv文件, DataFrame格式的变量以备查验
'''
file_name = self.data_path + id_str + '.csv'
content = pd.read_csv(file_name)
if self.log == True:
print(content)
ilen = len(content)
i = 0
today = datetime.strptime(content.iloc[0]['day'],day_formate)
yesterday = today
for i in range(ilen):
today = datetime.strptime(content.iloc[i]['day'],day_formate)
a = today.strftime(day_formate)
d_index = list(content.columns).index('day')
content.iloc[i,d_index] = a
for i in range(1, ilen):
today = content.iloc[i]['day']
today = datetime.strptime(today,day_formate)
if yesterday + timedelta(1) != today:
cur_day = yesterday + timedelta(1)
while cur_day < today:
s = content.iloc[i-1].copy()
s['day'] = cur_day.strftime(day_formate)
content = content.append(s, ignore_index=True)
cur_day += timedelta(1)
yesterday = today
content = content.sort_values(by='day')
content.to_csv('out.csv', index = False)
return content
###############################################################################
#print(test.get_k_day('sh600660',10))
#url = 'http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/CN_MarketData.getKLineData?symbol=sz000651&scale=240&ma=no&datalen=2'
if __name__ == '__main__':
'''
1. 先检查是否有csv或者mysql的数据,如果没有,则自动重新爬取
2. 爬取的数据存为csv
3. 每次获取都统一从k_day.csv一个文件中获取,没有就append进去
'''
test = SinaApp()
# test.update_one('sh600660')
ll = ['sh600660','sh601012','sh600377','sh000001']
#更新k线数据
test.UpdateKday(ll)
#获取指定一天的数据
a = test.GetInfo('sh600660','2019/01/08')
#获取指定一天的价格
# a = test.ClosePrice('sh600660','2019-01-08')
# print(a)
``` |
{
"source": "Jip-Hop/polargraph-optimizer",
"score": 3
} |
#### File: Jip-Hop/polargraph-optimizer/lib.py
```python
from __future__ import print_function
import sys, math, re
min_penup_travel_distance = 1 # in mm, shorter travels will be removed
pendown_value = 'G0 F500.000 Z160.000'
penup_value = 'G0 F500.000 Z90.000'
feedrate_value = 'F60.000'
def calculate_distance(coordinates1, coordinates2):
x1 = coordinates1[0]
y1 = coordinates1[1]
x2 = coordinates2[0]
y2 = coordinates2[1]
dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
return dist
def replace_text_between(original_text, delimeter_a, delimeter_b, replacement_text):
# parts = original_text.split(delimeter_a)
# leadingText = parts.pop(0) # keep everything beofe first delimterA
# parts = delimeter_a.join(parts).split(delimeter_b)
# parts.pop(0) # remove everything before first delimeter_b
# trailingText = delimeter_b.join(parts) # keep everything after first delimeter_b
# return leadingText + delimeter_a + replacement_text + delimeter_b + trailingText
reg = "(?<=%s).*?(?=%s)" % (delimeter_a, delimeter_b)
r = re.compile(reg,re.DOTALL)
result = r.sub(replacement_text, original_text)
return result
class Instruction():
def __init__(self, line):
self.line = line.rstrip()
self.typecode = self.line.split(' ')[0]
self.typename = self._typename()
self.coords = self._coords()
def distance_to(self, other):
# return max(abs(other.coords[0] - self.coords[0]), abs(other.coords[1] - self.coords[1]))
return calculate_distance(self.coords, other.coords)
def _typename(self):
if pendown_value in self.line:
return 'pendown'
elif penup_value in self.line:
return 'penup'
elif self.typecode == "G0" or self.typecode == "G1":
return 'move'
else:
return 'other'
def _coords(self):
try:
# Try to extract coordinates.
x = self.line.split('X')[1].split(" ")[0]
y = self.line.split('Y')[1].split(" ")[0]
return (float(x), float(y))
except IndexError:
return None
class Glyph():
def __init__(self, instructions):
self._reversed = False
try:
self.start = instructions[0].coords
self.end = instructions[-2].coords
except IndexError:
self.start = None
self.end = None
if self.start == None or self.end == None:
print("Problem with instructions in glyph:", file=sys.stderr)
for i in instructions:
print("%s (%s)" % (i.line, i.typename), file=sys.stderr)
self.instructions = instructions
def distance_to(self, other):
"""
Compute distance between two glyphs
"""
# return max(abs(other.start[0] - self.end[0]), abs(other.start[1] - self.end[1]))
return calculate_distance(self.end, other.start)
def distance_to_if_other_reversed(self, other):
# return max(abs(other.end[0] - self.end[0]), abs(other.end[1] - self.end[1]))
return calculate_distance(self.end, other.end)
def _reversed_instructions(self):
"""
A generator of the reversed instructions.
Typical instructions look like this (normal ordering):
G1 F100.000 X250.066 Y-439.295 <-- startpoint (assumed pen is up)
G0 F500.000 Z160.000 <-- pendown
G0 F60.000 X250.409 Y-439.954 <-- drawing moves ...
G0 X248.001 Y-441.921
G0 X245.314 Y-443.391 <-- last move
G0 F500.000 Z90.000 <-- penup
So a reversed ordering would print in this order:
startpoint, G1, but with coordinates from last move
pendown
other moves in reversed order
last move, G0, but with coordinates from startpoint
penup
"""
original_order = iter(self.instructions)
reverse_order = reversed(self.instructions)
startpoint = next(original_order)
pendown = next(original_order)
penup = next(reverse_order)
endpoint = next(reverse_order)
        # Swap the command templates, chaining the X and Y substitutions so the second
        # replacement does not discard the first.
        g1_template = startpoint.line
        g0_template = endpoint.line
        endpoint.line = replace_text_between(g1_template, "X", " ", str(endpoint.coords[0]))
        endpoint.line = replace_text_between(endpoint.line, "Y", " ", str(endpoint.coords[1]))
        startpoint.line = replace_text_between(g0_template, "X", " ", str(startpoint.coords[0]))
        startpoint.line = replace_text_between(startpoint.line, "Y", " ", str(startpoint.coords[1]))
endpoint.typecode = endpoint.line.split(' ')[0]
startpoint.typecode = startpoint.line.split(' ')[0]
yield endpoint
yield pendown
for i in reverse_order:
if not i.typename == 'move':
break
yield i
yield startpoint
yield penup
def ordered_instructions(self):
if self._reversed:
return self._reversed_instructions()
else:
return iter(self.instructions)
def reversed_copy(self):
if not hasattr(self, '_reversed_copy'):
from copy import copy
new = copy(self)
new.start = self.end
new.end = self.start
new._reversed = True
new._reversed_copy = self
self._reversed_copy = new
return self._reversed_copy
def __hash__(self):
return hash("\n".join([i.line for i in self.instructions]))
def total_penup_travel(gs):
"""
Compute total distance traveled in a given ordering
"""
def distance_between_each_pair(gs):
gs = iter(gs)
prev = next(gs)
for g in gs:
yield prev.distance_to(g)
prev = g
return sum(distance_between_each_pair(gs))
def total_travel(gs):
def iter_moves(gs):
for g in gs:
for i in g.ordered_instructions():
if i.typename == 'move':
yield i
def distance_between_moves(moves):
moves = iter(moves)
prev = next(moves)
for m in moves:
yield prev.distance_to(m)
prev = m
return sum(distance_between_moves(iter_moves(gs)))
def reorder_greedy(gs, index=0):
"""
Greedy sorting: pick a starting glyph, then find the glyph which starts
nearest to the previous ending point.
This is O(n^2). Pretty sure it can't be optimized into a sort.
"""
from operator import itemgetter
gs = list(gs)
ordered = [gs.pop(index)]
prev = ordered[0]
def dist_reverse_iterator(gs):
for g in gs:
yield (prev.distance_to(g), False, g)
yield (prev.distance_to_if_other_reversed(g), True, g)
while len(gs) > 0:
(dist, reverse, nearest) = min(dist_reverse_iterator(gs),
key=itemgetter(0, 1))
gs.remove(nearest)
if reverse:
prev = nearest.reversed_copy()
else:
prev = nearest
ordered.append(prev)
return ordered
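# Hedged end-to-end sketch (added; not in the original file). Assuming `glyphs` is an
# iterable of Glyph objects built elsewhere, a typical pipeline using the helpers in this
# module would look like:
#
#   ordered = reorder_greedy(dedupe(glyphs))
#   gcode_lines = [i.line for i in clean_instructions(
#       prune_small_distance_penups(iter_instructions(ordered)))]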
def prune_small_distance_penups(instructions):
instructions = iter(instructions)
try:
prev = next(instructions)
except StopIteration:
raise ValueError("instructions empty")
# The first instruction should always be a penup, so we send it straight
# through.
yield prev
try:
while True:
current = next(instructions)
if current.typename == 'penup':
last_down = prev
penup = current
# Get all moves while the pen is up. There should only ever be
# one, but you never know these days. :-)
moves = []
try:
while True:
penup_move = next(instructions)
if penup_move.typename == 'pendown':
pendown = penup_move
break
else:
moves.append(penup_move)
except StopIteration:
# If we reach the end of the instructions while looking for
# a pendown, raise the pen and call it good.
yield penup
raise StopIteration
if calculate_distance(moves[-1].coords, last_down.coords) <= min_penup_travel_distance:
# The penup move(s) didn't travel the minimum desired distance,
# so we remove them from the list of instructions and continue
# to the next instruction.
continue
else:
# The penup move(s) DID move enough, so we keep them.
yield penup
for move in moves:
yield move
yield pendown
else:
yield current
prev = current
except StopIteration:
pass
def clean_instructions(instructions):
cleaned = []
is_pen_up = True
clean_instructions.prev = None
def keep_instruction(instruction):
if (instruction.typecode == "G0" and instruction.coords is not None):
            if (clean_instructions.prev is not None and clean_instructions.prev.typename == 'pendown') and ("F" not in instruction.line):
# Insert feed rate for first pendown move
instruction.line = replace_text_between(instruction.line, "G0 ", "X", feedrate_value + " ")
elif ("F" in instruction.line):
# Remove feed rate for next moves
instruction.line = replace_text_between(instruction.line, "G0 ", "X", "")
clean_instructions.prev = instruction
cleaned.append(instruction)
for instruction in instructions:
if instruction.typename == 'penup':
is_pen_up = True
elif instruction.typename == 'pendown':
is_pen_up = False
if (instruction.typecode == "G1"):
if is_pen_up:
# Keep G1 instruction if pen is up
keep_instruction(instruction)
else:
# If pen is down, it should be a G0 move.
# Only keep if it travels a distance
if(clean_instructions.prev is not None and clean_instructions.prev.coords):
if calculate_distance(clean_instructions.prev.coords, instruction.coords) > 0:
instruction.typecode = "G0"
instruction.line = instruction.line.replace("G1", "G0")
keep_instruction(instruction)
else:
if instruction.typecode == "G0" and instruction.coords is not None and clean_instructions.prev is not None and clean_instructions.prev.coords is not None:
if not (calculate_distance(clean_instructions.prev.coords, instruction.coords) > 0):
# Skip duplicate instruction
continue
# Keep these instructions
keep_instruction(instruction)
return cleaned
def dedupe(gs):
"Use Glyph.__hash__() to dedupe the list of glyphs"
seen = set()
for g in gs:
h = hash(g)
if h not in seen:
yield g
seen.add(h)
def iter_instructions(gs):
# be sure to start with a penup
yield Instruction(penup_value)
for g in gs:
for i in g.ordered_instructions():
yield i
``` |
{
"source": "Ji-Ping-Dai/CosmoCl",
"score": 2
} |
#### File: pycamb/camb/nonlinear.py
```python
from .baseconfig import dll_import
from ctypes import c_int
# ---Parameters in halofit_ppf.f90
halofit_original = 1
halofit_bird = 2
halofit_peacock = 3
halofit_takahashi = 4
halofit_mead = 5
halofit_halomodel = 6
halofit_default = halofit_takahashi
halofit_version_names = ['original','bird','peacock','takahashi','mead','halomodel']
halofit_version = dll_import(c_int, "nonlinear", "halofit_version")
# halofit_version.value = halofit_default
def set_halofit_version(version = 'takahashi'):
"""
Set the halofit model for non-linear corrections.
:param version: One of
- original: `astro-ph/0207664 <http://arxiv.org/abs/astro-ph/0207664>`_
- bird: `arXiv:1109.4416 <http://arxiv.org/abs/1109.4416>`_
- peacock: `Peacock fit <http://www.roe.ac.uk/~jap/haloes/>`_
- takahashi: `arXiv:1208.2701 <http://arxiv.org/abs/1208.2701>`_
- mead: `arXiv:1505.07833 <http://arxiv.org/abs/1505.07833>`_
- halomodel: basic halomodel
"""
halofit_version.value = halofit_version_names.index(version) + 1
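# Hedged usage example (added; not part of the original module):
#   set_halofit_version('mead')   # same effect as halofit_version.value = halofit_mead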
```
#### File: pycamb/camb/reionization.py
```python
from .baseconfig import CAMB_Structure, dll_import
from ctypes import c_bool, c_int, c_double
# ---Variables in reionization.f90
# To set the value please just put
# variablename.value = newvalue
# logical
include_helium_fullreion = dll_import(c_bool, "reionization", "include_helium_fullreion")
# include_helium_fullreion.value = True
# logical
Reionization_AccuracyBoost = dll_import(c_bool, "reionization", "reionization_accuracyboost")
# Reionization_AccuracyBoost.value = 1.
Rionization_zexp = dll_import(c_bool, "reionization", "rionization_zexp")
# ---Derived Types in reionization.f90
class ReionizationParams(CAMB_Structure):
"""
    Holds parameters for the reionization model.
"""
_fields_ = [
("Reionization", c_int), # logical
("use_optical_depth", c_int), # logical
("redshift", c_double),
("delta_redshift", c_double),
("fraction", c_double),
("optical_depth", c_double),
("helium_redshift", c_double), # helium_redshift = 3.5_dl
("helium_delta_redshift", c_double), # helium_delta_redshift = 0.5
("helium_redshiftstart", c_double) # helium_redshiftstart = 5._dl
]
def set_tau(self, tau, delta_redshift=None):
"""
Set the optical depth
:param tau: optical depth
:param delta_redshift: delta z for reionization
:return: self
"""
self.use_optical_depth = True
self.optical_depth = tau
if delta_redshift is not None:
self.delta_redshift = delta_redshift
return self
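# Hedged usage note (added): set_tau returns self, so it can be chained when configuring a
# CAMB parameter object, e.g. (the Reion attribute name is an assumption here):
#   pars.Reion.set_tau(0.06, delta_redshift=0.5)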
class ReionizationHistory(CAMB_Structure):
"""
Internally calculated parameters.
"""
_fields_ = [
("tau_start", c_double),
("tau_complete", c_double),
("akthom", c_double),
("fHe", c_double),
("WindowVarMid", c_double),
("WindowVarDelta", c_double)
]
``` |
{
"source": "Ji-Ping-Dai/ML_exercise_Andrew_Ng_Python",
"score": 3
} |
#### File: ML_exercise_Andrew_Ng_Python/machine-learning-ex2/func.py
```python
import numpy as np
import matplotlib.pyplot as plt
plt.rc('text',usetex=True)
plt.rc('font',family='Times New Roman')
def plotdata(X,y,ax):
'''PLOTDATA Plots the data points x and y into a new figure'''
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
ax.plot(X[(y==1).flatten(),0], X[(y==1).flatten(),1], 'kx')
ax.plot(X[(y==0).flatten(),0], X[(y==0).flatten(),1], 'yo')
return
def sigmoid(z):
'''sigmoid function, output between 0-1'''
sigmoid = 1/(1+np.exp(-z))
return sigmoid
def costfunc(theta,x,y):
'''Compute cost for Logistic Regression'''
m = len(y)
theta = theta.reshape(-1,1)
z = x@theta
J = -1/m*([email protected](sigmoid(z))+(1-y)[email protected](1-sigmoid(z)))
return J
def costfuncReg(theta,x,y,lam):
'''Compute cost for Logistic Regression'''
m = len(y)
theta = theta.reshape(-1,1)
J=np.zeros([len(theta),1])
z = x@theta
J = -1/m*([email protected](sigmoid(z))+(1-y)[email protected](1-sigmoid(z)))+lam/2/m*theta.T[:,1:]@theta[1:,:]
return J
def grid(theta,x,y):
'''Compute gradient for Logistic Regression'''
m = len(y)
theta = theta.reshape(-1,1)
z = x@theta
grid = 1/m*(x.T@(sigmoid(z)-y))
return grid.flatten()
def gridReg(theta,x,y,lam):
'''Compute gradient for Logistic Regression'''
m = len(y)
theta = theta.reshape(-1,1)
grid=np.zeros([len(theta),1])
z = x@theta
grid[0,:] = 1/m*(x.T@(sigmoid(z)-y))[0,:]
grid[1:,:] = 1/m*(x.T@(sigmoid(z)-y))[1:,:] + lam/m*theta[1:,:]
return grid.flatten()
def gradientDescent(x,y,theta,iterations,alpha):
'''Performs gradient descent to learn theta'''
m = len(y)
cost = np.zeros(iterations)
for i in range(iterations):
z = x@theta
theta = theta-1/m*alpha*x.T@(sigmoid(z)-y)
cost[i] = costfunc(theta,x,y)
return theta, cost
def plotboundary(X,y,theta,ax):
'''Plots the Decision Boundary'''
plotdata(X,y,ax)
if len(theta)<=3:
px=np.linspace(np.min(X[:,0])-2,np.max(X[:,0])+2,100)
py=(-theta[0]-theta[1]*px)/theta[2]
ax.plot(px,py,ls='--',color='b')
else:
px=np.linspace(-1,1.5,100)
py=np.linspace(-1,1.5,100)
cost=np.zeros([100,100])
for i in range(len(px)):
for j in range(len(py)):
cost[i,j]=mapX(px[i],py[j])@theta
cost=cost.T
PX,PY=np.meshgrid(px,py)
ax.contour(PX,PY,cost,[0],colors='k')
return
def predict(x,theta):
'''Predict whether the label is 0 or 1 using learned logistic '''
p = sigmoid([email protected](-1,1))
p[p>=0.5]=1
p[p<0.5]=0
return p
def mapX(x1,x2):
'''Feature mapping function to polynomial features'''
order = 6
X = np.ones([x1.size,1])
order=6
for i in range(1,order+1):
for j in range(i+1):
newcol = (x1**(i-j)*x2**(j)).reshape(-1,1)
X = np.concatenate([X,newcol],axis=1)
return X
```
#### File: ML_exercise_Andrew_Ng_Python/machine-learning-ex6/func.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.optimize import fmin_cg
from sklearn.svm import SVC
plt.rc('text',usetex=True)
plt.rc('font',family='Times New Roman')
def plotdata(X,y,ax):
'''PLOTDATA Plots the data points x and y into a new figure'''
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
ax.plot(X[np.ravel(y==1),0],X[np.ravel(y==1),1],'kx')
ax.plot(X[np.ravel(y==0),0],X[np.ravel(y==0),1],'ro')
return
def dataset3Params(X,y,Xval,yval):
'''returns the optimal choice of C and sigma '''
C = np.array([0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])
sigma = np.array([0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])
m = len(C)
error = np.zeros([m,m])
for i in range(m):
for j in range(m):
model = SVC(kernel='rbf', C=C[i], gamma=1/2/sigma[j]**2)
model.fit(X,y.ravel())
error[i,j] = sum(model.predict(Xval) == yval.ravel())
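            # Added note: despite its name, error[i, j] holds the number of *correct*
            # validation predictions for this (C, sigma) pair, so np.argmax below selects
            # the best-performing combination.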
maxarg = np.unravel_index(np.argmax(error),error.shape)
return C[maxarg[0]],sigma[maxarg[1]]
def processEmail(file_contents):
'''preprocesses a the body of an email and returns a list of word_indices'''
import pandas as pd
import re
from nltk import PorterStemmer
stemmer = PorterStemmer()
vocabList = pd.read_table('data/vocab.txt',header=None, names=['index'],index_col=1)
word_indices = []
file_contents = file_contents.lower()
pattern = re.compile(r'\n')
file_contents = pattern.sub(" ", file_contents)
file_contents
pattern = re.compile(r'[0-9]+')
file_contents = pattern.sub("number", file_contents)
pattern = re.compile(r'(http|https)://.*?\s')
file_contents = pattern.sub("httpaddr", file_contents)
file_contents
pattern = re.compile(r'[^\s]+@[^\s]+')
file_contents = pattern.sub("emailaddr", file_contents)
pattern = re.compile(r'[$]+')
file_contents = pattern.sub("dollar", file_contents)
file_contents = file_contents.split(' ')
for i in range(len(file_contents)):
if file_contents[i].isalpha():
words = stemmer.stem(file_contents[i])
if words in vocabList.index:
word_indices.append(int(vocabList.loc[words]))
return word_indices
``` |
{
"source": "Jipje/local_smart_grid_simulation",
"score": 3
} |
#### File: local_smart_grid_simulation/environment/NetworkEnvironment.py
```python
class NetworkEnvironment(object):
def __init__(self, verbose_lvl=2):
self.network_objects = []
self.verbose_lvl = verbose_lvl
self.network_object_parameters = []
self.number_of_steps = 0
def add_object(self, network_object, action_parameters):
self.network_objects.append(network_object)
self.network_object_parameters.append(action_parameters)
def take_step(self, environment_step) -> int:
self.number_of_steps += 1
total_network_step = 0
for object_index in range(len(self.network_objects)):
network_object = self.network_objects[object_index]
action_parameters = self.network_object_parameters[object_index]
total_network_step += network_object.take_step(environment_step, action_parameters)
return self.check_action(total_network_step)
def check_action(self, network_step):
if self.verbose_lvl > 2:
print('Network is measuring: {}'.format(network_step))
return network_step
def done_in_mean_time(self, curr_msg=None):
res_msg = ''
for network_object in self.network_objects:
res_msg = res_msg + network_object.done_in_mean_time() + '\n\t'
res_msg = res_msg[:-2]
if curr_msg is not None:
res_msg = curr_msg + res_msg
return res_msg
def end_of_environment_message(self, environment_additions):
num_of_ptus = self.number_of_steps / 15
num_of_days = num_of_ptus / 96
res_msg = 'Environment: ' \
f'\n\tNumber of 1m timesteps: {self.number_of_steps}' \
f'\n\tNumber of PTUs: {num_of_ptus}' \
f'\n\tNumber of days: {num_of_days}'
for msg in environment_additions:
res_msg = res_msg + '\n\t' + msg
for network_object in self.network_objects:
res_msg = res_msg + network_object.end_of_environment_message(num_of_days)
return res_msg
def end_of_environment_metrics(self, current_metrics):
res_dict = current_metrics
for object_index in range(len(self.network_objects)):
network_object = self.network_objects[object_index]
res_dict.update(network_object.end_of_environment_metrics())
return res_dict
```
#### File: evolutionary_algorithm/fitness_functions/PureMoneyFitnessNoCongestion.py
```python
from environment.ImbalanceEnvironment import ImbalanceEnvironment
from environment.NetworkEnvironment import NetworkEnvironment
from environment.TotalNetworkCapacityTracker import TotalNetworkCapacityTracker
from evolutionary_algorithm.Fitness import Fitness
from evolutionary_algorithm.individuals.StrategyIndividual import StrategyIndividual
from main import run_simulation_from_dict_of_df
from network_objects.Battery import Battery
from network_objects.RenewableEnergyGenerator import RenewableEnergyGenerator
from network_objects.control_strategies.StrategyWithLimitedChargeCapacityControlTower import \
StrategyWithLimitedChargeCapacityControlTower
class PureMoneyFitnessNoCongestion(Fitness):
def __init__(self, verbose_lvl=-1, transportation_kw=2000, congestion_kw=14000, congestion_safety_margin=0.99):
super().__init__(verbose_lvl, transportation_kw, congestion_kw, congestion_safety_margin)
def run_simulation(self, individual):
# Initialise environment
imbalance_environment = NetworkEnvironment(verbose_lvl=self.verbose_lvl)
ImbalanceEnvironment(imbalance_environment, mid_price_index=2, max_price_index=1, min_price_index=3)
TotalNetworkCapacityTracker(imbalance_environment, self.congestion_kw)
# Initialise solar farm
solarvation = RenewableEnergyGenerator('Solarvation solar farm', 19000, verbose_lvl=self.verbose_lvl)
# Initialise battery
battery = Battery('Wombat', 30000, 14000, battery_efficiency=0.9, starting_soc_kwh=1600,
verbose_lvl=self.verbose_lvl)
# Initialise random strategy
money_earning_strategy = individual.value
strategy_limited_charge_controller = StrategyWithLimitedChargeCapacityControlTower(
name="Wombat Battery Controller", network_object=battery, strategy=money_earning_strategy,
verbose_lvl=self.verbose_lvl, transportation_kw=self.transportation_kw)
imbalance_environment.add_object(solarvation, [1, 3, 4])
imbalance_environment.add_object(strategy_limited_charge_controller, [1, 3, 4])
res_dict = run_simulation_from_dict_of_df(self.starting_timestep, self.number_of_steps, scenario=self.scenario,
verbose_lvl=self.verbose_lvl,
simulation_environment=imbalance_environment,
dict_of_df=self.scenario_df)
return res_dict
def fitness(self, individual):
if individual.fitness is not None:
return individual.fitness
res_dict = self.run_simulation(individual)
fitness_value = res_dict['wombat_battery_revenue']
individual.set_fitness(fitness_value)
return fitness_value
if __name__ == '__main__':
random_individual = StrategyIndividual(init_params={'number_of_points': 4})
fitness = PureMoneyFitnessNoCongestion(verbose_lvl=1)
fitness.set_month(4)
print(fitness.fitness(random_individual))
```
#### File: evolutionary_algorithm/populations/TournamentSelectionPopulation.py
```python
import random
from evolutionary_algorithm.Population import Population
class TournamentSelectionPopulation(Population):
def __init__(self, size, fitness, individual_class, init_params, tournament_size):
super().__init__(size, fitness, individual_class, init_params)
self.tournament_size = tournament_size
def get_parents(self, num_of_partners):
pop_max_index = len(self.individuals) - 1
selection = []
while len(selection) < 2 * num_of_partners:
best = self.individuals[random.randint(0, pop_max_index)]
for _ in range(self.tournament_size - 1):
contestant = self.individuals[random.randint(0, pop_max_index)]
if contestant.fitness < best.fitness:
best = contestant
selection.append(best)
mothers = selection[:num_of_partners]
fathers = selection[num_of_partners:]
return mothers, fathers
```
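A self-contained sketch of the tournament rule used in `get_parents` above; note that `contestant.fitness < best.fitness` means the lowest fitness wins a tournament in this implementation. The `Indiv` namedtuple is only a stand-in for the real individual class.
```python
import random
from collections import namedtuple

Indiv = namedtuple('Indiv', ['name', 'fitness'])  # illustrative stand-in

def run_tournament(individuals, tournament_size):
    # Mirror of the inner loop: sample contestants at random, keep the lowest fitness.
    best = individuals[random.randint(0, len(individuals) - 1)]
    for _ in range(tournament_size - 1):
        contestant = individuals[random.randint(0, len(individuals) - 1)]
        if contestant.fitness < best.fitness:
            best = contestant
    return best

population = [Indiv(f'i{n}', random.random()) for n in range(10)]
print(run_tournament(population, tournament_size=3))
```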
#### File: local_smart_grid_simulation/helper_objects/ImbalancePriceReader.py
```python
import pandas as pd
import datetime as dt
import dateutil.tz
ams = dateutil.tz.gettz('Europe/Amsterdam')
utc = dateutil.tz.UTC
class ImbalancePriceReader(object):
def __init__(self, market_data='../data/tennet_balans_delta/tennet_balans_delta_15m.csv'):
market_df = pd.read_csv(market_data, parse_dates=True)
market_df.index = pd.to_datetime(market_df['time'], utc=True)
market_df = market_df.drop('time', axis=1)
self.market_df = market_df
def get_day(self, day=None):
if day is None:
day = dt.datetime(2020, 12, 27, tzinfo=utc)
else:
day = day.replace(hour=0, minute=0, second=0, microsecond=0)
start_of_day = day
end_of_day = day + dt.timedelta(days=1)
return self.get_specific_time(start_of_day, end_of_day)
def get_specific_time(self, start_of_time=None, end_of_time=None):
if start_of_time is None:
start_of_time = dt.datetime(2020, 12, 27, tzinfo=utc)
if end_of_time is None:
end_of_time = start_of_time + dt.timedelta(days=1)
filtered_df = self.market_df[(self.market_df.index > start_of_time) & (self.market_df.index <= end_of_time)]
return filtered_df
if __name__ == '__main__':
imbalance_price_reader = ImbalancePriceReader()
print(imbalance_price_reader.get_day())
print(imbalance_price_reader.get_specific_time())
```
#### File: helper_objects/strategies/CsvStrategy.py
```python
import pandas as pd
from helper_objects.strategies.Strategy import Strategy
class CsvStrategy(Strategy):
def __init__(self, name, strategy_csv, price_step_size=5):
super().__init__(name, price_step_size)
self.upload_strategy(strategy_csv)
def update_max_and_min_price(self, strategy_df):
assert(strategy_df['state_from'].min() == 0)
assert(strategy_df['state_until'].max() == 100)
highest_price = strategy_df['price_from'].drop_duplicates(keep='last').nlargest(1).iloc[0] + self.price_step_size
lowest_price = strategy_df['price_until'].drop_duplicates(keep='last').nsmallest(1).iloc[0] - self.price_step_size
self.max_price = highest_price
self.min_price = lowest_price
def upload_strategy(self, strategy_csv):
strategy_df = pd.read_csv(strategy_csv)
self.update_max_and_min_price(strategy_df)
self.initialize_strategy_matrix()
strategy_matrix = self.strategy_matrix
for _, strategy_line in strategy_df.iterrows():
if strategy_line.command not in ['CHARGE', 'WAIT', 'DISCHARGE']:
raise ValueError('Strategies should only contain the following commands: CHARGE, WAIT, DISCHARGE')
if strategy_line.price_from != 9999 and strategy_line.price_from != -9999:
if strategy_line.price_from % 5 != 0:
raise ValueError('Strategies should be defined in price steps of 5. Found price: {}'.format(strategy_line.price_from))
if strategy_line.price_until != 9999 and strategy_line.price_until != -9999:
if strategy_line.price_until % 5 != 0:
raise ValueError('Strategies should be defined in price steps of 5. Found price: {}'.format(strategy_line.price_until))
current_soc = strategy_line.state_from
if strategy_line.state_until == 100:
strategy_line.state_until = 101
while current_soc < strategy_line.state_until:
current_soc_index = current_soc
for current_price in range(self.min_price, self.max_price + self.price_step_size, self.price_step_size):
current_price_index = self.price_index(current_price)
if strategy_line.price_from <= current_price <= strategy_line.price_until:
strategy_matrix[current_soc_index][current_price_index] = strategy_line.command
current_soc += 1
self.uploaded = True
self.strategy_matrix = strategy_matrix
```
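For reference, a strategy table that would pass the checks in `upload_strategy` above could be built as follows; the rows are invented, but the column names, the price step of 5 and the ±9999 open-ended price sentinels are taken from the code.
```python
import pandas as pd

# Illustrative strategy with the columns CsvStrategy expects.
strategy_df = pd.DataFrame([
    {'state_from': 0,  'state_until': 40,  'price_from': -9999, 'price_until': 50,   'command': 'CHARGE'},
    {'state_from': 40, 'state_until': 80,  'price_from': 50,    'price_until': 100,  'command': 'WAIT'},
    {'state_from': 80, 'state_until': 100, 'price_from': 100,   'price_until': 9999, 'command': 'DISCHARGE'},
])
strategy_df.to_csv('example_strategy.csv', index=False)  # file name is hypothetical
# CsvStrategy('example', 'example_strategy.csv') would then load it into a SOC x price matrix.
```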
#### File: helper_objects/strategies/RandomStrategyGenerator.py
```python
import random
import sys
from helper_objects.strategies.PointBasedStrategy import PointBasedStrategy
from one_time_scripts.visualisations.strategy_visualisation import visualize_strategy
def generate_fully_random_strategy(seed=None, name=None, strategy_price_step_size=None, number_of_points=None,
flag_visualise=None):
if seed is None:
seed = random.randrange(sys.maxsize)
if flag_visualise is None:
flag_visualise = True
random.seed(seed)
if flag_visualise is None:
flag_visualise = False
if name is None:
name = 'Randomly generated strategy. Seed={}'.format(seed)
if strategy_price_step_size is None:
price_step_size = 5
else:
price_step_size = int(strategy_price_step_size)
if number_of_points is None:
number_of_points = random.randint(1, 5)
else:
number_of_points = int(number_of_points)
point_based_strat = PointBasedStrategy(name, price_step_size=price_step_size)
for _ in range(number_of_points):
state_of_charge_perc = random.randint(6, 95)
imbalance_price = random.randrange(-100, 400, price_step_size)
point_based_strat.add_point((state_of_charge_perc, imbalance_price, 'CHARGE'))
state_of_charge_perc = random.randint(6, 95)
imbalance_price = random.randrange(-100, 400, price_step_size)
point_based_strat.add_point((state_of_charge_perc, imbalance_price, 'DISCHARGE'))
point_based_strat.upload_strategy()
if flag_visualise:
visualize_strategy(point_based_strat)
return point_based_strat
def generate_random_discharge_relative_strategy(seed=None, name=None, number_of_points=None,
strategy_price_step_size=None, flag_visualise=None):
if seed is None:
seed = random.randrange(sys.maxsize)
if flag_visualise is None:
flag_visualise = True
random.seed(seed)
if flag_visualise is None:
flag_visualise = False
if name is None:
name = 'Randomly generated strategy. Seed={}'.format(seed)
if number_of_points is None:
number_of_points = random.randint(2, 4)
else:
number_of_points = int(number_of_points)
if strategy_price_step_size is None:
strategy_price_step_size = 5
else:
strategy_price_step_size = int(strategy_price_step_size)
point_based_strat = PointBasedStrategy(name, price_step_size=strategy_price_step_size)
soc_step_size = int(89/number_of_points)
price_step_size = int(500/number_of_points)
if price_step_size % strategy_price_step_size != 0:
price_step_size = price_step_size - (price_step_size % strategy_price_step_size)
charge_soc = 5
charge_price = 401
charge_price = charge_price + (strategy_price_step_size - charge_price % strategy_price_step_size)
max_charge_price = charge_price
discharge_soc = 0
discharge_price = charge_price
for i in range(1, number_of_points):
charge_soc = random.randint(charge_soc + 1, i * soc_step_size)
charge_price = random.randrange(max_charge_price - i * price_step_size, charge_price - 1, strategy_price_step_size)
point_based_strat.add_point((charge_soc, charge_price, 'CHARGE'))
discharge_soc = min(max(charge_soc, discharge_soc) + random.randint(5, soc_step_size), 94)
discharge_price = random.randrange(charge_price, discharge_price - 1, strategy_price_step_size)
point_based_strat.add_point((discharge_soc, discharge_price, 'DISCHARGE'))
charge_soc = 95
charge_min_price = -105
charge_min_price = charge_min_price - charge_min_price % strategy_price_step_size
charge_price = random.randrange(charge_min_price, charge_price - 1, strategy_price_step_size)
point_based_strat.add_point((charge_soc, charge_price, 'CHARGE'))
discharge_soc = 95
discharge_price = random.randrange(charge_price, discharge_price - 1, strategy_price_step_size)
point_based_strat.add_point((discharge_soc, discharge_price, 'DISCHARGE'))
point_based_strat.upload_strategy()
if flag_visualise:
visualize_strategy(point_based_strat)
return point_based_strat
if __name__ == '__main__':
fully_random = generate_fully_random_strategy(strategy_price_step_size=2, flag_visualise=True)
random_strategy = generate_random_discharge_relative_strategy(strategy_price_step_size=2, flag_visualise=True)
```
#### File: network_objects/control_strategies/NaiveControlTower.py
```python
from network_objects.Battery import Battery
from network_objects.NetworkObject import NetworkObject
class NaiveControlTower(NetworkObject):
def __init__(self, name, network_object: Battery, verbose_lvl=3):
super().__init__(name)
self.battery = network_object
self.verbose_lvl = verbose_lvl
def take_step(self, environment_step, action_parameters) -> int:
if self.verbose_lvl > 3:
print(f'\t{self.battery.name} battery is taking a step. Current SoC: {self.battery.state_of_charge_kwh}kWh')
self.progress_battery(environment_step, action_parameters)
action, action_kw = self.determine_step(environment_step, action_parameters)
action_kw = self.battery.take_action(action, action_kw)
return action_kw
def determine_step(self, environment_step, action_parameters) -> (str, int):
return 'WAIT', 0
def progress_battery(self, environment_step, action_parameters):
charge_price = environment_step[action_parameters[0]]
discharge_price = environment_step[action_parameters[1]]
self.battery.update_step(charge_price, discharge_price)
def done_in_mean_time(self):
return self.battery.done_in_mean_time()
def end_of_environment_message(self, num_of_days=None):
return self.battery.end_of_environment_message(num_of_days)
def end_of_environment_metrics(self):
return self.battery.end_of_environment_metrics()
```
#### File: network_objects/control_strategies/StrategyControlTower.py
```python
from network_objects.Battery import Battery
from network_objects.control_strategies.NaiveControlTower import NaiveControlTower
class StrategyControlTower(NaiveControlTower):
def __init__(self, name, network_object: Battery, verbose_lvl=3, strategy=None):
super().__init__(name, network_object, verbose_lvl)
self.strategy = strategy
def determine_step(self, environment_step, action_parameters):
charge_price = environment_step[action_parameters[0]]
discharge_price = environment_step[action_parameters[1]]
soc_perc = int(self.battery.state_of_charge_kwh / self.battery.max_kwh * 100)
if self.strategy is None:
raise NotImplementedError('You did not specify a strategy.')
action = self.strategy.make_decision(charge_price, discharge_price, soc_perc)
if self.verbose_lvl > 3:
print(f'\t\t{self.strategy.name} tells the battery to {action}')
return action, self.battery.max_kw
```
#### File: one_time_scripts/helper_objects/solarvation_loader.py
```python
import datetime as dt
import pandas as pd
import dateutil.tz
utc = dateutil.tz.tzutc()
def date_parser(string):
return dt.datetime.strptime(string, '%Y-%m-%d %H:%M:%S%z').replace(tzinfo=utc)
def load_solarvation_data(solarvation_filename='../../data/environments/lelystad_1_2021.csv'):
solarvation_df = pd.read_csv(solarvation_filename, parse_dates=[0], date_parser=date_parser)
try:
solarvation_df.index = pd.to_datetime(solarvation_df['time_utc'], errors='coerce', utc=True)
except KeyError:
solarvation_df.index = pd.to_datetime(solarvation_df['time_ams'], errors='coerce', utc=True)
solarvation_df = solarvation_df.drop('time_ams', axis=1)
solarvation_df['time_utc'] = solarvation_df.index
solarvation_df['hour_of_production'] = solarvation_df.index.hour
solarvation_df['time'] = solarvation_df['time_utc'].apply(lambda x: x.replace(year=1970, month=1, day=1))
return solarvation_df
```
#### File: solar_scripts/solar_old/solar_runner.py
```python
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import numpy as np
import datetime as dt
import dateutil.tz
ams = dateutil.tz.gettz('Europe/Amsterdam')
utc = dateutil.tz.tzutc()
def date_parser(string):
return dt.datetime.strptime(string, '%Y-%m-%dT%H:%M:%S%z').replace(tzinfo=utc)
if __name__ == '__main__':
solar_power_mw_df = pd.read_csv('../../../data/solar_data/solar_power/cleaned_solar_production.csv', parse_dates=[0], date_parser=date_parser)
solar_power_mw_df.index = pd.to_datetime(solar_power_mw_df['time'], errors='coerce', utc=True)
solar_power_mw_df = solar_power_mw_df.drop('time', axis=1)
# print(solar_power_mw_df)
radiation_df = pd.read_csv(
'../../../data/solar_data/radiation_with_forecast/cleaned_radiation_forecast_and_values.csv', parse_dates=[0], date_parser=date_parser)
radiation_df.index = pd.to_datetime(radiation_df['time'], errors='coerce', utc=True)
radiation_df = radiation_df.drop('time', axis=1)
# print(radiation_df)
# First design decision, pad or interpolate 1h radiation data?
# radiation_df = radiation_df.resample('15T').pad()
radiation_df = radiation_df.resample('15T').interpolate()
res_df = solar_power_mw_df.merge(radiation_df, how='inner', left_index=True, right_index=True)
# print(res_df)
my_solar_farm_m2 = 10
# Second method
res_df['solar_farms_m2'] = res_df['solar_mw'] * 1000 / res_df['radiation']
# Third method
res_df['cloud_coverage'] = res_df['solar_mw'] / res_df['solar_mw'].max()
# Fourth method
res_df['cloud_coverage_rolling'] = res_df['solar_mw'] / res_df['solar_mw'].rolling(180).max()
# First method - Simply take radiation
res_df['kw_my_solar_farm'] = my_solar_farm_m2 * res_df['radiation']
# However that does not take into account the efficiency of our solar panels.
# It assumes all power that reaches them is translated into energy, which is not the case.
# The total solar production offers us data on how efficient the solar panels are running
# Second method - Based on m2 of solar farms
res_df['kw_my_solar_farm_2'] = my_solar_farm_m2 / res_df['solar_farms_m2'] * res_df['solar_mw'] * 1000
res_df['kw_my_solar_farm_2'].replace(np.NaN, 0, inplace=True)
# Third method
res_df['kw_my_solar_farm_3'] = res_df['cloud_coverage'] * my_solar_farm_m2 * res_df['radiation']
# Fourth method
res_df['kw_my_solar_farm_4'] = res_df['cloud_coverage_rolling'] * my_solar_farm_m2 * res_df['radiation']
res_df['hour_of_production'] = res_df.index.hour
plt.scatter(res_df['hour_of_production'], res_df['kw_my_solar_farm_4'])
plt.ylabel('Generated power 15m (kW)')
plt.xlabel('Hour in which power was generated (UTC)')
plt.title('Scatterplot of generated power by generic solar farm')
plt.show()
start_of_set = dt.datetime(2021, 7, 15, tzinfo=utc)
end_of_set = dt.datetime(2021, 7, 19, tzinfo=utc)
res_df = res_df[start_of_set:end_of_set]
# print(res_df.to_string())
plt.plot(res_df.index, res_df['kw_my_solar_farm'], label='Radiation method')
plt.plot(res_df.index, res_df['kw_my_solar_farm_2'], label='m2 of solar farms')
plt.plot(res_df.index, res_df['kw_my_solar_farm_3'], label='Cloud coverage large max')
plt.plot(res_df.index, res_df['kw_my_solar_farm_4'], label='Cloud coverage rolling window')
ax = plt.gca()
max_formatter = mdates.DateFormatter('%d-%m')
ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))
ax.xaxis.set_major_formatter(max_formatter)
plt.ylabel('Generated power (kW)')
plt.xlabel('Time (UTC)')
plt.title('Generated power for {}m2 solar farm.'.format(my_solar_farm_m2))
plt.legend(loc='lower right')
plt.show()
```
#### File: one_time_scripts/visualisations/baselines_visualisation.py
```python
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from one_time_scripts.visualisations.visualise_ea_runs import convert_file_into_dict
month_shorts = ['jan', 'feb', 'mar', 'apr',
'may', 'june', 'july', 'aug',
'sep', 'oct', 'nov', 'dec']
month_long = ['january', 'february', 'march', 'april',
'may', 'june', 'july', 'august',
'september', 'october', 'november', 'december']
pretty_colours = [(0.15, 0.81, 0.82), (1, 0.24, 0.22), (0.52, 0.86, 0.39),
(0.87, 0.34, 0.74), (0.11, 0.47, 0.76), (1, 0.69, 0),
(0.29, 0.21, 0.28)]
try:
baseline_df = pd.read_csv('../../data/baseline_earnings/overview.csv', delimiter=';')
except FileNotFoundError:
baseline_df = pd.read_csv('../../../data/baseline_earnings/overview.csv', delimiter=';')
# 0 Solarvation only discharging
# 1 Wombat disregard congestion (with base money s...
# 2 Wombat disregard congestion GIGA Baseline
# 3 Wombat only solve congestion
# 4 Wombat yearly timing (with base money strat)
# 5 Wombat yearly timing GIGA Baseline
# 6 Wombat conservative monthly timed (with base m...
# 7 Wombat conservative monthly timed GIGA Baseline
# 8 Wombat smart monthly timed (with base money st...
# 9 Wombat smart monthly timed GIGA Baseline
# 10 Wombat max smart monthly timed (with base mone...
# 11 Wombat max smart monthly timed GIGA Baseline
# 12 Wombat avg smart monthly timed (with base mone...
# 13 Wombat avg smart monthly timed GIGA Baseline
def make_list_of_monthly_earnings(single_run, few_months=None):
if few_months is None:
few_months = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
res = []
for i in few_months:
month_label = month_shorts[i] + '_earning'
res.append(single_run[month_label])
return res
def make_mean_and_std_per_month_from_folder(source_folder='../../data/ea_runs/giga_baseline/',
suffix='', few_months=None):
if few_months is None:
few_months = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
res_mean = []
res_error = []
for i in few_months:
month_filename = source_folder + month_long[i] + suffix + '.csv'
dict_of_runs, num_of_runs = convert_file_into_dict(month_filename)
arr_of_best_individuals = []
for run_num in range(num_of_runs):
run_label = f'run_{run_num}_best_individual'
arr_of_best_individuals.append(dict_of_runs[run_label][-1])
arr_of_best_individuals = np.array(arr_of_best_individuals)
res_mean.append(np.mean(arr_of_best_individuals))
res_error.append(np.std(arr_of_best_individuals))
return res_mean, res_error
def make_arr_of_best_individuals_per_month_from_folder(source_folder='../../data/ea_runs/giga_baseline/',
suffix='', few_months=None):
if few_months is None:
few_months = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
res = []
for i in few_months:
month_filename = source_folder + month_long[i] + suffix + '.csv'
dict_of_runs, num_of_runs = convert_file_into_dict(month_filename)
arr_of_best_individuals = []
for run_num in range(num_of_runs):
run_label = f'run_{run_num}_best_individual'
arr_of_best_individuals.append(dict_of_runs[run_label][-1])
arr_of_best_individuals = np.array(arr_of_best_individuals)
res.append(arr_of_best_individuals)
return res
def statistic_tests(baseline_indices, source_folders, few_months=None, suffixes=None):
res_dict = {}
for source_folder_index in range(len(source_folders)):
source_folder = source_folders[source_folder_index]
if suffixes is not None:
assert len(suffixes) == len(source_folders), 'Please supply as many suffixes as folders'
suffix = suffixes[source_folder_index]
else:
suffix = ''
arr_of_arr_of_best_individuals = make_arr_of_best_individuals_per_month_from_folder(source_folder,
suffix=suffix,
few_months=few_months)
res_dict[source_folder_index] = arr_of_arr_of_best_individuals
for source_folder_index in range(len(source_folders) - 1):
suffix_one = suffixes[source_folder_index]
suffix_other = suffixes[source_folder_index + 1]
print(f'Comparing performance of {suffix_one} with {suffix_other} with T-Test')
for month in range(len(few_months)):
one = res_dict[source_folder_index][month]
other = res_dict[source_folder_index + 1][month]
t_value, p_value = stats.ttest_ind(one, other)
print(f'Running test for month {few_months[month] + 1}')
print('\tTest statistic is %f'%float("{:.6f}".format(t_value)))
print('\tp-value for two tailed test is %f'%p_value)
alpha = 0.05
if p_value <= alpha:
print('\tConclusion:', 'Since p-value(=%f)' % p_value, '<=', 'alpha(=%.2f)' % alpha, '''we reject the null hypothesis H0.\n\t\tSo we conclude that the effects of the tested parameter are not equal, i.e. μ1 ≠ μ2, at the %.2f level of significance.''' % alpha)
else:
print('\tConclusion:', 'Since p-value(=%f)' % p_value, '>', 'alpha(=%.2f)' % alpha, 'we do not reject the null hypothesis H0.')
def make_bar_graph(baseline_indices, source_folders, few_months=None, suffixes=None,
num_of_source_folder_baselines=0):
for _ in range(len(baseline_indices) + num_of_source_folder_baselines):
pretty_colours.append('#%06X' % random.randint(0, 0xFFFFFF))
month_labels = []
if few_months is None:
max_x = 130
for month in month_shorts:
month_labels.append(month.capitalize())
else:
max_x = 10 + len(few_months) * 10
for month_index in few_months:
month = month_long[month_index]
month_labels.append(month.capitalize())
x_axis = np.array(list(range(10, max_x, 10)))
num_of_items = len(baseline_indices) + len(source_folders)
offsets = []
if num_of_items == 2:
offsets = [-2, 2]
elif num_of_items == 3:
offsets = [-2, 0, 2]
elif num_of_items == 4:
offsets = [-3, -1, 1, 3]
elif num_of_items == 5:
offsets = [-3, -1.5, 0, 1.5, 3]
elif num_of_items == 6:
offsets = [-3.125, -1.875, -0.625, 0.625, 1.875, 3.125]
width = offsets[-1] - offsets[-2]
offset_tracker = -1
colour_index = len(source_folders) - num_of_source_folder_baselines + 1
for i in range(len(baseline_indices)):
single_run = baseline_df.loc[baseline_indices[i]]
single_run_y = make_list_of_monthly_earnings(single_run, few_months)
hatch = ''
alpha = 1
if single_run['time_steps_with_congestion'] > 1:
alpha = 0.75
hatch = '///'
plt.bar(x_axis + offsets[i], single_run_y, width, label=single_run['name'],
hatch=hatch, alpha=alpha, color=pretty_colours[colour_index + i])
offset_tracker = i
colour_index = colour_index + num_of_source_folder_baselines
flag_colour_index_reset = False
for source_folder_index in range(len(source_folders)):
if num_of_source_folder_baselines == 0 and not flag_colour_index_reset:
colour_index = 0
flag_colour_index_reset = True
source_folder = source_folders[source_folder_index]
if suffixes is not None:
assert len(suffixes) == len(source_folders), 'Please supply as many suffixes as folders'
suffix = suffixes[source_folder_index]
else:
suffix = ''
offset_tracker = offset_tracker + 1
y_values, y_errors = make_mean_and_std_per_month_from_folder(source_folder, suffix=suffix,
few_months=few_months)
plt.bar(x_axis + offsets[offset_tracker], y_values, width, label=source_folder + suffix,
color=pretty_colours[colour_index])
plt.errorbar(x_axis + offsets[offset_tracker], y_values, yerr=y_errors,
fmt='o', markersize=width, elinewidth=width*0.5)
num_of_source_folder_baselines = num_of_source_folder_baselines - 1
colour_index = colour_index + 1
plt.xticks(x_axis, month_labels)
plt.xlabel('Month (2021)')
plt.ylabel('Total EUR')
plt.title('Comparing monthly performance')
plt.legend(fontsize=6)
plt.show()
if __name__ == '__main__':
label_indexes = [8, 13]
source_folder_1 = '../../data/new_ea_runs/giga_baseline_with_congestion/'
source_folder_2 = '../../data/ea_runs/random_init_first_runs/'
make_bar_graph(label_indexes, source_folders=[source_folder_1, source_folder_2])
label_indexes = [2, 13]
make_bar_graph(label_indexes, source_folders=[], suffixes=[])
label_indexes = [2, 13]
make_bar_graph(label_indexes, source_folders=['../../data/new_ea_runs/default_runs_money/',
'../../data/new_ea_runs/default_runs/'])
label_indexes = []
source_folder_3 = '../../data/ea_runs/sorting_investigation/'
source_folders = [source_folder_1, source_folder_3, source_folder_3, source_folder_3, source_folder_3]
all_suffix = ['', '_sort_none', '_sort_1', '_sort_2', '_sort_3']
few_months = [2, 3, 10]
make_bar_graph(label_indexes, source_folders=source_folders, suffixes=all_suffix, few_months=few_months,
num_of_source_folder_baselines=1)
statistic_tests([], [source_folder_3, source_folder_3], few_months=[2, 3, 10],
suffixes=['_sort_none', '_sort_1'])
```
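`statistic_tests` above ultimately reduces to scipy's independent two-sample t-test over the arrays of best individuals; a minimal standalone version of that call, with invented samples:
```python
import numpy as np
from scipy import stats

one = np.array([12000.0, 12500.0, 11800.0, 12300.0])    # invented best-individual values, setting A
other = np.array([11000.0, 11300.0, 10950.0, 11200.0])  # invented best-individual values, setting B
t_value, p_value = stats.ttest_ind(one, other)
print(round(t_value, 3), round(p_value, 6))
```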
#### File: local_smart_grid_simulation/one_time_scripts/windnet_data_cleaner.py
```python
import dateutil.tz
import pandas as pd
ams = dateutil.tz.gettz('Europe/Amsterdam')
utc = dateutil.tz.tzutc()
def correct_dates_windnet_csv():
base_windnet_df = pd.read_csv('../data/windnet/base_windnet_data_sep_2020_sep_2021.csv')
base_windnet_df.index = pd.to_datetime(base_windnet_df['date'], utc=False, errors='coerce', dayfirst=True)
base_windnet_df.index = base_windnet_df.index.tz_localize(ams, ambiguous='infer')
base_windnet_df.index = base_windnet_df.index.tz_convert(utc)
base_windnet_df = base_windnet_df.drop('date', axis=1)
print(base_windnet_df)
base_windnet_df.to_csv('../data/windnet/corrected_dates_windnet_data_sep_2020_sep_2021.csv')
def add_power_to_base_windnet():
base_windnet_df = pd.read_csv('../data/windnet/corrected_dates_windnet_data_sep_2020_sep_2021.csv')
base_windnet_df.index = pd.to_datetime(base_windnet_df['date'], utc=True)
base_windnet_df = base_windnet_df.drop('date', axis=1)
base_windnet_df['nht_usage_kw'] = base_windnet_df['nht_usage_kwh'] * 12
base_windnet_df['nht_production_kw'] = base_windnet_df['nht_production_kwh'] * 12
base_windnet_df['mmt_usage_kw'] = base_windnet_df['mmt_usage_kwh'] * 12
base_windnet_df['mmt_production_kw'] = base_windnet_df['mmt_production_kwh'] * 12
print(base_windnet_df)
base_windnet_df.to_csv('../data/windnet/cleaned_windnet_data_aug_2020_sep_2021.csv')
if __name__ == '__main__':
correct_dates_windnet_csv()
add_power_to_base_windnet()
```
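The `* 12` factors in `add_power_to_base_windnet` convert energy per 5-minute interval into average power over that interval; a quick check of the arithmetic with a made-up reading:
```python
kwh_in_5_minutes = 2.5               # invented meter reading for one 5-minute interval
average_kw = kwh_in_5_minutes * 12   # 2.5 kWh / (5/60 h) = 30.0 kW
print(average_kw)
```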
#### File: local_smart_grid_simulation/one_time_scripts/windnet_interpolation.py
```python
import datetime as dt
import dateutil.tz
import pandas as pd
import matplotlib.pyplot as plt
ams = dateutil.tz.gettz('Europe/Amsterdam')
utc = dateutil.tz.tzutc()
start_graph = dt.datetime(2021, 3, 20, 18, tzinfo=utc)
end_graph = start_graph + dt.timedelta(minutes=240)
def trivial_interpolation_windnet():
base_windnet_df = pd.read_csv('../data/windnet/cleaned_windnet_data_aug_2020_sep_2021.csv')
base_windnet_df.index = pd.to_datetime(base_windnet_df['date'], utc=True, errors='coerce')
base_windnet_df.index = base_windnet_df.index - dt.timedelta(minutes=5)
base_windnet_df = base_windnet_df.drop(['date'], axis=1)
base_windnet_df['nht_usage_kwh'] = base_windnet_df['nht_usage_kwh'] / 5
base_windnet_df['nht_production_kwh'] = base_windnet_df['nht_production_kwh'] / 5
base_windnet_df['mmt_usage_kwh'] = base_windnet_df['mmt_usage_kwh'] / 5
base_windnet_df['mmt_production_kwh'] = base_windnet_df['mmt_production_kwh'] / 5
base_windnet_df = base_windnet_df.resample('1T').pad()
return base_windnet_df
def pandas_linear_interpolation_windnet():
base_windnet_df = pd.read_csv('../data/windnet/cleaned_windnet_data_aug_2020_sep_2021.csv')
base_windnet_df.index = pd.to_datetime(base_windnet_df['date'], utc=True, errors='coerce')
base_windnet_df.index = base_windnet_df.index - dt.timedelta(minutes=5)
base_windnet_df = base_windnet_df.drop(['date', 'nht_usage_kwh', 'nht_production_kwh', 'mmt_usage_kwh', 'mmt_production_kwh'], axis=1)
base_windnet_df = base_windnet_df.resample('1T').interpolate()
base_windnet_df['nht_usage_kwh'] = base_windnet_df['nht_usage_kw'] / 60
base_windnet_df['nht_production_kwh'] = base_windnet_df['nht_production_kw'] / 60
base_windnet_df['mmt_usage_kwh'] = base_windnet_df['mmt_usage_kw'] / 60
base_windnet_df['mmt_production_kwh'] = base_windnet_df['mmt_production_kw'] / 60
return base_windnet_df
def csv_maker():
trivial_df = trivial_interpolation_windnet()
pandas_df = pandas_linear_interpolation_windnet()
print(trivial_df)
print(pandas_df)
trivial_df.to_csv('../data/windnet/trivial_interpolation_windnet.csv')
pandas_df.to_csv('../data/windnet/pandas_interpolation_windnet.csv')
def make_simple_graph(filtered_df, title):
plt.plot(filtered_df.index, filtered_df['nht_production_kw'], marker='o')
plt.title(title)
plt.xlabel('Time')
plt.ylabel('Produced power (kW)')
plt.show()
if __name__ == '__main__':
# csv_maker()
original_df = pd.read_csv('../data/windnet/cleaned_windnet_data_aug_2020_sep_2021.csv')
original_df.index = pd.to_datetime(original_df['date'], utc=True, errors='coerce')
original_df.index = original_df.index - dt.timedelta(minutes=5)
original_df = original_df.drop(['date'], axis=1)
trivial_df = pd.read_csv('../data/windnet/trivial_interpolation_windnet.csv')
trivial_df.index = pd.to_datetime(trivial_df['date'])
pandas_df = pd.read_csv('../data/windnet/pandas_interpolation_windnet.csv')
pandas_df.index = pd.to_datetime(pandas_df['date'])
original_df = original_df[original_df.index.to_series().between(start_graph, end_graph)]
trivial_df = trivial_df[trivial_df.index.to_series().between(start_graph, end_graph)]
pandas_df = pandas_df[pandas_df.index.to_series().between(start_graph, end_graph)]
make_simple_graph(original_df, title='Original data')
make_simple_graph(trivial_df, title='Trivial interpolation.')
make_simple_graph(pandas_df, title='Pandas interpolation.')
# csv_maker()
``` |
{
"source": "Jipolie01/nfl-playbook-creator",
"score": 3
} |
#### File: Jipolie01/nfl-playbook-creator/parser.py
```python
from formations import *
from routes import *
from offense import *
class parser():
def parse_formation(self, formation):
play_dictionary = {
"I_FORMATION" : i_formation(), "SINGLE_BACK_SET": single_back_set(),
"PRO_SET": pro_set(), "SHOTGUN_STANDARD": shotgun_standard(),
"SHOTGUN_TRIPS": shotgun_trips(), "SPREAD": spread()}
return play_dictionary.get(formation)
def parse_receiver(self, receiver):
receiver_parsing = receiver.split("=")
route_direction = False
#print(receiver_parsing)
if(receiver_parsing[1] == "T"):
route_direction = True
receiver_dictionary = {
"SLANT" : slant(route_direction), "FLAT": flat(route_direction),
"CURL" : curl(), "COMEBACK": comeback(route_direction),
"QUICK_OUT": quick_out(route_direction), "MEDIUM_OUT": medium_out(route_direction),
"DEEP_OUT": deep_out(route_direction), "CORNER" : corner(route_direction),
"POST": post(route_direction), "SKINNY_POST" : skinny_post(route_direction),
"GO": go(), "FADE": fade(route_direction),
"SHOOT": shoot(route_direction)
}
return receiver_dictionary.get(receiver_parsing[0])
def parse_play(self, single_line):
single_line = single_line.replace("\n", "")
play_indicators = single_line.split(";")
play_formation = self.parse_formation(play_indicators[1])
receiver_list = []
for i in range(2, 7):
receiver_list.append(self.parse_receiver(play_indicators[i]))
#print(receiver_list)
return offense(play_indicators[0], quarterback(), receiver(receiver_list[0]), receiver(receiver_list[1]),
receiver(receiver_list[2]), receiver(receiver_list[3]), receiver(receiver_list[4]),
play_formation)
#Example: {"SUPER_PLAY" : offense object}
class playbook_file():
def __init__(self, play_dict):
self.offense_plays_dict = play_dict
def get_full_playbook_play(self, play_name):
return self.offense_plays_dict.get(play_name)
def get_number_of_plays(self):
return len(self.offense_plays_dict)
class playbook():
def __init__(self, path):
self.factory_parsing = parser()
self.offensive_playbook = self.convert_lines_to_playbook(self.read_file(path))
def convert_lines_to_playbook(self, lines):
plays = []
for i in range(0, len(lines)):
plays.append(self.factory_parsing.parse_play(lines[i]))
return plays
def read_file(self, path):
line_list = []
with open(path, 'r') as file:
line_list = file.readlines()
return line_list
def get_number_of_plays(self):
return len(self.offensive_playbook)
def get_play(self, index):
return self.offensive_playbook[index]
```
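Putting the parsing rules above together, a playbook line is a semicolon-separated record: play name, formation keyword, then five `ROUTE=T|F` receiver entries, where T/F selects the break direction. A sketch of one such line and its use, assuming the script is run from the repo root so the local formations/routes/offense modules resolve; the play itself is invented.
```python
from parser import parser  # the local parser.py defined above

line = "SUPER_PLAY;SHOTGUN_TRIPS;SLANT=T;FLAT=F;CURL=T;POST=F;GO=T\n"
play = parser().parse_play(line)  # -> offense(...) built from the formation and five routes
```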
#### File: Jipolie01/nfl-playbook-creator/routes.py
```python
class route():
def __init__(self, height, top_route, x_top_route):
self.height = height
self.top_route = top_route
self.x_top_route = x_top_route
class slant(route):
def __init__(self, breaks_left = True):
route.__init__(self, -75, -50, -100 if breaks_left else 100)
class flat(route):
def __init__(self, breaks_left = True):
route.__init__(self, -75, 0, -100 if breaks_left else 100)
class curl(route):
def __init__(self):
route.__init__(self, -100, 20, 15)
class comeback(route):
def __init__(self, breaks_left = True):
route.__init__(self, -150, 45, -30 if breaks_left else 30)
class quick_out(route):
def __init__(self, breaks_left = True):
route.__init__(self, -60, 0, -100 if breaks_left else 100)
class medium_out(route):
def __init__(self, breaks_left = True):
route.__init__(self, -120, 0, -100 if breaks_left else 100)
class deep_out(route):
def __init__(self, breaks_left = True):
route.__init__(self, -200, 0, -100 if breaks_left else 100)
class corner(route):
def __init__(self, breaks_left = True):
route.__init__(self, -200, -100, -100 if breaks_left else 100)
class post(route):
def __init__(self, breaks_left = True):
route.__init__(self, -150, -150, -60 if breaks_left else 60)
class skinny_post(route):
def __init__(self, breaks_left = True):
route.__init__(self, -150, -150, -30 if breaks_left else 30)
class go(route):
def __init__(self):
route.__init__(self, -150, -150, 0)
class fade(route):
def __init__(self, breaks_left = True):
route.__init__(self, -10, -50, -40 if breaks_left else 40)
class shoot(route):
def __init__(self, breaks_left = True):
route.__init__(self, -2, -50, -150 if breaks_left else 150 )
""" TODO Multiple cut routes
class crosser(route):
pass
class over(route):
pass
class drag(route):
pass
class sit(route):
pass
"""
# TODO: Add play protection for routes. For example, post and corner are basically the same;
# make sure the corner always breaks away from the quarterback, and the same applies to the out.
``` |
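Each route above is just three offsets relative to the receiver's starting point: `height` (depth of the stem), `top_route` (vertical component after the break) and `x_top_route` (horizontal component, mirrored by `breaks_left`). A small illustrative check, assuming routes.py is importable:
```python
from routes import slant, corner

left_slant = slant(breaks_left=True)
right_corner = corner(breaks_left=False)
print(left_slant.height, left_slant.top_route, left_slant.x_top_route)        # -75 -50 -100
print(right_corner.height, right_corner.top_route, right_corner.x_top_route)  # -200 -100 100
```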
{
"source": "jiportilla/ontology",
"score": 2
} |
#### File: core/dmo/label_formatter.py
```python
import os
class LabelFormatter(object):
""" string utility methods """
def __init__(self,
is_debug: bool = False):
"""
Updated:
18-Mar-2019
<EMAIL>
* renamed from 'string-util'
* externalized formats to YAML
"""
self.is_debug = is_debug
self.d_format = self._load()
@staticmethod
def _load() -> dict:
from . import FileIO
path = os.path.join(os.environ["CODE_BASE"],
"resources/ontology/filters/entity_formatting.yml")
return FileIO.file_to_yaml(path)
def _camel_case(self,
a_single_token: str,
len_threshold=2):
# lookup in formatter
if a_single_token.lower() in self.d_format:
return self.d_format[a_single_token.lower()]
return self.camel_case(a_single_token,
len_threshold)
@staticmethod
def camel_case(a_single_token: str,
len_threshold: int = 2,
split_tokens: bool = False,
enforce_upper_case: bool = True):
# don't format small tokens
if len(a_single_token) <= len_threshold:
if enforce_upper_case:
return a_single_token.upper()
if a_single_token.lower() == 'ibm':
return 'IBM'
return a_single_token
if split_tokens and ' ' in a_single_token:
results = []
for token in a_single_token.split(' '):
results.append(LabelFormatter.camel_case(a_single_token=token,
len_threshold=len_threshold,
split_tokens=False,
enforce_upper_case=False))
return ' '.join(results)
# perform camel casing
return "{}{}".format(a_single_token[:1].upper(),
a_single_token[1:].lower())
def process(self,
a_token: str) -> str:
a_token = a_token.replace("_", " ")
tokens = [self._camel_case(x) for x in a_token.split(" ")]
tokens = [x for x in tokens if x]
return " ".join(tokens)
```
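A few illustrative calls to the static `camel_case` helper above; the expected outputs in the comments follow from the code (tokens at or below the length threshold are upper-cased, longer tokens get a leading capital, and `split_tokens` applies the rule per word). The import path is an assumption based on how `LabelFormatter` is imported elsewhere in this repo.
```python
from base import LabelFormatter  # import path assumed

print(LabelFormatter.camel_case('ai'))                                   # 'AI'
print(LabelFormatter.camel_case('watson'))                               # 'Watson'
print(LabelFormatter.camel_case('machine learning', split_tokens=True))  # 'Machine Learning'
```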
#### File: core/dmo/redis_client.py
```python
import os
import re
import redis
class RedisClient(object):
""" Redis Client """
WIKI_AUGMENTED_DB = 3
WIKI_PAGE_DB = 4
WIKI_SEARCH_DB = 5
def __init__(self,
db: int = 0,
decode_responses: bool = True):
"""
Created:
29-May-2019
<EMAIL>
Updated:
04-Dec-2019
<EMAIL>
* Use CredentialsFromJson
Updated:
23-Jan-2020
<EMAIL>
* Honor de DB parameter even when using an url. from_url ignores
its param if already specified in the url
"""
from ..dto import CredentialsFromJson
url = None
ca_file = None
if 'REDIS_JSON_CREDENTIALS' in os.environ:
credentials = CredentialsFromJson(os.environ['REDIS_JSON_CREDENTIALS'],
'rediss')
url = credentials.url
ca_file = credentials.ca_file
if not url:
url = 'redis://localhost:6379/0'
url = re.sub(r'/\d*$', f'/{db}', url)
options = {
'decode_responses': decode_responses
}
if url.startswith('rediss:') and ca_file:
options['ssl_ca_certs'] = ca_file
self.redis = redis.from_url(url, **options)
self.url = CredentialsFromJson.sanitize_url(url, 'rediss')
def size(self) -> int:
return self.redis.dbsize()
def clear(self):
for key in self.redis.keys():
self.redis.delete(key)
def set(self,
a_key: str,
value: str) -> None:
self.redis.set(a_key, value)
def get(self,
key) -> str:
return self.redis.get(key)
def has(self,
key) -> bool:
return self.redis.exists(key)
def set_list(self,
a_key: str,
a_list: list) -> None:
if not self.has(a_key):
self.redis.rpush(a_key, *a_list)
def get_list(self,
a_key: str) -> list:
return self.redis.lrange(a_key, 0, 9999999) # I don't like this either ...
def set_dict(self,
a_key: str,
a_dict: dict) -> None:
if not self.has(a_key):
self.redis.hmset(a_key, a_dict)
def get_dict(self,
a_key: str) -> dict:
return self.redis.hgetall(a_key)
```
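The `re.sub(r'/\d*$', f'/{db}', url)` line above is what forces the requested db number onto the connection URL even when the credentials already carry one; a standalone check of that substitution with invented URLs:
```python
import re

db = 4
for url in ['redis://localhost:6379/0', 'rediss://user:secret@host:12345/2']:
    print(re.sub(r'/\d*$', f'/{db}', url))
# redis://localhost:6379/4
# rediss://user:secret@host:12345/4
```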
#### File: badges/bp/badge_analysis_api.py
```python
import time
from base import BaseObject
IS_DEBUG = True
class BadgeAnalysisAPI(BaseObject):
""" API for Manifest based badge analysis activities """
def __init__(self,
manifest_name: str,
activity_name: str,
first: int=-1,
last: int=-1):
"""
Created:
10-Jan-2020
<EMAIL>
"""
from cendalytics.badges.svc import BadgeAnalysisManifestData
BaseObject.__init__(self, __name__)
self.analyzer = BadgeAnalysisManifestData(manifest_name,
activity_name,
first,
last,
is_debug=IS_DEBUG)
def analyze_per_badge(self):
self.analyzer.process()
def analyze_distribution(self):
self.analyzer.analyze_distribution()
def flush_target(self):
return self.analyzer.flush_target()
def get_sources(self):
return self.analyzer.source_collections()
def call_badge_analysis_api(manifest_name, activity_name, action, first, last):
first = int(first) if first else -1
last = int(last) if last else -1
print(f"API Parameters "
f"(manifest-name={manifest_name}, "
f"activity-name={activity_name}, "
f"action={action}, "
f"first={first}, last={last})")
if manifest_name.startswith("badge-analysis"):
api = BadgeAnalysisAPI(manifest_name,
activity_name,
first,
last)
return getattr(api, action)()
else:
raise ValueError(f"Unrecognized Manifest: {manifest_name}")
```
#### File: badges/dmo/badge_entity_analysis.py
```python
from typing import Dict
from base import BaseObject
from nlutext.core.bp import TextParser
class BadgeEntityAnalysis(BaseObject):
"""
Generate a sorted array of cendant tags and weights
from the badge name and the supplied ingested tags
"""
__cache:Dict[str,list] = {}
__parser = None
def __init__(self,
badge_name: str,
raw_tags: list,
is_debug: bool = False):
"""
Created:
19-Apr-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/97
Updated:
15-Aug-2019
<EMAIL>
* add mongo host as a param; driven by
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/767
Updated:
22-Aug-2019
<EMAIL>
* remove text-parser caching
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/796#issuecomment-14041159
Updated:
12-Jan-2020
<EMAIL>
* changed the interface to get badge name + array of tags and
return a sorted array of (tag, weight)
* keep TextParser cached
"""
BaseObject.__init__(self, __name__)
self._text_snipnets = raw_tags + [badge_name]
self._is_debug = is_debug
@classmethod
def _parse(cls,
some_tag: str,
is_debug: bool) -> list:
if some_tag in cls.__cache:
return cls.__cache[some_tag]
if not cls.__parser:
cls.__parser = TextParser(is_debug=is_debug)
cls.__cache[some_tag] = cls.__parser.process(some_tag)['tags']['supervised']
return cls.__cache[some_tag]
def process(self) -> list:
tag_weights:Dict[str, float] = {}
for snipnet in self._text_snipnets:
tags = self._parse(snipnet, self._is_debug)
for tag, weight in tags:
if not tag in tag_weights:
tag_weights[tag] = -1
if weight > tag_weights[tag]:
tag_weights[tag] = weight
def sort_by_key(tag_weight_tuple):
return tag_weight_tuple[1]
return sorted(tag_weights.items(), key=sort_by_key, reverse=True)
```
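The `process` method above boils down to "keep the best weight seen for each tag, then sort descending by weight"; a standalone sketch of just that merge-and-sort step, with invented tag/weight pairs, since running the real class needs the Mongo-backed `TextParser`:
```python
from typing import Dict, List, Tuple

def merge_and_sort(parsed_snippets: List[List[Tuple[str, float]]]) -> List[Tuple[str, float]]:
    # Keep the highest weight observed per tag, then sort by weight descending.
    tag_weights: Dict[str, float] = {}
    for tags in parsed_snippets:
        for tag, weight in tags:
            if weight > tag_weights.get(tag, -1):
                tag_weights[tag] = weight
    return sorted(tag_weights.items(), key=lambda tw: tw[1], reverse=True)

print(merge_and_sort([[('cloud', 91.0), ('container', 88.5)], [('cloud', 99.0)]]))
# [('cloud', 99.0), ('container', 88.5)]
```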
#### File: badges/svc/badge_analysis_manifest_data.py
```python
from collections import Counter
from statistics import mean
from statistics import stdev
import pprint
import typing
from pymongo.operations import UpdateOne
from pymongo.errors import BulkWriteError
from base import BaseObject
from base import MandatoryParamError
from datamongo.core.dmo import BaseMongoClient
from datamongo.core.bp import CendantCollection
from dataingest.core.dmo import ManifestConnectorForMongo
class BadgeAnalysisManifestData(BaseObject):
"""Tag and calc distribution values for badges
input records:
{
"_id": "Containers, K8s and Istio on IBM Cloud",
"key_field":"Containers, K8s and Istio on IBM Cloud"},
"fields":[
{"agent":"system",
"type":"text",
"name":"strategy_tags",
"value":["cloud"],
"transformations":["tag_list"]
},
{"agent":"system",
"type":"text",
"name":"category_tags",
"value":["knowledge"],
"transformations":["tag_list"]
}
]
}
resulting records:
{
"_id": "Containers, K8s and Istio on IBM Cloud",
"badge":"Containers, K8s and Istio on IBM Cloud"},
"ingested_tags": ["knowledge", "cloud],
"tags":[["cloud", 99], ["ibm cloud", 95.2], ["container", 91.7]],
"owners":{
"count": 1133,
"zScore": -0.129
}
}
Created:
13-Jan-2020
<EMAIL>
Updated:
14-Jan-2020
<EMAIL>
* Using a query to count the number of owners of each badge could
be asking too much from mongodb when running lots of processes
in parallel. We now walk the supply collection once instead.
"""
def __init__(self,
manifest: str,
activity: str,
first: int = -1,
last: int = -1,
is_debug: bool = False):
BaseObject.__init__(self, __name__)
from dataingest.core.dmo import ManifestActivityFinder
if not manifest:
raise MandatoryParamError("Manifest Name")
if not activity:
raise MandatoryParamError("Activity Name")
self._is_debug = is_debug
self._mongo_client = BaseMongoClient()
self._manifest = ManifestActivityFinder(manifest,
activity).process()
self._first = first
self._last = last
def _source(self) -> CendantCollection:
if self._is_debug:
self.logger.debug('\n'.join([
"Retrieved Source Manifest",
pprint.pformat(self._manifest["source"])]))
return ManifestConnectorForMongo(self._manifest["source"],
some_base_client=self._mongo_client,
is_debug=self._is_debug).process()
def _target(self) -> CendantCollection:
if self._is_debug:
self.logger.debug('\n'.join([
"Retrieved Target Manifest",
pprint.pformat(self._manifest["target"])]))
return ManifestConnectorForMongo(self._manifest["target"],
some_base_client=self._mongo_client,
is_debug=self._is_debug).process()
def _owners(self) -> CendantCollection:
if self._is_debug:
self.logger.debug('\n'.join([
"Retrieved Badge Owners Manifest",
pprint.pformat(self._manifest["badge_owners"])]))
return ManifestConnectorForMongo(self._manifest["badge_owners"],
some_base_client=self._mongo_client,
is_debug=self._is_debug).process()
def _input_records(self) -> list:
collection = self._source()
if self._first < 0:
records = collection.all()
else:
limit = self._last - self._first + 1
records = collection.skip_and_limit(self._first, limit)
return records
def _badges_and_ingested_tags(self,
input_records: list) -> list:
records = []
for input_record in input_records:
raw_tags: typing.Set[str] = set()
for element in input_record['fields']:
if element['name'] in ['category_tags', 'skills_tags', 'strategy_tags']: # just in case
raw_tags.update(element['value'])
records.append({
'_id': input_record['_id'],
'badge': input_record['key_field'],
'ingested_tags': list(raw_tags)
})
return records
def _add_parsed_tags(self,
output_records: list) -> list:
from cendalytics.badges.dmo import BadgeEntityAnalysis
for record in output_records:
cendant_tags = BadgeEntityAnalysis(record['badge'],
record['ingested_tags']).process()
record['tags'] = cendant_tags
return output_records
def _persist_target(self,
output_records: list) -> None:
try:
collection = self._target()
self.logger.debug(f'Persisting {len(output_records)} records to {collection.collection_name}...')
actions = []
for record in output_records:
actions.append(UpdateOne({'_id': record['_id']},
{'$set': record},
upsert=True))
results = collection.collection.bulk_write(actions, ordered=False)
return self.logger.debug(f'Persisted to {collection.collection_name}: '
f'matched={results.matched_count}. '
f'inserted={results.inserted_count}. '
f'upserted={results.upserted_count}. '
f'modified={results.modified_count}')
except BulkWriteError as xcpt:
self.logger.error(xcpt.details)
raise
def source_collections(self) -> list:
collection = self._source()
return [(collection.collection_name, collection.count())]
def flush_target(self) -> None:
collection = self._target()
collection.delete(keep_indexes=False)
def process(self) -> None:
input_records = self._input_records()
output_records = self._badges_and_ingested_tags(input_records)
output_records = self._add_parsed_tags(output_records)
self._persist_target(output_records)
def _get_number_of_owners(self) -> Counter:
counter: typing.Counter[str] = Counter()
collection = self._owners()
for chunk in collection.by_chunks(chunk_size=2000):
# print('.', end='', flush=True)
for record in chunk:
for field in record['fields']:
if field['type'] == 'badge':
counter.update({field['value']: 1})
return counter
def _get_zScores(self, counter_of_owners: Counter) -> list:
counts = [x[1] for x in counter_of_owners.items()]
count_mean = mean(counts)
count_stdev = stdev(counts)
del counts
def zScore(count):
z = (count - count_mean) / count_stdev
return round(z, 3)
records = []
for badge, count in counter_of_owners.items():
records.append({
'_id': badge,
'owners': {
'count': count,
'zScore': zScore(count)
}
})
return records
def analyze_distribution(self) -> None:
counter_of_owners = self._get_number_of_owners()
output_records = self._get_zScores(counter_of_owners)
self._persist_target(output_records)
```
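The `zScore` helper inside `_get_zScores` is a plain standard score over badge-owner counts; the same arithmetic in isolation, with invented counts:
```python
from statistics import mean, stdev

counts = [10, 50, 1133, 200]  # invented owner counts per badge
count_mean, count_stdev = mean(counts), stdev(counts)

def z_score(count):
    return round((count - count_mean) / count_stdev, 3)

print({count: z_score(count) for count in counts})
```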
#### File: budapest/dmo/data_transformer_for_conference.py
```python
from pandas import DataFrame
from base import BaseObject
from base import LabelFormatter
class DataTransformerForConference(BaseObject):
""" Transforms a conference DataFrame to a normalized JSON dictionary
"""
def __init__(self,
some_df: DataFrame):
"""
Created:
5-Apr-2019
<EMAIL>
"""
BaseObject.__init__(self, __name__)
self.df = some_df
@staticmethod
def _cleanse(a_str: str) -> str:
return str(a_str).replace("'", "")
def _result(self,
a_row,
column_name: str,
unknown="unknown") -> str:
some_result = self._cleanse(a_row[column_name])
if not some_result or not len(some_result):
return unknown
return some_result
@staticmethod
def _name(a_row) -> str:
first_name = a_row["first_name"]
last_name = a_row["last_name"]
return "{} {}".format(LabelFormatter.camel_case(first_name),
LabelFormatter.camel_case(last_name))
def _country_code(self,
a_row) -> str:
return self._cleanse(a_row["country_code"])
def _department(self,
a_row) -> str:
return self._cleanse(a_row["department"])
def _division(self,
a_row) -> str:
return self._cleanse(a_row["division"])
def _organization(self,
a_row) -> str:
return self._cleanse(a_row["organization"])
def _group(self,
a_row) -> str:
return self._cleanse(a_row["group"])
def _ebu_code(self,
a_row) -> str:
return self._cleanse(a_row["ebu_code"])
def _geography(self,
a_row) -> str:
return self._result(a_row, "geography",
unknown="Other Geo")
def _region(self,
a_row) -> str:
return self._result(a_row, "region",
unknown="Other Region")
def _country_name(self,
a_row) -> str:
return self._result(a_row, "country_name",
unknown="Other Country")
def _job_role_id(self,
a_row) -> str:
return self._result(a_row, "job_role_id",
unknown="0")
def _bu_code(self,
a_row) -> str:
return self._result(a_row, "bu_code",
unknown="Other BU")
def _bu_name(self,
a_row) -> str:
return self._result(a_row, "bu_name",
unknown="Other BU Name")
def _lob_code(self,
a_row) -> str:
return self._result(a_row, "lob_code",
unknown="Other LOB")
def process(self):
records = []
for i, row in self.df.iterrows():
records.append({
"name": self._name(row),
"country_code": self._country_code(row),
"department": self._department(row),
"division": self._division(row),
"organization": self._organization(row),
"group": self._group(row),
"ebu_code": self._ebu_code(row),
"geography": self._geography(row),
"region": self._region(row),
"country_name": self._country_name(row),
"job_role_id": self._job_role_id(row),
"bu_code": self._bu_code(row),
"bu_name": self._bu_name(row),
"lob_code": self._lob_code(row),
})
self.logger.debug("\n".join([
"Transformed Dataframe to Records",
"\ttlen: {}".format(len(records))
]))
return records
```
#### File: budapest/dmo/data_transformer_for_jrs.py
```python
from base import BaseObject
from datamongo import CendantCollection
class DataTransformerForJrs(BaseObject):
""" Transforms a parsed JRS record set
"""
def __init__(self):
"""
Created:
5-Apr-2019
<EMAIL>
"""
BaseObject.__init__(self, __name__)
@staticmethod
def _records():
return CendantCollection(some_db_name="cendant",
some_collection_name="jrs_parsed").all()
def process(self) -> list:
l_records = []
blacklist = ["is_shortlist", "indirect_jrll"]
for record in self._records():
d_record = {}
for field in record["fields"]:
if field["name"] in blacklist:
continue
def _tags():
if "tags" in field:
return sorted(set(field["tags"]["supervised"] + field["tags"]["unsupervised"]))
d_record[field["name"]] = {
"value": field["value"],
"tags": _tags()
}
l_records.append(d_record)
return l_records
```
#### File: core/svc/generate_meta_sentiment.py
```python
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class GenerateMetaSentiment(BaseObject):
""" Retrieve Source Records for Feedback Sentiment Processing """
def __init__(self,
df_summary: DataFrame,
is_debug: bool = False):
"""
Created:
16-Jan-2020
<EMAIL>
* the refactoring of a notebook from
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1746
:param df_summary:
DataFrame of this Report:
'feedback_tag_<DATE>-summary-<TS>.csv'
e.g.,
'feedback_tag_20191202-summary-1575690754.csv'
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._df_summary = df_summary
def _summarize(self) -> DataFrame:
record_ids = sorted(self._df_summary['RecordID'].unique())
master = []
for record_id in record_ids:
df2 = self._df_summary[self._df_summary['RecordID'] == record_id]
def _region():
country = df2['Country'].unique()[0]
region = df2['Region'].unique()[0]
if region.lower() == 'africa':
return "mea"
if region.lower() == 'middle east':
return "mea"
if region.lower() == 'asia':
return "ap"
if country.lower() in ["australia", "new zealand", "sri lanka", "india"]:
return "ap"
if country.lower() in ["china", "hong kong", "taiwan"]:
return "gcg"
return region
cons = len(df2[df2['Category'] == 'Cons'])
pros = len(df2[df2['Category'] == 'Pros'])
suggestions = len(df2[df2['Category'] == 'Suggestions'])
def adjudicate():
if cons >= pros - 1 and cons > suggestions:
return "Cons"
if pros > cons and pros > suggestions + 1:
return "Pros"
return "Suggestions"
for i in range(0, 10):
master.append({
"Category": adjudicate(),
"Country": df2['Country'].unique()[0],
"Leadership": df2['Leadership'].unique()[0],
"RecordID": df2['RecordID'].unique()[0],
"Region": _region(),
"Schema": df2['Schema'].unique()[0],
"Tag": df2['Tag'].unique()[0],
"Tenure": df2['Tenure'].unique()[0]})
df_output = pd.DataFrame(master)
return df_output
def process(self) -> DataFrame:
return self._summarize()
```
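The `adjudicate` rule inside `_summarize` is a simple vote over per-record category counts; a standalone restatement with a few invented count combinations:
```python
def adjudicate(cons: int, pros: int, suggestions: int) -> str:
    # Same precedence as in GenerateMetaSentiment._summarize.
    if cons >= pros - 1 and cons > suggestions:
        return "Cons"
    if pros > cons and pros > suggestions + 1:
        return "Pros"
    return "Suggestions"

print(adjudicate(cons=3, pros=2, suggestions=1))  # Cons
print(adjudicate(cons=0, pros=4, suggestions=1))  # Pros
print(adjudicate(cons=1, pros=1, suggestions=3))  # Suggestions
```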
#### File: core/svc/generate_polarity_report.py
```python
import pandas as pd
from pandas import DataFrame
from base import BaseObject
from base import LabelFormatter
class GeneratePolarityReport(BaseObject):
""" Generate a Summary of Sentiment Tags
Sample Output:
+----+-------------+-----------+--------------+--------------------------------------+---------------+-----------------------+----------+
| | Category | Country | Leadership | RecordID | Schema | Tag | Tenure |
|----+-------------+-----------+--------------+--------------------------------------+---------------+-----------------------+----------|
| 0 | Suggestions | Test | Test | 8d30ec64-0e74-11ea-b45d-acde48001122 | Test | Education | Test |
| 1 | Suggestions | Test | Test | 8d30ec64-0e74-11ea-b45d-acde48001122 | Test | Career Conversation | Test |
| 2 | Suggestions | Test | Test | 8d30ec64-0e74-11ea-b45d-acde48001122 | Collaboration | Career Conversation | Test |
| 3 | Suggestions | Test | Test | 8d30ec64-0e74-11ea-b45d-acde48001122 | Promotion | Career Conversation | Test |
| 4 | Pros | Test | Test | 8d30ec64-0e74-11ea-b45d-acde48001122 | Test | Positive Leadership | Test |
| 5 | Pros | Test | Test | 8d30ec64-0e74-11ea-b45d-acde48001122 | Test | Supportive | Test |
| 6 | Pros | Test | Test | 8d30ec64-0e74-11ea-b45d-acde48001122 | Test | Positive Team Culture | Test |
| 7 | Other | Test | Test | 8d30ec64-0e74-11ea-b45d-acde48001122 | Test | Request | Test |
| 8 | Cons | Test | Test | 8d30ec64-0e74-11ea-b45d-acde48001122 | Test | Career Growth | Test |
+----+-------------+-----------+--------------+--------------------------------------+---------------+-----------------------+----------+
"""
__l_pros = [
'positive team culture',
'positive leadership',
'supportive',
'appreciation',
'satisfied',
]
__l_cons = [
'career growth',
'no conversation',
'no career growth',
'improve culture',
'promotion',
'pay raise',
'churn',
'employee churn',
'no change',
'age',
'argument',
'bad process',
'bad quality',
'behavior',
'benefit',
'benefit pay',
'bias',
'bonus pay',
'cost cutting',
'cost of living',
'cost reduction',
]
__l_suggestions = [
'education',
'career conversation',
'appraisal',
'checkpoint',
'clients',
'collaborative culture',
'collaboration',
'customer support',
'customer experience'
]
def __init__(self,
df_report: DataFrame,
is_debug: bool = False):
"""
Created:
23-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1441
:param df_report:
a DataFrame that contains text with associated tags
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._df_report = df_report
def _categorize(self,
tag: str) -> str or None:
tag = tag.lower().strip()
if tag in self.__l_pros:
return "Pros"
if tag in self.__l_cons:
return "Cons"
if tag in self.__l_suggestions:
return "Suggestions"
@staticmethod
def _case(input_text: str) -> str:
return LabelFormatter.camel_case(input_text, split_tokens=True)
def process(self,
log_threshold: int = 1000) -> DataFrame:
from cendalytics.feedback.core.dto import SentimentRecordStructure
master = []
record_ids = self._df_report['RecordID'].unique()
ctr = 0
total_records = len(record_ids)
for record_id in record_ids:
df_by_record_id = self._df_report[self._df_report['RecordID'] == record_id]
ctr += 1
if ctr % log_threshold == 0:
self.logger.debug(f"Status: {ctr}-{total_records}")
for _, row in df_by_record_id.iterrows():
def _categorize():
category = self._categorize(row['Tag'])
if category:
return category
for schema in row['Schema']:
category = self._categorize(schema)
if category:
return category
return 'Other'
d_row = SentimentRecordStructure.deep_copy(row)
d_row['Tag'] = self._case(row['Tag'])
d_row['Category'] = self._case(_categorize())
master.append(d_row)
return pd.DataFrame(master).sort_values(by=['RecordID'], ascending=False)
```
#### File: core/svc/retrieve_source_records.py
```python
from base import BaseObject
from datamongo import CendantCollection
class RetrieveSourceRecords(BaseObject):
""" Retrieve Source Records for Feedback Sentiment Processing """
def __init__(self,
collection_name: str,
is_debug: bool = False):
"""
Created:
23-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1441
:param collection_name:
the name of the collection to retrieve the records from
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._collection = CendantCollection(some_collection_name=collection_name)
def process(self,
total_records:int=None) -> list:
def records():
if not total_records:
return self._collection.all()
return self._collection.all(limit=total_records)
records = records()
if self._is_debug:
self.logger.debug('\n'.join([
f"Retrieved Records (total={len(records)})",
f"\tCollection Name: {self._collection.collection_name}"]))
return records
```
#### File: geo/svc/generate_missing_geo.py
```python
import csv
import os
from base import BaseObject
GEO_BASE_PATH = "resources/output"
class GenerateMissingGeo(BaseObject):
""" Auto Generate missing city or country csv"""
_city_missing_count_dict = {}
_country_missing_count_dict = {}
def __init__(self):
"""
Created:
12-September-2019
<EMAIL>
* collect missing count for city and country
* generate missing cities, countries csv
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/863
"""
BaseObject.__init__(self, __name__)
@staticmethod
def collect_missing_count(data_type, value):
"""
Collect missing entry in country and city
:param data_type: city/country
:param value: value
:return:
"""
missing_dict = None
if data_type == "city":
missing_dict = GenerateMissingGeo._city_missing_count_dict
if data_type == "country":
missing_dict = GenerateMissingGeo._country_missing_count_dict
        if missing_dict is None:
            # unknown data_type; nothing to record
            return
        if value in missing_dict:
            missing_dict[value] += 1
        else:
            missing_dict[value] = 1
@staticmethod
def generate_missing_data(some_datatype):
"""
generate csv for missing entry city/country counts
:param some_datatype:
:return:
"""
missing_dict = None
filename = None
csv_file_path = os.path.join(os.environ["GTS_BASE"], GEO_BASE_PATH)
if some_datatype == "city":
filename = 'missing-city.csv'
missing_dict = GenerateMissingGeo._city_missing_count_dict
if some_datatype == "country":
filename = 'missing-country.csv'
missing_dict = GenerateMissingGeo._country_missing_count_dict
csv_columns = ['name', 'missing-count']
try:
with open(csv_file_path + '/' + filename, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for key, value in missing_dict.items():
writer.writerow({'name': key, 'missing-count': value})
except IOError:
            raise IOError(f"I/O Error while generating geo csv: {filename}")
```
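A short sketch of how the two static helpers above compose. The city and country values are illustrative only, and `GTS_BASE` must point at a checkout with a writable `resources/output` directory for the CSVs to land.

```python
# Hypothetical usage -- values are illustrative only.
from cendalytics.geo.svc import GenerateMissingGeo  # assumed import path

GenerateMissingGeo.collect_missing_count("city", "Springfield")
GenerateMissingGeo.collect_missing_count("city", "Springfield")
GenerateMissingGeo.collect_missing_count("country", "Freedonia")
GenerateMissingGeo.generate_missing_data("city")      # writes missing-city.csv
GenerateMissingGeo.generate_missing_data("country")   # writes missing-country.csv
```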
#### File: core/bp/report_api.py
```python
from base import BaseObject
from cendalytics.report.core.svc import GenerateFeedbackReport
from datamongo import BaseMongoClient
class ReportAPI(BaseObject):
"""
"""
def __init__(self,
mongo_client: BaseMongoClient = None,
is_debug: bool = False):
"""
Created:
13-Nov-2019
<EMAIL>
:param mongo_client:
:param is_debug:
"""
BaseObject.__init__(self, __name__)
if not mongo_client:
mongo_client = BaseMongoClient()
self._is_debug = is_debug
self._mongo_client = mongo_client
def report(self):
class Facade(object):
@classmethod
def by_serial_number(cls,
serial_number: str,
collection_date: str = "latest") -> GenerateFeedbackReport:
return GenerateFeedbackReport(key_field=serial_number,
source_data_name="supply",
collection_date=collection_date,
mongo_client=self._mongo_client,
is_debug=self._is_debug)
@classmethod
def by_openseat_id(cls,
openseat_id: str,
collection_date: str = "latest") -> GenerateFeedbackReport:
return GenerateFeedbackReport(key_field=openseat_id,
source_data_name="demand",
collection_date=collection_date,
mongo_client=self._mongo_client,
is_debug=self._is_debug)
@classmethod
def by_learning_id(cls,
learning_id: str,
collection_date: str = "latest") -> GenerateFeedbackReport:
return GenerateFeedbackReport(key_field=learning_id,
source_data_name="learning",
collection_date=collection_date,
mongo_client=self._mongo_client,
is_debug=self._is_debug)
return Facade()
```
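A hedged sketch of the facade above. The serial number is a placeholder, and whatever `GenerateFeedbackReport` exposes downstream is not shown in this file.

```python
# Hypothetical usage -- the serial number below is a placeholder.
from cendalytics.report.core.bp import ReportAPI  # assumed import path

api = ReportAPI(is_debug=True)
feedback_report = api.report().by_serial_number(serial_number="000000000",
                                                collection_date="latest")
# feedback_report is a GenerateFeedbackReport instance; its own API is defined elsewhere
```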
#### File: core/dmo/ranking_report_writer.py
```python
from openpyxl.worksheet.worksheet import Worksheet
COLUMNS = {"A": 20,
"B": 10,
"C": 10,
"D": 10,
"E": 10,
"F": 10,
"G": 10,
"H": 10,
"I": 10}
class RankingReportWriter(object):
def __init__(self,
some_excel_worksheet: Worksheet,
some_source_dimension: list,
some_target_dimensions: list,
some_final_ranking: list):
"""
:param some_excel_worksheet:
the excel worksheet to write to
"""
from . import WorksheetHelper
if not some_excel_worksheet:
raise ValueError("Mandatory Param: Excel Worksheet")
if not some_source_dimension:
raise ValueError("Mandatory Param: Source Dimension")
if not some_target_dimensions:
            raise ValueError("Mandatory Param: Target Dimension")
if not some_final_ranking:
raise ValueError("Mandatory Param: Final Ranking")
self.worksheet = some_excel_worksheet
self.source_dimension = some_source_dimension
self.target_dimensions = some_target_dimensions
self.final_ranking = some_final_ranking
self.helper = WorksheetHelper
def _write_value(self,
some_column: str,
some_row: int,
some_text: str,
some_named_format: str):
"""
:param some_column:
:param some_row:
:param some_text:
:param some_named_format:
"""
cell = "{}{}".format(some_column,
some_row)
self.worksheet[cell].value = some_text
self.worksheet[cell].style = some_named_format
def _write_records(self,
source_weights: list,
source_values: list):
""" writes records by row and column """
def _dimension_value(value: str) -> dict:
return self.helper.struct(value, "dimension_value_source")
def _dimension_weight(value: str) -> dict:
return self.helper.struct(value, "dimension_weight_source")
def _header_dimension(value: str) -> dict:
return self.helper.struct(value, "header_dimension")
def _header_other(value: str) -> dict:
return self.helper.struct(value, "header_other")
def _field_key(value: str) -> dict:
return self.helper.struct(value, "keyfield")
def _field_weight(value: str) -> dict:
return self.helper.struct(value, "field_weight_source")
def _field_rank(value: str) -> dict:
return self.helper.struct(value, "field_rank")
d_row_1 = {
"A1": _header_other("Open Seat ID"),
"B1": _header_dimension("Cloud"),
"C1": _header_dimension("Database"),
"D1": _header_dimension("System Administrator"),
"E1": _header_dimension("Hard Skill"),
"F1": _header_dimension("Project Management"),
"G1": _header_dimension("Service Management"),
"H1": _header_dimension("Soft Skill"),
"I1": _header_other("Rank")}
d_row_2 = {
"A2": self.helper.struct(self.source_dimension[0]["key_field"],
"keyfield_value_source"),
"B2": _dimension_value(source_values[0]),
"C2": _dimension_value(source_values[1]),
"D2": _dimension_value(source_values[6]),
"E2": _dimension_value(source_values[2]),
"F2": _dimension_value(source_values[3]),
"G2": _dimension_value(source_values[4]),
"H2": _dimension_value(source_values[5])}
d_row_3 = {
"A3": self.helper.struct("Weight",
"dimension_weight_text"),
"B3": _dimension_weight(source_weights[0]),
"C3": _dimension_weight(source_weights[1]),
"D3": _dimension_weight(source_weights[6]),
"E3": _dimension_weight(source_weights[2]),
"F3": _dimension_weight(source_weights[3]),
"G3": _dimension_weight(source_weights[4]),
"H3": _dimension_weight(source_weights[5])}
def _field_weight_value(target_dimension: dict,
slot_name: str) -> str:
return target_dimension["slots"][slot_name]["weight"]
l_values = []
for i in range(0, len(self.target_dimensions)):
l_values.append({
"A{}".format(i + 5): _field_key(
self.target_dimensions[i]["key_field"]),
"B{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "cloud")),
"C{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "database")),
"D{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "system administrator")),
"E{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "hard skill")),
"F{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "project management")),
"G{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "service management")),
"H{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "soft skill")),
"I{}".format(i + 5): _field_rank(
self.final_ranking[i])})
self.helper.generate(self.worksheet,
[d_row_1, d_row_2, d_row_3])
self.helper.generate(self.worksheet,
l_values)
def process(self):
"""
Processes the logs from the input directory
@input: Base directory containing the input and output subdirs.
@output: None
"""
def _weights(some_records: list) -> list:
weights = []
for record in some_records:
weights.append([record["slots"][x]["weight"]
for x in record["slots"]])
return weights
def _values(some_records: list) -> list:
values = []
for record in some_records:
values.append([record["slots"][x]["z_score"]
for x in record["slots"]])
return values
source_weights = _weights(self.source_dimension)[0]
source_values = _values(self.source_dimension)[0]
self.helper.column_widths(self.worksheet,
COLUMNS)
self._write_records(source_weights,
source_values)
```
#### File: core/dmo/record_retriever.py
```python
from tabulate import tabulate
from base import BaseObject
from datamongo import BaseMongoClient
from datamongo import CendantCollection
from datamongo import CendantXdm
from datamongo import TransformCendantRecords
class RecordRetriever(BaseObject):
def __init__(self,
key_field: str,
mongo_client: BaseMongoClient,
collection_names: dict,
is_debug: bool = False):
"""
Created:
13-Nov-2019
<EMAIL>
* refactored out of 'generate-feedback-report'
Updated:
14-Nov-2019
<EMAIL>
* added random record capability
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1331#issuecomment-16009658
:param key_field:
the value of the keyfield to process
(e.g. the actual Serial Number or Open Seat ID)
:param collection_names:
a dictionary containing a complete set of collection names
Sample Input:
{ 'src': 'supply_src_20191025',
'tag': 'supply_tag_20191025',
'xdm': 'supply_xdm_20191029' }
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._key_field = key_field
self._mongo_client = mongo_client
self._collection_names = collection_names
def _retrieve_record(self,
collection_name: str):
collection = CendantCollection(is_debug=self._is_debug,
some_base_client=self._mongo_client,
some_collection_name=collection_name)
if self._key_field.lower().strip() == "random": # GIT-1331-16009658
return collection.random(total_records=1)[0]
return collection.by_key_field(self._key_field)
def _tag_record(self) -> dict or None:
        if not self._collection_names["tag"]:
            self.logger.warning('\n'.join([
                "TAG Collection Not Found",
                f"\t{self._collection_names}"]))
            return None
        tag_record = self._retrieve_record(self._collection_names["tag"])
if not tag_record:
self.logger.warning('\n'.join([
f"TAG Record Not Found ("
f"key-field={self._key_field})"]))
return None
if self._is_debug:
df_record = TransformCendantRecords.to_dataframe(a_record=tag_record,
include_text=False)
self.logger.debug('\n'.join([
f"Retrieved TAG Record ("
f"key-field={self._key_field})",
tabulate(df_record, tablefmt='psql', headers='keys')]))
return tag_record
def _xdm_record(self) -> dict or None:
if not self._collection_names["xdm"]:
self.logger.warning('\n'.join([
"XDM Collection Not Found",
f"\t{self._collection_names}"]))
return None
xdm_record = self._retrieve_record(self._collection_names["xdm"])
if not xdm_record:
self.logger.warning('\n'.join([
f"XDM Record Not Found ("
f"key-field={self._key_field})"]))
return None
if self._is_debug:
self.logger.debug('\n'.join([
f"Retrieved XDM Record ("
f"key-field={self._key_field})",
tabulate(CendantXdm.dataframe(xdm_record),
tablefmt='psql',
headers='keys')]))
return xdm_record
def process(self) -> dict:
svcresult = {
"tag": self._tag_record(),
"xdm": self._xdm_record()}
if self._is_debug:
self.logger.debug('\n'.join([
"Records Retrieved",
f"\tKey Field: {self._key_field}",
f"\tTAG Collection: {self._collection_names['tag']}",
f"\tXDM Collection: {self._collection_names['xdm']}"]))
return svcresult
```
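A usage sketch assuming the collection names from the docstring sample above; passing `"random"` as the key field pulls an arbitrary record, as noted in `_retrieve_record`. The import path for `RecordRetriever` itself is not shown above.

```python
# Hypothetical usage -- collection names follow the docstring sample and may not exist.
from datamongo import BaseMongoClient
# from <dmo package shown above> import RecordRetriever   (module path not shown here)

retriever = RecordRetriever(key_field="random",
                            mongo_client=BaseMongoClient(),
                            collection_names={'src': 'supply_src_20191025',
                                              'tag': 'supply_tag_20191025',
                                              'xdm': 'supply_xdm_20191029'},
                            is_debug=True)
svcresult = retriever.process()  # {'tag': <record or None>, 'xdm': <record or None>}
```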
#### File: core/dmo/tag_report_writer.py
```python
from openpyxl.worksheet.worksheet import Worksheet
from base import BaseObject
from datadict import FindDimensions
from datadict import FindRelationships
COLUMNS = {"A": 25,
"B": 25,
"C": 45,
"D": 45,
"E": 25,
"F": 25,
"G": 25,
"H": 25,
"I": 25}
class TagReportWriter(BaseObject):
def __init__(self,
key_field: str,
excel_worksheet: Worksheet,
tag_record: dict,
is_debug: bool = False):
"""
Created:
12-Nov-2019
<EMAIL>
* based on 'feedback-report-writer'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1331#issuecomment-15953128
:param key_field:
the value of the keyfield to process
(e.g. the actual Serial Number or Open Seat ID)
:param excel_worksheet:
the excel worksheet to write to
:param tag_record:
:param is_debug:
"""
BaseObject.__init__(self, __name__)
from . import WorksheetHelper
self._is_debug = is_debug
self._key_field = key_field
self._tag_record = tag_record
self._excel_worksheet = excel_worksheet
self._helper = WorksheetHelper
self._rel_finder = FindRelationships()
self._supply_finder = FindDimensions("supply")
self._learning_finder = FindDimensions("learning")
def _write_value(self,
some_column: str,
some_row: int,
some_text: str,
some_named_format: str):
"""
:param some_column:
:param some_row:
:param some_text:
:param some_named_format:
"""
cell = "{}{}".format(some_column,
some_row)
self._excel_worksheet[cell].value = some_text
self._excel_worksheet[cell].style = some_named_format
def _parent_hierarchy(self,
tag: str) -> str:
"""
Purpose:
Show the entity ancestry back to root
this is useful for demonstrating the tag's context in the Ontology
Example 1:
Input: 'W3C standard'
Output: 'standard, entity'
Example 2:
Input: 'deep learning'
Output: 'cognitive skill, technical skill, skill'
'statistical algorithm, technical skill, skill'
:param tag:
any input tag
:return:
a string value representing the ancestry of the input tag
"""
return '\n'.join([', '.join(x) for x in self._rel_finder.ancestors(tag)])
def _supply_schema(self,
tag: str) -> str:
"""
Purpose:
Find the associated supply schema elements
:param tag:
any tag from a mongoDB field
:return:
the result as a comma-separated string
"""
return ', '.join(self._supply_finder.find(tag))
def _learning_schema(self,
tag: str) -> str:
"""
Purpose:
Find the associated learning schema elements
:param tag:
any tag from a mongoDB field
:return:
the result as a comma-separated string
"""
return ', '.join(self._learning_finder.find(tag))
@staticmethod
def _field_data(a_field: dict) -> str:
"""
Purpose:
Format a field meta-data expression
Format:
Field Name (Field Type)
Sample Output:
position_description (long-text)
:param a_field:
any field of a mongoDB record
:return:
the formatted text
"""
return f"{a_field['name']} ({a_field['type']})\n{a_field['field_id']}"
@staticmethod
def _original_text(a_field: dict) -> str:
"""
Purpose:
Return the original text as a string
:param a_field:
any field of a mongoDB record
:return:
the normalized text
"""
if type(a_field["value"]) == list:
return " ".join([str(x) for x in a_field["value"]])
return a_field["value"]
def _normalized_text(self,
a_field: dict) -> str:
"""
Purpose:
Conditionally return the normalized text
Conditions
1. Normalized Text must exist
2. Normalized Text must not be equivalent to Original Text
:param a_field:
any field of a mongoDB record
:return:
the normalized text
"""
if "normalized" not in a_field:
return ""
normalized_text = " ".join(a_field["normalized"])
if normalized_text == self._original_text(a_field):
return ""
return normalized_text
@staticmethod
def _tags(a_field: dict) -> dict:
"""
Purpose:
Key Tags by Confidence Level in descending order
Sample Output:
{ 91.5: [ 'artificial neural network'],
78.2: [ 'deploy',
'google',
'tensorflow'],
88.2: [ 'team lead'] }
:param a_field:
any field of a mongoDB record
:return:
            a dictionary keyed by Confidence level
"""
d = {}
for x in a_field["tags"]["supervised"]:
if x[1] not in d:
d[x[1]] = []
d[x[1]].append(x[0])
return d
def _write_records(self):
""" writes records by row and column """
def _header_other(value: str) -> dict:
return self._helper.struct(value, "header_other")
def _collection_name(value: str) -> dict:
return self._helper.struct(value, "collection_name")
def _collection_value(value: str) -> dict:
return self._helper.struct(value, "collection_value")
def _collection_text(value: str) -> dict:
return self._helper.struct(value, "collection_text")
d_row_1 = {
"A1": _header_other(self._tag_record["key_field"]),
"B1": _header_other("Field MetaData"),
"C1": _header_other("Original"),
"D1": _header_other("Normalized"),
"E1": _header_other("Tag"),
"F1": _header_other("Confidence"),
"G1": _header_other("Parents"),
"H1": _header_other(f"Supply Schema"),
"I1": _header_other(f"Learning Schema")}
row = 2
l_structs = []
for field in self._tag_record["fields"]:
field_data = self._field_data(a_field=field)
original_text = self._original_text(a_field=field)
normalized_text = self._normalized_text(a_field=field)
if "tags" in field:
tag_ctr = 1
d_tags = self._tags(a_field=field)
for tag_confidence in sorted(d_tags.keys()):
for tag_name in d_tags[tag_confidence]:
parents = self._parent_hierarchy(tag_name)
supply_schema = self._supply_schema(tag_name)
learning_schema = self._learning_schema(tag_name)
if tag_ctr == 1:
l_structs.append({
"A{}".format(row): _collection_value(field["collection"]["name"]),
"B{}".format(row): _collection_value(field_data),
"C{}".format(row): _collection_text(original_text),
"D{}".format(row): _collection_text(normalized_text),
"E{}".format(row): _collection_value(tag_name),
"F{}".format(row): _collection_value(tag_confidence),
"G{}".format(row): _collection_value(parents),
"H{}".format(row): _collection_value(supply_schema),
"I{}".format(row): _collection_value(learning_schema)})
elif tag_ctr > 1:
l_structs.append({
"A{}".format(row): _collection_text(""),
"B{}".format(row): _collection_text(""),
"C{}".format(row): _collection_text(""),
"D{}".format(row): _collection_text(""),
"E{}".format(row): _collection_value(tag_name),
"F{}".format(row): _collection_value(tag_confidence),
"G{}".format(row): _collection_value(parents),
"H{}".format(row): _collection_value(supply_schema),
"I{}".format(row): _collection_value(learning_schema)})
tag_ctr += 1
row += 1
else:
l_structs.append({
"A{}".format(row): _collection_name(field["collection"]["name"]),
"B{}".format(row): _collection_text(original_text),
"C{}".format(row): _collection_text(normalized_text),
"D{}".format(row): _collection_text(""),
"E{}".format(row): _collection_text(""),
"F{}".format(row): _collection_text(""),
"G{}".format(row): _collection_text(""),
"H{}".format(row): _collection_text(""),
"I{}".format(row): _collection_text("")})
self._helper.generate(worksheet=self._excel_worksheet,
l_structs=[d_row_1])
self._helper.generate(worksheet=self._excel_worksheet,
l_structs=l_structs)
def process(self):
self._helper.column_widths(worksheet=self._excel_worksheet,
d_columns=COLUMNS)
self._write_records()
```
#### File: core/dmo/worksheet_helper.py
```python
import string
from openpyxl.worksheet.worksheet import Worksheet
class WorksheetHelper:
@classmethod
def column_letters(cls) -> list:
"""
Purpose:
Generate all possible excel column letters
Sample Output:
[ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'AA', 'AB', 'AC', ... 'ZX', 'ZY', 'ZZ']
:return:
list of excel column letters
"""
alphabet = [x for x in string.ascii_uppercase]
for ch in string.ascii_uppercase:
[alphabet.append(alpha) for alpha in [f"{ch}{x}" for x in string.ascii_uppercase]]
return alphabet
@classmethod
def column_widths(cls,
worksheet: Worksheet,
d_columns: dict) -> None:
"""
:param worksheet:
an active Excel worksheet
:param d_columns:
a dictionary of columns and widths
e.g. { 'A': 10,
'B': 10 }
"""
def _update(alpha: str):
worksheet.column_dimensions[alpha].width = d_columns[alpha]
[_update(alpha) for alpha in d_columns]
@classmethod
def struct(cls,
value: str,
style: str) -> dict:
return {"value": value,
"style": style}
@classmethod
def generate(cls,
worksheet: Worksheet,
l_structs: list) -> None:
"""
:param worksheet:
an active Excel worksheet
:param l_structs:
a list of structures
e.g. [ { 'value': 'Some Text Value',
'style': 'predefined-style' },
...
{ 'value': 'Another Text Value',
'style': 'some-other-style' } ]
"""
for d in l_structs:
for k in d:
worksheet[k].value = d[k]["value"]
worksheet[k].style = d[k]["style"]
```
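A compact sketch showing how the helper drives openpyxl. `'Normal'` is openpyxl's built-in named style, used here only because the named formats referenced elsewhere (e.g. `header_other`) must first be registered on the workbook; the output path is a placeholder.

```python
# Hypothetical usage of WorksheetHelper with a fresh workbook.
from openpyxl import Workbook

wb = Workbook()
ws = wb.active
WorksheetHelper.column_widths(ws, {"A": 20, "B": 10})
WorksheetHelper.generate(ws, [{"A1": WorksheetHelper.struct("Open Seat ID", "Normal"),
                               "B1": WorksheetHelper.struct("Rank", "Normal")}])
wb.save("helper_demo.xlsx")  # placeholder output path
```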
#### File: consumer/recipes/tag_search_tutorial.py
```python
from base import BaseObject
from cendalytics.skills.core.bp import SkillsReportAPI
from datamongo import CendantCollectionRegistry
class TagSearchTutorial(BaseObject):
"""
"""
def __init__(self,
is_debug: bool = False):
"""
Created:
13-Nov-2019
<EMAIL>
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._api = SkillsReportAPI(is_debug=is_debug)
self._collection_registry = CendantCollectionRegistry(is_debug=self._is_debug)
def single_tag_single_division(self) -> None:
"""
Purpose:
Find Ansible Skills in GBS
:return:
"""
collection_name = self._collection_registry.by_date("20191025").supply().tag()
searcher = self._api.search(collection_name=collection_name)
searcher.tags(tags=["Ansible"], div_field="GBS").process()
def main():
TagSearchTutorial(is_debug=True).single_tag_single_division()
if __name__ == "__main__":
main()
```
#### File: core/fcd/skills_report_on_certifications.py
```python
from typing import Optional
from base import BaseObject
from base import DataTypeError
from base import MandatoryParamError
from cendalytics.skills.core.svc import FindSelfReportedCertifications
class SkillsReportOnCertifications(BaseObject):
""" Facade: Service that finds Self-Reported Certifications in user CVs and HR data """
def __init__(self,
collection_name: str,
exclude_vendors: Optional[list],
add_normalized_text: bool = True,
aggregate_data: bool = False,
mongo_database_name: str = 'cendant',
is_debug: bool = False):
"""
Created:
8-Nov-2019
<EMAIL>
* refactored out of skills-report-api
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1293
:param collection_name:
the collection from which to query data
this should be the most recent annotation run against 'supply-tag' (e.g., supply_tag_20190801)
:param exclude_vendors:
            Optional: a list of vendors to exclude
            this is not case sensitive
            for example, ['ibm'] will exclude all IBM certifications from this report
:param add_normalized_text:
add the normalized (pre-processed) field text to the dataframe result
Reference:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/815#issuecomment-14125072
:param aggregate_data:
if True return aggregate data only
+----+-------------------------------------------------+-------------+----------------+
| | Certification | Frequency | Vendor |
|----+-------------------------------------------------+-------------+----------------|
| 0 | ITIL Certification | 8176 | Axelos |
| 1 | ITIL Foundation Certification | 15450 | Axelos |
| 2 | IBM Certification | 21062 | IBM |
| 3 | CCNA Certification | 4899 | Cisco |
| 4 | Java Certification | 2224 | Oracle |
| 5 | Project Management Professional | 1660 | PMI |
| 6 | Level 3 Certification | 415 | IBM |
| 7 | CCNA Security | 142 | Cisco |
| 8 | Microsoft Certification | 1880 | Microsoft |
+----+-------------------------------------------------+-------------+----------------+
if False return individual level data
+----+-------------------------------------------------+----------------+----------------+
| | Certification | SerialNumber | Vendor |
|----+-------------------------------------------------+----------------+----------------|
| 0 | ITIL Certification | 123456 | Axelos |
| 1 | ITIL Foundation Certification | 227232 | Axelos |
| 2 | IBM Certification | 9483223 | IBM |
| 3 | CCNA Certification | 9483223 | Cisco |
| 4 | Java Certification | 9483223 | Oracle |
| 5 | Project Management Professional | 923823 | Oracle |
| 6 | Level 3 Certification | 009238323 | IBM |
| 7 | CCNA Security | 009238323 | Cisco |
| 8 | Microsoft Certification | 2371221 | Microsoft |
+----+-------------------------------------------------+----------------+----------------+
:param mongo_database_name:
the database containing the MongoDB collections (e.g., cendant)
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._aggregate_data = aggregate_data
self._collection_name = collection_name
self._mongo_database_name = mongo_database_name
self._add_normalized_text = add_normalized_text
self._exclude_vendors = self._transform_exclude_vendors(exclude_vendors)
@staticmethod
def _transform_exclude_vendors(exclude_vendors: Optional[list]) -> list:
if not exclude_vendors:
return []
return exclude_vendors
def _validate(self) -> None:
if not self._collection_name:
raise MandatoryParamError("Collection Name")
if type(self._collection_name) != str:
raise DataTypeError
def initialize(self) -> FindSelfReportedCertifications:
"""
Purpose:
Execute the Report to find "Self-Reported Certifications"
:return:
a pandas DataFrame with the results
"""
# Step: Perform Data Validation
self._validate()
self.logger.info('\n'.join([
"Instantiate Self-Reported Certifications Report",
f"\tAggregate Data: {self._aggregate_data}",
f"\tExclude Vendors: {self._exclude_vendors}",
f"\tAdd Normalized Text: {self._add_normalized_text}",
f"\tCollection: {self._collection_name}"]))
# Step: Instantiate Service
return FindSelfReportedCertifications(exclude_vendors=self._exclude_vendors,
aggregate_data=self._aggregate_data,
add_normalized_text=self._add_normalized_text,
collection_name=self._collection_name,
mongo_database_name=self._mongo_database_name,
is_debug=self._is_debug)
```
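A hedged end-to-end sketch of the facade. The collection name is a placeholder, a reachable mongoDB instance is required, and `process()` comes from `FindSelfReportedCertifications` (shown further below).

```python
# Hypothetical usage -- the collection name is a placeholder.
facade = SkillsReportOnCertifications(collection_name="supply_tag_20190801",
                                      exclude_vendors=["ibm"],
                                      aggregate_data=True,
                                      is_debug=True)
df_certifications = facade.initialize().process()
```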
#### File: core/fcd/skills_utility_to_csv.py
```python
from pandas import DataFrame
from base import BaseObject
from cendalytics.skills.core.svc import GenerateCsvReport
class SkillsUtilityToCSV(BaseObject):
""" Facade: Service that generates a CSV feedback from a Skills-based pandas DataFrame """
def __init__(self,
df_results: DataFrame,
hash_serial_number: bool = False,
is_debug: bool = False):
"""
Created:
8-Nov-2019
<EMAIL>
* refactored out of skills-report-api
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1293
:param df_results:
a DataFrame of search results
:param hash_serial_number:
if True (default) return an MD5 hash of the serial number
if False return the actual Serial Number
note - individual Serial Numbers are exposed internally
to all employees in Bluepages
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._df_results = df_results
self._hash_serial_number = hash_serial_number
def _validator(self) -> None:
from cendalytics.skills.core.dto import SkillsReportValidator
if type(self._df_results) != DataFrame:
SkillsReportValidator.expected(self._df_results, "pandas.DataFrame")
if type(self._hash_serial_number) != bool:
SkillsReportValidator.expected(self._hash_serial_number, "bool")
def initialize(self) -> GenerateCsvReport:
"""
:return:
an instantiated service
"""
# Step: Perform Data Validation
self._validator()
self.logger.info('\n'.join([
"Instantiate CSV Report Generator",
f"\tDF: {len(self._df_results)}",
f"\tHash Serial Number? {self._hash_serial_number}"]))
# Step: Instantiate Service
return GenerateCsvReport(df=self._df_results,
hash_serial_number=self._hash_serial_number)
```
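An illustrative sketch only; the DataFrame stands in for real search results, and `GenerateCsvReport`'s own interface is not shown in this file.

```python
# Hypothetical usage -- placeholder search results.
import pandas as pd

df_results = pd.DataFrame([{"SerialNumber": "123456", "Tag": "ansible"}])  # placeholder data
csv_generator = SkillsUtilityToCSV(df_results=df_results,
                                   hash_serial_number=True,
                                   is_debug=True).initialize()
```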
#### File: core/svc/find_self_reported_certifications.py
```python
import time
from typing import Union
from pandas import DataFrame
from base import BaseObject
from datadict import FindCertifications
from datamongo import BaseMongoClient
from datamongo import CendantTag
class FindSelfReportedCertifications(BaseObject):
"""
Purpose:
Service that finds Self-Reported Certifications in user CVs and HR data
a "self-reported" certification is one that cannot be externally verifiable.
Examples:
1. an IBM employee may earn a Level 2 badge Certification from IBM for Architecture.
This can be verified using data we have.
2. an IBM employee can earn a certification that results in a badge.
This can be verified - IF - IBM has access to that particular badge data
(IBM can access some, but not all, badges)
3. an IBM employee can earn a Microsoft certification
While this can be verified through a Microsoft directory, we likely will not have access to that data
                and therefore cannot provide an automated system confirmation. Thus, this would remain an example of a
"self-reported certification" meaning there is no automated independent verification possible.
Traceability:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/606
Prereq:
a populated "Supply_Tag_***" collection (e.g., 'supply_tag_20190801')
"""
def __init__(self,
collection_name: str,
exclude_vendors: list,
add_normalized_text: bool,
aggregate_data: bool,
mongo_database_name: str,
server_alias: str = 'cloud',
is_debug: bool = False):
"""
Created:
5-Aug-2019
<EMAIL>
Purpose:
Execute the Report to find "Self-Reported Certifications"
:param collection_name:
the collection from which to query data
this should be the most recent annotation run against 'supply-tag' (e.g., supply_tag_20190801)
:param add_normalized_text:
add the normalized (pre-processed) field text to the dataframe result
Reference:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/815#issuecomment-14125072
:param aggregate_data:
if True return aggregate data only
[Certification, Vendor, Frequency]
:param exclude_vendors:
            Optional: a list of vendors to exclude
            this is not case sensitive
            for example, ['ibm'] will exclude all IBM certifications from this report
:param mongo_database_name:
the database containing the MongoDB collections (e.g., cendant)
        :param server_alias:
            the alias of the mongoDB server to connect to (default: 'cloud')
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._aggregate_data = aggregate_data
self._exclude_vendors = exclude_vendors
self._add_normalized_text = add_normalized_text
self._cert_finder = FindCertifications()
self._collection = CendantTag(collection_name=collection_name,
database_name=mongo_database_name,
mongo_client=BaseMongoClient(server_alias=server_alias),
is_debug=True)
if self._is_debug:
self.logger.debug('\n'.join([
f"\tInitialize FindSelfReportedCertifications: ",
f"\tDatabase Name: {mongo_database_name}",
f"\tCollection Name: {collection_name}",
f"\tAggregate Data: {self._aggregate_data}",
f"\tExclude Vendors: {self._exclude_vendors}",
f"\tAdd Normalized Text: {self._add_normalized_text}"]))
def _result(self) -> Union[DataFrame, None]:
"""
Purpose:
1. Individual Dataframe (default)
                https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/617
                This dataframe returns results record-by-record
+----+-------------------------------------------------+----------------+----------------+
| | Certification | SerialNumber | Vendor |
|----+-------------------------------------------------+----------------+----------------|
| 0 | ITIL Certification | 123456 | Axelos |
| 1 | ITIL Foundation Certification | 227232 | Axelos |
| 2 | IBM Certification | 9483223 | IBM |
| 3 | CCNA Certification | 9483223 | Cisco |
| 4 | Java Certification | 9483223 | Oracle |
| 5 | Project Management Professional | 923823 | Oracle |
| 6 | Level 3 Certification | 009238323 | IBM |
| 7 | CCNA Security | 009238323 | Cisco |
| 8 | Microsoft Certification | 2371221 | Microsoft |
+----+-------------------------------------------------+----------------+----------------+
Each Serial Number, Certification and Vendor is represented
2. Aggregated dataframe
                https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/606
                This dataframe returns aggregated statistics at the Certification level
+----+-------------------------------------------------+-------------+----------------+
| | Certification | Frequency | Vendor |
|----+-------------------------------------------------+-------------+----------------|
| 0 | ITIL Certification | 8176 | Axelos |
| 1 | ITIL Foundation Certification | 15450 | Axelos |
| 2 | IBM Certification | 21062 | IBM |
| 3 | CCNA Certification | 4899 | Cisco |
| 4 | Java Certification | 2224 | Oracle |
| 5 | Project Management Professional | 1660 | PMI |
| 6 | Level 3 Certification | 415 | IBM |
| 7 | CCNA Security | 142 | Cisco |
| 8 | Microsoft Certification | 1880 | Microsoft |
+----+-------------------------------------------------+-------------+----------------+
Serial Numbers are left out of this.
:return:
the result Dataframe
"""
from cendalytics.skills.core.dmo import CertificationAggregateReport
from cendalytics.skills.core.dmo import CertificationIndividualReport
if self._aggregate_data:
if self._is_debug:
self.logger.debug("Running Aggregate Report")
return CertificationAggregateReport(collection=self._collection,
is_debug=self._is_debug).process()
if self._is_debug:
self.logger.debug("Running Individual Report")
return CertificationIndividualReport(collection=self._collection,
is_debug=self._is_debug).process()
def _exclusions(self,
dataframe: DataFrame) -> DataFrame:
"""
Purpose:
Exclude Blacklisted Vendors
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/624
:param dataframe:
the original dataframe
:return:
the modified dataframe
"""
for vendor in self._exclude_vendors:
dataframe = dataframe[dataframe.Vendor != vendor]
return dataframe
def _subsumptions(self,
dataframe: DataFrame) -> DataFrame:
"""
Purpose:
Remove Subsumed Certifications
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/623
:param dataframe:
the original dataframe
:return:
the modified dataframe
"""
from cendalytics.skills.core.svc import RemoveSubsumedCertifications
return RemoveSubsumedCertifications(dataframe=dataframe,
is_debug=self._is_debug).process()
def _label_transformation(self,
dataframe: DataFrame) -> DataFrame:
"""
Purpose:
Perform Certification Labelling
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/981
:param dataframe:
the original dataframe
:return:
the modified dataframe
"""
from cendalytics.skills.core.svc import PerformCertificationLabelling
return PerformCertificationLabelling(dataframe=dataframe,
is_debug=self._is_debug).process()
def process(self) -> DataFrame:
start = time.time()
df_results = self._result()
if df_results is None or df_results.empty:
raise ValueError(f"No Records Found "
f"(collection={self._collection.collection.collection_name})")
df_results = self._exclusions(df_results)
df_results = self._subsumptions(df_results)
if self._is_debug:
end_time = time.time() - start
self.logger.debug(f"Generated Report: "
f"Time={end_time}, "
f"Total={len(df_results)}")
return df_results.sort_values(by=['Confidence'],
ascending=False)
```
#### File: core/svc/generate_division_distribution.py
```python
from collections import Counter
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
from base import MandatoryParamError
from datamongo import BaseMongoClient
from datamongo import CendantCollection
class GenerateDivisionDistribution(BaseObject):
"""
Purpose:
Generate a record counter by distribution
Sample Output:
+----+---------+------------+
| | Count | Division |
|----+---------+------------|
| 1 | 96773 | gbs |
| 0 | 85485 | |
| 3 | 26969 | fno |
| 2 | 15203 | cloud |
| 4 | 12311 | systems |
| 5 | 11177 | chq_oth |
+----+---------+------------+
"""
def __init__(self,
collection_name: str,
host_name: str = None, # deprecated/ignored
is_debug: bool = True):
"""
Created:
28-Oct-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1209
:param collection_name:
any valid collection name that contains a 'div_field' at the record root
:param host_name:
deprecated/ignored
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
if not collection_name:
raise MandatoryParamError("Collection Name")
self._is_debug = is_debug
self._collection = CendantCollection(is_debug=self._is_debug,
some_collection_name=collection_name,
some_base_client=BaseMongoClient())
@staticmethod
def _count(records: list) -> Counter:
c = Counter()
for record in records:
c.update({record["div_field"]: 1})
return c
@staticmethod
def _to_dataframe(c: Counter) -> DataFrame:
results = []
for k in c:
results.append({"Division": k, "Count": float(c[k])})
return pd.DataFrame(results).sort_values(by=['Count'], ascending=False)
def process(self) -> DataFrame:
df = self._to_dataframe(
self._count(
self._collection.all()))
if self._is_debug:
self.logger.debug('\n'.join([
"Division Distributed Generated",
tabulate(df, headers='keys', tablefmt='psql')]))
return df
```
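A minimal sketch, assuming a reachable mongoDB instance; the collection name is a placeholder taken from the style used elsewhere in this repository.

```python
# Hypothetical usage -- the collection name is a placeholder.
df_divisions = GenerateDivisionDistribution(collection_name="supply_tag_20191025",
                                            is_debug=True).process()
print(df_divisions.head())
```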
#### File: core/svc/perform_certification_labelling.py
```python
import pandas as pd
from pandas import DataFrame
from base import BaseObject
from datadict import FindCertifications
class PerformCertificationLabelling(BaseObject):
"""
Purpose:
Proper Label Certification Report
e.g., transform 'itil certification' => 'ITIL Certification'
Traceability:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/981
"""
def __init__(self,
dataframe: DataFrame,
is_debug: bool = False):
"""
Created:
23-Sept-2019
<EMAIL>
Purpose:
Execute the Report to find "Self-Reported Certifications"
:param dataframe:
dataframe
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._dataframe = dataframe
if self._is_debug:
self.logger.debug(f"Initialize PerformCertificationLabelling")
    def process(self) -> DataFrame:
        # NOTE: the labelling transformation described above is not implemented here yet;
        # this method is currently a pass-through that logs a sample of the input
        if self._is_debug:
            self.logger.debug(f"Certification Labelling Input:\n{self._dataframe.head(5)}")
        return self._dataframe
```
#### File: sna/bp/social_network_analysis_api.py
```python
from base import BaseObject
from base import MandatoryParamError
class SocialNetworkAnalysisAPI(BaseObject):
""" API to Persist Data in DB2 """
def __init__(self):
"""
Created:
- 13-July-2019
- <EMAIL>
"""
BaseObject.__init__(self, __name__)
@staticmethod
def socialnetworkanalysis(url, intranet_id, intranet_pass):
from cendalytics.sna import SocialNetworkOrchestrator
from cendalytics.sna import InputCredentials
# set credentials
InputCredentials.set_config_details(intranet_id, intranet_pass)
social_network_api = SocialNetworkOrchestrator(url)
sentiment_analysis = social_network_api.process()
return sentiment_analysis
@staticmethod
def process(blog_url, username='', password='', db2username='', db2password=''):
import getpass
try:
print("Enter credentials:\n")
if not username:
username = input("Intranet ID: ")
if not password:
password = getpass.getpass(prompt='Your Intranet password: ')
# db2username = input("DB2 Username: ")
# db2password = getpass.getpass(prompt='DB2 Password: ')
except Exception as err:
print('ERROR:', err)
else:
print("\n".join([
"API Parameters",
"\tIntranet-id: {}".format(username)]))
print("\n".join([
"API Parameters",
"\tblog_url: {}".format(blog_url)
]))
if not username:
raise MandatoryParamError("Intranet id")
if not password:
raise MandatoryParamError("Intranet Password")
# if not db2username:
# raise MandatoryParamError("DB2 Username")
# if not db2password:
# raise MandatoryParamError("DB2 Password")
if blog_url.startswith("http"):
sentiment_analysis_json = SocialNetworkAnalysisAPI.socialnetworkanalysis(blog_url,
username,
password)
else:
raise ValueError("\n".join([
"Unrecognized Input",
f"\tname: {blog_url}"]))
return sentiment_analysis_json
if __name__ == "__main__":
sentiment_data = SocialNetworkAnalysisAPI.process(
"https://w3-connections.ibm.com/blogs/0d86cb37-869b-435e-8ffc-f2f16949d5ee/entry/GTS_Academy?lang=en_us")
```
#### File: subk/bp/subk_analysis_orchestrator.py
```python
from pandas import DataFrame
from base import BaseObject
from dataingest import ExcelReader
class SubkAnalysisOrchestrator(BaseObject):
""" Orchestrate the Subk (Sub-Contractor) Analysis Business Process """
def __init__(self,
input_spreadsheet:str,
is_debug: bool = True):
"""
Created:
16-Jan-2020
<EMAIL>
* Reference
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1740#issuecomment-17198891
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._input_spreadsheet = input_spreadsheet
def process(self,
overwrite_files:bool=True):
from cendalytics.subk.svc import AnalyzeSkillsForSubk
from cendalytics.subk.svc import ClusterInputSkills
from cendalytics.subk.svc import ClusterSkillsForGTS
AnalyzeSkillsForSubk(is_debug=self._is_debug,
input_file=self._input_spreadsheet).process()
def main():
IS_DEBUG = True
input_spreadsheet = "/Users/craig.trimibm.com/Box/GTS CDO Workforce Transformation/04. Documentation/Cendant Tasks/GIT-1740/GTS Labs Subk's - skill details.xlsx"
orchestrator = SubkAnalysisOrchestrator(is_debug=IS_DEBUG,
input_spreadsheet=input_spreadsheet)
orchestrator.process(overwrite_files=False)
if __name__ == "__main__":
main()
```
#### File: subk/dmo/skill_file_writer.py
```python
import os
from pandas import DataFrame
from base import BaseObject
class SkillFileWriter(BaseObject):
""" Common Domain Component to Write DataFrames to File """
def __init__(self,
df_output: DataFrame,
output_file_name: str,
is_debug: bool = True):
"""
Created:
20-Jan-2020
<EMAIL>
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._df_output = df_output
self._output_file_name = output_file_name
def process(self) -> None:
full_path = os.path.join(os.environ['GTS_BASE'],
'resources/output/subk')
if not os.path.exists(full_path):
os.makedirs(full_path, exist_ok=True)
output_path = os.path.join(full_path,
self._output_file_name)
self._df_output.to_csv(output_path,
encoding="utf-8",
sep="\t")
if self._is_debug:
self.logger.debug('\n'.join([
"Wrote To File",
f"\tOutput Path: {output_path}",
f"\tTotal Records: {len(self._df_output)}"]))
```
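An illustrative sketch of the writer above; it requires `GTS_BASE` to be set and uses placeholder data and a placeholder file name.

```python
# Hypothetical usage -- placeholder DataFrame and file name.
import pandas as pd

df_skills = pd.DataFrame([{"Skill": "kubernetes", "Count": 3}])
SkillFileWriter(df_output=df_skills,
                output_file_name="subk_skills.csv",
                is_debug=True).process()
```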
#### File: consumer/recipe/create_and_invert_all.py
```python
from base import BaseObject
from cendalytics.tfidf.core.bp import VectorSpaceAPI
from datamongo import BaseMongoClient
from datamongo import CendantCollection
class CreateAndInvertAll(BaseObject):
"""
Purpose:
Given a Collection,
1. Create a Vector Space for the entire collection
and invert this Vector Space
2. For each division represented in the Vector Space, create a Vector Space
and invert this Vector Space
"""
def __init__(self,
collection_name: str):
BaseObject.__init__(self, __name__)
self._collection_name = collection_name
def process(self):
vectorspace_api = VectorSpaceAPI(is_debug=False)
mongo_client = BaseMongoClient()
collection = CendantCollection(some_base_client=mongo_client,
some_collection_name=self._collection_name)
for division in collection.distinct("div_field"):
vs_fpath = vectorspace_api.tfidf().create(division=division,
mongo_client=mongo_client,
collection_name=self._collection_name).process()
print(f"VS Library Path: {vs_fpath}")
vs_library_name = vs_fpath.split('/')[-1]
print(f"VS Library Name: {vs_library_name}")
inversion_fpath = vectorspace_api.inversion().create(vs_library_name).process(top_n=3)
print(f"Inversion Library Path: {inversion_fpath}")
inversion_library_name = inversion_fpath.split('/')[-1]
print(f"Inversion Library Name: {inversion_library_name}")
def main():
COLLECTION_NAME = 'supply_tag_20191025'
CreateAndInvertAll(collection_name=COLLECTION_NAME).process()
if __name__ == "__main__":
main()
```
#### File: core/dmo/vectorspace_library_loader.py
```python
import os
import time
import pandas as pd
from pandas import DataFrame
from base import BaseObject
from base import FileIO
class VectorSpaceLibraryLoader(BaseObject):
""" Load Vector Space """
__df_vectorspace = None
__columns = {"Number": int,
"Doc": str,
"DocsWithTerm": int,
"IDF": float,
"TF": float,
"TFIDF": float,
"Term": str,
"TermFrequencyInCorpus": int,
"TermFrequencyInDoc": int,
"TermsInDoc": int,
"TotalDocs": int}
def __init__(self,
library_name: str,
is_debug: bool = False):
"""
Created:
10-Jul-2019
<EMAIL>
* search the skills vector space
Updated:
5-Nov-2019
<EMAIL>
* renamed from 'skills-vectorspace-loader' and
refactored out of nlusvc project
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1261
:param library_name:
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self.is_debug = is_debug
self._library_name = library_name
def _library_path(self):
fname = f"resources/confidential_input/vectorspace/{self._library_name}"
return os.path.join(os.environ["GTS_BASE"],
fname)
def _process(self) -> None:
start = time.time()
_input_path = FileIO.absolute_path(self._library_path())
df = pd.read_csv(
_input_path,
delim_whitespace=False,
sep='\t',
error_bad_lines=False,
skip_blank_lines=True,
skiprows=1,
comment='#',
encoding='utf-8',
names=self.__columns.keys(),
dtype=self.__columns,
na_values=['none'],
usecols=self.__columns.keys())
df.fillna(value='', inplace=True)
end = time.time()
if self.is_debug:
self.logger.debug("\n".join([
"Read CSV File",
"\tPath: {}".format(_input_path),
"\tTotal Time: {}".format(end - start)]))
self.__df_vectorspace = df
def df(self) -> DataFrame:
if self.__df_vectorspace is None:
self._process()
return self.__df_vectorspace
```
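A minimal sketch of the loader; the library file name is a placeholder and must exist under `resources/confidential_input/vectorspace` with `GTS_BASE` set.

```python
# Hypothetical usage -- the library name is a placeholder.
loader = VectorSpaceLibraryLoader(library_name="vectorspace_supply_tag_20191025.csv",
                                  is_debug=True)
df_vectorspace = loader.df()
print(df_vectorspace[["Term", "TFIDF"]].head())
```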
#### File: core/fcd/vectorspace_create.py
```python
import time
from base import BaseObject
from base import DataTypeError
from base import MandatoryParamError
from cendalytics.tfidf.core.svc import CreateCollectionVectorSpace
from datamongo import BaseMongoClient
class VectorSpaceCreate(BaseObject):
""" VectorSpace Function Facade """
def __init__(self,
parent,
is_debug: bool = False):
"""
Created:
6-Nov-2019
<EMAIL>
"""
BaseObject.__init__(self, __name__)
self._parent = parent
self._is_debug = is_debug
@staticmethod
def validate(collection_name: str,
mongo_client: BaseMongoClient,
division: str = None,
limit: int = 0) -> None:
if not collection_name:
raise MandatoryParamError("Collection Name")
if 'tag' not in collection_name.lower():
raise ValueError("Tag Collection Expected")
if not mongo_client:
raise MandatoryParamError("Mongo Client")
if division and type(division) != str:
raise DataTypeError("Division")
if limit and type(limit) != int:
raise DataTypeError("Limit")
def create(self,
collection_name: str,
division: str or None,
mongo_client: BaseMongoClient,
limit: int = None) -> CreateCollectionVectorSpace:
"""
Purpose:
Generate a Vector Space via TF-IDF Metrics
Traceability:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1261#issuecomment-15732844
:param collection_name:
the name of the tag collection to build the vector space across
:param division:
the division to focus on
:param mongo_client:
an instantiated mongoDB connection instance
:param limit:
            Optional: the number of records to return
:return:
an instantiated GenerateVectorSpace
Sample Output:
+-------+-----------+----------------+----------+---------+-----------+---------------------+-------------------------+----------------------+--------------+-------------+
| | Doc | DocsWithTerm | IDF | TF | TFIDF | Term | TermFrequencyInCorpus | TermFrequencyInDoc | TermsInDoc | TotalDocs |
|-------+-----------+----------------+----------+---------+-----------+---------------------+-------------------------+----------------------+--------------+-------------|
| 0 | 0697A5744 | 159 | 2.39904 | 0.06667 | 0.02779 | cloud service | 186 | 1 | 15 | 1751 |
| 1 | 0697A5744 | 1094 | 0.47035 | 0.06667 | 0.14174 | management | 2573 | 1 | 15 | 1751 |
| 2 | 0697A5744 | 2006 | -0.13596 | 0.06667 | -0.49036 | agile | 2194 | 1 | 15 | 1751 |
| 3 | 0697A5744 | 2995 | -0.53676 | 0.06667 | -0.1242 | ibm | 5857 | 1 | 15 | 1751 |
| 4 | 0697A5744 | 513 | 1.22767 | 0.06667 | 0.0543 | data science | 745 | 1 | 15 | 1751 |
...
| 97480 | 04132K744 | 479 | 1.29624 | 0.01754 | 0.01353 | maintenance | 945 | 1 | 57 | 1751 |
+-------+-----------+----------------+----------+---------+-----------+---------------------+-------------------------+----------------------+--------------+-------------+
"""
start = time.time()
self.validate(collection_name=collection_name,
mongo_client=mongo_client,
division=division,
limit=limit)
gen = CreateCollectionVectorSpace(limit=limit,
division=division,
is_debug=self._is_debug,
mongo_client=mongo_client,
collection_name=collection_name)
if self._is_debug:
self.logger.debug(f"Instantiated Generator ("
f"collection={collection_name}, "
f"time={round(time.time() - start, 2)}s)")
return gen
```
#### File: core/svc/read_inversion_library.py
```python
from pandas import DataFrame
from base import BaseObject
class ReadInversionLibrary(BaseObject):
"""
Purpose:
Read an Inversion Library
Sample Input:
+------+------------+-------------------------------------------+
| | KeyField | Tag |
|------+------------+-------------------------------------------|
| 0 | 0697A5744 | cloud service |
| 1 | 0697A5744 | data science |
| 2 | 0697A5744 | solution design |
| 3 | 05817Q744 | kubernetes |
| 4 | 05817Q744 | bachelor of engineering |
| 5 | 05817Q744 | developer |
...
| 1328 | 249045760 | electrical engineering |
+------+------------+-------------------------------------------+
Sample Output (given 'data science' as a term param)
['0697A5744']
"""
def __init__(self,
library_name: str,
is_debug: bool = False):
"""
Created:
5-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1261#issuecomment-15732844
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._library_name = library_name
def _load_dataframe(self) -> DataFrame:
from cendalytics.tfidf.core.dmo import InversionLibraryLoader
return InversionLibraryLoader(is_debug=self._is_debug,
library_name=self._library_name).df()
def process(self,
term: str) -> list:
"""
Purpose:
for a given term (skill) find the key fields (serial numbers) for which
this term is the most discriminating
:param term:
any annotation tag within the Cendant Ontology
:return:
a list of key fields (e.g., Serial Numbers)
"""
df = self._load_dataframe()
df2 = df[df['Term'] == term.lower()]
key_fields = sorted(df2['KeyField'].unique())
if self._is_debug:
self.logger.debug('\n'.join([
f"Inversion Search Completed (total-results={len(key_fields)})",
f"\tTerm: {term}",
f"\tKey Fields: {key_fields}"]))
return key_fields
```
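A minimal sketch mirroring the docstring sample above; the library name is a placeholder and the inversion file must already exist on disk.

```python
# Hypothetical usage -- the library name is a placeholder.
reader = ReadInversionLibrary(library_name="inversion_supply_tag_20191025.csv",
                              is_debug=True)
key_fields = reader.process(term="data science")  # e.g. ['0697A5744'] per the sample above
```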
#### File: ingest/dmo/dbpedia_taxonomy_extractor.py
```python
import re
from typing import Optional
from base import BaseObject
from base import FileIO
class DBpediaTaxonomyExtractor(BaseObject):
""" Extract latent 'is-a' hierarchy from unstructured text """
__isa_patterns = None
__clause_patterns = None
def __init__(self,
input_text: str,
is_debug: bool = False):
"""
Created:
7-Jan-2020
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1706
Updated:
7-Feb-2020
<EMAIL>
* moved dictionaries to CSV resources
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1837
"""
BaseObject.__init__(self, __name__)
if self.__isa_patterns is None:
self.__isa_patterns = FileIO.file_to_lines_by_relative_path(
"resources/config/dbpedia/patterns_isa.csv")
self.__isa_patterns = [x.lower().strip() for x in self.__isa_patterns]
if self.__clause_patterns is None:
self.__clause_patterns = FileIO.file_to_lines_by_relative_path(
"resources/config/dbpedia/patterns_clause.csv")
self.__clause_patterns = [x.lower().strip() for x in self.__clause_patterns]
self._input_text = input_text
self._is_debug = is_debug
@staticmethod
def _remove_parens(input_text: str) -> str:
"""
Purpose:
Remove parens
Sample Input:
A drug (/drɑːɡ/) is any substance
Sample Output:
A drug is any substance
:return:
text without parens
"""
if '(' not in input_text and ')' not in input_text:
return input_text
x = input_text.index('(')
y = input_text.index(')') + 2
return f"{input_text[0:x]}{input_text[y:]}"
@staticmethod
def _remove_akas(input_text: str) -> str:
"""
Purpose:
Remove AKA sections
Sample Input:
Lung cancer, also known as lung carcinoma, is a malignant lung tumor
Sample Output:
Lung cancer is a malignant lung tumor
:return:
text without AKA
"""
patterns = [', also known as ',
', or ',
', formerly known as']
for pattern in patterns:
if pattern in input_text:
x = input_text.index(pattern)
y = input_text[:(x + len(pattern))].index(',') + x + len(pattern) + 4
input_text = f"{input_text[:x]}{input_text[y:]}"
return input_text
def _cleanse_text(self,
input_text: str) -> str:
original_input_text = input_text
input_text = self._remove_parens(input_text)
input_text = self._remove_akas(input_text)
if self._is_debug and original_input_text != input_text:
self.logger.debug('\n'.join([
"Text Cleansing Completed",
f"\tOriginal: {original_input_text}",
f"\tNormalized: {input_text}"]))
return input_text
def _segmenter(self,
input_text: str) -> list:
from nlutext.core.svc import PerformSentenceSegmentation
segmenter = PerformSentenceSegmentation(is_debug=self._is_debug)
return segmenter.process(some_input_text=input_text,
remove_wiki_references=True)
def _isa_normalizer(self,
input_text: str) -> str:
input_text = input_text.lower().strip()
for pattern in self.__isa_patterns:
if pattern in input_text:
input_text = input_text.replace(pattern, 'is_a')
return input_text
def _clause_inducer(self,
input_text: str) -> str:
regex = re.compile(r"[A-Za-z]+\s+(in|of)\s+", re.IGNORECASE)
target = ', '
input_text = input_text.lower().strip()
for candidate in self.__clause_patterns:
k_mid = f" {candidate} "
k_start = f"{candidate} "
k_end = f" {candidate}"
if input_text.startswith(k_start):
input_text = input_text.replace(k_start, target)
elif k_mid in input_text:
input_text = input_text.replace(k_mid, target)
elif input_text.endswith(k_end):
input_text = input_text.replace(k_end, target)
while True:
search_result = regex.search(input_text)
if not search_result:
break
input_text = input_text.replace(search_result.group(), target)
input_text = input_text.strip().replace(f' {target}', target).replace(' ', ' ')
if input_text.startswith(', '):
input_text = input_text[2:].strip()
return input_text
def process(self) -> Optional[str]:
if not self._input_text:
self.logger.warning("SubClass Extraction Failed: No Input")
return None
normalized = self._isa_normalizer(self._input_text)
if 'is_a' not in normalized:
self.logger.warning('\n'.join([
"SubClass Extraction Failed: No IS-A",
f"\tOriginal Text: {self._input_text}",
f"\tNormalized: {normalized}"]))
return None
x = normalized.index('is_a') + len('is_a')
normalized = normalized[x:].strip()
normalized = self._clause_inducer(normalized)
normalized = normalized.replace(',', '.')
normalized = normalized.replace(';', '.')
sentences = self._segmenter(normalized)
subclass = sentences[0].replace('.', '').strip()
if not subclass:
self.logger.warning('\n'.join([
"SubClass Extraction Failed: No SubClass",
f"\tOriginal Text: {self._input_text}",
f"\tNormalized: {normalized}",
f"\tSentences: {sentences}"]))
return None
if self._is_debug:
self.logger.debug('\n'.join([
"SubClass Extraction Completed",
f"\tResult: {subclass}",
f"\tOriginal Text: {self._input_text}",
f"\tNormalized: {normalized}",
f"\tSentences: {sentences}"]))
return subclass
```
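A hedged sketch using the sentence from the `_remove_akas` docstring. The actual output depends on the pattern CSVs under `resources/config/dbpedia` and on the `nlutext` sentence segmenter, neither of which is shown here.

```python
# Hypothetical usage -- the exact result depends on the configured pattern CSVs.
text = "Lung cancer, also known as lung carcinoma, is a malignant lung tumor"
subclass = DBpediaTaxonomyExtractor(input_text=text, is_debug=True).process()
print(subclass)  # expected to be a short hypernym phrase such as 'malignant lung tumor'
```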
#### File: ingest/svc/compare_dbpedia_entries.py
```python
from base import BaseObject
from nlusvc import TextAPI
class CompareDBPediaEntries(BaseObject):
""" Compare Categories between two dbPedia entities
"""
_filtered_categories = None
def __init__(self,
entity_name_1: str,
entity_name_2: str,
ontology_name: str,
is_debug: bool = False):
"""
Created:
8-Jan-2020
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1710#issuecomment-17017220
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._text_api = TextAPI(is_debug=is_debug,
ontology_name=ontology_name)
self._entity_name_1 = entity_name_1
self._entity_name_2 = entity_name_2
def _find(self,
entity_name: str) -> dict:
from cendalytics.wikipedia.ingest.svc import FindDbPediaEntryRedis
from cendalytics.wikipedia.ingest.svc import PostProcessDBPediaPageRedis
finder_1 = FindDbPediaEntryRedis(is_debug=self._is_debug)
entry = finder_1.process(entity_name=entity_name,
ignore_cache=False)
finder_2 = PostProcessDBPediaPageRedis(is_debug=self._is_debug)
entry = finder_2.process(entry=entry,
ignore_cache=False)
return entry
def _categories(self,
entity_name: str) -> set:
d_entry = self._find(entity_name=entity_name)
categories = set(d_entry['categories'])
if self._is_debug:
self.logger.debug('\n'.join([
"Entity Resolved",
f"\tParam Name: {entity_name}",
f"\tResolved Name: {d_entry['title']}",
f"\tCategories {categories}"]))
return categories
def process(self) -> list:
categories_1 = self._categories(self._entity_name_1)
categories_2 = self._categories(self._entity_name_2)
common = sorted(categories_1.intersection(categories_2))
self.logger.debug('\n'.join([
"Entity Comparison Completed",
f"\tEntity 1: {self._entity_name_1}",
f"\tEntity 2: {self._entity_name_2}",
f"\tCommon Entities (total={len(common)})",
f"\t\t{common}"]))
return common
```
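A hedged sketch of the comparison service. The entity names are placeholders, 'biotech' matches the ontology name used in `transform_dbpedia_page.py` below, and Redis plus dbPedia access are required for the lookups to succeed.

```python
# Hypothetical usage -- entity names are placeholders.
common_categories = CompareDBPediaEntries(entity_name_1="lung cancer",
                                          entity_name_2="lung carcinoma",
                                          ontology_name="biotech",
                                          is_debug=True).process()
```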
#### File: ingest/svc/find_dbpedia_entry.py
```python
from typing import Optional
from wikipedia import WikipediaPage
from base import BaseObject
class FindDbPediaEntry(BaseObject):
""" for a given input find the associated dbPedia entry """
def __init__(self,
is_debug: bool = False):
"""
Created:
9-Jul-2019
<EMAIL>
Updated:
8-Jan-2020
<EMAIL>
* major refactoring
* redis doesn't belong in this stage; remove
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1710#issuecomment-17015882
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
def process(self,
some_input: str) -> Optional[dict]:
from cendalytics.wikipedia.ingest.dmo import DBpediaPageFinder
from cendalytics.wikipedia.ingest.svc import TransformDBPediaPage
from cendalytics.wikipedia.ingest.dmo import DBpediaEntityResolution
def _entity_resolver(an_input: str) -> DBpediaEntityResolution:
""" resolve ambiguous title inputs """
return DBpediaEntityResolution(some_title=an_input,
is_debug=self._is_debug)
def _page(some_title: str) -> Optional[WikipediaPage]:
try:
return DBpediaPageFinder(some_title=some_title,
is_debug=self._is_debug).process()
except Exception:
return None
the_title = _entity_resolver(some_input).most_likely_result()
the_page = _page(the_title)
if not the_page:
return None
the_entry = TransformDBPediaPage(some_page=the_page,
some_title=the_title,
is_debug=self._is_debug).process()
is_valid_entry = the_entry is not None and len(the_entry) > 0
if not is_valid_entry:
return None
return the_entry
```
#### File: ingest/svc/find_dbpedia_entry_redis.py
```python
from typing import Optional
import jsonpickle
from base import BaseObject
from base import RedisClient
class FindDbPediaEntryRedis(BaseObject):
""" for a given input find the associated dbPedia entry """
_prefix = "wiki_page_"
_redis = RedisClient(RedisClient.WIKI_PAGE_DB)
def __init__(self,
is_debug: bool = False):
"""
Created:
8-Jan-2020
<EMAIL>
* moved redis to a separate service
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1710#issuecomment-17015882
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
def process(self,
entity_name: str,
ignore_cache: bool = False) -> Optional[dict]:
from cendalytics.wikipedia.ingest.svc import FindDbPediaEntry
_key = self._prefix + entity_name
if not self._redis.has(_key) or ignore_cache:
try:
entry = FindDbPediaEntry(is_debug=self._is_debug).process(entity_name)
if not entry:
return None
self._redis.set(_key, jsonpickle.encode(entry))
except Exception as e:
self.logger.exception(e)
return None
else:
self.logger.debug(f"Retrieved Entity (name={entity_name}, ignore-cache={ignore_cache})")
return jsonpickle.decode(self._redis.get(_key))
```
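The service above is a cache-aside wrapper: check Redis, compute and store on a miss, decode on the way out. A minimal sketch of that pattern, with a plain dict standing in for the Redis client and a hypothetical loader in place of FindDbPediaEntry:
```python
import jsonpickle

_cache = {}                 # stand-in for RedisClient(RedisClient.WIKI_PAGE_DB)
_PREFIX = "wiki_page_"


def _load_entry(entity_name: str) -> dict:
    """Hypothetical stand-in for FindDbPediaEntry().process(entity_name)."""
    return {"title": entity_name, "categories": ["example category"]}


def get_entry(entity_name: str, ignore_cache: bool = False) -> dict:
    key = _PREFIX + entity_name
    if key not in _cache or ignore_cache:
        entry = _load_entry(entity_name)        # expensive lookup on a miss
        _cache[key] = jsonpickle.encode(entry)  # store the serialized entry
    return jsonpickle.decode(_cache[key])       # always decode from the cache


if __name__ == "__main__":
    print(get_entry("Pancreatitis"))
    print(get_entry("Pancreatitis"))            # second call is served from the cache
```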
#### File: ingest/svc/transform_dbpedia_page.py
```python
from wikipedia import WikipediaPage
from base import BaseObject
from cendalytics.wikipedia.ingest.dmo import DBPediaContentNormalizer
from nlusvc import TextAPI
class TransformDBPediaPage(BaseObject):
""" for a given dbPedia page, create a JSON structure """
_text_api = None
def __init__(self,
some_title: str,
some_page: WikipediaPage,
is_debug: bool = False):
"""
Created:
9-Jul-2019
<EMAIL>
* refactored out of dbpedia-entity-lookup
Updated:
8-Jan-2020
<EMAIL>
* add additional content normalization
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1710#issuecomment-17014878
* renamed from 'dbpedia-page-transform'
"""
BaseObject.__init__(self, __name__)
self._page = some_page
self._title = some_title
self.is_debug = is_debug
self._text_api = TextAPI(is_debug=self.is_debug)
def _normalized(self,
content: list or str) -> list:
try:
if type(content) == str:
content = [content]
return DBPediaContentNormalizer(is_debug=self.is_debug,
ontology_name='biotech',
content=content).process()
except Exception as e:
self.logger.error(e)
def _summary(self,
some_page: WikipediaPage) -> list:
return self._normalized(some_page.summary)
def _content(self,
some_page: WikipediaPage) -> list:
return self._normalized(some_page.content)
def process(self) -> dict:
def _references():
try:
return self._page.references
except Exception as e:
self.logger.error(e)
def _categories():
try:
return self._page.categories
except Exception as e:
self.logger.error(e)
def _sections():
try:
return self._page.sections
except Exception as e:
self.logger.error(e)
def _parent_id():
try:
return str(self._page.parent_id)
except Exception as e:
self.logger.error(e)
def _page_id():
try:
return str(self._page.pageid)
except Exception as e:
self.logger.error(e)
def _revision_id():
try:
return str(self._page.revision_id)
except Exception as e:
self.logger.error(e)
def _url():
try:
return self._page.url
except Exception as e:
self.logger.error(e)
def _links():
try:
return self._page.links
except Exception as e:
self.logger.error(e)
def _title():
try:
return self._page.title
except Exception as e:
self.logger.error(e)
def _original_title():
try:
return self._page.original_title
except Exception as e:
self.logger.error(e)
return {"key": self._title,
"url": _url(),
"title": _title(),
"links": _links(),
"content": self._content(self._page),
"page_id": _page_id(),
"summary": self._summary(self._page),
"sections": _sections(),
"parent_id": _parent_id(),
"categories": _categories(),
"references": _references(),
"revision_id": _revision_id(),
"original_title": _original_title()}
```
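Each page attribute in process() gets its own try/except because the wikipedia library loads attributes lazily and any of them can raise. The same guard can be expressed once as a generic helper; a sketch with a hypothetical page object:
```python
def safe(getter, default=None):
    """Call a zero-argument getter, returning a default on any exception."""
    try:
        return getter()
    except Exception as e:
        print(f"attribute lookup failed: {e}")
        return default


class FakePage:
    """Hypothetical stand-in for wikipedia.WikipediaPage."""
    url = "https://en.wikipedia.org/wiki/Protein"

    @property
    def pageid(self):
        raise RuntimeError("lazy load failed")


page = FakePage()
entry = {"url": safe(lambda: page.url),
         "page_id": safe(lambda: str(page.pageid))}
print(entry)   # {'url': 'https://en.wikipedia.org/wiki/Protein', 'page_id': None}
```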
#### File: wikipedia/scripts/create_multiple_cendant_entities.py
```python
import codecs
def main():
from cendalytics.wikipedia.etl.svc import CreateCendantEntity
terms = [
# 'ATP synthase',
# 'Acetone–butanol–ethanol fermentation',
# 'Acid',
# 'Actin',
# 'Active site',
# 'Adaptive immune system',
# 'Adenine',
# 'Adenosine triphosphate',
# 'Adenylate kinase',
# 'Aerobic respiration',
# 'Alanine',
# 'Albumin',
# 'Alpha carbon',
# 'Alpha helix',
# 'Alternative splicing',
# 'Amino',
# 'Amino acid',
# 'Amino acid synthesis',
# 'Amino acids',
# 'Aminoacyl tRNA synthetase',
# 'Anabolism',
# 'Anaerobic respiration',
# 'Angiogenin',
# 'Animal shell',
# 'Anoxygenic photosynthesis',
# 'Antibodies',
# 'Antibody',
# 'Anticodon',
# 'Antigen',
# 'Archaea',
# 'Aspartate',
# 'Aspartokinase',
# 'Assay',
# 'Atom',
# 'Atomic mass unit',
# 'B cell',
# 'Bacteria',
# 'Bacterial display',
# 'Bacterial transcription',
# 'Base pair',
# 'Beta oxidation',
# 'Beta sheet',
# 'Bibcode',
# 'Binding site',
# 'Biochemistry',
# 'Bioenergetics',
# 'Bioinformatics',
# 'Bioinorganic chemistry',
# 'Biological tissue',
# 'Biomolecule',
# 'Blood',
# 'Bradford protein assay',
# 'C-terminus',
# 'CHON',
# 'Carbohydrate',
# 'Carbohydrate catabolism',
# 'Carbohydrate metabolism',
# 'Carbon fixation',
# 'Carboxyl',
# '<NAME>',
# 'Cartesian coordinates',
# 'Cartilage',
# 'Catabolism',
# 'Catalysis',
# 'Cell (biology)',
# 'Cell adhesion',
# 'Cell biology',
# 'Cell cycle',
# 'Cell membrane',
# 'Cell nucleus',
# 'Cell signaling',
# 'Cellular compartment',
# 'Central dogma of molecular biology',
# 'Chaperone (protein)',
# 'Chemical bond',
# 'Chemical conformation',
# 'Chemical ligation',
# 'Chemical polarity',
# 'Chemosynthesis',
# 'Chimera (protein)',
# 'Chirality (chemistry)',
# 'Cholera',
# 'Chromatin immunoprecipitation',
# 'Chromatography',
# 'Circular dichroism',
# 'Cis-regulatory element',
# 'Citric acid cycle',
# 'ClustalW',
# 'Coagulate',
# 'Codon',
# 'Cofactor (biochemistry)',
# 'Collagen',
# 'Colloid',
# 'Conformational change',
# 'Connecticut Agricultural Experiment Station',
# 'Connective tissue',
# 'Coplanar',
# 'Crude lysate',
# 'Cryo-electron microscopy',
# 'Cryoelectron microscopy',
# 'Crystallize',
# 'Cyclol',
# 'Cytolysis',
# 'Cytoplasm',
# 'Cytoskeleton',
# 'DNA',
# 'DNA-binding protein',
# 'DNA repair',
# 'DNA replication',
# 'DNA sequence',
# 'Dehydration',
# 'Dehydron',
# 'Denaturation (biochemistry)',
# 'Deproteination',
# 'Diet (nutrition)',
# 'Diffraction-limited system',
# 'Diffusion',
# 'Digestion',
# 'Digital object identifier',
# 'Dihedral angle',
# 'Dirigent protein',
# 'Dissociation constant',
# 'Distance geometry',
# 'Distributed computing',
# 'Disulfide bond',
# 'Docking (molecular)',
# 'Double-bond',
# 'Downregulation and upregulation',
# 'Drug design',
# 'Dual-polarization interferometry',
# 'Dual polarisation interferometry',
# 'Dynein',
# 'Edestin',
# 'Eicosanoid metabolism',
# 'Elastin',
# 'Electrofocusing',
# 'Electron crystallography',
# 'Electron microscope',
# 'Electron transport chain',
# 'Electrophoresis',
# 'Elemental analysis',
# 'Empirical formula',
# 'Endoplasmic reticulum',
# 'Enzyme',
# 'Enzyme assay',
# 'Enzyme catalysis',
# 'Enzyme kinetics',
# 'Epigenetics',
# 'Escherichia coli',
# 'Essential amino acid',
# 'Essential amino acids',
# 'Essential fatty acid',
# 'Ethanol fermentation',
# 'Ethanol metabolism',
# 'Eukaryote',
# 'Eukaryotic Linear Motif resource',
# 'Eukaryotic transcription',
# 'Eukaryotic translation',
# 'Evolution',
# 'Experiment',
# 'Fatty acid degradation',
# 'Fatty acid metabolism',
# 'Fatty acid synthesis',
# 'Feather',
# 'Fermentation',
# 'Fibrin',
# 'Fibrous protein',
# 'Five-prime cap',
# 'Flavor',
# 'Flocculation',
# 'Fluorescent',
# 'Folding@home',
# 'Food additive',
# 'Food chemistry',
# 'Food coloring',
# 'Food fortification',
# 'Fructolysis',
# 'Fusion protein',
# 'Galactolysis',
# 'Gel electrophoresis',
# 'Gel electrophoresis of proteins',
# 'Gene',
# 'Gene expression',
# 'Gene ontology',
# 'Gene regulatory network',
# 'Genes',
# 'Genetic code',
# 'Genetic engineering',
# 'Genome',
# 'Genomic imprinting',
# 'Genomics',
# '<NAME>',
# 'Globular protein',
# 'Globular proteins',
# 'Globulin',
# 'Gluconeogenesis',
# 'Glucose',
# 'Glutamic acid',
# 'Glutamine synthetase',
# 'Gluten',
# 'Glycogenesis',
# 'Glycogenolysis',
# 'Glycolysis',
# 'Glycosylation',
# 'Graphics processing unit',
# 'Greek language',
# 'Green fluorescent protein',
# 'Guanine',
# 'HIV',
# 'HOPES',
# 'Haemoglobin',
# 'Hair',
# 'Half-life',
# 'Heme',
# 'Heme group',
# 'Hemoglobin',
# '<NAME>',
# 'Hexokinase',
# 'His-tag',
# 'Histidine',
# 'Histone acetylation and deacetylation',
# 'History of molecular biology',
# 'Homology modeling',
# 'Hoof',
# 'Hormone',
# 'Human',
# 'Human genome',
# 'Human iron metabolism',
# 'Hydrogen bond',
# 'Hydrogen bonding',
# 'Hydrolysis',
# 'Hydrophobic',
# 'Hydrophobic core',
# 'Immunoelectron microscopy',
# 'Immunoglobulin G',
# 'Immunohistochemistry',
# 'Immunoprecipitation',
# 'Immunostaining',
# 'In-gel digestion',
# 'In silico',
# 'In vitro',
# 'In vivo',
# 'Indirect immunofluorescence',
# 'Insulin',
# 'Integrated Authority File',
# 'Intein',
# 'Interactome',
# 'Intracellular transport',
# 'Intrinsically disordered proteins',
# 'Introduction to genetics',
# 'Ion channel',
# 'Isoelectric point',
# 'Isoleucine',
# 'Isopycnic centrifugation',
# 'Isothermal titration calorimetry',
# 'Keratin',
# 'Ketosis',
# 'Kinase',
# 'Kinesin',
# 'Kingdom (biology)',
# 'Laboratory rat',
# 'Lac operon',
# 'Lactic acid fermentation',
# '<NAME>',
# 'Lectins',
# 'Leucine',
# 'Ligand',
# '<NAME>',
# 'Lipid',
# 'Lipid metabolism',
# 'Lipogenesis',
# 'Lipolysis',
# 'Lung',
# 'Lymphoblastoid',
# 'Lysine',
# 'MRNA display',
# 'Macromolecular Assembly',
# 'Macromolecular docking',
# 'Macromolecule',
# 'Mass spectrometry',
# 'Mathematical model',
# '<NAME>',
# 'Membrane protein',
# 'Messenger RNA',
# 'Metabolic network',
# 'Metabolic pathway',
# 'Metabolism',
# 'Methionine',
# 'MicroRNA',
# 'Microorganism',
# 'Microscale thermophoresis',
# 'Microscopy',
# 'Mineral (nutrient)',
# 'Molecular dynamics',
# 'Molecular mass',
# 'Molecular mechanics',
# 'Molecular modeling on GPU',
# 'Molecular recognition',
# 'Monte Carlo method',
# 'Motility',
# 'Motor protein',
# 'Muscle',
# 'Mycoplasma',
# 'Myoglobin',
# 'Myosin',
# 'N-linked glycosylation',
# 'N-terminus',
# 'Nail (anatomy)',
# 'National Diet Library',
# 'Native conformation',
# 'Nickel',
# 'Nuclear magnetic resonance',
# 'Nuclear magnetic resonance spectroscopy of proteins',
# 'Nuclear membrane',
# 'Nucleic acid',
# 'Nucleic acid metabolism',
# 'Nucleoid',
# 'Nucleotide',
# 'Nucleotide salvage',
# 'Nucleotide sequence',
# 'O-linked glycosylation',
# 'Oligomer',
# 'Oligopeptide',
# 'Onconase',
# 'Open reading frame',
# 'Optical microscope',
# 'Organelle',
# 'Organic synthesis',
# 'Organism',
# 'Orotate decarboxylase',
# 'Oxidative phosphorylation',
# 'Oxygen',
# 'P-bodies',
# 'Pentose phosphate pathway',
# 'Peptide',
# 'Peptide bond',
# 'Peptide mass fingerprinting',
# 'Peptide sequence',
# 'Peptide synthesis',
# 'Phage display',
# 'Phosphorylation',
# 'Photoactivated localization microscopy',
# 'Photosynthesis',
# 'Phylogenetic tree',
# 'Plasma cell',
# 'Polyadenylation',
# 'Polyamide',
# 'Polymer',
# 'Polypeptide',
# 'Polysaccharide',
# 'Post-transcriptional modification',
# 'Post-transcriptional regulation',
# 'Post-translational modification',
# 'Post-translational regulation',
# 'Posttranslational modification',
# 'Potassium',
# 'Precipitation (chemistry)',
# 'Precursor mRNA',
# 'Primary nutritional groups',
# 'Primary structure',
# 'Prion',
# 'Prokaryote',
# 'Prokaryotic translation',
# 'Proline',
# 'Promoter (genetics)',
# 'Prosthetic group',
# 'Protease',
# 'Proteasome',
# 'Protein (disambiguation)',
# 'Protein (nutrient)',
# 'Protein Data Bank',
# 'Protein NMR',
# 'Protein biosynthesis',
# 'Protein catabolism',
# 'Protein complex',
# 'Protein conformation',
# 'Protein design',
# 'Protein domain',
# 'Protein engineering',
# 'Protein folding',
# 'Protein mass spectrometry',
# 'Protein metabolism',
# 'Protein methods',
# 'Protein microarray',
# 'Protein purification',
# 'Protein quality',
# 'Protein quinary structure',
# 'Protein sequencing',
# 'Protein structure',
# 'Protein structure prediction',
# 'Protein subunit',
# 'Protein superfamily',
# 'Protein targeting',
# 'Protein translocation',
# 'Protein turnover',
# 'Proteinogenic amino acid',
# 'Proteins',
# 'Protein–DNA interaction',
# 'Protein–carbohydrate interaction',
# 'Protein–lipid interaction',
# 'Protein–protein interaction',
# 'Protein–protein interaction prediction',
# 'Proteolysis',
# 'Proteome',
# 'Proteomics',
# 'Proteopathy',
# 'Proteopedia',
# 'PubMed Central',
# 'PubMed Identifier',
# 'Purine metabolism',
# 'Pyrimidine metabolism',
# 'Pyrrolysine',
# 'Pyruvate dehydrogenase',
# 'Quantum mechanics',
# 'Quaternary structure',
# 'RNA',
# 'RNA-dependent RNA polymerase',
# 'RNA polymerase',
# 'RNA splicing',
# 'Radiotrophic fungus',
# 'Reaction mechanism',
# 'Receptor (biochemistry)',
# 'Regulation of gene expression',
# 'Reporter gene',
# 'Residue (biochemistry)',
# 'Resonance (chemistry)',
# 'Restriction enzyme',
# 'Reverse cholesterol transport',
# 'Reverse transcription',
# 'Rhodopsin',
# 'Ribbon diagram',
# 'Ribonuclease A',
# 'Ribonuclease inhibitor',
# 'Ribosome',
# 'Ribosome-nascent chain complex',
# 'Ribosome display',
# 'SH3 domain',
# 'Saccharomyces cerevisiae',
# 'Salt bridge (protein)',
# 'Salting out',
# 'Sarcomere',
# 'Scaffolding',
# 'Scleroprotein',
# 'Secondary structure',
# 'Secrete',
# 'Secretion assay',
# 'Selenocysteine',
# 'Semipermeable membrane',
# 'Sequence alignment',
# 'Sequence homology',
# 'Sequence profiling tool',
# 'Sequence space (evolution)',
# 'Serum albumin',
# 'Sexual reproduction',
# 'Side chain',
# 'Signal transduction',
# 'Site-directed mutagenesis',
# 'Slaughterhouse',
# 'Small molecule',
# 'Sodium',
# 'Soluble',
# 'Spectroscopy',
# 'Spermatozoon',
# 'Sphingolipid metabolism',
# 'Spirochaete',
# 'Staphylococcus aureus',
# 'Starvation',
# 'Stereochemistry',
# 'Steroid',
# 'Structural alignment',
# 'Structural domain',
# 'Structural genomics',
# 'Substrate-level phosphorylation',
# 'Substrate (biochemistry)',
# 'Super-resolution microscopy',
# 'Surface plasmon resonance',
# 'Tertiary structure',
# 'Threonine',
# 'Titin',
# 'Toxin',
# 'Transcription (biology)',
# 'Transcription (genetics)',
# 'Transcription factor',
# 'Transcriptional regulation',
# 'Transfer RNA',
# 'Translation (biology)',
# 'Translation (genetics)',
# 'Translational regulation',
# 'Transmembrane protein',
# 'Triose phosphate isomerase',
# 'Tubulin',
# 'Turn (biochemistry)',
# 'Two-dimensional gel electrophoresis',
# 'Two-hybrid screening',
# 'Ultracentrifugation',
# 'Uracil',
# 'Urea cycle',
# 'Urease',
# 'Valine',
# 'Vertebrate',
# 'Vertico spatially modulated illumination',
# 'Villin',
# 'Virus',
# 'Viruses',
# 'Vitamin',
# 'X-ray crystallography',
# 'Xylose metabolism',
# 'Yeast',
# 'Yeast display'
"Pancreatitis",
]
d_master = {}
for term in terms:
try:
d_results = CreateCendantEntity(is_debug=True,
invalidate_cache=False).process(term)
for k in d_results:
if k not in d_master:
d_master[k] = []
d_master[k].append(d_results[k])
except KeyError as e:
print(e)
continue
except TypeError as e:
print(e)
continue
except Exception as e:
print(e)
print(f"Failed on Term: {term}")
raise ValueError
target = codecs.open("/Users/craig.trimibm.com/Desktop/cendant-results.txt",
encoding="utf-8", mode="a")
for k in d_master:
for item in d_master[k]:
target.write(item)
target.write('\n\n')
target.write('\n\n')
target.write('\n\n')
target.close()
if __name__ == "__main__":
main()
```
#### File: wikipedia/scripts/print_cache_stats.py
```python
from base import RedisClient
def main():
def _db_size(a_db: int) -> None:
size = RedisClient(a_db).size()
print(f"DB (name={a_db}): {size}")
_db_size(RedisClient.WIKI_SEARCH_DB)
_db_size(RedisClient.WIKI_PAGE_DB)
_db_size(RedisClient.WIKI_AUGMENTED_DB)
if __name__ == "__main__":
import plac
plac.call(main)
```
#### File: wikipedia/scripts/run_spacy_ner.py
```python
import spacy
IS_DEBUG = True
IGNORE_CACHE = True
ONTOLOGY_NAME = "biotech"
def main():
nlp = spacy.load("en_core_web_sm")
# doc = nlp("Apple is looking at buying U.K. startup for $1 billion")
# for token in doc:
# print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
# token.shape_, token.is_alpha, token.is_stop)
input_text = """
The investment arm of biotech giant Amgen has led a new $6 million round of funding in GNS Healthcare, CEO <NAME> tells Xconomy.
""".strip()
doc = nlp(input_text)
for chunk in doc.noun_chunks:
print(chunk.text, chunk.root.text, chunk.root.dep_,
chunk.root.head.text)
print ('------------------------------------------')
for ent in doc.ents:
print(ent.text, ent.start_char, ent.end_char, ent.label_)
if __name__ == "__main__":
import plac
plac.call(main)
```
#### File: batch/dmo/dimension_comparator.py
```python
import numpy as np
from scipy import stats
from base import BaseObject
from base import MandatoryParamError
class DimensionComparator(BaseObject):
"""
"""
def __init__(self,
source_dimension: list,
target_dimensions: list,
is_debug=False):
"""
Created:
28-Mar-2019
<EMAIL>
:param source_dimension:
:param target_dimensions:
"""
BaseObject.__init__(self, __name__)
if not source_dimension:
raise MandatoryParamError("Source Dimension")
if not target_dimensions:
raise MandatoryParamError("Target Dimension")
self.is_debug = is_debug
self.source_dimension = source_dimension
self.target_dimensions = target_dimensions
def process(self) -> list:
source = np.array(self.source_dimension)
target = np.matrix(self.target_dimensions)
def _matmul() -> list:
_sums = []
for i in range(0, len(target)):
_sums.append(np.matmul(target[i], source))
return _sums
_mat_mul = _matmul()
_z_score = list(stats.zscore(_mat_mul))
_final_scores = []
for x in _z_score:
def _round():
result = round(x[0][0], 0)
if result == 0:
return 0
return int(result)
_final_scores.append(_round())
if self.is_debug:
self.logger.debug("\n".join([
"Generated Values",
"\tMat.Mul: len={}".format(_mat_mul),
"\tZ-Score: {}".format(_z_score),
"\tFinal: {}".format(_final_scores)
]))
return _final_scores
```
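The comparator scores each target dimension by its dot product with the source vector and then standardizes the scores with a z-score before rounding. A standalone sketch of the same arithmetic on hypothetical vectors:
```python
import numpy as np
from scipy import stats

# Hypothetical source vector and target dimension vectors.
source = np.array([1.0, 0.5, 2.0])
targets = np.array([[0.2, 0.1, 0.9],
                    [1.0, 1.0, 0.0],
                    [0.0, 0.3, 1.5]])

sums = targets @ source              # dot product of each target row with the source
z_scores = stats.zscore(sums)        # standardize the raw sums
final_scores = [int(round(float(z))) for z in z_scores]   # rounded, as in process()

print(sums)           # raw similarity sums
print(final_scores)   # one integer score per target dimension
```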
#### File: batch/dmo/record_extractor.py
```python
from base import BaseObject
from base import MandatoryParamError
from datamongo import CendantCollection
class RecordExtractor(BaseObject):
""" Perform incremental record extraction for dimensional computation
this is only useful when we want to perform dimensionality
on successive subsets of the data """
def __init__(self,
some_manifest: dict,
total_records: int,
is_debug=False):
"""
Created:
28-Mar-2019
<EMAIL>
* refactored out of 'transform-parsed-data'
:param some_manifest:
the manifest definition driving this component
:param total_records:
(optional) the total records to process
"""
BaseObject.__init__(self, __name__)
if not some_manifest:
raise MandatoryParamError("Manifest")
self.is_debug = is_debug
self.d_manifest = some_manifest
self.total_records = total_records
def _source_collection(self) -> CendantCollection:
return CendantCollection(self.d_manifest["source"]["database"],
self.d_manifest["source"]["collection"])
def _target_collection(self) -> CendantCollection:
return CendantCollection(self.d_manifest["target"]["database"],
self.d_manifest["target"]["collection"])
def _next_record_batch(self,
source_records: list,
target_records: list):
"""
this function will ensure the next batch of records
contains records that do not yet exist in the target collection
:return:
the next batch of records
the size is determined by the value for 'total-records'
"""
# load the keyfields for both source and target collections
source_key_fields = set([x["key_field"] for x in source_records])
target_key_fields = set([x["key_field"] for x in target_records])
# find the untreated records
diff = source_key_fields.difference(target_key_fields)
# limit the size of the 'diff' set using 'total-records'
if len(diff) > self.total_records:
diff = list(diff)[:self.total_records]
# return all the source records that are in the 'diff' set
return [x for x in source_records
if x["key_field"] in diff]
def process(self) -> list:
"""
:return:
the next set of records to process
"""
source_collection = self._source_collection()
source_records = source_collection.all()
if not self.total_records:
if self.is_debug:
self.logger.debug("\n".join([
"Source Records Located",
"\tTotal Source Records: {}".format(len(source_records))]))
return source_records
target_collection = self._target_collection()
target_records = target_collection.all()
records = self._next_record_batch(source_records,
target_records)
if self.is_debug:
self.logger.debug("\n".join([
"Records Located",
"\tTotal Source Records: {}".format(len(source_records)),
"\tTotal Target Records: {}".format(len(target_records)),
"\tTotal Current Records: {}".format(len(records))]))
return records
```
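The incremental-batch logic boils down to a key-field set difference capped at total_records. A standalone sketch with hypothetical records:
```python
def next_record_batch(source_records: list,
                      target_records: list,
                      total_records: int) -> list:
    """Return source records whose key_field is not yet in the target collection."""
    source_keys = {r["key_field"] for r in source_records}
    target_keys = {r["key_field"] for r in target_records}
    diff = source_keys.difference(target_keys)
    if len(diff) > total_records:                 # cap the batch size
        diff = set(list(diff)[:total_records])
    return [r for r in source_records if r["key_field"] in diff]


if __name__ == "__main__":
    source = [{"key_field": k} for k in ("A", "B", "C", "D")]
    target = [{"key_field": "A"}]
    print(next_record_batch(source, target, total_records=2))
    # two of the untreated records B, C and D
```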
#### File: batch/dmo/single_record_locator.py
```python
from base import BaseObject
from base import RecordUnavailableRecord
from datamongo import BaseMongoClient
from datamongo import CendantCollection
class SingleRecordLocator(BaseObject):
""" locate a single source record """
def __init__(self,
collection_name: str,
base_mongo_client: BaseMongoClient = None,
is_debug: bool = False):
"""
Created:
15-May-2019
<EMAIL>
* refactored out of process-single-record
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/243
Updated:
16-Jul-2019
<EMAIL>
* use 'manifest-connector-for-mongo' to access collections via env vars
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/450
Updated:
15-Oct-2019
<EMAIL>
* don't pass the manifest in; use explicit source-collection-name instead
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1116#issuecomment-15308894
Updated:
15-Nov-2019
<EMAIL>
* modify to return a single dict instead of a list
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1331#issuecomment-16030068
"""
BaseObject.__init__(self, __name__)
if not base_mongo_client:
base_mongo_client = BaseMongoClient()
self._is_debug = is_debug
self._collection_name = collection_name
self._base_mongo_client = base_mongo_client
if self._is_debug:
self.logger.debug("Instantiated SingleRecordLocator")
def _source_collection(self) -> CendantCollection:
return CendantCollection(is_debug=self._is_debug,
some_base_client=self._base_mongo_client,
some_collection_name=self._collection_name)
def process(self,
key_field: str) -> dict:
"""
Purpose:
Retrieve a Single Record
:param key_field:
a key field to search on
e.g., Serial Number
:return:
dict a single result
"""
collection = self._source_collection()
def _record() -> dict: # GIT-1331-16030068
if key_field.lower() == "random":
return collection.random(total_records=1)[0]
return collection.by_key_field(key_field)
record = _record()
if not record:
raise RecordUnavailableRecord(f"Record Not Found "
f"collection={collection.collection_name}, "
f"key-field={key_field})")
return record
```
#### File: batch/svc/generate_metrics.py
```python
import pprint
from collections import Counter
from base import BaseObject
from base import MandatoryParamError
class GenerateMetrics(BaseObject):
""" """
def __init__(self,
d_results: dict,
is_debug=False):
"""
Created:
29-Mar-2019
<EMAIL>
* refactored out of 'dimension-computation-orchestrator'
"""
BaseObject.__init__(self, __name__)
if not d_results:
raise MandatoryParamError("Service Result")
self.is_debug = is_debug
self.d_results = d_results
def _type_counter(self,
type_name: str) -> Counter:
""" provide a simple distribution of either
schema or parent types """
c = Counter()
for key in self.d_results:
for tag in self.d_results[key]["tags"]:
c.update({tag["type"][type_name]: 1})
return c
def _other_counter(self) -> Counter:
""" provide a distribution on tags parents
that are classified as 'other' in the schema """
c = Counter()
for key in self.d_results:
for tag in self.d_results[key]["tags"]:
if tag["type"]["schema"] == "other":
c.update({tag["type"]["parent"]: 1})
return c
def _entity_counter(self) -> Counter:
""" provide a distribution of the entities used to derive tags
use both single entities and entity formations"""
c = Counter()
for key in self.d_results:
for tag in self.d_results[key]["tags"]:
entities = tag["provenance"]["entities"]
[c.update({x: 1}) for x in entities] # entity by entities
c.update({"-".join(entities): 1}) # single formation
return c
def process(self) -> None:
schema_count = pprint.pformat(self._type_counter("schema"), indent=4)
parent_count = pprint.pformat(self._type_counter("parent"), indent=4)
other_count = pprint.pformat(self._other_counter(), indent=4)
entity_count = pprint.pformat(self._entity_counter(), indent=4)
if self.is_debug:
self.logger.debug("\n".join([
"Service Result Metrics",
"\tSchema: {}".format(schema_count),
"\tParent: {}".format(parent_count),
"\tOther: {}".format(other_count),
"\tEntities: {}".format(entity_count)]))
```
#### File: runtime/svc/find_dimension_records_by_sum.py
```python
from base import BaseObject
from datamongo import BaseMongoClient
from datamongo import CendantXdm
from datamongo import CollectionFinder
class FindDimensionRecordsBySum(BaseObject):
def __init__(self,
mongo_client: BaseMongoClient = None,
is_debug: bool = False):
"""
Created:
13-May-2019
<EMAIL>
* refactored out of dimensions-api
Updated:
8-Aug-2019
<EMAIL>
* removed -dimensions in favor of cendant-xdm
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/674
"""
BaseObject.__init__(self, __name__)
if not mongo_client:
mongo_client = BaseMongoClient()
self._is_debug = is_debug
self.mongo_client = mongo_client
def process(self,
source_name: str,
minimum_value_sum: int,
maximum_value_sum: int,
key_fields_only: bool) -> list:
cendant_xdm = CendantXdm(collection_name=CollectionFinder.find_xdm(source_name),
mongo_client=self.mongo_client,
is_debug=self._is_debug)
return cendant_xdm.by_value_sum(minimum_value_sum=minimum_value_sum,
maximum_value_sum=maximum_value_sum,
key_fields_only=key_fields_only)
```
#### File: runtime/svc/schema_elements_frequency.py
```python
import sys
from collections import Counter
import pandas as pd
from tabulate import tabulate
from base import BaseObject
from datadict import FindDimensions
from datamongo import BaseMongoClient
from datamongo import CendantTag
class SchemaElementsFrequency(BaseObject):
"""
Purpose:
For a given collection, generate the Schemas and Tags with their relative frequencies by order of appearance
Rationale:
Useful for testing to continually hone in on 'unlisted' and 'other' categories
Reference:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1205#issuecomment-15589827
Sample Output:
+------+---------+-----------------------------+-------------------------------------------------------------------+
| | Count | Schema | Tag |
|------+---------+-----------------------------+-------------------------------------------------------------------|
| 0 | 8496 | unlisted | ibm |
| 1 | 5469 | unlisted | problem solving |
| 2 | 4641 | unlisted | project |
| 3 | 4399 | service management | support |
| 4 | 4361 | unlisted | implement |
| 5 | 3827 | project management | management |
| 2158 | 1 | database | db2 bind |
+------+---------+-----------------------------+-------------------------------------------------------------------+
"""
def __init__(self,
xdm_schema: str,
collection_name_tag: str,
mongo_client: BaseMongoClient,
database_name: str = 'cendant',
is_debug: bool = False):
"""
Created:
28-Oct-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1205#issuecomment-15589827
Updated:
29-Oct-2019
<EMAIL>
* remove 'entity-schema-finder' in favor of new approach
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/916#issuecomment-15620801
Updated:
31-Oct-2019
<EMAIL>
* renamed from TestSchemaElements to SchemaElementsFrequency so that py.test does not try to execute it
:param xdm_schema:
the name of the schema to perform the type lookup
Notes:
- typically either 'supply' or 'learning'
- the full list is on path 'resources/config/dimensionality'
:param collection_name_tag:
a MongoDB collection
:param database_name:
a MongoDB database
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._dim_finder = FindDimensions(xdm_schema)
self._collection = CendantTag(
is_debug=self._is_debug,
mongo_client=mongo_client,
database_name=database_name,
collection_name=collection_name_tag)
def process(self,
skip: int = None,
limit: int = None,
most_common: int = None):
def _records():
if skip or limit:
return self._collection.collection.skip_and_limit(skip=skip, limit=limit)
return self._collection.all()
records = _records()
if self._is_debug:
self.logger.debug('\n'.join([
f"Retrieved Records ("
f"skip={skip}, "
f"limit={limit}, "
f"total-records={len(records)})"]))
c = Counter()
for record in records:
for field in record["fields"]:
if "tags" in field:
if "supervised" in field["tags"]:
for tag in field["tags"]["supervised"]:
c.update({tag[0]: 1})
def _counter_elements() -> dict:
if most_common:
return dict(c.most_common(most_common))
return dict(c.most_common(sys.maxsize))
results = []
d = _counter_elements()
for tag in d:
count = d[tag]
for schema in self._dim_finder.find(tag):
results.append({
"Tag": tag,
"Count": count,
"Schema": schema})
df = pd.DataFrame(results)
if self._is_debug:
self.logger.debug('\n'.join([
"Schema Element Test Completed",
tabulate(df, headers='keys', tablefmt='psql')]))
return df
if __name__ == "__main__":
print(SchemaElementsFrequency(is_debug=True,
xdm_schema="learning",
collection_name_tag="supply_tag_20191025",
mongo_client=BaseMongoClient()).process())
```
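The report is a Counter of supervised tags joined to their schema and rendered through pandas and tabulate. A small sketch of that pipeline with hypothetical tag counts and a hypothetical tag-to-schema lookup in place of FindDimensions:
```python
from collections import Counter

import pandas as pd
from tabulate import tabulate

# Hypothetical tag counts standing in for the supervised tags of a collection.
counts = Counter({"ibm": 8496, "problem solving": 5469, "support": 4399})

# Hypothetical tag -> schema lookup standing in for FindDimensions.find().
schema_of = {"ibm": "unlisted", "problem solving": "unlisted",
             "support": "service management"}

rows = [{"Tag": tag, "Count": count, "Schema": schema_of.get(tag, "other")}
        for tag, count in counts.most_common()]
df = pd.DataFrame(rows)
print(tabulate(df, headers='keys', tablefmt='psql'))
```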
#### File: core/dmo/db2_credential_retrieval.py
```python
import getpass
import os
from base import BaseObject
from base import CryptoBase
class Db2CredentialRetrieval(BaseObject):
""" Set Input Credentials """
def __init__(self,
db2_username_env: str,
db2_password_env: str):
"""
Created:
18-July-2019
<EMAIL>
Updated:
1-Aug-2019
<EMAIL>
* retrieve username/password from the environment, if available
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/563
Updated:
3-August-2019
<EMAIL>
* added db2 env params to fetch appropriate username/password for different data sources
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/587
Updated:
9-Oct-2019
<EMAIL>
* rename from 'db-credentials-api' and move to 'dmo' folder
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1080
:param db2_username_env:
the name of the env for db2 username
:param db2_password_env:
the name of the env for db2 password
"""
BaseObject.__init__(self, __name__)
self._db2_username_env = db2_username_env
self._db2_password_env = db2_password_env
def _from_env_vars(self):
username = None
password = None
try:
username = CryptoBase.decrypt_str(os.environ[self._db2_username_env])
password = CryptoBase.decrypt_str(os.environ[self._db2_password_env])
except KeyError as err:
self.logger.warning('\n'.join([
"Unable to retrieve Username and Password from the environment",
"You will be prompted for this information"]))
return username, password
def _from_system_prompt(self):
username = None
password = None
try:
print("Enter credentials:\n")
username = input("DB2 Username: ")
password = getpass.getpass(prompt='DB2 Password: ')
except Exception as err:
self.logger.error(f"API Connection Error: {err}")
return username, password
def process(self):
def credentials():
u, p = self._from_env_vars()
if u and p:
return u, p
u, p = self._from_system_prompt()
if u and p:
return u, p
self.logger.error("DB2 LUR Credentials Inaccessible")
username, password = credentials()
if username:
self.logger.info(f"API Parameters (db2-user = {username})")
return username, password
```
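The retrieval strategy is environment-first with an interactive fallback. A simplified sketch of that flow; the real service also decrypts the environment values with CryptoBase, which is omitted here:
```python
import getpass
import os


def credentials(username_env: str, password_env: str) -> tuple:
    """Try environment variables first, then fall back to an interactive prompt."""
    username = os.environ.get(username_env)
    password = os.environ.get(password_env)
    if username and password:                       # both values found in the environment
        return username, password
    username = input("DB2 Username: ")              # interactive fallback
    password = getpass.getpass(prompt="DB2 Password: ")
    return username, password
```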
#### File: core/svc/build_db2_url.py
```python
import os
from base import BaseObject
from base import CryptoBase
from base import FileIO
from datadb2.core.dmo import BaseDB2Client
class BuildDb2Url(BaseObject):
""" Create a DB2 connection """
__config_path = 'resources/config/db2/schemas.yml'
def __init__(self,
is_debug: bool = False):
"""
Created:
9-Oct-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1080
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._config = FileIO.file_to_yaml_by_relative_path(self.__config_path)
@staticmethod
def _values(d_config: dict):
username = CryptoBase.decrypt_str(os.environ[d_config['username'][1:]])
password = CryptoBase.decrypt_str(os.environ[d_config['password'][1:]])
return {
'host': d_config['host'].strip(),
'database': d_config['database'].strip(),
'port': d_config['port'],
'username': username.strip(),
'password': password.strip()}
@staticmethod
def _connect(d_config: dict) -> BaseDB2Client:
return BaseDB2Client(some_database_name=d_config['database'],
some_hostname=d_config['host'],
some_port=d_config['port'],
some_username=d_config['username'],
some_password=d_config['password'])
def wft_dev(self) -> BaseDB2Client:
"""
Purpose:
Connect to DB2 WFT DEV
:return:
"""
return self._connect(self._values(self._config['wft_dev']))
def cendant(self) -> BaseDB2Client:
"""
:return:
"""
return self._connect(self._values(self._config['cendant']))
if __name__ == "__main__":
# BuildDb2Url().wft_dev()
# BuildDb2Url().wft_prod()
BuildDb2Url().cendant()
```
#### File: core/svc/find_country_code.py
```python
from base import BaseObject
from datadict.core.os import the_country_code_dict
class FindCountryCode(BaseObject):
""" One-Stop-Shop Service API for Country Code queries """
_d_country_codes = the_country_code_dict
_l_known_unknowns = ["NAN", "NONE"]
def __init__(self):
"""
Created:
14-May-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/254
"""
BaseObject.__init__(self, __name__)
def find_by_code(self,
some_code):
some_code = some_code.upper().strip()
if some_code in self._l_known_unknowns:
return "unknown"
if some_code not in self._d_country_codes:
self.logger.warning(f"Unknown Country: code={some_code}")
return "unknown"
return self._d_country_codes[some_code].lower()
```
#### File: core/svc/find_dimensions.py
```python
from typing import Optional
from base import BaseObject
from base import RecordUnavailableRecord
class FindDimensions(BaseObject):
""" Act as a Controller in front of all Dimesionality Dictionaries """
__unlisted = "unlisted"
__blacklist = ['activity', 'agent', 'company', 'entity', 'industry', 'language', 'learning',
'provenance', 'role', 'situation', 'skill', 'state', 'root', 'telecommunication']
@classmethod
def sentiment(cls,
is_debug: bool = False) -> __name__:
return FindDimensions(schema='sentiment',
is_debug=is_debug)
def __init__(self,
schema: str,
ontology_name: str = 'base',
is_debug: bool = False):
"""
Created:
29-Oct-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/916#issuecomment-15618121
Updated:
1-Nov-2019
<EMAIL>
* add ability to search with or without underscores
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1151#issuecomment-15693382
Updated:
14-Nov-2019
<EMAIL>
* update inner loop traversal
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1367#issuecomment-16010583
Updated:
21-Nov-2019
<EMAIL>
* updates for see-also dict changes
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1195#issuecomment-16167608
Updated:
13-Dec-2019
<EMAIL>
* load dictionaries by ontology name
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1582
Updated:
14-Dec-2019
<EMAIL>
* add biotech dimensionality schema
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1609
"""
BaseObject.__init__(self, __name__)
from datadict import FindSynonym
from datadict import FindRelationships
self._is_debug = is_debug
self._d_schema = self._load_schema(schema)
self._syn_finder = FindSynonym(is_debug=is_debug,
ontology_name=ontology_name)
self._rel_finder = FindRelationships(is_debug=is_debug,
ontology_name=ontology_name)
@staticmethod
def _load_schema(schema: str) -> dict:
if schema.lower() == "learning":
from datadict import the_dimesionality_learning_dict
return the_dimesionality_learning_dict
elif schema.lower() == "supply":
from datadict import the_dimesionality_supply_dict
return the_dimesionality_supply_dict
elif schema.lower() == "degrees":
from datadict import the_dimesionality_degrees_dict
return the_dimesionality_degrees_dict
elif schema.lower() == "majors":
from datadict import the_dimesionality_majors_dict
return the_dimesionality_majors_dict
elif schema.lower() == "sentiment":
from datadict import the_dimesionality_sentiment_dict
return the_dimesionality_sentiment_dict
elif schema.lower() == "biotech":
from datadict import the_dimesionality_biotech_dict
return the_dimesionality_biotech_dict
raise NotImplementedError(schema)
def children(self,
some_parent: str) -> list:
some_parent = some_parent.lower().strip()
if some_parent in self._d_schema:
return self._d_schema[some_parent]
raise RecordUnavailableRecord(some_parent)
def top_level_entities(self) -> list:
return sorted(self._d_schema.keys())
def _find_in_schema(self,
input_text: str) -> set:
matches = set()
for k in self._d_schema:
if k.lower() == input_text:
matches.add(k)
for v in self._d_schema[k]:
if v.lower() == input_text:
matches.add(k)
return matches
@staticmethod
def _cleanse_results(results: list) -> list:
results = sorted(results)
if not len(results):
results = ["other"]
if results == ['other', 'unlisted']:
results = ['unlisted']
if len(results) > 1 and 'unlisted' in results:
results.remove('unlisted')
if len(results) > 1 and 'other' in results:
results.remove('other')
return results
def find(self,
input_text: str) -> list:
cache = set()
def _inner_find(some_input_text: str) -> Optional[list]:
matches = set()
if some_input_text in self.__blacklist:
matches.add(self.__unlisted)
schema_matches = self._find_in_schema(some_input_text)
if len(schema_matches):
matches = matches.union(schema_matches)
else:
if ' ' in some_input_text:
some_input_text = some_input_text.replace(' ', '_')
elif '_' in some_input_text:
some_input_text = some_input_text.replace('_', ' ')
schema_matches = self._find_in_schema(some_input_text)
if len(schema_matches):
matches = matches.union(schema_matches)
else:
for see_also in self._syn_finder.see_also(some_input_text): # GIT-1195-16167608
if see_also not in cache:
cache.add(see_also)
matches = matches.union(_inner_find(see_also.lower().strip()))
some_input_text = some_input_text.replace('_', ' ') # GIT-1367-16010583
parents = [x.lower().strip() for x in self._rel_finder.parents(some_input_text)
if x not in cache]
for parent in parents:
cache.add(parent)
matches = matches.union(_inner_find(parent))
return matches
results = _inner_find(input_text.lower().strip())
results = self._cleanse_results(results)
if self._is_debug:
self.logger.debug(f"Located Schema ("
f"input={input_text}, "
f"schema={results})")
return results
```
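The lookup walks a term toward the schema: try a direct match (with and without underscores), then follow see-also links and parent relationships, caching visited terms to avoid cycles. A toy sketch of that recursion with hypothetical schema, see-also and parent dictionaries:
```python
# Hypothetical stand-ins for the schema, see-also and parent dictionaries.
SCHEMA = {"database": ["db2", "sql"], "service management": ["support"]}
SEE_ALSO = {"rdbms": ["database"]}
PARENTS = {"db2 bind": ["db2"]}


def find(term: str, cache=None) -> set:
    cache = cache if cache is not None else set()
    term = term.lower().strip()
    matches = {k for k, values in SCHEMA.items()
               if k == term or term in values}          # direct schema match
    if matches:
        return matches
    for related in SEE_ALSO.get(term, []) + PARENTS.get(term, []):
        if related not in cache:                        # avoid revisiting terms
            cache.add(related)
            matches |= find(related, cache)
    return matches or {"other"}


if __name__ == "__main__":
    print(find("db2 bind"))   # {'database'} via the parent 'db2'
    print(find("rdbms"))      # {'database'} via the see-also link
    print(find("unknown"))    # {'other'}
```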
#### File: core/svc/find_patterns.py
```python
from typing import ValuesView
from base import BaseObject
class FindPatterns(BaseObject):
""" One-Stop-Shop Service API for entity patterns (variations) """
_d_patterns = None
_long_distance = None
def __init__(self,
ontology_name: str = 'base',
is_debug: bool = False):
"""
Created:
26-Mar-2019
<EMAIL>
* based on 'find-entity'
Updated:
15-Jul-2019
<EMAIL>
* add 'find' method
Updated:
13-Dec-2019
<EMAIL>
* load dictionaries by ontology name
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1582
"""
BaseObject.__init__(self, __name__)
from datadict.core.dmo import DictionaryLoader
_loader = DictionaryLoader(is_debug=is_debug,
ontology_name=ontology_name)
self._d_patterns = _loader.taxonomy().patterns()
def find(self,
input_text: str,
include_patterns: bool = True) -> list or None:
input_text = input_text.lower().strip()
def _results(a_key: str):
if include_patterns:
return self._d_patterns[a_key]
return [x for x in self._d_patterns[a_key]
if "+" not in x]
for k in self._d_patterns:
if k.lower() == input_text:
return _results(k)
def long_distance(self) -> ValuesView[list]:
"""
sample input:
{ 'Aix 5.2 Workload': [ 'aix+5.2+workload',
'aix_5.2_workload' ],
'Aix 5.2 Workload Partitions': [ 'aix+5.2+workload+partitions',
'aix_5.2_workload_partitions' ],
...
}
sample output:
[ { "pattern": [aix, 5.2],
"label": 'Aix 5.2 Workload' },
{ "pattern": [aix, 5.2, workload, partitions],
"label": 'Aix 5.2 Workload Partitions' }
...
}
this dictionary is typically used in long-distance matching algorithms in NLU
:return:
a dictionary with tuples as keys
and values as the actual label
"""
if self._long_distance is None:
d_long_distance = {}
for k in self._d_patterns:
patterns = [x for x in self._d_patterns[k] if "+" in x]
patterns = [[y.strip().lower() for y in x.split("+") if y]
for x in patterns if x]
for pattern in patterns:
key = "".join(sorted(set(pattern)))
if key not in d_long_distance:
d_long_distance[key] = []
d_long_distance[key].append({
"pattern": pattern,
"label": k
})
self._long_distance = d_long_distance.values()
return self._long_distance
```
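The long_distance index keys each '+'-delimited pattern by its sorted, de-duplicated token set so tokens can match regardless of order or distance. A standalone sketch of that transformation using the sample data from the docstring:
```python
d_patterns = {
    'Aix 5.2 Workload': ['aix+5.2+workload', 'aix_5.2_workload'],
    'Aix 5.2 Workload Partitions': ['aix+5.2+workload+partitions',
                                    'aix_5.2_workload_partitions']}

d_long_distance = {}
for label, variations in d_patterns.items():
    # only '+'-delimited variations participate in long-distance matching
    for variation in (v for v in variations if '+' in v):
        tokens = [t.strip().lower() for t in variation.split('+') if t]
        key = "".join(sorted(set(tokens)))      # order-independent key
        d_long_distance.setdefault(key, []).append(
            {"pattern": tokens, "label": label})

for entries in d_long_distance.values():
    print(entries)
```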
#### File: core/svc/find_synonym.py
```python
import re
import time
from typing import Optional
from base import BaseObject
PROV_WILDCARD = "synonyms"
class FindSynonym(BaseObject):
""" Act as a controller in front of all synonym dictionaries """
__syns = None
__by_word_length = None
__seealso_lcase = None
__longest_ngram = 0
def __init__(self,
ontology_name: str = 'base',
is_debug: bool = False):
"""
Created:
24-Mar-2017
<EMAIL>
Updated:
2-Aug-2017
<EMAIL>
* use config-based provenance
https://github.ibm.com/abacus-implementation/Abacus/issues/1721#issuecomment-3080923
Updated:
27-Sep-2017
<EMAIL>
* migrate to abacus-att
remove any references to flows
Updated:
21-Feb-2019
<EMAIL>
* migrated to text
Updated:
11-April-2019
<EMAIL>
* add reverse synonym dictionary
Updated:
7-Aug-2019
<EMAIL>
* use singleton and lazy loading for synonyms dictionary
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/661
Updated:
27-Sept-2019
<EMAIL>
* integrate 'see-also' capabilities
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1020
Updated:
11-Oct-2019
<EMAIL>
* reversed changes from 7-Aug-2019
Updated:
22-Oct-2019
<EMAIL>
* enable to look for synonyms of a given word length
Updated:
13-Dec-2019
<EMAIL>
* load dictionaries by ontology name
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1582
* remove reverse-regex-syns
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1583#issuecomment-16612774
"""
BaseObject.__init__(self, __name__)
from datadict.core.dmo import DictionaryLoader
start = time.time()
self._is_debug = is_debug
_loader = DictionaryLoader(ontology_name=ontology_name)
self._d_see_also = _loader.synonyms().seeAlso()
self._d_rev_synonyms = _loader.synonyms().rev()
if not self.__syns:
self._build_static_dictionaries(_loader.synonyms().fwd())
total_time = str(time.time() - start)[:4]
if is_debug:
self.logger.debug('\n'.join([
"Initialized FindSynonym",
f"\tTime: {total_time}s",
f"\tOntology Name: {ontology_name}"]))
def _build_static_dictionaries(self,
d_synonyms: dict):
if self._is_debug:
self.logger.debug("Loading Synonyms Dictionary")
self.__syns = d_synonyms
self.__by_word_length = {'with_regexp': {}}
for key, values in d_synonyms.items():
key = key.replace(',', '')
for value in values:
if '[' in value:
self._update_regexp_dictionary(self.__by_word_length['with_regexp'],
value,
key)
else:
word_length = len(value.split())
if not word_length in self.__by_word_length:
self.__by_word_length[word_length] = {}
if not key in self.__by_word_length[word_length]:
self.__by_word_length[word_length][key] = []
self.__by_word_length[word_length][key].append(value)
self.__longest_ngram = max(self.__longest_ngram, word_length)
for word_length in range(1, self.__longest_ngram + 1):
if word_length not in self.__by_word_length:
self.__by_word_length[word_length] = {}
@staticmethod
def _update_regexp_dictionary(regexp_dict,
pattern,
replacement):
if "(?:^" not in pattern:
pattern = f'(?:^|\\s){pattern}'
if "(?:$" not in pattern:
pattern = f'{pattern}(?:$|\\s)'
compiled = re.compile(pattern)
regexp_dict[compiled] = f' {replacement} '
def dict(self) -> dict:
return self.__syns
def all(self,
lower: bool = True,
keep_regexp: bool = False,
transform_spans: bool = True) -> list:
"""
Purpose:
return all words in the synonyms list
:param lower:
force lowercase
:param keep_regexp:
return regular expressions
:param transform_spans:
transform word span "alpha_beta_gamma" to "alpha beta gamma"
"""
words = set()
for k in self.dict():
words.add(k)
for value in self.dict()[k]:
words.add(value)
if lower:
words = [x.lower().strip() for x in words]
if not keep_regexp:
words = [x for x in words if "[" not in x]
words = [x for x in words if "+" not in x]
if transform_spans:
words = [x.replace("_", " ") for x in words]
return sorted(words)
def canon(self,
some_input: str) -> str:
"""
Purpose:
Find the Canonical form for a Synonym
:param some_input:
some term
:return:
the canonical form
"""
_input = some_input.lower()
if _input in self._d_rev_synonyms:
return self._d_rev_synonyms[_input]
if ' ' in _input:
canon_form = ' '.join([self.canon(x) for x in _input.split(' ')])
if canon_form != _input:
return canon_form
def keys_in_swap_level(self,
swap_level: int):
return self.__by_word_length[swap_level].keys()
def synonyms_in_swap_level(self,
known_key: str,
swap_level: int):
return self.__by_word_length[swap_level][known_key]
def regexps_with_synonyms(self):
return self.__by_word_length['with_regexp']
def max_ngram(self) -> int:
return self.__longest_ngram
def synonyms(self,
some_input: str,
known: bool = False) -> Optional[list]:
"""
Purpose:
Given a head (canonical) form, find the synonyms (variations)
:param some_input:
any head (canonical) form
e.g., 'Project Manager'
:param known:
indicates the input parameter is a known dictionary key
this is an optimization that places the burden of exception handling on the caller
:return:
a list of synonyms (variations)
e.g., [ 'PM',
'Proj. Mgr' ]
"""
if known:
return self.dict()[some_input]
some_token = some_input.lower().strip()
if some_token in self.dict():
return self.dict()[some_token]
if ' ' in some_token:
some_token = some_token.replace(" ", "_")
if some_token in self.dict():
return self.dict()[some_token]
def see_also(self,
some_input: str) -> list:
"""
:param some_input:
:return:
"""
def _cleanse(a_token: str) -> str:
return a_token.lower().strip()
for k in self._d_see_also:
if _cleanse(k) == _cleanse(some_input):
return self._d_see_also[k]
return []
def exists(self,
some_input: str) -> bool:
"""
:param some_input:
any head (canonical) form to the dictionary
NOTE: No string manipulation is performed for this lookup
the input form is case sensitive
:return:
True if the input exists as a dictionary key
False the input does not exist as a dictionary key
"""
return self.synonyms(some_input) is not None
```
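The swap-level dictionaries group each variation under its head form by token count, keeping regex variations apart so callers can ask for, say, only the two-word synonyms. A standalone sketch of that grouping with hypothetical synonym data:
```python
import re

d_synonyms = {
    "project manager": ["pm", "proj. mgr", "project mgr"],
    "ibm": ["international business machines", "[i1]bm"]}

by_word_length = {'with_regexp': {}}
longest_ngram = 0

for key, values in d_synonyms.items():
    for value in values:
        if '[' in value:                        # regex variation
            pattern = re.compile(f'(?:^|\\s){value}(?:$|\\s)')
            by_word_length['with_regexp'][pattern] = f' {key} '
        else:                                   # plain n-gram variation
            n = len(value.split())
            by_word_length.setdefault(n, {}).setdefault(key, []).append(value)
            longest_ngram = max(longest_ngram, n)

print(by_word_length[1])    # one-word variations, e.g. {'project manager': ['pm']}
print(longest_ngram)        # 3 for this data
```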
#### File: dmo/builder/assigner_node_builder.py
```python
from typing import Optional
from pandas import DataFrame
from base import BaseObject
from datagit.graph.dmo.util import GraphNodeDefGenerator
from datagit.graph.dmo.util import GraphNodeIdGenerator
from datagit.graph.dmo.util import GraphTextSplitter
from datagit.graph.dmo.util import SocialNodeSizeGenerator
from datamongo import CendantRecordParser
class AssignerNodeBuilder(BaseObject):
""" Build a GitHub Assigner Node for Graphviz """
def __init__(self,
d_record: dict,
stylesheet_path: str,
df_social_entity_analysis: Optional[DataFrame],
is_debug: bool = True):
"""
Created:
25-Dec-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1646
Updated:
30-Dec-2019
<EMAIL>
* refactored state and node building in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1678
Updated:
31-Dec-2019
<EMAIL>
* add cendant-record-parser
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1681#issuecomment-16873873
* refactor node-id
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1681#issuecomment-16877316
Updated:
2-Jan-2020
<EMAIL>
* use social entity analysis data frame to influence nodesize
https://github.ibm.com/-cdo/unstructured-analytics/issues/1680#issuecomment-16901723
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._d_record = d_record
self._df_social_entity_analysis = df_social_entity_analysis
self._line_generator = GraphNodeDefGenerator(is_debug=self._is_debug,
stylesheet_path=stylesheet_path)
self.state = {}
self._build_state()
def _nodesize(self,
person_name: str) -> dict:
generator = SocialNodeSizeGenerator(is_debug=self._is_debug,
df=self._df_social_entity_analysis)
return generator.process(person_name)
def _build_assignee_node(self):
parser = CendantRecordParser(is_debug=self._is_debug)
person_name = parser.field_value_by_names(self._d_record, ['Assigner'])
def node_id() -> str:
return GraphNodeIdGenerator(a_type='person',
a_label=person_name).process()
def label() -> str:
the_label = person_name.replace('-', ' ')
return GraphTextSplitter.split_text(the_label, threshold=5)
node_id = node_id()
label = label()
nodesize = self._nodesize(person_name)
def lines() -> list:
return self._line_generator.process(node_id=node_id,
node_type='person',
node_label=label,
height=nodesize['height'],
width=nodesize['width'],
comment=str(self.__class__.__name__))
return {
"lines": lines(),
"node_id": node_id,
"person_name": person_name}
def _build_state(self):
self.state["assigner"] = self._build_assignee_node()
```
#### File: dmo/builder/commit_node_builder.py
```python
from base import BaseObject
from datagit.graph.dmo.util import GraphNodeDefGenerator
from datagit.graph.dmo.util import GraphNodeIdGenerator
from datagit.graph.dmo.util import GraphTextSplitter
from datamongo import CendantRecordParser
class CommitNodeBuilder(BaseObject):
""" Build a GitHub Commit Node for Graphviz """
# generate mean and stdev via # GIT-1661-16820534
__commit_total_mean = 6350.46756302521
__commit_total_stdev = 26842.71482111913
def __init__(self,
d_record: dict,
stylesheet_path: str,
is_debug: bool = True):
"""
Created:
19-Dec-2019
<EMAIL>
* refactored out of 'graph-github-issue'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1631
Updated:
20-Dec-2019
<EMAIL>
* detect merge commits
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1633#issuecomment-16768132
Updated:
25-Dec-2019
<EMAIL>
* update node line generation
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1654
Updated:
26-Dec-2019
<EMAIL>
* perform dynamic node sizing via
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1661#issuecomment-16820534
Updated:
27-Dec-2019
<EMAIL>
* remove relative sizing
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1669#issue-11256101
Updated:
31-Dec-2019
<EMAIL>
* add cendant-record-parser
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1681#issuecomment-16873873
Updated:
31-Dec-2019
<EMAIL>
* use node line generator to support stylesheet pathing
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1681#issuecomment-16877187
* refactor node-id
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1681#issuecomment-16877316
Updated:
2-Jan-2020
<EMAIL>
* refactor code into state/build-state standard
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1681#issuecomment-16877328
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._d_record = d_record
self._parser = CendantRecordParser(is_debug=self._is_debug)
self._line_generator = GraphNodeDefGenerator(is_debug=self._is_debug,
stylesheet_path=stylesheet_path)
self.state = {}
self._build_state()
self.is_merge_commit = self._is_merge_commit() # GIT-1633-16768132
def _is_merge_commit(self):
"""
Purpose:
Naive Method to Detect a Merge Commit
Reference:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1633#issuecomment-16768132
:return:
True the commit record is a merge
False not a merge commit
"""
title = self._parser.field_value_by_name(a_record=self._d_record,
a_field_name='Title').lower().strip()
return 'merge' in title
def _build_state(self):
def node_id() -> str:
commit_id = self._d_record['key_field']
return GraphNodeIdGenerator(a_type='commit',
a_label=commit_id).process()
def label() -> str:
additions = self._parser.field_value_by_name(self._d_record, "Additions")
deletions = self._parser.field_value_by_name(self._d_record, "Deletions")
total = self._parser.field_value_by_name(self._d_record, "Total")
title = self._parser.field_value_by_name(self._d_record, "Title")
title = GraphTextSplitter.split_text(title, threshold=10)
template = "{{#LABEL}|{Total\\n#TOTAL|{Additions\\n#ADD|{Deletions\\n#DEL}}}}"
template = template.replace("#LABEL", title)
template = template.replace("#TOTAL", str(total))
template = template.replace("#ADD", str(additions))
template = template.replace("#DEL", str(deletions))
return template
label = label()
node_id = node_id()
def lines() -> list:
return self._line_generator.process(node_id=node_id,
node_type='commit',
node_label=label,
comment=str(self.__class__.__name__))
self.state = {
"lines": lines(),
"node_id": node_id}
```
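The commit label uses Graphviz record syntax with placeholder tokens substituted by the parsed field values. A small sketch of the same substitution with hypothetical commit numbers:
```python
template = "{{#LABEL}|{Total\\n#TOTAL|{Additions\\n#ADD|{Deletions\\n#DEL}}}}"

label = (template.replace("#LABEL", "Fix parser bug")
                 .replace("#TOTAL", "42")
                 .replace("#ADD", "30")
                 .replace("#DEL", "12"))

# Graphviz interprets the literal '\n' inside the record label as a line break.
print(label)
```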
#### File: dmo/util/graph_edgedef_generator.py
```python
from typing import Optional
from pandas import DataFrame
from base import BaseObject
class GraphEdgeDefGenerator(BaseObject):
""" Build and Style all the Graphviz Edges """
__default_keys = ["subject", "predicate", "object", "label"]
def __init__(self,
triples: list,
stylesheet_path: str,
df_social_rel_analysis: Optional[DataFrame],
is_debug: bool = True):
"""
Created:
24-Dec-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1645
Updated:
26-Dec-2019
<EMAIL>
* dynamic label capability
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1648#issuecomment-16817744
* edge style attributes derived entirely from stylesheet
https://github.ibm.com/-cdo/unstructured-analytics/issues/1657
Updated:
27-Dec-2019
<EMAIL>
* refactor into kwargs
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1669#issue-11256101
Updated:
31-Dec-2019
<EMAIL>
* renamed from 'graph-edge-builder' in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1681#issuecomment-16877277
* use stylesheet-pathing as param
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1684
Updated:
2-Jan-2020
<EMAIL>
* import df-social-rel-analysis as param
https://github.ibm.com/-cdo/unstructured-analytics/issues/1680#issuecomment-16901723
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
from datagit.graph.dmo.util import GraphEdgeStyleFinder
self._triples = triples
self._is_debug = is_debug
self._df_social_rel_analysis = df_social_rel_analysis
self._edge_style_finder = GraphEdgeStyleFinder(is_debug=self._is_debug,
stylesheet_path=stylesheet_path)
def _weight(self,
person_a: str,
person_b: str) -> float:
from datagit.graph.dmo.util import SocialRelWeightGenerator
generator = SocialRelWeightGenerator(is_debug=self._is_debug,
df=self._df_social_rel_analysis)
return generator.social_collocation(person_a, person_b)
def process(self) -> list:
lines = []
for triple in self._triples:
d_edge_style = self._edge_style_finder.process(triple['predicate'])
existing_keys = set(self.__default_keys)
lines.append(f"\t{triple['subject']} -> {triple['object']}")
lines.append(f"[")
# Add Label (Optional)
def _label() -> Optional[str]:
display_label = bool(d_edge_style["display_label"])
has_label = 'label' in triple and triple['label'] is not None
if display_label:
if has_label: # User Defined Label
return f"\tlabel=\"{triple['label']}\""
# Predicate Label
return f"\tlabel=\"{triple['predicate']}\""
# No Label
lines.append(_label())
if 'subject_name' in triple and 'object_name' in triple:
weight = self._weight(triple['subject_name'], triple['object_name'])
weight = 1.0 + weight
lines.append(f"\tweight=\"{weight}\"")
# Add User Keys (Overrides Stylesheet)
keys = [key for key in triple.keys()
if key not in existing_keys]
for key in keys:
existing_keys.add(key)
lines.append(f"\t{key}=\"{triple[key]}\"")
# Add Stylesheet Keys
style_keys = [key for key in d_edge_style.keys()
if key not in existing_keys]
for key in style_keys:
lines.append(f"\t{key}=\"{d_edge_style[key]}\"")
lines.append("]\n")
lines = [line for line in lines
if line and len(line.strip())]
return lines
```
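A minimal usage sketch for the edge generator above. The import path and the stylesheet location are assumptions based on the package names used in this section, and the sketch presumes the stylesheet defines a `display_label` entry for the predicate; only `subject`, `predicate` and `object` are mandatory triple keys, with `label` and any extra keys acting as optional overrides.
```python
# Sketch only: import path and stylesheet path are assumptions, not confirmed by this repo.
from datagit.graph.dmo.util import GraphEdgeDefGenerator

triples = [{"subject": "NODE_ISSUE_1631",
            "predicate": "assigned-to",      # hypothetical predicate
            "object": "PERSON_CRAIG",
            "label": "assigned"}]            # optional user-defined edge label

generator = GraphEdgeDefGenerator(triples=triples,
                                  stylesheet_path="resources/config/graph/edges.yml",  # hypothetical
                                  df_social_rel_analysis=None,
                                  is_debug=True)

for line in generator.process():             # Graphviz edge fragments, e.g. "\tNODE_ISSUE_1631 -> PERSON_CRAIG"
    print(line)
```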
#### File: dmo/util/graph_nodeid_generator.py
```python
from base import BaseObject
class GraphNodeIdGenerator(BaseObject):
""" Build a GitHub Node for Graphviz """
def __init__(self,
a_type: str,
a_label: str,
is_debug: bool = True):
"""
Created:
19-Dec-2019
<EMAIL>
* refactored out of 'graph-github-issue'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1631
Updated:
31-Dec-2019
<EMAIL>
* renamed from 'node-id-generator'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1681#issuecomment-16877277
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
self._type = a_type
self._label = a_label
self._is_debug = is_debug
def process(self) -> str:
a_type = self._type.lower().strip()
a_label = self._label.upper().strip()
a_label = a_label.replace('.', '_')
a_label = a_label.replace('-', '_')
a_label = a_label.replace('=', '_')
a_label = a_label.replace('/', '_')
def generate_label() -> str:
if a_type == "issue":
return f"NODE_ISSUE_{a_label}"
elif a_type == "pull_request":
return f"NODE_PR_{a_label}"
elif a_type == "commit":
return f"NODE_COMMIT_{a_label}"
elif a_type == "file_commit":
return f"NODE_FILE_COMMIT_{a_label}"
elif a_type == "file_folder":
return f"PYTHON_FILE_FOLDER_{a_label}"
elif a_type == "file_path":
return f"PYTHON_FILE_PATH_{a_label}"
elif a_type == "file_name":
return a_label
elif a_type == "person":
return f"PERSON_{a_label}"
raise NotImplementedError
def make_hash(some_label: str) -> str:
# return f"GN_{BaseHash.hash(some_label)}"
return f"GN_{some_label}"
label = generate_label()
if not label:
raise NotImplementedError
return make_hash(label)
```
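Since `process()` is deterministic, a short worked example may be useful; the import path is an assumption based on the package layout visible above.
```python
from datagit.graph.dmo.util import GraphNodeIdGenerator  # assumed import path

print(GraphNodeIdGenerator(a_type="issue", a_label="1631").process())
# -> "GN_NODE_ISSUE_1631"

# punctuation in the label is normalised to underscores before the type prefix is applied
print(GraphNodeIdGenerator(a_type="file_path", a_label="datagit/graph/dmo").process())
# -> "GN_PYTHON_FILE_PATH_DATAGIT_GRAPH_DMO"
```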
#### File: core/dmo/collection_name_generator.py
```python
from datetime import datetime
from base import BaseObject
class CollectionNameGenerator(BaseObject):
""" GitHub Ingestion Orchestrator """
def __init__(self,
repo_name: str,
collection_type: str,
is_debug: bool = False):
"""
Created:
6-Dec-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1532
:param repo_name:
name of the Github repo
e.g., 'unstructured-analytics'
:param collection_type:
the type of collection to generate
e.g., 'src' or 'tag'
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._repo_name = repo_name
self._collection_type = collection_type
def _repo_abbrev(self) -> str:
if self._repo_name.lower() == "unstructured-analytics":
return "una"
if self._repo_name.lower() == "cdo-workforce-transformation":
return "cdo"
self.logger.warning(f"Unrecognized Repository: "
f"{self._repo_name}")
return self._repo_name.replace('-', '_')
@staticmethod
def _date() -> str:
def _current_day() -> str:
x = datetime.now().day
if x < 10:
return f"0{x}"
return str(x)
def _current_month() -> str:
x = datetime.now().month
if x < 10:
return f"0{x}"
return str(x)
current_year = datetime.now().year
return f"{current_year}{_current_month()}{_current_day()}"
def process(self) -> str:
collection_name = f"github-{self._repo_abbrev()}_{self._collection_type}_{self._date()}"
if self._is_debug:
self.logger.debug('\n'.join([
"Generated Collection Name",
f"\tName: {collection_name}",
f"\tRepo: {self._repo_name}"]))
return collection_name
```
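The generated name always follows a `github-<abbrev>_<type>_<YYYYMMDD>` pattern with a zero-padded date; a brief sketch (the `from datagit import CollectionNameGenerator` form mirrors the import used later in `github_ingest_api.py`):
```python
from datagit import CollectionNameGenerator

name = CollectionNameGenerator(repo_name="unstructured-analytics",
                               collection_type="src",
                               is_debug=True).process()
# e.g. "github-una_src_20191206" when run on 6-Dec-2019
print(name)
```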
#### File: core/dmo/zenhub_url_generator.py
```python
from base import BaseObject
class ZenHubURLGenerator(BaseObject):
"""
prepare url for ZenHub repo API calls
returns the url for the corresponding repo owner & repo name
"""
def __init__(self,
repo_id: int,
repo_name: str,
repo_owner: str,
is_debug: bool = False):
"""
Created:
7-Dec-2019
<EMAIL>
* generated via
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1511
* refactored in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1553#issue-11089485
:param repo_id:
the numeric identifier of the GitHub repository
:param repo_owner:
name of the Github repo owner
e.g., 'GTS-CDO'
:param repo_name:
name of the Github repo
e.g., 'unstructured-analytics'
"""
BaseObject.__init__(self, __name__)
from datagit.ingest.core.dmo import GitHubTokenLoader
self._is_debug = is_debug
self._repo_id = repo_id
self._repo_name = repo_name
self._repo_owner = repo_owner
self._token_loader = GitHubTokenLoader(is_debug=self._is_debug)
def url(self):
class Facade(object):
@staticmethod
def base():
"""
Create a base ZenHub URL
:return:
a properly formatted URL
"""
return self._token_loader.urls().zenhub()
@staticmethod
def issue(number: int):
"""
Make url for ZenHub repo issues
:param number:
:return:
a properly formatted issues URL for the specified repository
"""
token = self._token_loader.user_tokens().zenhub()
return f"{Facade().base()}{self._repo_id}/issues/{number}?{token}"
@staticmethod
def epic(number: int):
"""
Make url for ZenHub epic issues
Reference:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1667#issuecomment-16821516
:param number:
:return:
a properly formatted issues URL for the specified repository
"""
token = self._token_loader.user_tokens().zenhub()
return f"{Facade().base()}{self._repo_id}/epics/{number}?{token}"
@staticmethod
def pull(number: int):
"""
Make url for ZenHub repo pull requests
:param number:
:return:
a properly formatted pull URL for the specified repository
"""
token = self._token_loader.user_tokens().zenhub()
return f"{Facade().base()}{self._repo_id}/pull/{number}?{token}"
return Facade()
```
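A hedged usage sketch for the URL facade. It assumes ZenHub/GitHub tokens are available to `GitHubTokenLoader`, and the import path is inferred from the package names in this section; the repo id reused here comes from the unit test further down.
```python
# Sketch only: assumes configured tokens; import path is an assumption.
from datagit.ingest.core.dmo import ZenHubURLGenerator

urls = ZenHubURLGenerator(repo_id=508363,                     # id asserted in find_repo_id_test.py
                          repo_name="unstructured-analytics",
                          repo_owner="GTS-CDO").url()

issue_url = urls.issue(1667)   # <zenhub-base>/508363/issues/1667?<token>
epic_url = urls.epic(1667)     # <zenhub-base>/508363/epics/1667?<token>
```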
#### File: github/bp/github_ingest_api.py
```python
import sys
from base import BaseObject
class GithubIngestAPI(BaseObject):
""" GitHub Ingestion Orchestrator """
def __init__(self,
is_debug: bool = False):
"""
Created:
20-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/574
Updated:
29-Nov-2019
<EMAIL>
* update for n2n flow
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1476
Updated:
6-Dec-2019
<EMAIL>
* add collection name generator
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1532
Updated:
7-Dec-2019
<EMAIL>
* github-user-name no longer needed from environment
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1551#issuecomment-16473682
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._github_usertoken = self._load_github_usertoken()
def _load_github_usertoken(self):
from datagit.ingest.core.dmo import GitHubTokenLoader
return GitHubTokenLoader(is_debug=self._is_debug).user_tokens().github()
def _target_collection_name(self,
repo_name: str,
collection_type: str) -> str:
from datagit import CollectionNameGenerator
return CollectionNameGenerator(is_debug=self._is_debug,
repo_name=repo_name,
collection_type=collection_type).process()
def ingest(self):
class Facade(object):
@classmethod
def by_name(cls,
repo_name: str,
repo_owner: str,
start_issue: int,
end_issue: int):
from datagit.ingest.github.bp import GitHubIngestOrchestrator
target_collection_name = self._target_collection_name(repo_name=repo_name,
collection_type='src')
GitHubIngestOrchestrator(flush_records=False,
persist_records=True,
is_debug=self._is_debug,
repo_name=repo_name,
repo_owner=repo_owner,
github_usertoken=self._github_usertoken,
collection_name=target_collection_name).process(start_issue=start_issue,
end_issue=end_issue)
@classmethod
def cendant(cls):
return cls.by_name(repo_owner="GTS-CDO",
repo_name="unstructured-analytics",
start_issue=1,
end_issue=sys.maxsize)
@classmethod
def cdo_workforce(cls):
return cls.by_name(repo_owner="GTS-CDO",
repo_name="CDO-Workforce-Transformation",
start_issue=1,
end_issue=sys.maxsize)
return Facade()
```
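The `ingest()` facade exposes two preconfigured repositories plus a generic `by_name` entry point; a usage sketch, assuming GitHub tokens are configured and using the module path implied by the file location:
```python
# Sketch only: assumes a configured GitHub user token.
from datagit.ingest.github.bp import GithubIngestAPI

api = GithubIngestAPI(is_debug=True)

# ingest the full 'unstructured-analytics' repository
api.ingest().cendant()

# or target an arbitrary repository and issue range
api.ingest().by_name(repo_owner="GTS-CDO",
                     repo_name="unstructured-analytics",
                     start_issue=1500,
                     end_issue=1700)
```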
#### File: github/svc/transform_github_structure.py
```python
import pprint
from typing import Optional
from base import BaseObject
from base import MandatoryParamError
class TransformGitHubStructure(BaseObject):
""" Transform a GitHub Object to a Cendant Record Structure """
__keystructs_issue = [
'assignee,assignees,author_association,body,closed_at,closed_by,comments,comments_url,created_at,events_url,html_url,id,labels,labels_url,locked,milestone,node_id,number,repository_url,state,title,updated_at,url,user']
__keystructs_comment = [
'author_association,body,created_at,html_url,id,issue_url,node_id,updated_at,url,user']
__keystructs_commit_comment = [
"author_association,body,commit_id,created_at,html_url,id,line,node_id,path,position,updated_at,url,user"]
__keystructs_pr_event = [
"actor,commit_id,commit_url,created_at,event,id,node_id,requested_reviewer,review_requester,url",
"actor,assignee,assigner,commit_id,commit_url,created_at,event,id,node_id,url",
"actor,commit_id,commit_url,created_at,event,id,label,node_id,url"]
__keystructs_merge = [
"author,committer,html_url,message,node_id,parents,sha,tree,url,verification"]
__keystructs_pr_issue = [
"_links,additions,assignee,assignees,author_association,base,body,changed_files,closed_at,comments,comments_url,commits,commits_url,created_at,deletions,diff_url,head,html_url,id,issue_url,labels,locked,maintainer_can_modify,merge_commit_sha,mergeable,mergeable_state,merged,merged_at,merged_by,milestone,node_id,number,patch_url,rebaseable,requested_reviewers,requested_teams,review_comment_url,review_comments,review_comments_url,state,statuses_url,title,updated_at,url,user",
"assignee,assignees,author_association,body,closed_at,closed_by,comments,comments_url,created_at,events_url,html_url,id,labels,labels_url,locked,milestone,node_id,number,pull_request,repository_url,state,title,updated_at,url,user"]
__keystructs_commits = [
"author,comments_url,commit,committer,files,html_url,node_id,parents,sha,stats,url"]
__keystructs_mentions = [
"actor,commit_id,commit_url,created_at,event,id,node_id,url"]
__keystructs_milestone = [
"actor,commit_id,commit_url,created_at,event,id,milestone,node_id,url"]
__keystructs_renamed = [
"actor,commit_id,commit_url,created_at,event,id,node_id,rename,url"]
__keystructs_review_dismissed = [
"actor,commit_id,commit_url,created_at,dismissed_review,event,id,node_id,url"]
__keystructs_tree = [
"sha,tree,truncated,url"]
def __init__(self,
issue_id: int or str,
parent_id: str,
manifest_name: str,
repo_name: str,
repo_owner: str,
svcresult: dict or list,
is_debug: bool = False,
log_structures: bool = True):
"""
Created:
26-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1459
Updated:
3-Dec-2019
<EMAIL>
* add further differentiation based on structural attributes
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1509#issue-11034406
Updated:
7-Dec-2019
<EMAIL>
* renamed from 'transform-github-data'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1553#issue-11089485
* integrate zenhub access
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1511
Updated:
26-Dec-2019
<EMAIL>
* update zenhub access pattern for epics
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1667
:param issue_id:
this is a provenance link to the source issue
in this case it would be the original GitHub Issue ID the comments are linked to
:param repo_name:
the name of the repository the data is drawn from
:param manifest_name:
this is a provenance link to the source data
in this case it would be the original GitHub URL the data was taken from
:param svcresult:
a list of data retrieved from GitHub
:param is_debug:
:param log_structures:
write JSON structures to log file
these objects are potentially LARGE - best to keep this value False for production runs
"""
BaseObject.__init__(self, __name__)
from datagit.ingest.github.svc import AccessZenHubURL
if not parent_id:
raise MandatoryParamError
self._issue_id = issue_id
self._parent_id = parent_id
self._is_debug = is_debug
self._svcresult = svcresult
self._repo_name = repo_name
self._repo_owner = repo_owner
self._manifest_name = manifest_name
self._log_structures = log_structures
self._zenhub_access = AccessZenHubURL(is_debug=self._is_debug,
repo_name=self._repo_name,
repo_owner=self._repo_owner)
def _log_result(self,
structure_name: str,
svcresult: dict or list) -> None:
if self._log_structures:
self.logger.debug('\n'.join([
f"{structure_name} Structure Detected",
"------------------------------------------------------------------------------------------",
','.join(sorted(svcresult.keys())),
pprint.pformat(svcresult),
"------------------------------------------------------------------------------------------"]))
def _analyze_structure(self,
svcresult: dict) -> list or None:
from datagit.ingest.github.dmo import IssueTransformation
from datagit.ingest.github.dmo import AssignmentTransformation
from datagit.ingest.github.dmo import CommentsTransformation
from datagit.ingest.github.dmo import CommitsTransformation
from datagit.ingest.github.dmo import PullRequestEventTransformation
from datagit.ingest.github.dmo import PullRequestIssueTransformation
from datagit.ingest.github.dmo import MentionsTransformation
from datagit.ingest.github.dmo import CommitCommentTransformation
from datagit.ingest.github.dmo import MergeEventTransformation
key_struct = ','.join(sorted(svcresult.keys()))
def issue_transformation() -> list:
self._log_result("Issues", svcresult)
d_zenhub_result = self._zenhub_access.process(issue_id=self._issue_id)
def _zenhub_epic() -> Optional[dict]:
if d_zenhub_result['is_epic']:
return self._zenhub_access.process(issue_id=self._issue_id,
as_epic=True)
return IssueTransformation(svcresult=svcresult,
is_debug=self._is_debug,
issue_id=self._issue_id,
parent_id=self._parent_id,
repo_name=self._repo_name,
zenhub_result=d_zenhub_result,
zenhub_epic=_zenhub_epic(),
manifest_name=self._manifest_name).process()
def comments_transformation() -> list:
self._log_result("Comments", svcresult)
return CommentsTransformation(svcresult=svcresult,
is_debug=self._is_debug,
issue_id=self._issue_id,
parent_id=self._parent_id,
repo_name=self._repo_name,
manifest_name=self._manifest_name).process()
def pull_request_issues() -> list:
self._log_result("Pull Request Issue", svcresult)
return PullRequestIssueTransformation(svcresult=svcresult,
is_debug=self._is_debug,
issue_id=self._issue_id,
parent_id=self._parent_id,
repo_name=self._repo_name,
manifest_name=self._manifest_name).process()
def merge_request_event() -> list:
self._log_result("Merge Request Event", svcresult)
return MergeEventTransformation(svcresult=svcresult,
is_debug=self._is_debug,
issue_id=self._issue_id,
parent_id=self._parent_id,
repo_name=self._repo_name,
manifest_name=self._manifest_name).process()
def pull_request_events() -> list:
self._log_result("Pull Request Event", svcresult)
return PullRequestEventTransformation(svcresult=svcresult,
is_debug=self._is_debug,
issue_id=self._issue_id,
parent_id=self._parent_id,
repo_name=self._repo_name,
manifest_name=self._manifest_name).process()
def commit_comment_transformation() -> list:
self._log_result("Commit Comment", svcresult)
return CommitCommentTransformation(svcresult=svcresult,
is_debug=self._is_debug,
issue_id=self._issue_id,
parent_id=self._parent_id,
repo_name=self._repo_name,
manifest_name=self._manifest_name).process()
def assignment_transformation() -> list:
self._log_result("Assignment", svcresult)
return AssignmentTransformation(svcresult=svcresult,
is_debug=self._is_debug,
issue_id=self._issue_id,
parent_id=self._parent_id,
repo_name=self._repo_name,
manifest_name=self._manifest_name).process()
def commits_transformation() -> list:
self._log_result("Commit", svcresult)
return CommitsTransformation(svcresult=svcresult,
is_debug=self._is_debug,
issue_id=self._issue_id,
parent_id=self._parent_id,
repo_name=self._repo_name,
manifest_name=self._manifest_name).process()
def mentions_transformation() -> list:
self._log_result("Mentions", svcresult)
return MentionsTransformation(svcresult=svcresult,
is_debug=self._is_debug,
issue_id=self._issue_id,
parent_id=self._parent_id,
repo_name=self._repo_name,
manifest_name=self._manifest_name).process()
def unrecognized() -> None:
self.logger.warning('\n'.join([
"Unrecognized GitHub Object",
f"\tIssue #{self._issue_id}",
f"\tKeys: {key_struct}",
"--------------------------------------------------",
pprint.pformat(svcresult, indent=4),
"--------------------------------------------------"]))
raise NotImplementedError
# Controller Logic
if key_struct in self.__keystructs_milestone: # GIT-1509-16376091
pass # not interested
elif key_struct in self.__keystructs_renamed: # GIT-1509-16376485
pass # not interested
elif key_struct in self.__keystructs_review_dismissed: # GIT-1509-16376649
pass # not interested
elif key_struct in self.__keystructs_issue:
return issue_transformation()
elif key_struct in self.__keystructs_merge:
return merge_request_event()
elif key_struct in self.__keystructs_comment:
return comments_transformation()
elif key_struct in self.__keystructs_commit_comment:
return commit_comment_transformation()
elif key_struct in self.__keystructs_pr_issue:
return pull_request_issues()
elif key_struct in self.__keystructs_pr_event:
if 'event' not in svcresult:
return pull_request_events()
elif svcresult['event'] == 'assigned': # GIT-1509-11034406
return assignment_transformation()
elif svcresult['event'] == 'referenced':
return pull_request_events()
elif key_struct in self.__keystructs_commits:
return commits_transformation()
elif key_struct in self.__keystructs_mentions:
if svcresult["event"] == "pinned" or \
svcresult["event"] == "unpinned": # GIT-1509-16375937
pass # not interested
elif svcresult["event"] == "locked" or \
svcresult["event"] == "unlocked": # GIT-1509-16375958
pass # not interested
elif svcresult["event"] == "merged": # GIT-1509-16376390
pass # not interested
elif svcresult["event"] == "closed": # GIT-1509-16376401
pass # not interested
else:
return mentions_transformation()
elif key_struct in self.__keystructs_tree: # GIT-1537-16466899
pass # not interested
else:
unrecognized()
def _analyze(self,
svcresult: list or dict) -> list or None:
"""
Purpose:
Analyze the service result (svcresult) from GitHub
:param svcresult:
the incoming svcresult may be either:
list multiple structures
dict a single structure
:return:
a list of records
"""
if type(svcresult) == list:
master_records = []
for inner_structure in list(svcresult):
if not inner_structure:
raise ValueError
results = self._analyze_structure(inner_structure)
if results and len(results):
master_records += results
return master_records
elif type(svcresult) == dict:
return self._analyze_structure(dict(svcresult))
raise NotImplementedError
def process(self) -> list:
return self._analyze(self._svcresult)
```
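The controller logic dispatches purely on the sorted, comma-joined key set of each GitHub payload, so two payloads with identical field names always route to the same transformation. A small illustration of that key derivation (the payload values are hypothetical; only the keys matter):
```python
# Illustration of the dispatch key used in _analyze_structure; values are placeholders.
svcresult = {"id": 42,
             "node_id": "MDU6...",
             "event": "assigned",
             "actor": None,
             "assignee": None,
             "assigner": None,
             "commit_id": None,
             "commit_url": None,
             "created_at": "2019-12-03T00:00:00Z",
             "url": "https://github.ibm.com/..."}

key_struct = ','.join(sorted(svcresult.keys()))
print(key_struct)
# -> actor,assignee,assigner,commit_id,commit_url,created_at,event,id,node_id,url
# matches the second entry of __keystructs_pr_event, and because event == 'assigned'
# the record is routed to AssignmentTransformation.
```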
#### File: scripts/graph/social_graph_all_ibm.py
```python
import os
from base import FileIO
from datagit.analyze.bp import GitHubAnalysisOrchestrator
from datagit.graph.svc import GraphSocialNetwork
from datagit.navigate.bp import GitHubNavigationAPI
def generate_output_path(issue_number_x, issue_number_y) -> str:
filename = f"G_{issue_number_x}-{issue_number_y}_SOCIAL-IBM.giz"
return os.path.join(os.environ['DESKTOP'], filename)
def main():
issue_number_x = 0
issue_number_y = 1700
IS_DEBUG = False
COLLECTION_NAME = "github-IBMCodeContent_src_20200104"
output_path = generate_output_path(issue_number_x, issue_number_y)
orchestrator = GitHubAnalysisOrchestrator(is_debug=IS_DEBUG)
social_analysis = orchestrator.distributions(collection_name=COLLECTION_NAME).social(write_to_file=True)
lines = ["digraph GitHubSNA {"]
api = GitHubNavigationAPI(is_debug=IS_DEBUG)
for issue in range(int(issue_number_x), int(issue_number_y)):
svcresult = api.navigate(COLLECTION_NAME).by_issue(issue_id=issue)
if svcresult:
lines += GraphSocialNetwork(is_debug=IS_DEBUG,
pattern=svcresult['pattern'],
d_index=svcresult['index'],
df_social_entity_analysis=social_analysis['ent'],
df_social_relationship_analysis=social_analysis['rel']).lines()
lines.append("}")
FileIO.lines_to_file(lines, output_path)
print('\n'.join([
"Wrote to File",
f"\tOutput Path: {output_path}"]))
if __name__ == "__main__":
import plac
plac.call(main)
```
#### File: datagit/tests/find_repo_id_test.py
```python
import unittest
from datagit.ingest.core.svc import FindRepoID
class FindRepoIDTest(unittest.TestCase):
def test_unstructured_analytics(self):
repo_id = FindRepoID(is_debug=True,
repo_owner="GTS-CDO",
repo_name="unstructured-analytics").process()
self.assertEqual(repo_id, 508363)
if __name__ == '__main__':
unittest.main()
```
#### File: graphviz/dmo/digraph_edge_generator.py
```python
from graphviz import Digraph
from base import BaseObject
class DigraphEdgeGenerator(BaseObject):
"""
Purpose:
Edge Generation for a graphviz.Digraph object
Notes:
- 'digraph-edge-generator' is not the same as 'graphviz-edge-generator'
- this module generates a library-specific Digraph edge element
- the graphviz-edge-generator creates a string value that conforms to the Graphviz format
Traceability:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1426#issuecomment-16165027
"""
__s_unique = set()
def __init__(self,
graph_style: dict,
is_debug: bool = True):
"""
Created:
21-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1426#issuecomment-16165027
:param graph_style:
a graph style defined in a graph stylesheet
e.g.:
- resources/config/graph/graphviz_nlp_graph.yml
- resources/config/graph/graphviz_big_graph.yml
:param is_debug:
True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
from datagraph.graphviz.dmo import DigraphTextCleanser
from datagraph.graphviz.dmo import EdgeStyleMatcher
self.is_debug = is_debug
self._edge_style_matcher = EdgeStyleMatcher(is_debug=self.is_debug,
graph_style=graph_style)
self._text_cleanser = DigraphTextCleanser(graph_style=graph_style,
is_debug=self.is_debug)
def process(self,
graph: Digraph,
a_subject: str,
a_predicate: str,
a_object: str) -> Digraph:
if not a_subject or not a_predicate or not a_object:
return graph
uid = " ".join(sorted([a_subject.lower(), a_object.lower()]))
def _is_valid():
if "unlisted" in uid:
return False
return uid not in self.__s_unique and a_subject != a_object
if _is_valid():
self.__s_unique.add(uid)
d_edge = self._edge_style_matcher.process(a_subject=a_subject,
a_predicate=a_predicate,
a_object=a_object)
if "display_label" in d_edge:
if not d_edge["display_label"]:
a_predicate = ''
graph.edge(tail_name=self._text_cleanser.process(a_subject),
head_name=self._text_cleanser.process(a_object),
label=self._text_cleanser.process(a_predicate),
**d_edge["style"])
return graph
```
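A minimal wiring sketch, assuming the stylesheet YAML files under `resources/config/graph` are available and that both classes live in `datagraph.graphviz.dmo` (the package already used for `DigraphTextCleanser` and `EdgeStyleMatcher` above):
```python
# Sketch only: assumes the graphviz stylesheets ship with the repository.
from graphviz import Digraph
from datagraph.graphviz.dmo import DigraphEdgeGenerator, GraphStyleLoader

graph_style = GraphStyleLoader(style_name="nlp", is_debug=True).style()
edge_generator = DigraphEdgeGenerator(graph_style=graph_style, is_debug=True)

graph = Digraph(comment="sample")
graph = edge_generator.process(graph,
                               a_subject="Server Support Specialist",
                               a_predicate="uses",
                               a_object="AIX")
print(graph.source)   # DOT output containing one styled edge
```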
#### File: graphviz/dmo/digraph_text_cleanser.py
```python
from base import BaseObject
class DigraphTextCleanser(BaseObject):
"""
Purpose:
Edge Generation for a graphviz.Digraph object
Traceability:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1426#issuecomment-16165027
"""
def __init__(self,
graph_style: dict,
is_debug: bool = True):
"""
Created:
21-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1426#issuecomment-16165027
:param graph_style:
a graph style defined in a graph stylesheet
e.g.:
- resources/config/graph/graphviz_nlp_graph.yml
- resources/config/graph/graphviz_big_graph.yml
:param is_debug:
True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._graph_style = graph_style
def process(self,
some_text: str) -> str:
"""
Purpose:
determine whether to split the text for readability
:param some_text:
input text
:return:
(optionally) processed text
"""
if "graph" not in self._graph_style:
return some_text
if "split_text" not in self._graph_style["graph"]:
return some_text
if not self._graph_style["graph"]["split_text"]:
return some_text
if " " not in some_text:
return some_text
tokens = some_text.split(" ")
return "{}\\n{}".format(tokens[0], " ".join(tokens[1:]))
```
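The split rule keeps the first token and pushes the remainder onto a second Graphviz line; a worked example with a minimal stylesheet dictionary (the import path mirrors the one used by `DigraphEdgeGenerator` above):
```python
from datagraph.graphviz.dmo import DigraphTextCleanser

cleanser = DigraphTextCleanser(graph_style={"graph": {"split_text": True}})

print(cleanser.process("Server Support Specialist"))
# -> "Server\\nSupport Specialist"  (a literal backslash-n, rendered by Graphviz as a line break)

print(cleanser.process("AIX"))   # no whitespace, returned unchanged
```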
#### File: graphviz/dmo/graph_style_loader.py
```python
from base import BaseObject
from base import FileIO
class GraphStyleLoader(BaseObject):
"""
Purpose:
Load a Graphviz Stylesheet
Traceability:
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1426#issuecomment-16165027
Prereq:
"""
def __init__(self,
style_name: str = "nlp",
is_debug: bool = True):
"""
Created:
21-Nov-2019
<EMAIL>
:param style_name:
the name of the graph stylesheet to use
:param is_debug:
if True increase log output at DEBUG level
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._style_name = self._load(style_name)
def style(self) -> dict:
return self._style_name
@staticmethod
def _load(some_style_name) -> dict:
def _relative_path():
if "nlp" in some_style_name.lower():
return "resources/config/graph/graphviz_nlp_graph.yml"
if "big" in some_style_name.lower():
return "resources/config/graph/graphviz_big_graph.yml"
if "sentiment" in some_style_name.lower():
return "resources/config/graph/graphviz_sentiment_graph.yml"
raise NotImplementedError
return FileIO.file_to_yaml_by_relative_path(
_relative_path())
```
#### File: neo/bp/graph_api.py
```python
import time
class GraphAPI:
""" API for Graph functionality
Created:
25-Feb-2019
<EMAIL>
* refactored from various services """
@classmethod
def initialize_neo_graph(cls):
from datagraph import OwlGraphConnector
from datagraph import InitializeNeoGraph
start = time.time()
owlg = OwlGraphConnector("cendant").process()
print("\n".join([
"Loaded Graphs: {}".format(
time.time() - start)]))
InitializeNeoGraph(some_owl_graph=owlg).process()
def main(param1):
if param1 == "init":
GraphAPI.initialize_neo_graph()
if __name__ == "__main__":
import plac
plac.call(main)
```
#### File: neo/dmo/neo_graph_connector.py
```python
import os
from py2neo.database import Graph
from base import FileIO
class NeoGraphConnector:
"""
Created:
25-Feb-2019
<EMAIL>
"""
@classmethod
def _load_creds(cls, connection):
path = os.path.join(os.environ["CODE_BASE"], "resources/config/config.yml")
doc = FileIO.file_to_yaml(path)
return doc["neo"][connection]
@classmethod
def _neo_graph(cls, neo_creds):
return Graph(neo_creds["url"],
auth=(neo_creds["username"],
neo_creds["password"]))
@classmethod
def connect(cls, connection_type="remote") -> Graph:
return cls._neo_graph(cls._load_creds(connection_type))
```
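Connection credentials are read from `resources/config/config.yml` under the `neo.<connection>` key; a brief sketch (assumes `$CODE_BASE` is set and a `neo.local` entry exists):
```python
# Sketch only: requires $CODE_BASE and a 'neo: local: {url, username, password}' config entry.
from datagraph.neo.dmo import NeoGraphConnector

graph = NeoGraphConnector.connect(connection_type="local")
print(graph.run("MATCH (n) RETURN count(n) AS total").data())
```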
#### File: neo/dmo/neo_relationship_generator.py
```python
from py2neo import Relationship
from py2neo import Transaction
from base import BaseObject
from . import NeoGraphContext
class NeoRelationshipGenerator(BaseObject):
""" """
def __init__(self,
source_records: list,
some_tx: Transaction,
context: NeoGraphContext, ):
"""
Created:
5-Apr-2019
<EMAIL>
* refactored out of 'initialize-neo-graph'
:param some_tx:
an active Neo transaction
:param context:
an existing graph context
"""
BaseObject.__init__(self, __name__)
self.tx = some_tx
self.context = context
self.source_records = source_records
def process(self) -> NeoGraphContext:
""" generate all the relationships
"""
for source_record in self.source_records:
for field in source_record:
if not field["create_node"]:
continue
def _field_value() -> str:
if type(field["value"]) == list:
return " ".join(field["value"])
return field["value"]
_subject = self.context.find_or_create_node(self.tx,
_field_value(),
field["name"], )
for rel in field["relationships"]:
def _rel_value() -> str:
if type(rel["value"]) == list:
return " ".join(rel["value"])
return rel["value"]
_object = self.context.find_or_create_node(self.tx,
_rel_value(),
rel["name"])
self.tx.create(Relationship(_subject,
rel["type"],
_object,
activity="Known Relationship"))
return self.context
```
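The generator walks records shaped as lists of field dictionaries; the field names below are inferred from the attribute access in `process()` and the concrete values are hypothetical. The commented call shows how it would be wired to a live transaction.
```python
# Hypothetical record shape, inferred from the loop in process() above.
source_records = [[{"create_node": True,
                    "name": "Domain Term",
                    "value": "Server Support Specialist",
                    "relationships": [{"name": "Operating System",
                                       "value": "AIX",
                                       "type": "uses"}]}]]   # 'type' becomes the Neo4j relationship type

# with an open py2neo Transaction `tx` and an existing NeoGraphContext `context`:
# NeoRelationshipGenerator(source_records=source_records, some_tx=tx, context=context).process()
```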
#### File: neo/svc/find_common_relationships.py
```python
import pandas as pd
from pyparsing import ParseException
from rdflib import Graph
from rdflib import Literal
from rdflib.query import ResultRow
from base import BaseObject
from base import LabelFormatter
from datadict import FindDimensions
from datadict import FindEntity
class FindCommonRelationships(BaseObject):
""" Query the Cendant Ontology
given
alpha implies gamma
beta implies gamma
thus
alpha <-> beta
more generically
?a ?p ?z
?b ?p ?z
where ?p != rdf:type and
?p != rdfs:subClassOf
"""
_s_terms = set()
_d_results = {}
def __init__(self,
graph: Graph = None,
xdm_schema: str = 'supply',
ontology_name: str = 'base',
is_debug: bool = False):
"""
Created:
18-Jul-2019
<EMAIL>
* refactored out of generate-dataframe-rels
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/455#issuecomment-12884783
Updated:
22-Jul-2019
<EMAIL>
* deprecated
this runs too slowly -- it's hard to control SPARQL query algebra in the wild
I have added additional properties to 'find-relationships'
Updated:
29-Oct-2019
<EMAIL>
* remove 'entity-schema-finder' in favor of new approach
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/916#issuecomment-15620801
Updated:
13-Dec-2019
<EMAIL>
* load dictionaries by ontology name
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1582
:param graph:
:param xdm_schema:
the name of the schema to perform the type lookup
Notes:
- typically either 'supply' or 'learning'
- the full list is on path 'resources/config/dimensionality'
:param is_debug:
"""
BaseObject.__init__(self, __name__)
from datagraph.neo.dmo import OwlGraphConnector
if not graph:
graph = OwlGraphConnector(is_debug=is_debug,
ontology_name="cendant").process()
self._graph = graph
self._is_debug = is_debug
self._entity_finder = FindEntity(is_debug=is_debug,
ontology_name=ontology_name)
self._dim_finder = FindDimensions(schema=xdm_schema,
is_debug=is_debug,
ontology_name=ontology_name)
@staticmethod
def _query(term: str) -> str:
return """
SELECT
?a_label ?p
WHERE {
{
cendant:#1 ?p ?c .
?a rdfs:label ?a_label .
?a ?p ?c .
}
UNION
{
?z rdfs:subClassOf cendant:#1 .
?z ?p ?c .
?a rdfs:label ?a_label .
?a ?p ?c .
}
UNION
{
cendant:#1 ?p ?c .
?a rdfs:subClassOf ?c .
?a rdfs:label ?a_label .
}
}
""".strip().replace("#1", term)
def _normalize_term(self,
term: str) -> str:
original_term = term
term = term.lower().replace("!", "")
term = term.lower().replace("/", "_")
term = term.lower().replace("_", " ")
term = "_".join([LabelFormatter.camel_case(token)
for token in term.split(" ")])
self.logger.debug("\n".join([
f"Term Manipulation (original={original_term}, normalized={term}"]))
return term
def _related_terms(self,
term: str) -> list:
term = self._normalize_term(term)
def _to_result_set():
try:
sparql_query = self._query(term)
return self._graph.query(sparql_query)
except ParseException as e:
self.logger.error("\n".join([
"Query Parse Exception",
f"\tTerm: {term}",
f"\tError: {e}"]))
return []  # fall back to an empty result set so the caller does not iterate over None
result_set = _to_result_set()
def _literal_extraction(a_label: Literal):
if "http" in a_label:
return str(a_label).split("'")[-1].split("'")[0].strip()
return a_label.title()
def _valid_href(a_row: ResultRow) -> bool:
p = str(a_row[1].title()).lower()
if "type" in p or "subclassof" in p or "owl" in p or "rdf-schema#" in p:
return False
# if "ownedby" in p:
# return False
return True
def _results() -> list:
s = set()
rows = [row for row in result_set if _valid_href(row)]
for row in rows:
literals = [x for x in row
if x and type(x) == Literal]
for literal in literals:
s.add(_literal_extraction(literal))
_predicate = row[1]
_object = row[0]
self.logger.debug("\n".join([
"Located Relationship",
f"\tSubject: {term}",
f"\tPredicate: {_predicate}",
f"\tObject: {_object}"]))
return sorted(s)
results = _results()
results = [result for result in results
if result.lower().replace("_", " ") !=
term.lower().replace("_", " ")]
self.logger.debug("\n".join([
"Find Related Terms Complete",
f"\tOriginal Term: {term}",
f"\tResults: {results}"]))
self._d_results[term] = results
return results
def process(self,
term: str):
for related_term in self._related_terms(term):
if related_term not in self._s_terms:
self._s_terms.add(related_term)
self._related_terms(related_term)
results = []
for key in self._d_results:
for explicit_schema in self._dim_finder.find(key):
for value in self._d_results[key]:
for implicit_schema in self._dim_finder.find(value):
results.append({
"ExplicitSchema": explicit_schema,
"ExplicitTag": key,
"ImplicitSchema": implicit_schema,
"ImplicitTag": value,
"IsPrimary": True,
"IsVariant": False,
"Relationship": "Implication"})
df = pd.DataFrame(results)
return df
```
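Term normalisation feeds the `#1` placeholder in the SPARQL template, so a worked example may help. The exact casing depends on `LabelFormatter.camel_case` (not shown here), which is assumed to capitalise each token, and the constructor loads the 'cendant' ontology when no graph is passed, so this sketch assumes the repository's ontology resources are available.
```python
# Sketch only: relies on the repository's ontology resources and LabelFormatter behaviour.
finder = FindCommonRelationships(is_debug=True)

# lower-case, strip '!', split on spaces, camel-case each token, re-join with '_'
print(finder._normalize_term("cloud computing!"))   # -> "Cloud_Computing" (assumed casing)

# the normalised term is substituted for '#1' in the SPARQL query template
print(finder._query("Cloud_Computing"))
```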
#### File: neo/svc/generate_similarity_metric.py
```python
from py2neo import Transaction
from base import BaseObject
from base import MandatoryParamError
from datagraph.neo.dmo import NeoGraphContext
from datagraph.neo.dmo import NeoUtils
class GenerateSimilarityMetric(BaseObject):
""" """
def __init__(self,
some_tx: Transaction,
some_graph_context: NeoGraphContext,
use_categorical_relationships=True):
"""
Created:
19-Mar-2019
<EMAIL>
* refactored out of 'load-neo-from-manifest-2'
:param some_tx:
an active neo transaction
:param some_graph_context:
an active neo transaction
:param use_categorical_relationships:
if True, use relationships like
'lowest-similarity',
'is-similar',
...
'highest-similarity'
if False, just use
'is-similar'
"""
BaseObject.__init__(self, __name__)
if not some_tx:
raise MandatoryParamError("Neo Transaction")
if not some_graph_context:
raise MandatoryParamError("Graph Context")
self.tx = some_tx
self.graph_context = some_graph_context
self.categorical_relationships = use_categorical_relationships
def _relationship_name(self,
total: int) -> str:
def _to_categorical():
switcher = {
1: "lowest-similarity",
2: "low-similarity",
3: "is-similar",
4: "high-similarity",
5: "highest-similarity"
}
if total > 5:
return switcher[5]
return switcher[total]
if self.categorical_relationships:
return _to_categorical()
return "is-similar"
def _similarity_metric(self,
d_tags: dict):
# create similarity metric
s_complete = set()
for tag1 in d_tags:
for tag2 in d_tags:
if tag1 == tag2:
continue
total = len(set(tag1.split(" ")).intersection(set(tag2.split(" "))))
if total > 1:
# prevent dupes (e.g. bidirectional flows)
key = "".join(sorted({tag1, tag2}))
if key in s_complete:
continue
s_complete.add(key)
_s = self.graph_context.find_or_create_node(self.tx,
tag1,
d_tags[tag1])
_p = self._relationship_name(total)
_o = self.graph_context.find_or_create_node(self.tx,
tag2,
d_tags[tag2])
NeoUtils.define_relationship(_s, _p, _o,
activity="Probabilistic Relationship",
entity="Vector Space")
def _generate_graph_dictionary(self) -> dict:
"""
the graph_context contains a dictionary of actual neo4j nodes
this routine takes that dictionary and creates a simple key:value dictionary of graph nodes to graph types
for example:
{ 'Server Support Specialist': 'Domain Term',
'AIX': 'Operating System' }
etc
:return:
"""
d_graph = {}
keys = sorted(self.graph_context.node_lookup.keys())
for node_key in keys:
node_name = node_key.split("-")[0].strip()
node_type = node_key.split("-")[-1].strip()
d_graph[node_name] = node_type
return d_graph
def process(self):
self._similarity_metric(
self._generate_graph_dictionary())
```
#### File: neo/svc/initialize_neo_graph.py
```python
import rdflib
from py2neo import Node
from py2neo import Relationship
from py2neo import Transaction
from base import BaseObject
from base import MandatoryParamError
from datadict import FindDimensions
from datadict import FindEntity
from datadict import FindRelationships
class InitializeNeoGraph(BaseObject):
""" Initialize the neo4j graph with the ontology model
**** WARNING ****
Running this service will wipe all existing data from the neo4j graph """
def __init__(self,
some_owl_graph: rdflib.Graph,
some_connection_type: str = 'local',
xdm_schema: str = 'supply'):
"""
Created:
21-Feb-2019
<EMAIL>
Updated:
23-Feb-2019
<EMAIL>
* added yml creds
Updated:
25-Feb-2019
<EMAIL>
* invoke from API
Updated:
26-Mar-2019
<EMAIL>
* updated based on large-scale MDA changes
Updated:
29-Oct-2019
<EMAIL>
* remove 'entity-schema-finder' in favor of new approach
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/916#issuecomment-15620801
:param some_owl_graph:
:param some_connection_type:
:param xdm_schema:
the name of the schema to perform the type lookup
Notes:
- typically either 'supply' or 'learning'
- the full list is on path 'resources/config/dimensionality'
"""
BaseObject.__init__(self, __name__)
if not some_owl_graph:
raise MandatoryParamError("Owl Graph")
self._owlg = some_owl_graph
self._entity_finder = FindEntity()
self._connection_type = some_connection_type
self._rel_finder = FindRelationships()
self._dim_finder = FindDimensions(xdm_schema)
@staticmethod
def _add_rels_by_name(tx: Transaction,
nodes: dict,
some_rels_dict: dict,
the_rel_name: str) -> None:
"""
create relationships between nodes in neo
:param tx:
an open neo transaction
:param nodes:
the previously created neo4j nodes that will be used to form these relationships
:param some_rels_dict:
the dictionary that contains the subject/object values
:param the_rel_name:
the name of the relationship in neo
"""
for k in some_rels_dict:
if k not in nodes:
continue
s = nodes[k]
for value in some_rels_dict[k]:
o = nodes[value]
tx.create(Relationship(s, the_rel_name, o))
@staticmethod
def _add_relationships(tx: Transaction,
node_lookup: dict,
some_rel_dict: dict,
some_rel_name: str):
for k in some_rel_dict:
s = node_lookup[k]
for value in some_rel_dict[k]:
o = node_lookup[value]
tx.create(Relationship(s, some_rel_name, o))
def _add_all_relationships(self,
tx: Transaction,
node_lookup: dict) -> None:
self._add_relationships(tx,
node_lookup,
self._rel_finder.all_owns(bidirectional=False),
"owns")
self._add_relationships(tx,
node_lookup,
self._rel_finder.all_implies(bidirectional=False),
"implies")
self._add_relationships(tx,
node_lookup,
self._rel_finder.all_versions(bidirectional=False),
"hasVersion")
self._add_relationships(tx,
node_lookup,
self._rel_finder.all_requires(bidirectional=False),
"requires")
self._add_relationships(tx,
node_lookup,
self._rel_finder.all_similar(bidirectional=False),
"similarTo")
self._add_relationships(tx,
node_lookup,
self._rel_finder.all_parts(bidirectional=False),
"partOf")
self._add_relationships(tx,
node_lookup,
self._rel_finder.all_defines(bidirectional=False),
"definedBy")
def _add_child_of_rels(self,
tx: Transaction,
node_lookup: dict):
for parent in self._entity_finder.d_parents_revmap():
for child in self._entity_finder.children(parent):
s = node_lookup[child]
o = node_lookup[parent]
tx.create(Relationship(s, "childOf", o))
def _define_all_nodes(self,
tx: Transaction) -> dict:
""" define all the nodes that will be used in the graph """
node_lookup = {}
root_lookup = {}
def _schema_entity(some_input: str) -> str:
return [x.replace(' ', '') for x in self._dim_finder.find(some_input)][0]
d_parents = self._entity_finder.d_parents_revmap()
for k in d_parents:
root_lookup[k] = _schema_entity(k)
for value in d_parents[k]:
root_lookup[value] = _schema_entity(value)
for k in d_parents:
node_lookup[k] = Node(root_lookup[k], name=k)
for value in d_parents[k]:
node_lookup[value] = Node(root_lookup[value], name=value)
[tx.create(x) for x in node_lookup.values() if x]
return node_lookup
def process(self) -> dict:
from datagraph.neo.dmo import NeoGraphConnector
neog = NeoGraphConnector.connect(connection_type=self._connection_type)
tx = neog.begin()
neog.delete_all()
nodes = self._define_all_nodes(tx)
self.logger.debug(f"Initialized Graph ("
f"total-nodes={len(nodes)})")
self._add_child_of_rels(tx, nodes)
self._add_all_relationships(tx, nodes)
tx.commit()
return nodes
if __name__ == "__main__":
from datagraph import OwlGraphConnector
owlg = OwlGraphConnector("cendant").process()
InitializeNeoGraph(owlg).process()
```
#### File: assemble/svc/assemble_manifest_data.py
```python
import os
import pprint
from typing import Iterator
from pymongo.operations import UpdateOne
from pymongo.errors import BulkWriteError
from base import BaseObject
from base import MandatoryParamError
from dataingest.core.dmo import ManifestConnectorForMongo
from datamongo import BaseMongoClient
from datamongo import CendantCollection
from datamongo import CreateTextIndex
class BulkWriteResultTracker(object):
def __init__(self):
self.inserted_count = 0
self.matched_count = 0
self.modified_count = 0
self.upserted_count = 0
def update(self, result):
self.inserted_count += result.inserted_count
self.matched_count += result.matched_count
self.modified_count += result.modified_count
self.upserted_count += result.upserted_count
def __str__(self):
return f'matched={self.matched_count}. ' \
f'inserted={self.inserted_count}. ' \
f'upserted={self.upserted_count}. ' \
f'modified={self.modified_count}'
class AssembleManifestData(BaseObject):
""" Assemble all the Manifest Data from
multiple source collections into a
single target collection """
def __init__(self,
some_manifest_name: str,
some_activity_name: str,
single_collection: str='',
first: int=-1,
last: int=-1,
is_debug: bool = False):
"""
Created:
12-Mar-2019
<EMAIL>
Updated:
10-May-2019
<EMAIL>
* added 'div-field'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/236
Updated:
25-Jun-2019
<EMAIL>
* added create-text-index
Updated:
16-Jul-2019
<EMAIL>
* added debug param
* change collection name access method per
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/450
Updated:
1-Aug-2019
<EMAIL>
* ensure consistent use of manifest-connector-for-mongo
* added 'bulk-insert-threshold' function and env var
* update logging
Updated:
15-Oct-2019
<EMAIL>
* update index creation strategy
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1122
Updated:
24-Nov-2019
<EMAIL>
* more logging
* try to decrease the memory footprint by deleting a dict of
large collections once we are done using it
Updated:
11-Dec-2019
<EMAIL>
* try to decrease the memory footprint by processing input collections
one at a time, rather than having them all in memory at the same time
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1567
Updated:
07-Jan-2020
<EMAIL>
* use $addToSet instead of $push, to prevent re-adding the same fields
* allow to ignore field traceability (required for badges)
:param some_manifest_name:
the name of the manifest
:param some_activity_name:
the name of the activity within the manifest
"""
BaseObject.__init__(self, __name__)
from dataingest.core.dmo import ManifestActivityFinder
if not some_manifest_name:
raise MandatoryParamError("Manifest Name")
if not some_activity_name:
raise MandatoryParamError("Activity Name")
self._is_debug = is_debug
self._mongo_client = BaseMongoClient()
self._threshold = self._bulk_insert_threshold()
self._manifest = ManifestActivityFinder(some_manifest_name,
some_activity_name).process()
self._single_collection = single_collection
self._first = first
self._last = last
self._log_hint = '' if not single_collection else f'{single_collection}_{first}-{last} '
def _bulk_insert_threshold(self,
default_value: int = 1000):
"""
:param default_value:
default insert threshold if the environment variable isn't set
:return:
the bulk insert amount for mongoDB
"""
try:
return int(os.environ["ASSEMBLE_API_BULK_INSERT_THRESHOLD"])
except KeyError as err:
self.logger.error(f"Environment Variable Not Found: "
f"$ASSEMBLE_API_BULK_INSERT_THRESHOLD")
except ValueError as err:
self.logger.error(f"Invalid Environment Variable Value: "
f"$ASSEMBLE_API_BULK_INSERT_THRESHOLD")
return default_value
def source_collections(self) -> list:
sources = []
for name, value in self._source_collections().items():
sources.append((name, value['collection'].count()))
return sources
def flush_target(self, target_collection=None) -> None:
if not target_collection:
target_collection = self._target_collection()
target_collection.delete(keep_indexes=False)
def index_target(self, target_collection=None) -> None:
if not target_collection:
target_collection = self._target_collection()
index_options = {
'background': True
}
self.logger.debug("Creating text indexes...")
text_index_creator = CreateTextIndex(target_collection.collection, is_debug=True)
text_index_creator.process(field_name=self._manifest['target']['index'],
index_options=index_options)
def _source_collections(self) -> dict:
d = {}
for source in self._manifest["sources"]:
if self._single_collection and \
self._single_collection != source['collection']:
continue
collection = ManifestConnectorForMongo(source,
some_base_client=self._mongo_client,
is_debug=self._is_debug).process()
d[source["collection"]] = {"collection": collection,
"fields": source["fields"],
"div_field": source["div_field"],
"key_field": source["key_field"]}
return d
def _target_collection(self) -> CendantCollection:
if self._is_debug:
self.logger.debug('\n'.join([
"Retrieved Target Manifest",
pprint.pformat(self._manifest["target"])]))
return ManifestConnectorForMongo(self._manifest["target"],
some_base_client=self._mongo_client,
is_debug=self._is_debug).process()
def _load_records(self,
d_source_collections: dict) -> Iterator[dict]:
"""
generator that loads one collection at a time
:param d_source_collections:
a dictionary of mongoDB collections
:yields:
a dictionary of records per collection
"""
grand_total = 0
for index, source_collection in enumerate(d_source_collections):
collection = d_source_collections[source_collection]["collection"]
d_records = {}
if self._first < 0:
records = collection.all()
limit = collection.count()
else:
limit = self._last - self._first + 1
records = collection.skip_and_limit(self._first, limit)
self.logger.debug(f"Loading {limit} records from {source_collection}. "
f"Collection {index + 1} of {len(d_source_collections)}")
d_records[source_collection] = {
"records": records,
"fields": d_source_collections[source_collection]["fields"],
"div_field": d_source_collections[source_collection]["div_field"],
"key_field": d_source_collections[source_collection]["key_field"]}
total_records = len(d_records[source_collection]["records"])
grand_total += total_records
self.logger.debug(f"Loaded "
f"{total_records} total records "
f"from {source_collection}")
yield d_records
self.logger.debug(f"Loaded a grand total of "
f"{grand_total} records "
f"across {len(d_source_collections)} source collections")
def process(self) -> None:
from dataingest.core.dmo import SourceRecordMerger
target_collection = self._target_collection()
if not self._single_collection:
self.flush_target(target_collection)
fields_as_a_list = self._manifest['target'].get('allow_duplicate_fields', True)
push_or_addToSet = '$push' if fields_as_a_list else '$addToSet'
try:
total_inserted = 0
for d_records in self._load_records(self._source_collections()):
d_index_by_key = SourceRecordMerger(d_records, field_traceability=fields_as_a_list).process()
records = []
bulk_res_tracker = BulkWriteResultTracker()
for key_field in d_index_by_key:
fields = d_index_by_key[key_field]["fields"]
div_field = d_index_by_key[key_field]["div_field"]
d_index_by_key[key_field] = None
records.append(UpdateOne({'_id': key_field},
{
'$set': {
'_id': key_field,
'key_field': key_field,
'div_field': div_field
},
push_or_addToSet: {'fields': {'$each': fields}}
},
upsert=True))
if len(records) % self._threshold == 0:
result = target_collection.collection.bulk_write(records, ordered=False)
bulk_res_tracker.update(result)
self.logger.debug(f"Progress {self._log_hint}"
f"{bulk_res_tracker} of {len(d_index_by_key)}")
records = []
if len(records):
result = target_collection.collection.bulk_write(records, ordered=False)
bulk_res_tracker.update(result)
self.logger.debug(f"Progress {self._log_hint}"
f"{bulk_res_tracker} of {len(d_index_by_key)}")
total_inserted += (bulk_res_tracker.modified_count + bulk_res_tracker.upserted_count)
self.logger.debug(f'DONE {self._log_hint} {bulk_res_tracker}')
if not self._single_collection:
self.index_target(target_collection)
self.logger.debug(f"Assembled {total_inserted} records {self._log_hint}")
except BulkWriteError as xcpt:
self.logger.error(xcpt.details)
raise
```
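A usage sketch for the assembly service. The bulk-write batch size is taken from `$ASSEMBLE_API_BULK_INSERT_THRESHOLD` (default 1000); the manifest and activity names below are hypothetical placeholders that must exist in the manifest resources, and the module path is an assumption.
```python
import os
from dataingest.assemble.svc import AssembleManifestData  # assumed module path

# optional: tune the bulk_write batch size (defaults to 1000 when unset)
os.environ["ASSEMBLE_API_BULK_INSERT_THRESHOLD"] = "500"

assembler = AssembleManifestData(some_manifest_name="assemble-manifest",   # hypothetical
                                 some_activity_name="assemble supply",     # hypothetical
                                 is_debug=True)
print(assembler.source_collections())   # [(collection_name, record_count), ...]
assembler.process()                      # flush target, merge sources, build the text index
```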
#### File: core/dmo/ingest_data_extractor.py
```python
import os.path
import time
import pandas as pd
from pandas import DataFrame
from base import BaseObject
from base import MandatoryParamError
class IngestDataExtractor(BaseObject):
""" ingest data from a provenance manifest """
def __init__(self,
some_manifest: dict):
"""
Created:
11-Mar-2019
<EMAIL>
:param some_manifest:
the name of the ingestion activity
"""
BaseObject.__init__(self, __name__)
if not some_manifest:
raise MandatoryParamError("Manifest")
self.manifest = some_manifest
@staticmethod
def _field_to_columns(l_fields: list) -> dict:
"""
transform the field definitions section of the manifest to a
pandas compatible columns dictionary
:param l_fields:
multiple fields of this format:
- target_name: first_name
source_name: FRST_NM
data_type: str
- target_name: last_name
source_name: LST_NM
data_type: str
:return:
a dictionary that looks like this
{ "FRST_NM": str,
"LST_NM": str }
"""
def _data_type(a_field):
if a_field["data_type"] == "int":
return "int"
elif a_field["data_type"] == "float":
return "float"
# elif a_field["data_type"] == "date":
# return "date"
return "str"
cols = {}
for field in l_fields:
cols[field["source_name"]] = _data_type(field)
return cols
@staticmethod
def _fields_to_position(l_fields: list) -> list:
"""
transform the field definitions section of the manifest to a
list of positions for CSV importing
:param l_fields:
multiple fields of this format:
- target_name: first_name
source_name: FRST_NM
data_type: str
position: 4
- target_name: last_name
source_name: LST_NM
data_type: str
position: 8
:return:
a list that looks like this
[ 4, 8 ]
"""
return [x["position"] for x in l_fields if "position" in x]
def _excel_source_as_df(self,
path: str,
cols: dict,
skiprows=0,
position=None,
sheet_name=None) -> DataFrame:
from dataingest.core.dmo import ExcelReader
start = time.time()
def _sheet_name():
""" if no sheet name supplied in params
take the first one from the spreadsheet """
if sheet_name:
return sheet_name
return ExcelReader.sheet_names(path)[0]
sheet_name = _sheet_name()
df = ExcelReader.read_excel(some_input_path=path,
some_sheet_name=sheet_name,
skiprows=skiprows,
usecols=position,
column_names=cols)
if len(df) == 0:
raise ValueError("\n".join([
"No Records Loaded",
"\tinput-path: {}".format(path),
"\tsheet-name: {}".format(sheet_name),
]))
self.logger.debug("\n".join([
"Loaded Source Excel File as DataFrame",
"\tinput-path: {}".format(path),
"\tsheet-name: {}".format(sheet_name),
"\ttotal-records: {}".format(len(df)),
"\ttotal-time: ~{}s".format(int(time.time() - start))
]))
return df
def _csv_source_as_df(self,
path: str,
cols: dict,
positions: list,
delim: str,
skiprows=0,
sheet_name=None) -> DataFrame:
start = time.time()
# Remind analyst to double check use of positioning parameter
# in the ingest manifest
self.logger.info("\n".join([
"Adivsory: Manifest Column Positioning is 0-Based",
"\tPositions: {}".format(positions)]))
df = pd.read_csv(path,
engine="python",
delim_whitespace=False,
sep=delim,
error_bad_lines=False,
warn_bad_lines=True,
parse_dates=True,
infer_datetime_format=True,
skip_blank_lines=True,
skiprows=skiprows,
comment='#',
encoding='utf-8',
names=list(cols.keys()),
dtype=cols,
na_values=['none', 'None'],
usecols=positions)
#
# df = pd.read_csv(path,
# engine="python",
# sep=delim,
# skiprows=skiprows,
# names=list(cols.keys()),
# usecols=positions)
if len(df) == 0:
raise ValueError("\n".join([
"No Records Loaded (path={}, name={})".format(
path, sheet_name)]))
df.fillna(value='', inplace=True)
self.logger.debug("\n".join([
"Loaded Source CSV File as DataFrame",
"\tInput Path: {}".format(path),
"\tSheet Name: {}".format(sheet_name),
"\tTotal Records: {}".format(len(df)),
"\tTotal Time: ~{}s".format(int(time.time() - start))]))
return df
def process(self) -> DataFrame:
d_source = self.manifest["source"]
d_fields = self.manifest["fields"]
skiprows = d_source["skiprows"]
d_source["path"] = os.path.expandvars(d_source["path"])
if not os.path.isfile(d_source["path"]):
raise ValueError("\n".join([
"File Not Found",
"\tInput Path: {}".format(d_source["path"])]))
if d_source["type"].lower() == "excel":
field_position = ",".join(self._fields_to_position(d_fields))
if len(field_position) == 0:
field_position = None
if field_position:
self.logger.debug("\n".join([
"Field Positions",
"\t{}".format(field_position)]))
return self._excel_source_as_df(path=d_source["path"],
position=field_position,
cols=self._field_to_columns(d_fields),
skiprows=skiprows)
elif d_source["type"].lower() == "csv":
return self._csv_source_as_df(path=d_source["path"],
cols=self._field_to_columns(d_fields),
positions=self._fields_to_position(d_fields),
delim=d_source["delim"],
skiprows=skiprows)
raise NotImplementedError("\n".join([
"Source Type Not Implemented",
"\tType: {}".format(d_source["type"])]))
```
#### File: core/dmo/ingest_data_rules.py
```python
from pandas import DataFrame
from base import BaseObject
class IngestDataRules(BaseObject):
""" Apply any data rules specified in the ingest manifest
failure to meet these rules typically halts the ingest process with an error """
def __init__(self):
"""
Created:
26-Apr-2019
<EMAIL>
"""
BaseObject.__init__(self, __name__)
def process(self,
d_manifest: dict,
df_src: DataFrame) -> None:
"""
Purpose:
apply rules for missing data and halt the ingest if rules are broken
:param d_manifest:
the manifest
:param df_src:
the ingested dataframe prior to flows or transformation
"""
if "missing_data" not in d_manifest["source"]:
return
for rule in d_manifest["source"]["missing_data"]:
total_missing = 0
total_missing += (df_src[rule["source_name"]].values == '').sum()
total_missing += (df_src[rule["source_name"]].values == 'none').sum()
total_missing += (df_src[rule["source_name"]].values == 'None').sum()
total_missing += (df_src[rule["source_name"]].values == '0').sum()
if total_missing > rule["tolerance"]:
raise ValueError("\n".join([
"Missing Data Exceeds Tolerance Level",
"\tRule: {}".format(rule),
"\tTotal Missing: {}".format(total_missing)]))
self.logger.debug("\n".join([
"Missing Data Rule Requirements Met",
"\tRule: {}".format(rule),
"\tTotal Missing: {}".format(total_missing)]))
```
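A small, self-contained sketch of the manifest fragment and call that drive this rule check; the column name and tolerance are hypothetical, and the import path is assumed from the file location.
```python
import pandas as pd
from dataingest.core.dmo import IngestDataRules  # assumed module path

# hypothetical rule: halt the ingest if more than 2 rows lack a serial number
d_manifest = {"source": {"missing_data": [{"source_name": "SERIAL_NM",
                                           "tolerance": 2}]}}

df_src = pd.DataFrame({"SERIAL_NM": ["123", "", "none", "456"]})

IngestDataRules().process(d_manifest=d_manifest, df_src=df_src)
# 2 missing values (empty string + 'none') do not exceed the tolerance of 2,
# so the rule passes and only a debug line is logged
```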
#### File: core/dmo/sql_query_reader.py
```python
import os.path
from base import BaseObject
from base import MandatoryParamError
class SQLQueryReader(BaseObject):
""" Prepare SQL Queries """
def __init__(self,
some_manifest_name: str,
some_activity_name: str):
"""
Created:
- 26-June-2019
- <EMAIL>
- prepare sql query
- Reference: <https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/370>
Updated:
- 27-July-2019
- <EMAIL>
- added condition for gbs & cv data ingestion
:param some_manifest_name:
the name of the ingestion activity
:param some_activity_name:
Updated:
06-September-2019
<EMAIL>
* added cloud ingestion
* Reference: https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/869
Updated:
14-Oct-2019
<EMAIL>
* renamed from 'prepare-ingestion-sql-query'
Rationale:
microservice naming standard is
1. DMO components to be 'NOUN-VERB'
2. SVC components to be 'VERB-NOUN'
Updated:
17-October-2019
<EMAIL>
* added CHQ/OTH ingestion
* Reference: https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1135
Updated:
29-October-2019
<EMAIL>
* added Security BU ingestion
* Reference: https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1182
Updated:
4-November-2019
<EMAIL>
* added GLMKT BU ingestion
* Reference: https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1245
Updated:
14-November-2019
<EMAIL>
* added Watson Health BU ingestion
* Reference: https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1348
Updated:
19-November-2019
<EMAIL>
* added Research BU ingestion
* Reference: https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1395
Updated:
03-December-2019
<EMAIL>
* added Digital Sales BU ingestion
* Reference: https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1497
Updated:
23-December-2019
<EMAIL>
* added Industry Plat BU ingestion
* Reference: https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1638
Updated:
21-Jan-2019
<EMAIL>
* added GF BU ingestion
* Reference: https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1773
"""
BaseObject.__init__(self, __name__)
if not some_activity_name:
raise MandatoryParamError("Activity")
self.manifest_name = some_manifest_name
self.activity = some_activity_name
@staticmethod
def read_sql(sql_file_name) -> str:
path = os.path.join(os.environ["CODE_BASE"],
"resources/manifest/queries",
"{}.sql".format(sql_file_name))
# read from sql file
sql_file = open(path, 'r')
sql_stmt = sql_file.read()
sql_file.close()
return sql_stmt
def process(self) -> str:
sql_file_name = self.activity.lower().replace(" ", "_")
# add postfix gbs| | cloud| systems| chq/oth| f&o| security| glmkt| cognitive apps| watson health|
# digital sales| research| industry plat for cv ingestion
if self.manifest_name == "ingest-manifest-cv-gbs":
sql_file_name = sql_file_name + "_gbs"
if self.manifest_name == "ingest-manifest-cv-":
sql_file_name = sql_file_name + "_"
if self.manifest_name == "ingest-manifest-cv-cloud":
sql_file_name = sql_file_name + "_cloud"
if self.manifest_name == "ingest-manifest-cv-systems":
sql_file_name = sql_file_name + "_systems"
if self.manifest_name == "ingest-manifest-cv-fno":
sql_file_name = sql_file_name + "_fno"
if self.manifest_name == "ingest-manifest-cv-chq-oth":
sql_file_name = sql_file_name + "_chq_oth"
if self.manifest_name == "ingest-manifest-cv-security":
sql_file_name = sql_file_name + "_security"
if self.manifest_name == "ingest-manifest-cv-glmkt":
sql_file_name = sql_file_name + "_glmkt"
if self.manifest_name == "ingest-manifest-cv-cognitive-apps":
sql_file_name = sql_file_name + "_cognitive_apps"
if self.manifest_name == "ingest-manifest-cv-watson-health":
sql_file_name = sql_file_name + "_watson_health"
if self.manifest_name == "ingest-manifest-cv-research":
sql_file_name = sql_file_name + "_research"
if self.manifest_name == "ingest-manifest-cv-digital-sales":
sql_file_name = sql_file_name + "_digital_sales"
if self.manifest_name == "ingest-manifest-cv-industry-plat":
sql_file_name = sql_file_name + "_industry_plat"
if self.manifest_name == "ingest-manifest-cv-gf":
sql_file_name = sql_file_name + "_gf"
sql_query = self.read_sql(sql_file_name)
if sql_query is None:
raise NotImplementedError("SQL Query not found")
# add unrestricted access to db
if "with ur" not in sql_query.lower():
sql_query = f"{sql_query} WITH UR"
self.logger.debug('\n'.join([
"SQL Query Modified to add Unrestricted Access (UR)",
sql_query]))
return sql_query
```
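A standalone sketch of the two steps in `process()`: deriving the .sql file name (the long if-chain above collapses naturally into a dictionary lookup; only two of the business-unit postfixes are reproduced here for illustration) and appending the DB2 `WITH UR` (uncommitted read) clause when it is absent.

```python
# illustrative subset of the manifest-name-to-postfix mapping used above
BU_POSTFIXES = {
    "ingest-manifest-cv-gbs": "_gbs",
    "ingest-manifest-cv-cloud": "_cloud",
}

def build_sql_file_name(activity_name: str, manifest_name: str) -> str:
    """Derive the .sql file name from the activity, postfixed by business unit where applicable."""
    return activity_name.lower().replace(" ", "_") + BU_POSTFIXES.get(manifest_name, "")

def ensure_unrestricted_read(sql_query: str) -> str:
    """Append WITH UR (DB2 uncommitted read) if the query does not already request it."""
    if "with ur" not in sql_query.lower():
        sql_query = f"{sql_query} WITH UR"
    return sql_query

print(build_sql_file_name("Ingest CV", "ingest-manifest-cv-gbs"))   # ingest_cv_gbs
print(ensure_unrestricted_read("SELECT 1 FROM SYSIBM.SYSDUMMY1"))   # ... WITH UR
```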
#### File: core/svc/ingest_bluepages_cv.py
```python
import requests
import textract
import time
import sys
import os
import re
import shutil
import tempfile
import plac
from datamongo import BaseMongoClient
from datamongo import CendantCollection
from base import BaseObject
from base import MandatoryParamError
class ExtractBluepagesCV():
""" Extract CVs from Bluepages Storage
The records that have a CV listed in Bluepages are extracted from the "ingest_bluepages_api" collection and then
iterated over to download each CV to a temporary folder under /tmp before calling a parse function, which leverages textract
to read the contents into a dictionary and write them to the collection named in collectionname
Takes one parameter indicating whether to purge the collection of existing documents, e.g.
the below will delete any existing documents in the ingest_bluepages_cv collection
python ingest_bluepages_cv.py purge
The below items need to be installed or configured for textract:
- antiword - sudo apt install antiword
- tesseract - sudo apt-get install tesseract-ocr
- unrtf - sudo apt install unrtf
- textract - pip install git+https://github.com/deanmalmgren/textract.git (conda only has an old version)
"""
script_Start = time.time()
baseurl = "https://w3-services1.w3-969.ibm.com/myw3/unified-profile/v1/resume/"
collectionname = "ingest_bluepages_cv"
# Generate a unique temporary folder under /tmp to download and process the files into;
# mkdtemp leaves the directory in place for the later download and parse steps
CVdirectory = tempfile.mkdtemp()
print('created temporary directory', CVdirectory)
def __init__(self):
BaseObject.__init__(self, __name__)
@staticmethod
def exists(obj, chain):
_key = chain.pop(0)
if _key in obj:
return ExtractBluepagesCV.exists(obj[_key], chain) if chain else obj[_key]
def getcnumlist(self,mongo_client):
""" Builds a list of CNUMS which contain a resume and shoulc be used in ingest bluepages CV routine. """
self.logger.info("Reading data from ingest_bluepages_api collection")
cc = CendantCollection(some_collection_name='ingest_bluepages_api',some_base_client=mongo_client)
records = cc.all(as_list=True)
self.logger.info("Building list of CNUMs with resume attached in bluepages")
resumelist = []
for i in range(len(records)):
if ExtractBluepagesCV.exists(records[i], ['data', 'content', 'user', 'expertise', 'resume']):
resumelist.append(records[i]['id'])
self.logger.info(str(len(resumelist)) + " records downloaded from ingest_bluepages_api collection ")
return resumelist
def parseCV(self, documents):
"""Function to parse downloaded CVs"""
# iterate over files in the download directory and use textract to extract the body of text and clean it of basic
# items before inserting it into the list of records
for file in os.listdir(self.CVdirectory):
self.logger.info("Iterating over downloaded CV " + file +" and adding to collection for DB loading")
filename, file_extension = os.path.splitext(os.fsdecode(file))
file = self.CVdirectory +"/"+ filename+file_extension
try:
text = textract.process(file, method="pdfminer", encoding='utf_8').decode("utf8")
clean = text.replace('\n', ' ').replace('\r', ' ').replace('\'', '').replace('\t', '')
d = next(item for item in documents if item['CNUM'] == filename)
d['body'] = clean
except Exception:
self.logger.error("Error on " + filename+file_extension + ", Removing from collection")
documents = [i for i in documents if not (i['CNUM'] == filename)]
continue
return documents
def downloadCV(self, cnums, urltemplate, downloadlist):
""" Function to download CV from bluepages """
count = 0
regex = r'filename=\"((.+)\.(.+))\"' # regex that matches filename and extension
noextregex = r'\"(.+)\"' # regex to be used if there is no extension
# Check if output folder exists, if not then create
if not os.path.exists(self.CVdirectory):
os.mkdir(self.CVdirectory)
for row in cnums:
CVurl = urltemplate + row
r = requests.get(CVurl,stream=True)
self.logger.info("Downloading CV for " + row + " -- status code: " + str(r.status_code))
# if the requests object gives a status code other than 404 then we process the download
if r.status_code == 200:
count += 1
content_disposition = r.headers.get('Content-Disposition')
extension = re.findall(regex, content_disposition) # using the regex find the extension type
if not extension:
# if the regex to find filename and extension is empty it usually means that the file was saved with
# no extension and therefore we force the extension to be .txt and create the extension list based on it
noextension = re.findall(noextregex, content_disposition)
extension = [(noextension[0], noextension[0], 'txt')]
# generate the filename to be a combination of the cnum and the previously determined extension
filename = self.CVdirectory + "/" + row + "." + extension[0][2]
with open(filename, 'wb') as f:
f.write(r.content)
# Generate the list of files that have been downloaded based on the above and default the body to blank
filerecord = {"CNUM": row, "file_name": extension[0][0], "file_type": extension[0][2],
"source": urltemplate + row, "ts": time.time(), "body": ""}
downloadlist.append(filerecord.copy())
self.logger.info(str(count) + " CVs downloaded")
return downloadlist
def deletecollectioncontent(self,mongo_client):
""" Function to delete the collection documents from the mongo DB"""
col = CendantCollection(some_db_name="cendant", some_collection_name=self.collectionname, some_base_client=mongo_client)
col.delete()
def writebluepagescvstodb(self, listofcvs, mongo_client):
""" Function to write downloaded and parsed CVs in a list to the mongo DB"""
# connect to the Cendent MongoDB and write records to the collections defined earlier
total_persisted = 0
col = CendantCollection(some_db_name="cendant", some_collection_name=self.collectionname, some_base_client=mongo_client)
col.insert_many(listofcvs)
total_persisted += len(listofcvs)
self.logger.info("Number of records persisted " + str(total_persisted))
def removedownloads(self):
# remove all html and subfolders created in the CVdirectory folder
shutil.rmtree(self.CVdirectory)
def main(clear_existing):
self = ExtractBluepagesCV()
self.logger.info('=========================================================')
self.logger.info(' Download of CVs from Bluepages started ')
self.logger.info('=========================================================')
mongo_client_cloud = BaseMongoClient(server_alias="CLOUD")
cnumlist = ExtractBluepagesCV.getcnumlist(self,mongo_client_cloud)
bpcvcollection = []
bpcvcollection = ExtractBluepagesCV.downloadCV(self,cnumlist,self.baseurl,bpcvcollection)
bpcvcollection = ExtractBluepagesCV.parseCV(self,bpcvcollection)
if not bpcvcollection:
self.logger.info("The list of data to be written to the DB is empty, script exiting and removing downloaded files")
ExtractBluepagesCV.removedownloads(self)
sys.exit(1)
else:
clear_existing = str(clear_existing).lower()
if clear_existing == "purge":
ExtractBluepagesCV.deletecollectioncontent(self,mongo_client_cloud)
self.logger.info("Existing records purged from : " + str(self.collectionname))
elif clear_existing == "nopurge":
self.logger.info("No Records to be purged from : " + str(self.collectionname))
else:
raise MandatoryParamError("Select \"purge\" or \"nopurge\" for the existing collection")
ExtractBluepagesCV.writebluepagescvstodb(self,bpcvcollection,mongo_client_cloud)
ExtractBluepagesCV.removedownloads(self)
self.script_End = time.time()
self.logger.info("Overall Script Duration: {:.2f} minutes".format((self.script_End - self.script_Start) / 60))
if __name__ == '__main__':
plac.call(main)
```
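The most fragile piece of `downloadCV` is deriving a file name and extension from the Content-Disposition header. Below is a self-contained sketch of that logic using the same two regular expressions; the header strings in the examples are illustrative.

```python
import re

FILENAME_RE = r'filename="((.+)\.(.+))"'  # matches a quoted filename with an extension
NO_EXT_RE = r'"(.+)"'                     # fallback when the quoted value has no extension

def filename_and_extension(content_disposition: str) -> tuple:
    """Return (file_name, extension), defaulting the extension to 'txt' when none is present."""
    match = re.findall(FILENAME_RE, content_disposition)
    if not match:
        no_extension = re.findall(NO_EXT_RE, content_disposition)
        match = [(no_extension[0], no_extension[0], 'txt')]
    return match[0][0], match[0][2]

print(filename_and_extension('attachment; filename="resume.pdf"'))  # ('resume.pdf', 'pdf')
print(filename_and_extension('attachment; filename="resume"'))      # ('resume', 'txt')
```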
#### File: feedback/dmo/regional_rollup_mapper.py
```python
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class RegionalRollupMapper(BaseObject):
""" Perform a rollup from Countries to Regions based on an explicit mapping file """
__input_path = "resources/config/other/sentiment-regional-rollups.csv"
def __init__(self,
is_debug: bool = False):
"""
Created:
26-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1449
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._df = self._load()
self._is_debug = is_debug
def _load(self) -> DataFrame:
return pd.read_csv(self.__input_path,
encoding='utf-8',
sep=',',
skiprows=0,
names=['Country', 'Region'])
def lookup(self,
country: str) -> str:
if type(country) != str:
self.logger.warning(f"Unrecognized Country ("
f"value={country}, "
f"type={type(country)})")
return "Other"
df_region = self._df[self._df['Country'] == country.lower()]
if df_region.empty:
self.logger.error(f"Country Not Found (name={country})")
raise NotImplementedError
return df_region['Region'].unique()[0]
```
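The lookup itself reduces to a filtered DataFrame. The sketch below uses an in-memory frame in place of the sentiment-regional-rollups.csv file; the country/region pairs shown are purely illustrative.

```python
import pandas as pd

# illustrative stand-in for resources/config/other/sentiment-regional-rollups.csv
df_rollup = pd.DataFrame({'Country': ['france', 'germany', 'india'],
                          'Region': ['Europe', 'Europe', 'Asia Pacific']})

def lookup_region(country) -> str:
    if not isinstance(country, str):
        return "Other"  # mirrors the warning-and-default branch above
    df_region = df_rollup[df_rollup['Country'] == country.lower()]
    if df_region.empty:
        raise KeyError(f"Country Not Found (name={country})")
    return df_region['Region'].unique()[0]

print(lookup_region("France"))      # Europe
print(lookup_region(float('nan')))  # Other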
#### File: feedback/dmo/tenure_value_mapper.py
```python
from base import BaseObject
class TenureValueMapper(BaseObject):
""" Perform a rollup from Countries to Regions based on an explicit mapping file """
def __init__(self,
is_debug: bool = False):
"""
Created:
26-Nov-2019
<EMAIL>
* refactored out of 'ingest-internal-feedback' while in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1449
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
def lookup(self,
tenure: str) -> int:
if type(tenure) != str:
self.logger.warning(f"Unrecognized Tenure ("
f"value={tenure}, "
f"type={type(tenure)})")
return 0
tenure = tenure.lower().strip()
if "<6" in tenure or "< 6" in tenure:
return 1
if "6mos" in tenure or "6 mos" in tenure:
return 2
if "1-2" in tenure:
return 3
if "3-5" in tenure:
return 4
if "6-10" in tenure:
return 5
if "11-15" in tenure:
return 6
if "16-20" in tenure:
return 7
if "21-25" in tenure:
return 8
if "26-30" in tenure:
return 9
if "31" in tenure:
return 10
self.logger.error(f"Unrecognized Tensure: {tenure}")
raise NotImplementedError
```
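The cascade of substring checks is order-sensitive ('<6' must be tested before '6-10', for example). A table-driven sketch that preserves that ordering, with made-up input strings:

```python
# (substring, ordinal) pairs in the same order as the checks above
TENURE_BUCKETS = [("<6", 1), ("< 6", 1), ("6mos", 2), ("6 mos", 2),
                  ("1-2", 3), ("3-5", 4), ("6-10", 5), ("11-15", 6),
                  ("16-20", 7), ("21-25", 8), ("26-30", 9), ("31", 10)]

def tenure_to_ordinal(tenure) -> int:
    if not isinstance(tenure, str):
        return 0
    tenure = tenure.lower().strip()
    for fragment, value in TENURE_BUCKETS:
        if fragment in tenure:
            return value
    raise ValueError(f"Unrecognized Tenure: {tenure}")

print(tenure_to_ordinal("3-5 Years"))   # 4
print(tenure_to_ordinal("< 6 Months"))  # 1
```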
#### File: feedback/svc/ingest_internal_feedback.py
```python
import os
import pandas as pd
from pandas import DataFrame
from base import BaseObject
from base import FieldStructure
from dataingest.feedback.dmo import RegionalRollupMapper
from dataingest.feedback.dmo import TenureValueMapper
from datamongo import CendantCollection
class IngestInternalFeedback(BaseObject):
""" NOTE
this input file will not exist on the server for reasons of confidentiality """
__input_file = 'WW GTS Comments October 2019_For Peter.xlsx'
def __init__(self,
collection_name: str,
is_debug: bool = False):
"""
Created:
22-Nov-2019
<EMAIL>
* refactored out of 'generate-sentiment-graph'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1419#issuecomment-16183547
Updated:
26-Nov-2019
<EMAIL>
* add sentence segmentation
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1452
Updated:
16-Jan-2020
<EMAIL>
* minor updates to logging and initialization
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1745
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._collection_name = collection_name
self._tenure_mapper = TenureValueMapper(is_debug=self._is_debug)
self._region_mapper = RegionalRollupMapper(is_debug=self._is_debug)
def _read(self) -> DataFrame:
inpath = os.path.join(os.environ['CODE_BASE'],
'resources/confidential_input',
self.__input_file)
df = pd.read_excel(inpath)
self.logger.debug('\n'.join([
"Feedback Spreadsheet Loaded",
f"\Total Records: {len(df)}",
f"\tInput Path: {self.__input_file}"]))
return df
def process(self):
df_input = self._read()
record_ctr = 0
div_field = ''
target_records = []
for _, row in df_input.iterrows():
fields = []
tenure = self._tenure_mapper.lookup(row['tenure'])
region = self._region_mapper.lookup(row['country'])
fields.append(FieldStructure.generate_src_field(agent_name="user",
field_type="text",
field_name='market',
field_value=row['market'],
transformations=[]))
fields.append(FieldStructure.generate_src_field(agent_name="user",
field_type="text",
field_name='country',
field_value=row['country'],
transformations=[]))
fields.append(FieldStructure.generate_src_field(agent_name="user",
field_type="text",
field_name='region',
field_value=region,
transformations=['region-mapper']))
fields.append(FieldStructure.generate_src_field(agent_name="user",
field_type="text",
field_name='leadership',
field_value=row['leadership'],
transformations=[]))
fields.append(FieldStructure.generate_src_field(agent_name="user",
field_type="text",
field_name='tenure',
field_value=tenure,
transformations=['tenure-mapper']))
fields.append(FieldStructure.generate_src_field(agent_name="user",
field_type="long-text",
field_name='comments',
field_value=row['what_else_to_share'],
transformations=[]))
target_records.append({
"fields": fields,
"key_field": record_ctr,
"div_field": div_field,
"manifest": {
"name": self.__input_file}})
record_ctr += 1
collection = CendantCollection(some_collection_name=self._collection_name)
collection.delete()
collection.insert_many(target_records)
def main():
# not going to make this more robust;
# feedback ingestion is not considered a repeatable process
IngestInternalFeedback(collection_name="feedback_src_20200116").process()
if __name__ == "__main__":
main()
```
#### File: grammar/dmo/python_loc_parser.py
```python
import pprint
from base import BaseObject
from base import FileIO
class PythonLOCParser(BaseObject):
""" Parse T/LOC from a Python File
"""
def __init__(self,
file_path: str,
is_debug: bool = False):
"""
Created:
24-Dec-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1637#issuecomment-16802191
:param file_path:
link to a python file
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._file_path = file_path
def _lines(self) -> list:
lines = FileIO.file_to_lines(self._file_path, use_sort=False)
return lines
def process(self) -> dict:
lines = self._lines()
loc = len(lines)
tloc = len([line for line in lines if line and len(line.strip())])
d_result = {
"Provenance": str(self.__class__.__name__),
"FilePath": self._file_path,
"LOC": str(loc),
"TLOC": str(tloc)}
if self._is_debug:
self.logger.debug('\n'.join([
"LOC Parsing Complete",
pprint.pformat(d_result, indent=4)]))
return d_result
```
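FileIO.file_to_lines comes from the internal base package; the counting itself needs only the standard library, as in this sketch:

```python
from pathlib import Path

def loc_and_tloc(file_path: str) -> dict:
    """LOC counts every line; TLOC counts only lines that are non-empty after stripping whitespace."""
    lines = Path(file_path).read_text(encoding="utf-8").splitlines()
    return {"LOC": len(lines),
            "TLOC": len([line for line in lines if line.strip()])}

print(loc_and_tloc(__file__))
```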
#### File: grammar/svc/parse_python_file.py
```python
import pprint
from base import BaseObject
class ParsePythonFile(BaseObject):
""" Parse a single Python Code File """
def __init__(self,
file_path: str,
is_debug: bool = False):
"""
Created:
24-Dec-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1637#issuecomment-16802139
:param file_path:
a path to a single Python file
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._file_path = file_path
def _merge(self,
d_loc: dict,
d_classname: dict,
d_path_segmentation: dict) -> dict:
def _classname() -> str:
if d_classname:
return d_classname['ClassName']
return d_path_segmentation['FileName'].split('.')[0].strip()
d_result = {
'LOC': d_loc['LOC'],
'TLOC': d_loc['TLOC'],
'ClassName': _classname(),
'FilePath': self._file_path,
'SOAType': d_path_segmentation['SOAType'],
'FileName': d_path_segmentation['FileName'],
'RelativePath': d_path_segmentation['RelativePath']}
keys = [k for k in d_path_segmentation.keys() if k.startswith('D')]
for key in keys:
d_result[key] = d_path_segmentation[key]
return d_result
def process(self) -> list:
from dataingest.grammar.dmo import PythonLOCParser
from dataingest.grammar.dmo import PythonClassNameParser
from dataingest.grammar.dmo import PythonPathSegmentation
results = []
d_classname = PythonClassNameParser(is_debug=self._is_debug,
file_path=self._file_path).process()
d_loc = PythonLOCParser(is_debug=self._is_debug,
file_path=self._file_path).process()
d_path_segmentation = PythonPathSegmentation(is_debug=self._is_debug,
file_path=self._file_path).process()
results.append(self._merge(d_classname=d_classname,
d_loc=d_loc,
d_path_segmentation=d_path_segmentation))
if self._is_debug:
self.logger.warning('\n'.join([
"Python Parsing Complete",
pprint.pformat(results, indent=4)]))
return results
```
#### File: grammar/svc/parse_python_imports.py
```python
import os
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
class ParsePythonImports(BaseObject):
""" Extract and Augment Imports from Python Files """
def __init__(self,
files: list,
df_files: DataFrame,
is_debug: bool = False):
"""
Created:
24-Dec-2019
<EMAIL>
* refactored and augmented from existing code
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1642
:param files:
a list of paths to python files in a workspace
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._files = files
self._df_files = df_files
self._is_debug = is_debug
self._base_path = os.environ["CODE_BASE"]
def _log(self,
import_type: str,
df_imports: DataFrame) -> None:
def _sample() -> DataFrame:
if len(df_imports) > 3:
return df_imports.sample(3)
return df_imports
self.logger.debug('\n'.join([
f"{import_type} Imports Created",
tabulate(_sample(), headers='keys', tablefmt='psql')]))
def _generate(self):
from dataingest.grammar.dmo import PythonImportParser
external_imports = [] # a class imports a third-party component
internal_imports = [] # a class imports another component within the workspace
for file in self._files:
imports = PythonImportParser(file_path=file,
is_debug=self._is_debug).process()
for an_import in imports:
import_classname = an_import['Import']
df2 = self._df_files[self._df_files['ClassName'] == import_classname]
if df2.empty:
d_external = {
"FilePath": file,
"Import": import_classname}
if 'L0' in an_import:
d_external["From"] = an_import['L0']
external_imports.append(d_external)
else:
for _, row in df2.iterrows():
internal_imports.append({
"SourceFilePath": file,
"TargetFilePath": row['FilePath']})
return pd.DataFrame(external_imports), pd.DataFrame(internal_imports)
def process(self) -> dict:
df_external_imports, df_internal_imports = self._generate()
if self._is_debug:
if not df_external_imports.empty:
self._log("External", df_external_imports)
if not df_internal_imports.empty:
self._log("Internal", df_internal_imports)
return {
"external": df_external_imports,
"internal": df_internal_imports}
```
#### File: graph/dmo/graph_field_mapping.py
```python
import pprint
from base import BaseObject
from base import MandatoryParamError
class GraphFieldMapping(BaseObject):
""" """
def __init__(self,
some_manifest_fields: list,
some_source_records: list):
"""
Created:
15-Mar-2019
<EMAIL>
* refactored out of 'load-neo-from-manifest-2'
:param some_manifest_fields:
the field definitions in the manifest
:param some_source_records:
the source records from mongoDB
"""
BaseObject.__init__(self, __name__)
if not some_manifest_fields:
raise MandatoryParamError("Manifest Fields")
if not some_source_records:
raise MandatoryParamError("Source Records")
self.manifest_fields = some_manifest_fields
self.source_records = some_source_records
def _transform_source_record(self,
source_record: dict) -> dict:
"""
map a single source record from mongo with a corresponding manifest record
sample input:
Manifest:
- source_name: job_role
target_name: JobRole
- source_name: job_role_id
target_name: JobRoleId
relationships:
- partOf:
- job_role
- source_name: skill_set
target_name: SkillSet
Source Record:
[ { 'name': 'job_role',
'type': 'text',
'value': 'Application Database Administrator' },
{ 'name': 'job_role_id',
'type': 'text',
'value': '042523' },
{ 'name': 'skill_set',
'type': 'text',
'value': 'Application Database Administrator' }]
sample output:
[ { 'functional_community': {
'field': [
{ 'name': 'functional_community',
'type': 'text',
'value': 'GTS Delivery' } ],
'manifest': {
'source_name': 'functional_community',
'target_name': 'FunctionalCommunity' }},
'job_category': {
'field': [
{ 'name': 'job_category',
'type': 'text',
'value': 'Technical Specialist' } ],
'manifest': {
'source_name': 'job_category',
'target_name': 'JobCategory' }},
...
'job_role': {
'field': [
{ 'name': 'job_role',
'type': 'text',
'value': 'Application Database Administrator' } ],
'manifest':
{ 'source_name': 'job_role',
'target_name': 'JobRole' }},
}]
:param source_record:
a single record (JSON dictionary) from MongoDB
:return:
a dictionary of mongoDB records mapped to Manifest fields
"""
d_record = {}
for manifest_field in self.manifest_fields:
source_field = [source_field for source_field in source_record["fields"] if
source_field["name"] == manifest_field["source_name"]]
if not len(source_field):
self.logger.warn("\n".join([
"Source Field Not Found",
pprint.pformat(source_field)
]))
continue
if not source_field[0]["value"]:
continue
# hack start
text = source_field[0]["value"]
if ".0" in text:
text = text.split(".0")[0].strip()
source_field[0]["value"] = text
# hack end
d_record[manifest_field["source_name"]] = {
"field": source_field,
"manifest": manifest_field
}
return d_record
def process(self) -> list:
""" map mongo fields to manifest fields
merges each source record from mongo with a manifest record
"""
l_records = [self._transform_source_record(x) for x in self.source_records]
self.logger.debug("\n".join([
"Mapped Source Records to Manifest Fields",
"\ttotal-records: {}".format(len(l_records))
]))
return l_records
```
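A condensed sketch of `_transform_source_record` (without the '.0' clean-up hack), run against the manifest and source-record shapes shown in the docstring above:

```python
def map_record(manifest_fields: list, source_record: dict) -> dict:
    """Pair each manifest field with the source field of the same name, skipping empty values."""
    d_record = {}
    for manifest_field in manifest_fields:
        matches = [field for field in source_record["fields"]
                   if field["name"] == manifest_field["source_name"]]
        if matches and matches[0]["value"]:
            d_record[manifest_field["source_name"]] = {"field": matches,
                                                       "manifest": manifest_field}
    return d_record

manifest_fields = [{"source_name": "job_role", "target_name": "JobRole"}]
source_record = {"fields": [{"name": "job_role", "type": "text",
                             "value": "Application Database Administrator"}]}
print(map_record(manifest_fields, source_record))
```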
#### File: graph/svc/load_neo_from_manifest.py
```python
from py2neo import Relationship
from py2neo import Transaction
from base import BaseObject
from base import MandatoryParamError
from datagraph import NeoGraphConnector
from datagraph import NeoGraphContext
from nlutext import TextParser
class LoadNeoFromManifest(BaseObject):
""" """
def __init__(self,
some_manifest_name: str,
some_activity_name: str):
"""
Created:
14-Mar-2019
<EMAIL>
Updated:
26-Mar-2019
<EMAIL>
* updates based on MDA changes
:param some_manifest_name:
the name of the manifest
:param some_activity_name:
the name of the activity within the manifest
"""
BaseObject.__init__(self, __name__)
if not some_manifest_name:
raise MandatoryParamError("Manifest Name")
if not some_activity_name:
raise MandatoryParamError("Activity Name")
self.manifest_name = some_manifest_name
self.activity_name = some_activity_name
self.text_parser = TextParser()
self.graph_context = NeoGraphContext()
@staticmethod
def _mongo_sources(d_manifest: dict) -> list:
""" return all the defined mongo sources"""
def _is_mongo(a_source):
return a_source["source"]["description"]["type"].lower().strip() == "mongo"
return [source["source"] for source
in d_manifest["sources"] if _is_mongo(source)]
@staticmethod
def _mongo_collections(mongo_sources: list) -> dict:
""" instantiate all the mongoDB connections """
from dataingest.core.dmo import ManifestConnectorForMongo
d_coll = {}
for mongo_source in mongo_sources:
conn = ManifestConnectorForMongo(mongo_source["description"]).process()
d_coll[mongo_source["description"]["collection"]] = conn
return d_coll
def _generate_nodes(self,
tx: Transaction,
mapped_source_records: list,
mongo_source: dict):
""" generate all the nodes"""
node_entity = "{} - {}".format(mongo_source["description"]["database"],
mongo_source["description"]["collection"])
for source_record in mapped_source_records:
for field in source_record:
sentence = " ".join(field["value"])
field_node = self.graph_context.find_or_create_node(tx,
sentence,
field["name"],
some_entity=node_entity)
for tag in field["tags"]:
tag_node = self.graph_context.find_or_create_node(tx,
tag["name"],
"DomainTerm",
some_activity=tag["type"],
some_entity=tag["provenance"])
tx.create(Relationship(field_node,
"implies-type1",
tag_node,
activity="Known Relationship",
entity=""))
# add subsumes implications
for tag in field["tags"]:
_s = self.graph_context.find_or_create_node(tx,
tag["name"],
"DomainTerm",
some_activity=tag["type"],
some_entity=tag["provenance"])
for tag_link in tag["links"]:
_o = self.graph_context.find_or_create_node(tx,
tag_link,
"DomainTerm",
some_activity=tag["type"],
some_entity=tag["provenance"])
tx.create(Relationship(_s,
"implies-type2",
_o,
activity="Inferred Relationship",
entity="Vector Space"))
def _generate_relationships(self,
tx: Transaction,
mapped_source_records: list):
""" generate all the relationships """
for source_record in mapped_source_records:
for field in source_record:
_subject = self.graph_context.find_or_create_node(tx,
" ".join(field["value"]),
field["name"], )
for rel in field["relationships"]:
_object = self.graph_context.find_or_create_node(tx,
" ".join(rel["value"]),
rel["name"])
tx.create(Relationship(_subject,
rel["type"],
_object,
activity="Known Relationship"))
def process(self):
from dataingest.core.dmo import ManifestActivityFinder
from dataingest.graph.dmo import GraphDataExtractor
from dataingest.graph.dmo import GraphFieldMapping
from dataingest.graph.dmo import GraphDataTransform
from datagraph import GenerateSimilarityMetric
d_manifest = ManifestActivityFinder(self.manifest_name,
self.activity_name).process()
mongo_sources = self._mongo_sources(d_manifest)
mongo_collections = self._mongo_collections(mongo_sources)
neog = NeoGraphConnector().connect(connection_type="local")
neog.delete_all()
tx = neog.begin()
for mongo_source in mongo_sources:
source_records = GraphDataExtractor(mongo_collections,
mongo_source).process()
# retrieve fields by manifest
manifest_fields = [manifest_field for manifest_field in mongo_source["fields"]]
mapped_source_records = GraphFieldMapping(manifest_fields, source_records).process()
mapped_source_records = GraphDataTransform(mapped_source_records,
tag_min_threshold=2).process()
# create all the nodes
self._generate_nodes(tx, mapped_source_records, mongo_source)
self._generate_relationships(tx, mapped_source_records)
GenerateSimilarityMetric(tx, self.graph_context).process()
tx.commit()
self.logger.info("\n".join([
"Neo Graph Ingest Completed",
"\ttotal-nodes: {}".format(self.graph_context.total_nodes())
]))
```
#### File: parse/dmo/parse_data_extractor.py
```python
from base import BaseObject
from base import MandatoryParamError
from datamongo import BaseMongoClient
from datamongo import CendantCollection
class ParseDataExtractor(BaseObject):
""" """
def __init__(self,
some_manifest: dict,
mongo_client: BaseMongoClient,
is_debug: bool = True):
"""
Created:
12-Mar-2019
<EMAIL>
Updated:
2-Aug-2019
<EMAIL>
* added mongo client as a parameter
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/588
Updated:
17-Dec-2019
<EMAIL>
* removed unused code
* tweaked skip and limit params. E.g. first=4 and last=5, means skip=4 and limit=5-4+1
:param some_manifest:
the parse manifest
"""
BaseObject.__init__(self, __name__)
if not some_manifest:
raise MandatoryParamError("Manifest")
self._is_debug = is_debug
self._manifest = some_manifest
self._mongo_client = mongo_client
def _source_collection(self) -> CendantCollection:
"""
:return:
a connection to the MongoDB collection with source data
"""
from dataingest.core.dmo import ManifestConnectorForMongo
if self._is_debug:
self.logger.debug("Loading Manifest Source Connector")
connector = ManifestConnectorForMongo(some_base_client=self._mongo_client,
some_manifest_entry=self._manifest["source"],
is_debug=True)
return connector.process()
def process(self,
start_record: int,
end_record: int) -> list:
"""
:param start_record:
:param end_record:
:return:
source records
"""
source_collection = self._source_collection()
source_records = source_collection.skip_and_limit(start_record,
(end_record - start_record + 1))
self.logger.info("\n".join([
"Loaded Source Records",
f"\tTotal: {len(source_records)}",
f"\tName: {source_collection.collection.name}",
f"\tStart Record: {start_record}",
f"\tEnd Record: {end_record}",
f"\tTotal Records: {len(source_records)}"]))
return source_records
```
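The skip/limit arithmetic noted in the docstring (first=4 and last=5 means skip=4 and limit=2) is easy to get wrong by one. A tiny sketch with a hypothetical helper:

```python
def skip_and_limit_params(start_record: int, end_record: int) -> tuple:
    """Translate an inclusive [start_record, end_record] range into MongoDB skip/limit values."""
    return start_record, end_record - start_record + 1

print(skip_and_limit_params(4, 5))   # (4, 2) -> skip the first 4 records, then take 2
print(skip_and_limit_params(0, 99))  # (0, 100)
```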
#### File: parse/dmo/source_record_filter.py
```python
from base import BaseObject
class SourceRecordFilter(BaseObject):
""" Remove non-Annotatable Source Records from a list """
def __init__(self,
source_records: list,
is_debug: bool = False):
"""
Created:
30-Sept-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1026
:param source_records:
a list of records from the source collection
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._source_records = source_records
self.logger.debug("Instantiate SourceRecordFilter")
def process(self) -> list:
"""
Purpose:
Do not process records without long-text fields
Rationale:
1. a record is composed of 0..* fields
2. each field has a type (text, long-text, badge, etc.)
3. only long-text fields are annotated
4. only annotated records have value in the XDM collection
Therefore:
1. if a record is not annotated, it has no value going forward
and should be discarded
:return:
an updated list of source records
"""
master = []
for source_record in self._source_records:
long_text_fields = [field for field in source_record["fields"]
if field["type"] == "long-text"]
if not long_text_fields:
continue
master.append(source_record)
if len(master) != len(self._source_records):
self.logger.info(f"Source Record Filtering Complete "
f"(original={len(self._source_records)}, "
f"filtered={len(master)})")
return master
```
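Equivalently, the filter can be written as a single comprehension; a sketch with illustrative records:

```python
def keep_annotatable(source_records: list) -> list:
    """Keep only records with at least one long-text field, since only long-text is annotated."""
    return [record for record in source_records
            if any(field["type"] == "long-text" for field in record["fields"])]

records = [{"fields": [{"type": "text"}]},
           {"fields": [{"type": "long-text"}, {"type": "text"}]}]
print(len(keep_annotatable(records)))  # 1
```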
#### File: parse/svc/parse_manifest_data.py
```python
import os
import pprint
from base import BaseObject
from base import MandatoryParamError
from datamongo import BaseMongoClient
from datamongo import CendantCollection
from datamongo import CreateTextIndex
from datamongo import CreateFieldIndex
from nlutext import TextParser
class ParseManifestData(BaseObject):
""" given a parse manifest:
1. take data from a source mongo collection
2. parse it
3. and write it to a target mongo collection """
def __init__(self,
some_manifest_name: str,
some_activity_name: str,
first: int = -1,
last: int = -1,
is_debug: bool = False):
"""
Created:
12-Mar-2019
<EMAIL>
Updated
3-Apr-2019
<EMAIL>
* one sentence per field
Updated:
4-Apr-2019
<EMAIL>
* incremental parse capability
Updated:
24-Jun-2019
<EMAIL>
* added text index
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/371
Updated:
2-Aug-2019
<EMAIL>
* added mongo connection as parameter to manifest connectors
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/588
Updated:
2-Sep-2019
<EMAIL>
* added incremental-retrieval and documentation to source-records
Updated:
17-Dec-2019
<EMAIL>
* removed incremental-retrieval and documentation to source-records
* adapted to kubernetes and RQ
:param some_manifest_name:
the name of the manifest
:param some_activity_name:
the name of the activity within the manifest
"""
from dataingest.core.dmo import ManifestActivityFinder
from dataingest.core.dmo import ManifestConnectorForMongo
BaseObject.__init__(self, __name__)
if not some_manifest_name:
raise MandatoryParamError("Manifest Name")
if not some_activity_name:
raise MandatoryParamError("Activity Name")
self.is_debug = is_debug
self.text_parser = TextParser(is_debug=is_debug)
self.manifest_name = some_manifest_name
self.activity_name = some_activity_name
self.first = first
self.last = last
self._mongo_client = BaseMongoClient()
self._manifest_data = ManifestActivityFinder(self.manifest_name,
self.activity_name).process()
self._manifest_connector_class = ManifestConnectorForMongo
if self.is_debug:
self.logger.debug("Instantiate ParseManifestData")
def _target_collection(self) -> CendantCollection:
if self.is_debug:
self.logger.debug("\n".join([
"Loading Manifest Target Connector",
f"{pprint.pformat(self._manifest_data['target'], indent=4)}"]))
connector = self._manifest_connector_class(some_base_client=self._mongo_client,
some_manifest_entry=self._manifest_data["target"],
is_debug=self.is_debug)
return connector.process()
def _source_records(self) -> list:
from dataingest.parse.dmo import ParseDataExtractor
data_extractor = ParseDataExtractor(self._manifest_data,
mongo_client=self._mongo_client,
is_debug=self.is_debug)
some_source_records = data_extractor.process(start_record=self.first,
end_record=self.last)
self.logger.debug(f"Returning Source Records "
f"(type=2, total={len(some_source_records)})")
return some_source_records
def _parse_records(self,
target_collection: CendantCollection,
source_records: list) -> int:
from dataingest.parse.svc import ParseRecordsFromMongo
parser = ParseRecordsFromMongo(target_collection=target_collection,
source_records=source_records,
is_debug=self.is_debug)
return parser.process(threshold=5)
def source_collections(self) -> list:
collection = self._manifest_connector_class(self._manifest_data['source'],
some_base_client=self._mongo_client,
is_debug=self.is_debug).process()
return [(collection.collection_name, collection.count())]
def flush_target(self) -> None:
target_collection = self._target_collection()
target_collection.delete(keep_indexes=False)
def index_target(self) -> None:
index_options = {
'background': True
}
self.logger.debug("Creating text indexes...")
target_collection = self._target_collection()
field_indexer = CreateFieldIndex(is_debug=self.is_debug,
collection=target_collection.collection)
text_indexer = CreateTextIndex(is_debug=self.is_debug,
collection=target_collection.collection)
text_indexer.process(field_name='fields.normalized',
index_options=index_options)
field_indexer.process(field_name='div_field',
index_options=index_options)
field_indexer.process(field_name='key_field',
index_options=index_options)
def process(self) -> int:
"""
Parse the source records selected by the manifest and write the results to the target collection.
note: records are appended to the target collection; no duplicate checking occurs
:return:
the number of source records handled
"""
target_collection = self._target_collection()
source_records = self._source_records()
if not source_records or not len(source_records):
self.logger.warn("\n".join([
"No Source Records Found",
f"\tSource: {self._manifest_data['source']}, "
f"{os.environ[self._manifest_data['source']['collection'][1:]]}",
f"\tTarget: {self._manifest_data['target']}, "
f"{os.environ[self._manifest_data['target']['collection'][1:]]}",
f"\tStart Record: {self.first}",
f"\tEnd Record: {self.last}"]))
return 0
handled_records = self._parse_records(target_collection,
source_records)
if self.is_debug:
self.logger.debug("\n".join([
"Manifest Parsing Complete",
f"\tURL: {target_collection.base_client.url}",
f"\tCollection Name: {target_collection.collection_name}",
f"\tTotal Records: {handled_records}"]))
return handled_records
```
#### File: patents/dmo/joined_record_transformer.py
```python
import math
import pandas as pd
from base import BaseObject
from base import FieldStructure
class JoinedRecordTransformer(BaseObject):
""" Transforms a "Joined" Patent Record into a "Source" Patent Record
A "joined" Record is a Row in a DataFrame that has merged
USPTO + BluePages Patent Information
A "source" Record is a single entry within
the 'patents_src_<date>' collection
"""
def __init__(self,
row: pd.Series,
is_debug: bool = False):
"""
Created:
24-Oct-2019
<EMAIL>
* https://github.ibm.com/-cdo/unstructured-analytics/issues/1196
"""
BaseObject.__init__(self, __name__)
self._row = row
self._is_debug = is_debug
@staticmethod
def _field(field_name: str,
field_value: str,
field_type="text",
transformations=None) -> dict or None:
def cleanse() -> str or int or None:
if not field_value:
return None
if (type(field_value) == int or type(field_value) == float) and math.isnan(field_value):
return None
if field_type == "int":
return int(field_value)
return field_value.strip()
field_value = cleanse()
if not field_value:
return None
return FieldStructure.generate_src_field(agent_name="System",
field_type=field_type,
field_name=field_name,
field_value=field_value,
transformations=transformations)
def process(self) -> dict:
fields = [
self._field(field_name="patent_title",
field_value=self._row["Title"],
field_type="long-text"),
self._field(field_name="patent_abstract",
field_value=self._row["Abstract"],
field_type="long-text"),
self._field(field_name="patent_id",
field_value=self._row["ID"],
field_type="text"),
self._field(field_name="patent_country",
field_value=self._row["Country"],
field_type="text"),
self._field(field_name="patent_date",
field_value=self._row["Date"],
field_type="text"),
self._field(field_name="patent_filename",
field_value=self._row["Filename"],
field_type="text"),
self._field(field_name="patent_kind",
field_value=self._row["Kind"],
field_type="text"),
self._field(field_name="patent_number_of_claims",
field_value=self._row["NumberOfClaims"],
field_type="int"),
self._field(field_name="patent_type",
field_value=self._row["Type"],
field_type="text"),
self._field(field_name="patent_withdrawn",
field_value=self._row["Withdrawn"],
field_type="int")]
fields = [x for x in fields if x]
return {
"fields": fields,
"key_field": self._row["SerialNumber"],
"manifest": {
"name": "patents"}}
```
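The cleanse closure does the interesting work here: a "joined" row can carry NaN values (e.g. where the merge had no value), and those must be dropped rather than written as fields. A standalone sketch of that logic with illustrative inputs:

```python
import math

def cleanse(field_value, field_type: str = "text"):
    """Return a normalized value, or None so that empty/NaN fields are dropped from the record."""
    if not field_value:
        return None
    if isinstance(field_value, (int, float)) and math.isnan(field_value):
        return None
    if field_type == "int":
        return int(field_value)
    return field_value.strip()

print(cleanse("  Some Patent Title  "))  # 'Some Patent Title'
print(cleanse(float("nan")))             # None
print(cleanse("3", field_type="int"))    # 3
```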
#### File: push/bp/persist_api.py
```python
from base import BaseObject
from datamongo import BaseMongoClient
class PersistAPI(BaseObject):
""" API to Persist Data in DB2 """
def __init__(self,
is_debug: bool = False):
"""
Created:
3-July-2019
<EMAIL>
Updated:
30-September-2019
<EMAIL>
* updated `is_truncate` parameter
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1018
Updated:
1-Nov-2019
<EMAIL>
* added push-<name>-collection methods
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1238#issue-10682083
Updated:
11-Nov-2019
<EMAIL>
* added persist collection to be executed from admin.sh
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1312
Updated:
23-Feb-2020
<EMAIL>
* Folded PushTagCollection and PushXdmCollection into PushCollection
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
@staticmethod
def persist(input_data, schema_name, table_name):
from dataingest.push.dmo import PersistDatatoDB
from datadb2.core.bp import DBCredentialsAPI
db2username, db2password = DBCredentialsAPI("WFT_USER_NAME", "WFT_PASS_WORD").process()
PersistDatatoDB(db2username, db2password).process(input_data, schema_name, table_name, is_truncate=True)
def push_collection(self,
mongo_collection_name: str,
transformation_type: str,
target_db2_table: str,
target_db2_schema: str = 'Cendant',
tag_confidence_threshold: float = None,
mongo_client: BaseMongoClient = None) -> None:
"""
Purpose:
Push a collection into DB2
:param mongo_collection_name:
the name of the source collection to extract the data from
:param target_db2_table:
the name of the target table in DB2 to write the transformed data into
:param target_db2_schema:
the name of the DB2 Schema to write to
:param mongo_client:
an instantiated mongoDB client instance
"""
from dataingest.push.svc import PushCollection
from dataingest.push.dmo import FieldsRecordTransformation
from dataingest.push.dmo import TagRecordTransformation
from dataingest.push.dmo import XdmRecordTransformation
transformation_type = transformation_type.lower()
transformation = {
"fields": FieldsRecordTransformation,
"tag": TagRecordTransformation,
"xdm": XdmRecordTransformation
}
pusher = PushCollection(mongo_collection_name=mongo_collection_name,
transformation_class=transformation[transformation_type],
transformation_type=transformation_type,
db2_table_name=target_db2_table,
db2_schema_name=target_db2_schema,
mongo_client=mongo_client,
tag_confidence_threshold=tag_confidence_threshold,
is_debug=self._is_debug)
pusher.process()
def main(transformation_type, data, schema, table, tag_confidence_threshold):
def _action(tag_confidence_threshold):
print(transformation_type)
if transformation_type in ["tag", "xdm", "fields"]:
PersistAPI(is_debug=True).push_collection(mongo_collection_name=data,
transformation_type=transformation_type,
target_db2_table=table,
target_db2_schema=schema,
mongo_client=BaseMongoClient(),
tag_confidence_threshold=tag_confidence_threshold)
elif not transformation_type:
PersistAPI(is_debug=True).persist(data, schema, table)
else:
raise NotImplementedError("\n".join([
"Unknown Param: {}".format(transformation_type)]))
if tag_confidence_threshold:
tag_confidence_threshold = float(tag_confidence_threshold)
else:
tag_confidence_threshold = 0
_action(tag_confidence_threshold)
if __name__ == "__main__":
import plac
plac.call(main)
```
#### File: push/bp/run_manifest_data_command.py
```python
import os
from base import BaseObject
from base import MandatoryParamError
class RunManifestDataCommand(BaseObject):
"""Runs commands specified by a manifest activity"""
def __init__(self,
some_manifest_name: str,
some_activity_name: str,
is_debug: bool = False):
from dataingest.core.dmo import ManifestActivityFinder
BaseObject.__init__(self, __name__)
if not some_manifest_name:
raise MandatoryParamError("Manifest Name")
if not some_activity_name:
raise MandatoryParamError("Activity Name")
self.is_debug = is_debug
self.command = ManifestActivityFinder(some_manifest_name,
some_activity_name).process()['command']
def process(self) -> int:
return os.system(self.command)
def run_manifest_command(manifest_name: str,
activity_name: str):
runner = RunManifestDataCommand(manifest_name, activity_name)
command = runner.command
rc = runner.process()
if rc:
raise Exception(f"Error {rc} executing '{command}'")
```
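os.system only surfaces an exit status; subprocess.run offers the same return-code check with the command handling made explicit. A sketch, not a drop-in replacement:

```python
import subprocess

def run_manifest_shell_command(command: str) -> None:
    """Run a manifest-supplied shell command and raise when it exits non-zero."""
    completed = subprocess.run(command, shell=True)
    if completed.returncode:
        raise RuntimeError(f"Error {completed.returncode} executing '{command}'")

run_manifest_shell_command("echo manifest step")
```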
#### File: push/dmo/tag_record_transformation.py
```python
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
class TagRecordTransformation(BaseObject):
"""
Purpose:
Transform nested TAG record into a flat DataFrame
Sample Input:
{ 'div_field': '',
'ts': '84d58ab8-f7a7-11e9-bab8-066122a69d41',
'fields': [
...
{ 'agent': 'user',
'collection': {'name': 'ingest_cv_education_', 'type': 'ingest_cv_education'},
'field_id': 'ff6d19a4-f75b-11e9-91a8-066122a69d41-0',
'name': 'major',
'normalized': ['electronics and communication'],
'tags': {'supervised': [['communication', 96.2]], 'unsupervised': []},
'transformations': [],
'type': 'long-text',
'value': ['Electronics and Communication']},
{ 'agent': 'user',
'collection': {'name': 'ingest_cv_education_', 'type': 'ingest_cv_education'},
'field_id': 'ff6d1ad0-f75b-11e9-91a8-066122a69d41-0',
'name': 'degree_name',
'normalized': ['bachelor_of_engineering'],
'tags': {'supervised': [['bachelor of engineering', 99.4]], 'unsupervised': []},
'transformations': [],
'type': 'long-text',
'value': ['Diploma in Electronics and Communication']},
{ 'agent': 'user',
'collection': {'name': 'ingest_cv_education_', 'type': 'ingest_cv_education'},
'field_id': 'ff6d1c06-f75b-11e9-91a8-066122a69d41-0',
'name': 'thesis_title',
'normalized': ['thesis_title bachelor_of_engineering and communicationyear of complete 2005'],
'tags': {'supervised': [['bachelor of engineering', 96.3]], 'unsupervised': []},
'transformations': [],
'type': 'long-text',
'value': ['Thesis title Diploma in Electronics and CommunicationYear of completion 2005']}],
'key_field': '05817Q744'}
Sample Output:
+----+---------------------+--------------+----------------------------------------+--------------+------------+-----------------------------------------------------------------------------+--------------------------------------------------------------------------------+-------------------------+--------------------------------------+-------------------------+
| | Collection | Confidence | FieldId | FieldName | KeyField | NormalizedText | OriginalText | PriorCollection | RecordId | Tag |
|----+---------------------+--------------+----------------------------------------+--------------+------------+-----------------------------------------------------------------------------+--------------------------------------------------------------------------------+-------------------------+--------------------------------------+-------------------------|
| 0 | supply_tag_20191025 | 96.2 | ff6d19a4-f75b-11e9-91a8-066122a69d41-0 | major | 05817Q744 | electronics and communication | Electronics and Communication | ingest_cv_education_ | 84d58ab8-f7a7-11e9-bab8-066122a69d41 | communication |
| 1 | supply_tag_20191025 | 99.4 | ff6d1ad0-f75b-11e9-91a8-066122a69d41-0 | degree_name | 05817Q744 | bachelor_of_engineering | Diploma in Electronics and Communication | ingest_cv_education_ | 84d58ab8-f7a7-11e9-bab8-066122a69d41 | bachelor of engineering |
| 2 | supply_tag_20191025 | 96.3 | ff6d1c06-f75b-11e9-91a8-066122a69d41-0 | thesis_title | 05817Q744 | thesis_title bachelor_of_engineering and communicationyear of complete 2005 | Thesis title Diploma in Electronics and CommunicationYear of completion 2005 | ingest_cv_education_ | 84d58ab8-f7a7-11e9-bab8-066122a69d41 | bachelor of engineering |
+----+---------------------+--------------+----------------------------------------+--------------+------------+-----------------------------------------------------------------------------+--------------------------------------------------------------------------------+-------------------------+--------------------------------------+-------------------------+
"""
def __init__(self,
records: list,
collection_name: str,
is_debug: bool = False):
"""
Created:
1-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1238#issuecomment-15696868
Updated:
19-Feb-2020
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1862
* updated record_id
:param records:
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._records = records
self._is_debug = is_debug
self._collection_name = collection_name
def process(self,
tag_confidence_threshold: float = None) -> DataFrame:
master = []
for record in self._records:
key_field = record["key_field"]
fields = [x for x in record["fields"] if x["type"] == "long-text" and "tags" in x]
for field in fields:
field_id = field["field_id"]
def _tag_tuples():
if tag_confidence_threshold:
return [tag for tag in field["tags"]["supervised"]
if tag[1] >= tag_confidence_threshold]
return [tag for tag in field["tags"]["supervised"]]
for tag_tuple in _tag_tuples():
master.append({
"KeyField": key_field,
"FieldId": field_id,
"Tag": tag_tuple[0],
"Confidence": tag_tuple[1]})
df = pd.DataFrame(master)
if self._is_debug and not df.empty:
self.logger.debug("\n".join([
f"TAG Transformation Complete (collection={self._collection_name}, total={len(df)})",
tabulate(df.sample(),
tablefmt='psql',
headers='keys')]))
return df
```
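A condensed sketch of `process()` for a single record, showing how the confidence threshold prunes supervised tags before the DataFrame is built. The record below reuses the docstring sample, with a second low-confidence tag added purely for illustration.

```python
import pandas as pd

def flatten_tags(record: dict, tag_confidence_threshold: float = None) -> pd.DataFrame:
    rows = []
    for field in record["fields"]:
        if field["type"] != "long-text" or "tags" not in field:
            continue
        for tag, confidence in field["tags"]["supervised"]:
            if tag_confidence_threshold is None or confidence >= tag_confidence_threshold:
                rows.append({"KeyField": record["key_field"],
                             "FieldId": field["field_id"],
                             "Tag": tag,
                             "Confidence": confidence})
    return pd.DataFrame(rows)

record = {"key_field": "05817Q744",
          "fields": [{"type": "long-text",
                      "field_id": "ff6d19a4-f75b-11e9-91a8-066122a69d41-0",
                      "tags": {"supervised": [["communication", 96.2], ["telephony", 40.0]]}}]}
print(flatten_tags(record, tag_confidence_threshold=90.0))
```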
#### File: datamongo/collections/cendant_collection_registry.py
```python
from base import BaseObject
from ..core.dmo import BaseMongoClient
class CendantCollectionRegistry(BaseObject):
""" Service to Provide access to the Cendant Collection Registry """
class CollectionNamesLoader(object):
def __init__(self,
mongo_client: BaseMongoClient):
self._client = mongo_client
def names(self) -> list:
return self._client.client.cendant.list_collection_names()
def __init__(self,
collection_names_loader=None,
is_debug: bool = True):
"""
Created:
13-Nov-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1342
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
if not collection_names_loader:
collection_names_loader = CendantCollectionRegistry.CollectionNamesLoader(BaseMongoClient())
self._config = self._load(collection_names_loader)
def _load(self, collection_names_loader) -> dict:
template = {
"src": "",
"tag": "",
"xdm": ""
}
config = {
"demand": {
"latest": dict(template)
},
"learning": {
"latest": dict(template)
},
"supply": {
"latest": dict(template)
}
}
for collection in collection_names_loader.names():
for key in config.keys():
fragments = collection.split('_')
if len(fragments) != 3:
continue
category, kind, date = tuple(fragments)
if category == key:
if date not in config[key]:
config[category][date] = dict(template)
config[category][date][kind] = collection
if collection > config[category]['latest'][kind]:
config[category]['latest'][kind] = collection
return config
def _by_name(self,
facade,
name: str):
name = name.lower().strip()
if name == "supply":
return facade.supply()
elif name == "demand":
return facade.demand()
elif name == "learning":
return facade.learning()
raise NotImplementedError(f"Name Not Recognized ("
f"name={name})")
@staticmethod
def by_type(a_dict: dict):
class Facade(object):
@staticmethod
def src() -> str:
return a_dict["src"]
@staticmethod
def tag() -> str:
return a_dict["tag"]
@staticmethod
def xdm() -> str:
return a_dict["xdm"]
@staticmethod
def all() -> dict:
return a_dict
return Facade()
def list(self):
def _cleanse(keys: list):
return sorted([x for x in keys if x != "latest"])
class Facade(object):
@classmethod
def supply(cls):
return _cleanse(self._config["supply"].keys())
@classmethod
def demand(cls):
return _cleanse(self._config["demand"].keys())
@classmethod
def learning(cls):
return _cleanse(self._config["learning"].keys())
@classmethod
def by_name(cls,
name: str):
return self._by_name(cls, name)
return Facade()
def latest(self):
def _latest(a_name: str):
return self._config[a_name]["latest"]
class Facade(object):
@classmethod
def supply(cls):
return self.by_type(_latest("supply"))
@classmethod
def demand(cls):
return self.by_type(_latest("demand"))
@classmethod
def learning(cls):
return self.by_type(_latest("learning"))
@classmethod
def by_name(cls,
name: str):
return self._by_name(cls, name)
return Facade()
def by_date(self,
a_date: str):
def _by_date(a_name: str):
if a_date not in self._config[a_name]:
raise ValueError(f"Date Not Found ("
f"name={a_name}, "
f"date={a_date})")
return self._config[a_name][a_date]
class Facade(object):
@staticmethod
def supply():
return self.by_type(_by_date("supply"))
@staticmethod
def demand():
return self.by_type(_by_date("demand"))
@staticmethod
def learning():
return self.by_type(_by_date("learning"))
@classmethod
def by_name(cls,
name: str):
return self._by_name(cls, name)
return Facade()
if __name__ == "__main__":
print(CendantCollectionRegistry().list().supply())
print(CendantCollectionRegistry().list().demand())
print(CendantCollectionRegistry().list().learning())
print(CendantCollectionRegistry().list().by_name("learning"))
print(CendantCollectionRegistry().latest().supply().tag())
print(CendantCollectionRegistry().latest().demand().all())
print(CendantCollectionRegistry().latest().learning().all())
print(CendantCollectionRegistry().latest().by_name("supply").all())
print(CendantCollectionRegistry().by_date("20190913").by_name("supply").all())
```
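The registry hinges on parsing collection names of the form `<category>_<kind>_<date>` and on the fact that the date suffix makes lexicographic comparison equivalent to "newest wins". A simplified sketch of that indexing (it does not pre-seed the demand/learning/supply categories the way `_load` does):

```python
def registry_from_names(collection_names: list) -> dict:
    """Index <category>_<kind>_<date> collection names and track the latest per kind."""
    config = {}
    for name in collection_names:
        fragments = name.split('_')
        if len(fragments) != 3:
            continue
        category, kind, date = fragments
        config.setdefault(category, {}).setdefault(date, {})[kind] = name
        latest = config[category].setdefault('latest', {})
        # the trailing date (e.g. 20191025) means plain string comparison picks the newest collection
        if name > latest.get(kind, ''):
            latest[kind] = name
    return config

print(registry_from_names(["supply_tag_20190913", "supply_tag_20191025", "not-a-collection"]))
```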
#### File: datamongo/collections/cendant_xdm.py
```python
import pprint
import pandas as pd
from pandas import DataFrame
from base import BaseObject
from base import MandatoryParamError
from datamongo.core.bp import CendantCollection
from datamongo.core.dmo import BaseMongoClient
class CendantXdm(BaseObject):
""" Collection Wrapper over MongoDB XDM (Dimemsionality) Collections
* supply-xdm
* demand-xdm
* learning-xdm """
_records = None
def __init__(self,
collection_name: str,
mongo_client: BaseMongoClient = None,
database_name: str = "cendant",
is_debug: bool = True):
"""
Created:
7-Aug-2019
<EMAIL>
* based on 'cendant-tag' and 'cendant-src'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/649
Updated:
30-Sept-2019
<EMAIL>
* filter out records without a JRS ID
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1028
Updated:
31-Oct-2019
<EMAIL>
* add 'dataframe' method
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1227#issuecomment-15665110
"""
BaseObject.__init__(self, __name__)
if not collection_name:
raise MandatoryParamError("Collection Name")
if not mongo_client:
mongo_client = BaseMongoClient()
self.is_debug = is_debug
self.mongo_client = mongo_client
self.collection = CendantCollection(some_base_client=mongo_client,
some_db_name=database_name,
some_collection_name=collection_name,
is_debug=self.is_debug)
def random(self,
total_records: int = 1) -> list:
"""
Purpose:
Return Random Record(s)
:param total_records:
the number of random records to return
:return:
a list of random records with a cardinality of 0..*
"""
return self.collection.random(total_records)
@staticmethod
def dataframe(record: dict) -> DataFrame:
results = []
key_field = record["key_field"]
for slot_name in record["slots"]:
results.append({
"Id": key_field,
"Slot": slot_name,
"Weight": record["slots"][slot_name]["weight"],
"zScore": record["slots"][slot_name]["z_score"],
"zScoreNorm": record["slots"][slot_name]["z_score_norm"]})
return pd.DataFrame(results)
def by_value_sum(self,
minimum_value_sum: int = None,
maximum_value_sum: int = None,
key_fields_only: bool = False) -> list:
from datamongo.slots.dmo import SlotValueFilter
slot_value_filter = SlotValueFilter(some_records=self.collection.all())
return slot_value_filter.process(minimum_value_sum=minimum_value_sum,
maximum_value_sum=maximum_value_sum,
key_fields_only=key_fields_only)
def by_slot_value(self,
region: str,
slot_name: str,
minimum_value_sum: float = None,
maximum_value_sum: float = None,
minimum_band_level: int = None,
maximum_band_level: int = None) -> list:
from datamongo.slots.svc import GenerateSlotQuery
slot_query = GenerateSlotQuery(is_debug=self.is_debug)
d_query = slot_query.process(region=region,
slot_name=slot_name,
minimum_value_sum=minimum_value_sum,
maximum_value_sum=maximum_value_sum,
minimum_band_level=minimum_band_level,
maximum_band_level=maximum_band_level)
results = self.collection.find_by_query(d_query)
if not results:
results = []
self.logger.debug("\n".join([
f"Slot Value Query (total={len(results)}): ",
pprint.pformat(d_query, indent=4)]))
return results
```
#### File: datamongo/collections/employees_band_region.py
```python
from base import BaseObject
from base import RecordUnavailableRecord
from datamongo.core.bp import CendantCollection
from datamongo.core.dmo import BaseMongoClient
class BandRegion(BaseObject):
""" Collection Wrapper over MongoDB Collection
for "_employees_band_region" """
def __init__(self,
some_base_client=None):
"""
Created:
16-Apr-2019
<EMAIL>
"""
BaseObject.__init__(self, __name__)
if not some_base_client:
some_base_client = BaseMongoClient()
self.collection = CendantCollection(some_base_client=some_base_client,
some_db_name="cendant",
some_collection_name="ingest_band_region")
@staticmethod
def _reverse(some_d: dict) -> dict:
d_reversed = {}
for k in some_d:
v = some_d[k]
if v not in d_reversed:
d_reversed[v] = []
d_reversed[v].append(k)
return d_reversed
def all_region_by_cnum(self,
reverse=False):
records = self.collection.all()
d_index = {}
ctr = 0
for record in records:
ctr += 1
cnum = [x["value"] for x in record["fields"] if x["name"] == "serial_number"][0]
value = [x["value"] for x in record["fields"] if x["name"] == "region"][0]
if value:
d_index[cnum] = value
if reverse:
return self._reverse(d_index)
return d_index
def all_band_by_cnum(self,
reverse=False):
records = self.collection.all()
d_index = {}
for record in records:
cnum = [x["value"] for x in record["fields"] if x["name"] == "serial_number"][0]
value = [x["value"] for x in record["fields"] if x["name"] == "band"][0]
if value:
d_index[cnum] = value
if reverse:
return self._reverse(d_index)
return d_index
def _record_by_cnum(self,
some_serial_number: str,
raise_error: bool):
record = self.collection.by_field("fields.value", some_serial_number)
if not record:
error = "\n".join([
"Record Not Found (serial-number={})".format(
some_serial_number)])
if raise_error:
raise RecordUnavailableRecord(error)
self.logger.error(error)
return record
def band_by_cnum(self,
some_serial_number: str,
raise_error: bool = True) -> int:
record = self._record_by_cnum(some_serial_number,
raise_error)
if not record:
return -1
for field in record["fields"]:
if field["name"] == "band":
return int(field["value"])
def region_by_cnum(self,
some_serial_number: str,
raise_error: bool = True) -> str:
record = self._record_by_cnum(some_serial_number,
raise_error)
if not record:
return "Unknown"
for field in record["fields"]:
if field["name"] == "region":
return field["value"]
```
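A minimal usage sketch for `BandRegion`, assuming the import path follows the file layout above and that a MongoDB connection is reachable via the usual `MONGO_*` environment variables; the serial number is a placeholder.
```python
# Hypothetical usage sketch; import path assumed from the file layout above.
from datamongo.collections.employees_band_region import BandRegion

band_region = BandRegion()  # builds its own BaseMongoClient when none is supplied

# single lookups by serial number ("0000001" is a placeholder value)
band = band_region.band_by_cnum("0000001", raise_error=False)      # -1 when not found
region = band_region.region_by_cnum("0000001", raise_error=False)  # "Unknown" when not found

# full indexes, optionally reversed (value -> list of serial numbers)
d_band_by_cnum = band_region.all_band_by_cnum()
d_cnums_by_region = band_region.all_region_by_cnum(reverse=True)
```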
#### File: datamongo/collections/github_src.py
```python
from base import BaseObject
from datamongo.core.bp import CendantCollection
class GitHubSrc(BaseObject):
""" GitHub Src Collection Wrapper
"""
def __init__(self,
collection_name: str,
is_debug: bool = False):
"""
Created:
7-Dec-2019
<EMAIL>
* renamed from 'github-data-loader'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1553
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._collection = CendantCollection(is_debug=self._is_debug,
some_collection_name=collection_name)
def flush(self):
self._collection.delete()
def create_indices(self):
from datamongo.text.svc import CreateFieldIndex
field_index_creator = CreateFieldIndex(is_debug=self._is_debug,
collection=self._collection.collection)
field_index_creator.process(field_name="key_field")
field_index_creator.process(field_name="key_field_parent")
field_index_creator.process(field_name="div_field")
def insert(self,
source_records: list) -> None:
        if not source_records:
            self.logger.warning("No Records Provided")
            return
        self._collection.insert_many(documents=source_records,
                                     some_caller=str(__name__),
                                     ordered_but_slower=True)
        if self._is_debug:
self.logger.debug('\n'.join([
"GitHub Loading Completed",
f"\tTotal Records: {len(source_records)}",
f"\tCollection Name: {self._collection.collection_name}"]))
```
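A usage sketch for `GitHubSrc`; the import path and the collection name are assumptions, and the sample record only illustrates the three indexed fields.
```python
# Hypothetical usage sketch; import path and collection name are placeholders.
from datamongo.collections.github_src import GitHubSrc

github_src = GitHubSrc(collection_name="github_src_20191207",
                       is_debug=True)

github_src.flush()            # drop any existing documents
github_src.create_indices()   # index key_field, key_field_parent and div_field

# illustrative record only; real documents come from the GitHub ingestion pipeline
github_src.insert(source_records=[{"key_field": "repo/issue-1",
                                   "key_field_parent": "repo",
                                   "div_field": "gts"}])
```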
#### File: core/dmo/base_mongo_client.py
```python
import os
import warnings
from pymongo import MongoClient
from base import BaseObject
from base import CredentialsFromJson
from base import CryptoBase
class BaseMongoClient(BaseObject):
""" Create a MongoDB connection """
def __init__(self,
some_mongo_host: str = None,
server_alias: str = None,
is_debug: bool = False):
"""
Created:
14-Apr-2019
<EMAIL>
* based on 'base-mongo-client-1'
Updated:
6-Jun-2019
<EMAIL>
* enable username/password authentication
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/336
Updated:
15-Aug-2019
<EMAIL>
* added 'log' function
Updated:
04-Dec-2019
<EMAIL>
* moved code to CredentialsFromJson
Updated:
15-Jan-2020
<EMAIL>
* added server_alias param
Updated:
13-Feb-2020
<EMAIL>
* default instantiation to 'cloud' if no parameter provided
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1855
Updated:
23-Feb-2020
<EMAIL>
* Ignore server alias when running on kubernetes. This is a quick hack
because the skills api is breaking in kuberentes.
Proper solution would be to remove server alias from everywhere and
migrate our servers to the cloud instance.
"""
BaseObject.__init__(self, __name__)
if some_mongo_host:
warning_txt = 'the some_mongo_host parameter is ignored; set MONGO_JSON_CREDENTIALS instead'
# Do not use DeprecationWarning because it is hidden by jupyter and REPL
warnings.warn(warning_txt, Warning)
# if not some_mongo_host and not server_alias:
# server_alias = 'CLOUD' # GIT-1855-17765027
if 'KUBERNETES_SERVICE_HOST' in os.environ:
server_alias = None
if server_alias:
server_alias = server_alias.upper()
if server_alias not in ['WFTAG', 'CLOUD']:
raise ValueError('Invalid server alias')
url = None
ca_file = None
if server_alias or 'MONGO_JSON_CREDENTIALS' in os.environ:
env_var_name = f'MONGO_JSON_CREDENTIALS_{server_alias}' if server_alias else 'MONGO_JSON_CREDENTIALS'
credentials = CredentialsFromJson(os.environ[env_var_name],
'mongodb')
url = credentials.url
ca_file = credentials.ca_file
if not url:
host = os.environ["MONGO_HOST"]
port = int(os.environ["MONGO_PORT"])
username = CryptoBase.decrypt_str(os.environ["MONGO_USER_NAME"])
password = CryptoBase.decrypt_str(os.environ["MONGO_PASS_WORD"])
url = f"mongodb://{username}:{password}@{host}:{port}/"
if not url:
raise RuntimeError("Bad MONGO_ environment variables")
self.client = MongoClient(url, ssl_ca_certs=ca_file)
self.url = CredentialsFromJson.sanitize_url(url, 'mongodb')
if is_debug:
self.logger.debug('\n'.join([
"MongoDB Connection Opened",
f"\tURL: {self.url}"]))
def log(self) -> str:
return f"Connected on {self.url}"
```
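A connection sketch for `BaseMongoClient`. Either `MONGO_JSON_CREDENTIALS` (optionally suffixed with a server alias) or the `MONGO_HOST` / `MONGO_PORT` / `MONGO_USER_NAME` / `MONGO_PASS_WORD` variables must already be set; nothing here is a substitute for real credentials.
```python
# Minimal sketch, assuming the MONGO_* environment variables are already configured.
from datamongo.core.dmo import BaseMongoClient

client = BaseMongoClient(server_alias="CLOUD",  # ignored when running on Kubernetes
                         is_debug=True)
print(client.log())                             # "Connected on mongodb://..."

# the underlying pymongo MongoClient is exposed as .client
print(client.client.list_database_names()[:5])
```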
#### File: core/dto/cendant_enums.py
```python
import os
from enum import Enum
class CendantCollectionCategory(Enum):
DEMAND = 1
SUPPLY = 2
LEARNING = 3
@staticmethod
    def find(some_name: str) -> 'CendantCollectionCategory':
some_name = some_name.lower()
# high precision match
if some_name == "demand":
return CendantCollectionCategory.DEMAND
if some_name == "supply":
return CendantCollectionCategory.SUPPLY
if some_name == "learning":
return CendantCollectionCategory.LEARNING
# partial precision match
if "demand" in some_name:
return CendantCollectionCategory.DEMAND
if "supply" in some_name:
return CendantCollectionCategory.SUPPLY
if "learning" in some_name:
return CendantCollectionCategory.LEARNING
raise NotImplementedError(f"Unrecognized Category: {some_name}")
class CendantCollectionType(Enum):
SRC = 1
TAG = 2
XDM = 3
@staticmethod
    def find(some_name: str) -> 'CendantCollectionType':
some_name = some_name.lower()
# high precision match
if some_name == "src":
return CendantCollectionType.SRC
if some_name == "tag":
return CendantCollectionType.TAG
if some_name == "xdm":
return CendantCollectionType.XDM
# partial precision match
if "src" in some_name:
return CendantCollectionType.SRC
if "tag" in some_name:
return CendantCollectionType.TAG
if "xdm" in some_name:
return CendantCollectionType.XDM
raise NotImplementedError(f"Unrecognized Type: {some_name}")
class CendantCollectionUsage(Enum):
BUILD = 1
USE = 2
@staticmethod
    def find(some_name: str) -> 'CendantCollectionUsage':
some_name = some_name.lower()
# high precision match
if some_name == "build":
return CendantCollectionUsage.BUILD
if some_name == "use":
return CendantCollectionUsage.USE
# partial precision match
if "build" in some_name:
return CendantCollectionUsage.BUILD
if "use" in some_name:
return CendantCollectionUsage.USE
raise NotImplementedError(f"Unrecognized Type: {some_name}")
class CollectionFinder(object):
@staticmethod
def find_xdm(collection_category: CendantCollectionCategory or str,
collection_usage: CendantCollectionUsage = CendantCollectionUsage.USE) -> str:
if type(collection_category) == str:
collection_category = CendantCollectionCategory.find(collection_category)
return __class__.find(collection_type=CendantCollectionType.XDM,
collection_category=collection_category,
collection_usage=collection_usage)
@staticmethod
def find_tag(collection_category: CendantCollectionCategory or str,
collection_usage: CendantCollectionUsage = CendantCollectionUsage.USE) -> str:
if type(collection_category) == str:
collection_category = CendantCollectionCategory.find(collection_category)
return __class__.find(collection_type=CendantCollectionType.TAG,
collection_category=collection_category,
collection_usage=collection_usage)
@staticmethod
def find_src(collection_category: CendantCollectionCategory or str,
collection_usage: CendantCollectionUsage = CendantCollectionUsage.USE) -> str:
if type(collection_category) == str:
collection_category = CendantCollectionCategory.find(collection_category)
return __class__.find(collection_type=CendantCollectionType.SRC,
collection_category=collection_category,
collection_usage=collection_usage)
@staticmethod
def find(
collection_type: CendantCollectionType,
collection_category: CendantCollectionCategory,
collection_usage: CendantCollectionUsage = CendantCollectionUsage.USE) -> str:
def supply_key() -> str:
if CendantCollectionType.SRC == collection_type:
if CendantCollectionUsage.USE == collection_usage:
return "SUPPLY_SRC_USE"
return "SUPPLY_SRC_BUILD"
elif CendantCollectionType.TAG == collection_type:
if CendantCollectionUsage.USE == collection_usage:
return "SUPPLY_TAG_USE"
return "SUPPLY_TAG_BUILD"
elif CendantCollectionType.XDM == collection_type:
if CendantCollectionUsage.USE == collection_usage:
return "SUPPLY_XDM_USE"
return "SUPPLY_XDM_BUILD"
def demand_key():
if CendantCollectionType.SRC == collection_type:
if CendantCollectionUsage.USE == collection_usage:
return "DEMAND_SRC_USE"
return "DEMAND_SRC_BUILD"
elif CendantCollectionType.TAG == collection_type:
if CendantCollectionUsage.USE == collection_usage:
return "DEMAND_TAG_USE"
return "DEMAND_TAG_BUILD"
elif CendantCollectionType.XDM == collection_type:
if CendantCollectionUsage.USE == collection_usage:
return "DEMAND_XDM_USE"
return "DEMAND_XDM_BUILD"
def learning_key():
if CendantCollectionType.SRC == collection_type:
if CendantCollectionUsage.USE == collection_usage:
return "LEARNING_SRC_USE"
return "LEARNING_SRC_BUILD"
elif CendantCollectionType.TAG == collection_type:
if CendantCollectionUsage.USE == collection_usage:
return "LEARNING_TAG_USE"
return "LEARNING_TAG_BUILD"
elif CendantCollectionType.XDM == collection_type:
if CendantCollectionUsage.USE == collection_usage:
return "LEARNING_XDM_USE"
return "LEARNING_XDM_BUILD"
def key():
if CendantCollectionCategory.SUPPLY == collection_category:
return supply_key()
elif CendantCollectionCategory.DEMAND == collection_category:
return demand_key()
elif CendantCollectionCategory.LEARNING == collection_category:
return learning_key()
key = key()
value = os.environ[key]
print(f"Located Cendant Collection: "
f"(type={collection_type}, "
f"category={collection_category}, "
f"key={key}, "
f"collection={value})")
return value
```
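A sketch of how `CollectionFinder` resolves collection names. The environment variables it reads (e.g. `SUPPLY_SRC_USE`, `DEMAND_TAG_BUILD`) must map to real collection names; the values below are placeholders, and the import path is assumed from the file layout.
```python
# Hypothetical usage sketch; environment values and import path are assumptions.
import os
from datamongo.core.dto.cendant_enums import (CendantCollectionCategory,
                                              CendantCollectionUsage,
                                              CollectionFinder)

os.environ.setdefault("SUPPLY_SRC_USE", "supply_src_20200101")     # placeholder
os.environ.setdefault("DEMAND_TAG_BUILD", "demand_tag_20200101")   # placeholder

src_collection = CollectionFinder.find_src("supply")  # category may be passed as a string
tag_collection = CollectionFinder.find_tag(CendantCollectionCategory.DEMAND,
                                           collection_usage=CendantCollectionUsage.BUILD)
print(src_collection, tag_collection)
```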
#### File: core/svc/delete_record.py
```python
from base import BaseObject
from base import MandatoryParamError
class DeleteRecord(BaseObject):
""" API for Deleting a Record (or series of Records) from MongoDB """
def __init__(self, some_collection):
"""
Created:
28-Nov-2018
<EMAIL>
Updated:
15-Mar-2019
<EMAIL>
* add strict typing
"""
BaseObject.__init__(self, __name__)
if not some_collection:
raise MandatoryParamError("Collection")
self.collection = some_collection
def all(self,
is_debug: bool = False) -> int:
"""
:return:
            the total records deleted
"""
records_deleted = self.collection.delete_many({}).deleted_count
if is_debug:
self.logger.debug("\n".join([
"Deleted Records (all)",
"\ttotal: {0}".format(records_deleted),
"\tcollection: {0}".format(self.collection.name)
]))
return records_deleted
def by_id(self,
some_id: str,
is_debug: bool = False) -> int:
"""
:param some_id:
the ID of the dialog structure to delete
:param is_debug:
:return:
            the total records deleted
"""
query = {"_id": some_id}
records_deleted = self.collection.delete_many(
query).deleted_count
if is_debug:
self.logger.debug("\n".join([
"Deleted Records (by id)",
"\tId: {0}".format(some_id),
"\tTotal: {0}".format(records_deleted),
"\tCollection: {0}".format(self.collection.name)]))
return records_deleted
def by_query(self,
some_query: dict,
is_debug: bool = False) -> int:
"""
:param some_query:
the query pattern to delete
:param is_debug:
:return:
            the total records deleted
"""
records_deleted = self.collection.delete_many(
some_query).deleted_count
if is_debug:
            self.logger.debug("\n".join([
                "Deleted Records (by query)",
"\tQuery: {0}".format(some_query),
"\tTotal: {0}".format(records_deleted),
"\tCollection: {0}".format(self.collection.name)]))
return records_deleted
def by_ts(self, tts,
is_debug: bool = False) -> int:
"""
:param tts:
the timestamp
:param is_debug:
:return:
            the total records deleted
"""
query = {"ts": tts}
records_deleted = self.collection.delete_many(
query).deleted_count
if is_debug:
            self.logger.debug("\n".join([
                "Deleted Records (by ts)",
                "\ttts: {0}".format(tts),
"\ttotal: {0}".format(records_deleted),
"\tcollection: {0}".format(self.collection.name)
]))
return records_deleted
```
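A usage sketch for `DeleteRecord`. It operates on a raw pymongo collection, so the `CendantCollection` wrapper's `.collection` attribute (used the same way by `GitHubSrc.create_indices` above) is passed in; the query is illustrative only.
```python
# Hypothetical usage sketch; the query and collection name are placeholders.
from datamongo.core.dmo import BaseMongoClient
from datamongo.core.bp import CendantCollection
from datamongo.core.svc import DeleteRecord   # import path assumed from the file layout

cendant_collection = CendantCollection(some_base_client=BaseMongoClient(),
                                       some_db_name="cendant",
                                       some_collection_name="ingest_band_region")

deleter = DeleteRecord(cendant_collection.collection)  # raw pymongo collection
total = deleter.by_query({"fields.name": "band"}, is_debug=True)
print(f"deleted {total} records")
```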
#### File: slots/dmo/reverse_slot_index.py
```python
import pandas as pd
from pandas import DataFrame
from base import BaseObject
from base import MandatoryParamError
class ReverseSlotIndex(BaseObject):
""" Create a Reverse Index for a given slot """
_records = None
def __init__(self,
some_records: list,
some_slot_name: str):
"""
Created:
1-May-2019
<EMAIL>
* refactored out of -learning-dimensions
"""
BaseObject.__init__(self, __name__)
        if not some_records:
raise MandatoryParamError("Records")
if not some_slot_name:
raise MandatoryParamError("Slot Name")
self.records = some_records
self.slot_name = some_slot_name
def _results(self):
results = []
for record in self.records:
for slot in record["slots"]:
if slot != self.slot_name:
continue
value = record["slots"][slot]
if value <= 0:
continue
results.append({"KeyField": record["key_field"],
"Value": value})
return results
def process(self,
sort_ascending: bool = True) -> DataFrame:
df = pd.DataFrame(self._results())
if sort_ascending and len(df):
df = df.sort_values(['Value'],
ascending=[False])
return df
```
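An in-memory sketch for `ReverseSlotIndex`; the records are fabricated to show the expected shape (a `key_field` plus a `slots` mapping of slot name to numeric value), and the import path is assumed.
```python
# Minimal sketch with fabricated records; import path assumed from the file layout.
from datamongo.slots.dmo.reverse_slot_index import ReverseSlotIndex

records = [
    {"key_field": "0000001", "slots": {"hard skill": 3.5, "soft skill": 0.0}},
    {"key_field": "0000002", "slots": {"hard skill": 1.0}}]

index = ReverseSlotIndex(some_records=records,
                         some_slot_name="hard skill")
df = index.process()   # KeyField / Value columns, highest value first
print(df)
```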
#### File: text/dmo/text_query_windower.py
```python
import string
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class TextQueryWindower(BaseObject):
""" Window Text Query Results
"""
__exclude = set(string.punctuation)
def __init__(self,
query_results: dict,
is_debug: bool = False):
"""
Created:
<EMAIL>
16-Oct-2019
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1122#issuecomment-15340437
        :param query_results:
            the text query results to window
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._query_results = query_results
def _to_text(self):
"""
Purpose:
Transform Query results into pure text
:return:
return a list of text results only
"""
values = set()
for cnum in self._query_results:
            values.update(d['value'] for d in self._query_results[cnum])
return sorted(values)
def _tokens(self,
term: str,
input_text: str) -> list:
input_text = input_text.lower().replace('\t', ' ')
input_text = ''.join(ch for ch in input_text if ch not in self.__exclude)
tokens = input_text.split(' ')
tokens = [x.strip() for x in tokens if x and len(x.strip())]
tokens = [x.lower() for x in tokens]
if ' ' not in term: # return unigrams
return tokens
if term.count(' ') == 1: # return bigrams
s = set()
for i in range(0, len(tokens)):
if i + 1 < len(tokens):
s.add(f"{tokens[i]} {tokens[i + 1]}")
return sorted(s)
raise NotImplementedError
def process(self,
term: str,
window_size: int = 5) -> DataFrame:
"""
:param term:
:param window_size:
:return:
"""
master = []
term = term.lower().strip()
for input_text in self._to_text():
tokens = self._tokens(term, input_text)
n = tokens.index(term)
def pos_x():
if n - window_size >= 0:
return n - window_size
return 0
def pos_y():
if n + window_size < len(tokens):
return n + window_size
return len(tokens)
x = pos_x()
y = pos_y()
def l_context():
return ' '.join(tokens[x:n]).strip()
def r_context():
return ' '.join(tokens[n + 1:y]).strip()
master.append({
"A": l_context(),
"B": tokens[n],
"C": r_context()})
return pd.DataFrame(master).sort_values(
by=['A'], ascending=False)
```
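A sketch for `TextQueryWindower`; `query_results` mirrors the structure consumed by `_to_text()` above, i.e. a mapping of serial number to a list of `{"value": <text>}` entries.
```python
# Minimal sketch with fabricated search results.
from datamongo.text.dmo import TextQueryWindower

query_results = {
    "0000001": [{"value": "a chain of coffee retailers within the region"}],
    "0000002": [{"value": "plan events - coffee with leaders"}]}

windower = TextQueryWindower(query_results=query_results, is_debug=True)
df = windower.process(term="coffee", window_size=3)  # columns: A (left), B (term), C (right)
print(df)
```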
#### File: text/svc/create_field_index.py
```python
import pymongo
from pymongo.collection import Collection
from pymongo.errors import OperationFailure
from base import BaseObject
class CreateFieldIndex(BaseObject):
""" Creates a basic Field Index on a MongoDB Collection
"""
_records = None
def __init__(self,
collection: Collection,
is_debug: bool = False):
"""
Created:
5-Dec-2019
<EMAIL>
* based on 'create-text-index'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1515#issuecomment-16439788
Updated:
6-Dec-2019
abhbasu3
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1529
Updated:
17-Dec-2019
<EMAIL>
* Allow options. Do not force foreground indexing
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._collection = collection
def process(self,
field_name: str,
sort_order: int = pymongo.ASCENDING,
default_language: str = 'english',
index_options = None) -> None:
def _field_name():
return field_name.replace('.', '_')
try:
if index_options is None:
index_options = {}
field_index = [(field_name, sort_order)]
self._collection.create_index(field_index,
name=_field_name(),
default_language=default_language,
**index_options)
except OperationFailure as err:
self.logger.error('\n'.join([
f"Index Creation Error",
f"\tField Name: {field_name}",
f"\tSort Order: {sort_order}",
f"\tDefault Language: {default_language}",
f"\tCollection: {self._collection.name}"]))
self.logger.exception(err)
# raise ValueError
```
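A usage sketch for `CreateFieldIndex`, reusing the `CendantCollection` wrapper's raw pymongo collection; the `background` option illustrates the 17-Dec-2019 change that stopped forcing foreground indexing.
```python
# Hypothetical usage sketch; collection name is a placeholder.
from datamongo.core.dmo import BaseMongoClient
from datamongo.core.bp import CendantCollection
from datamongo.text.svc import CreateFieldIndex

cendant_collection = CendantCollection(some_base_client=BaseMongoClient(),
                                       some_db_name="cendant",
                                       some_collection_name="ingest_band_region")

indexer = CreateFieldIndex(collection=cendant_collection.collection,
                           is_debug=True)
indexer.process(field_name="key_field",
                index_options={"background": True})  # build the index in the background
```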
#### File: text/svc/search_source_collection.py
```python
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
from datamongo import CendantSrc
from datamongo.core.dmo import BaseMongoClient
class SearchSourceCollection(BaseObject):
""" Perform a Text Query on a Cendant Source Collection
Sample Output:
+-----+---------------------------------------+--------+---------------------------------+
| | A | B | C |
|-----+---------------------------------------+--------+---------------------------------|
| 0 | an MVP for | coffee | recommendation based |
| 1 | a chain of | coffee | retailers within |
| 2 | while installing new | coffee | machines, tea |
| 3 | plan events - | coffee | with leaders |
| 4 | taekwondo, karate, handicrafts | coffee | making. She |
| 5 | Town Hall session, | coffee | session, 1-to-1 |
...
| 558 | for forecast of | coffee | and tea |
+-----+---------------------------------------+--------+---------------------------------+
"""
_records = None
def __init__(self,
name: str,
term: str,
window_size: int,
mongo_client: BaseMongoClient,
is_debug: bool = False):
"""
Created:
16-Oct-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1122
:param mongo_client:
the instantiated connection to mongoDB
:param is_debug:
True write debug statements to the console
"""
BaseObject.__init__(self, __name__)
self._name = name
self._term = term
self._is_debug = is_debug
self._window_size = window_size
self._mongo_client = mongo_client
def _cendant_src(self):
return CendantSrc(collection_name=self._name,
mongo_client=self._mongo_client,
is_debug=self._is_debug)
def _full_text_search(self,
cendant_src: CendantSrc) -> dict:
d_results = cendant_src.full_text_search(some_term=self._term)
if self._is_debug:
self.logger.debug('\n'.join([
"Full Text Search Completed",
f"\tCollection Name: {self._name}",
f"\tSearch Term: {self._term}",
f"\tResults: {len(d_results)}"]))
return d_results
def _window_query(self,
d_results: dict):
from datamongo.text.dmo import TextQueryWindower
windower = TextQueryWindower(query_results=d_results,
is_debug=self._is_debug)
df_results = windower.process(term=self._term,
window_size=self._window_size)
if self._is_debug:
self.logger.debug('\n'.join([
f"Query Results (term={self._term}, total={len(df_results)})",
tabulate(df_results,
headers='keys',
tablefmt='psql')]))
return df_results
def process(self) -> DataFrame:
"""
:return:
a DataFrame of results
"""
cendant_src = self._cendant_src()
d_results = self._full_text_search(cendant_src)
df_results = self._window_query(d_results)
return df_results
```
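A usage sketch for `SearchSourceCollection`; the collection name is a placeholder, the import path is assumed, and the call performs a real full-text search against MongoDB.
```python
# Hypothetical usage sketch; collection name is a placeholder.
from datamongo.core.dmo import BaseMongoClient
from datamongo.text.svc import SearchSourceCollection  # import path assumed

searcher = SearchSourceCollection(name="supply_src_20200101",
                                  term="coffee",
                                  window_size=5,
                                  mongo_client=BaseMongoClient(),
                                  is_debug=True)
df_results = searcher.process()  # windowed A / B / C DataFrame as in the docstring
```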
#### File: core/dmo/event_message_parser.py
```python
import pprint
from slack import RTMClient
from slack import WebClient
from base import BaseObject
class EventMessageParser(BaseObject):
""" Parse a Message Event """
def __init__(self,
payload: dict):
"""
Created:
18-Jul-2019
<EMAIL>
* references:
https://pypi.org/project/slackclient/
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/453
"""
BaseObject.__init__(self, __name__)
self.payload = payload
self.event = payload['data']
def _attr(self,
key_name: str):
if key_name in self.event:
return self.event[key_name]
if 'message' in self.event:
if key_name in self.event['message']:
return self.event['message'][key_name]
self.logger.warning("\n".join([
pprint.pformat(self.payload)]))
raise NotImplementedError("\n".join([
"Unexpected Message Event Structure (key={})".format(
key_name)]))
def web_client(self) -> WebClient:
return self.payload['web_client']
def rtm_client(self) -> RTMClient:
return self.payload['rtm_client']
def user(self):
return self._attr("user")
def text(self):
return self._attr("text")
def channel(self):
return self.event['channel']
def ts(self):
return self.event['ts']
```
#### File: core/svc/predict_intents.py
```python
import pprint
from base import BaseObject
from datamongo import BaseMongoClient
from datamongo import SlackEventCollection
from nlusvc import TextAPI
class PredictIntents(BaseObject):
_text_api = None
def __init__(self,
min_confidence: int,
trigger_ts: float = None,
key_by_flows: bool = False,
preprocess_text: bool = True,
persist_result: bool = True,
base_mongo_client: BaseMongoClient = None,
is_debug: bool = False):
"""
Created:
18-Jul-2019
<EMAIL>
* refactored out of abacus-mapping-api
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/453
:param min_confidence:
int the minimum confidence threshold to deal with
:param trigger_ts:
float (Optional) the timestamp of the event
that triggered this request
:param persist_result:
if True persist the result to MongoDB
:param preprocess_text:
if True perform preprocessing on the input text
:param key_by_flows:
if True the service result will look like
{ FLOW_NAME_1: 80,
FLOW_NAME_2: 80,
FLOW_NAME_3: 75 }
if False the service result will look like
{ 80: [ FLOW_NAME_1, FLOW_NAME_2 ]
75: [ FLOW_NAME_3 }
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self.is_debug = is_debug
self.trigger_ts = trigger_ts
self.key_by_flows = key_by_flows
self.persist_result = persist_result
self.min_confidence = min_confidence
self.preprocess_text = preprocess_text
self.mapping_event_collection = SlackEventCollection(base_mongo_client=base_mongo_client,
collection_name="event_mapping")
def service_result(self,
svcresult: dict,
input_text_original: str,
input_text: str,
input_tags: list or None) -> dict:
if not input_tags:
input_tags = []
svcresult = {
"ts": BaseObject.generate_tts(),
"trigger_ts": self.trigger_ts,
"input": {
"tags": input_tags,
"text": {
"original": input_text_original,
"normalized": input_text}},
"output": svcresult,
"options": {
"preprocess_text": self.preprocess_text,
"min_confidence": self.min_confidence,
"key_by_flows": self.key_by_flows}}
return svcresult
def _by_text(self,
input_text: str) -> dict:
from nluflows.mapping.bp import ServiceCatalogMapper
from nluflows.summarize.bp import ServiceCatalogSummarizer
if not self._text_api:
self._text_api = TextAPI()
original_text = input_text
# Step: Tag the Input Text
if self.preprocess_text:
input_text = self._text_api.preprocess(input_text,
lowercase=True,
no_urls=True,
no_emails=True,
no_phone_numbers=True,
no_numbers=True,
no_currency_symbols=True,
no_punct=True,
no_contractions=True,
no_accents=True,
remove_inside_brackets=True)
def _intent_not_tagged():
return self.service_result(svcresult=self._intent_not_tagged(),
input_text=input_text,
input_text_original=original_text,
input_tags=None)
def _intent_not_mapped(some_input_tags: list):
return self.service_result(svcresult=self._intent_not_mapped(),
input_text=input_text,
input_text_original=original_text,
input_tags=some_input_tags)
def _intent_mapped(analysis: dict,
some_input_tags: list):
            return self.service_result(svcresult=analysis,
input_text=input_text,
input_text_original=original_text,
input_tags=some_input_tags)
# Step: Parse the Input Text
df_results = self._text_api.parse(input_text)
if df_results.empty:
return _intent_not_tagged()
input_tags = sorted(df_results.Tag.unique())
# Step: Map Tags to Intents
analyses = ServiceCatalogMapper(some_tags=input_tags,
is_debug=self.is_debug).process()
if not analyses or not len(analyses):
return _intent_not_mapped(some_input_tags=input_tags)
# Step: Summarize Mapping Results
analyses = ServiceCatalogSummarizer(some_analyses=analyses,
is_debug=self.is_debug).process()
if not analyses or not len(analyses):
return _intent_not_mapped(some_input_tags=input_tags)
# Step: Normalize Results
analyses = self._normalize(analyses)
if not analyses or not len(analyses):
return _intent_not_mapped(some_input_tags=input_tags)
# Step: Return Final Results
return _intent_mapped(analyses, input_tags)
@staticmethod
def _intent_not_tagged() -> dict:
return {'100': "UNKNOWN_NOT_TAGGED"}
@staticmethod
def _intent_not_mapped() -> dict:
return {'100': "UNKNOWN_NOT_MAPPED"}
def _normalize(self,
analyses: dict) -> dict:
d_normal = {}
for k in analyses:
if k >= self.min_confidence:
d_normal[str(int(k))] = analyses[k]
return d_normal
def process(self,
input_text: str) -> dict:
"""
:param input_text:
:return:
{ 'input': { 'tags': ['Hello'],
'text':
{ 'normalized': 'hello',
'original': 'Hello <@ULLURKNFR>' }},
'options': {
'key_by_flows': False,
'min_confidence': 80,
'preprocess_text': True },
'output': {
'80': ['CHITCHAT_GREETING'] },
'trigger_ts': '1563491078.019700',
'ts': '1563491080.2059991'
}
"""
svcresult = self._by_text(input_text=input_text)
if self.is_debug:
self.logger.debug("\n".join([
"Intent Prediction Completed",
pprint.pformat(svcresult)]))
if self.persist_result:
self.mapping_event_collection.save(svcresult)
return svcresult
```
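A sketch for `PredictIntents` with persistence disabled so the result is not written back to MongoDB; the import path is assumed from the file layout and the `nluflows` package names visible in the imports above.
```python
# Hypothetical usage sketch; import path assumed, persistence disabled.
from datamongo import BaseMongoClient
from nluflows.core.svc import PredictIntents

predictor = PredictIntents(min_confidence=80,
                           persist_result=False,
                           base_mongo_client=BaseMongoClient(),
                           is_debug=True)

svcresult = predictor.process("Hello <@ULLURKNFR>")
print(svcresult["output"])  # e.g. {'80': ['CHITCHAT_GREETING']} per the docstring above
```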
#### File: mapping/dmo/flag_computer.py
```python
from base import BaseObject
class FlagComputer(BaseObject):
def __init__(self,
some_analyses: dict,
is_debug: bool = False):
"""
Created:
17-Jul-2019
<EMAIL>
* refactored out of 'compute-confidence-levels'
:param some_analyses:
"""
BaseObject.__init__(self, __name__)
self.analyses = some_analyses
self.is_debug = is_debug
def _manage_deductions(self) -> float:
"""
Manage the Deduction Flag
:return:
a Confidence Score Penalty (or boost)
"""
total = self.analyses["analysis"]["flags"]["deduction"]
if self.is_debug:
self.logger.debug("\n".join([
"Deduction Rule",
"\tName: Match",
"\tPenalty: {}".format(total)]))
return total
def _manage_discrimin_flag(self) -> float:
"""
Manage the Discriminatory Flag
:return:
a Confidence Score Penalty (or boost)
"""
total = (self.analyses["analysis"]["flags"]["discriminatory"] * 10)
if self.is_debug:
self.logger.debug("\n".join([
"Discriminatory Flag Rule",
"\tName: Match",
"\tPenalty: {}".format(total)]))
return total
def process(self) -> float:
total = 0
total -= self._manage_deductions()
total += self._manage_discrimin_flag()
return total
```
#### File: mapping/dmo/high_match_computer.py
```python
from base import BaseObject
class HighMatchComputer(BaseObject):
def __init__(self,
some_analyses: dict,
is_debug: bool = False):
"""
Create:
6-Apr-2017
<EMAIL>
* refactored out of svc:ComputeConfidenceLevels
Updated:
17-Jul-2019
<EMAIL>
* migrated from abacus-att
<EMAIL>:abacus-implementation/abacus-att.git
:param some_analyses:
"""
BaseObject.__init__(self, __name__)
self.analyses = some_analyses
self.is_debug = is_debug
def process(self):
"""
Purpose:
            1. if a mapping exists that contains an empty 'include-all-of' section,
               then the mapping is very imprecise and a deduction is applied
            2. if a mapping exists that contains a non-empty 'include-all-of' section,
               a confidence boost is applied for each matched tag
Example:
mapping-1
include-all-of [alpha, beta, gamma, delta]
mapping-2
include-all-of [alpha]
UPS:
[alpha, beta, gamma, delta]
This UPS will match both
mapping-1
mapping-2
but higher confidence should be granted to mapping-1
since more tags were matched
:return:
"""
contains = self.analyses["analysis"]["include_all_of"]["contains"]
total_include_all_of = len(contains)
if total_include_all_of > 0:
total = total_include_all_of * 5
if self.is_debug:
self.logger.debug("\n".join([
"High Match Rule",
"\tName: Boost",
"\tScore: {}".format(total)]))
return total
if total_include_all_of == 0:
if self.is_debug:
self.logger.debug("\n".join([
"High Match Rule",
"\tName: Full Inclusion Not Found",
"\tPenalty: -25"]))
return -25
if self.is_debug:
self.logger.debug("\n".join([
"High Match Rule",
"\tName: Fall Through",
"\tPenalty: 0"]))
return 0
```
#### File: summarize/dmo/analysis_summary_generator.py
```python
from base import BaseObject
class AnalysisSummaryGenerator(BaseObject):
def __init__(self, some_analyses):
"""
given sample input:
[
{ 'process': 'UC4_SC5_NETWORK_DRIVE-ALPHAT1',
'analysis': {
'include-all-of': {
'contains': ['alphatag1', 'network drive'],
'missing': ['alphatag2'],
'total': ['alphatag1', 'alphatag2', 'network drive']
},
'include-one-of': True
},
'confidence': 67.0,
'direct-match': False},
{ 'process': 'UC4_SC5_NETWORK_DRIVE-LOW_DISK_SPACE',
'analysis': {
'include-all-of': {
'contains': ['network drive'],
'missing': ['hard disk space'],
'total': ['hard disk space', 'network drive']
},
'include-one-of': True
},
'confidence': 50.0,
'direct-match': False}]
return:
{
                67: ['UC4_SC5_NETWORK_DRIVE-ALPHAT1'],
50: ['UC4_SC5_NETWORK_DRIVE-LOW_DISK_SPACE']
}
Updated:
17-Jul-2019
<EMAIL>
* migrated from abacus-att
<EMAIL>:abacus-implementation/abacus-att.git
:param some_analyses:
"""
BaseObject.__init__(self, __name__)
self.analyses = some_analyses
@staticmethod
def normalize(some_dict):
ndict = {}
for key in some_dict:
ndict[key] = list(some_dict[key])
return ndict
def process(self):
rdict = {}
for analysis in self.analyses:
confidence = analysis["confidence"]
if confidence not in rdict:
rdict[confidence] = set()
rdict[confidence].add(analysis["process"])
return self.normalize(rdict)
```
#### File: summarize/dmo/confidence_normalizer_by_value.py
```python
from base import BaseObject
# all confidence levels less than the threshold will be ignored
MIN_CONFIDENCE_THRESHOLD = 0
class ConfidenceNormalizerByValue(BaseObject):
def __init__(self, some_summarized_mapping):
"""
Purpose:
given sample input:
{
67: ['UC4_SC5_NETWORK_DRIVE'],
50: ['UC4_SC5_NETWORK_DRIVE'],
40: ['UC1_TOKEN_ISSUE_VDI_FROM_CLASS'],
0: ['UC2_SC3_GOOD_WORK_INTERPRETED_AS']}
return:
{
67: ['UC4_SC5_NETWORK_DRIVE'],
40: ['UC1_TOKEN_ISSUE_VDI_FROM_CLASS']}
Updated:
30-Mar-2017
<EMAIL>
* renamed from 'ConfidenceLevelNormalizer'
Updated:
17-Jul-2019
<EMAIL>
* migrated from abacus-att
<EMAIL>:abacus-implementation/abacus-att.git
:param some_summarized_mapping:
"""
BaseObject.__init__(self, __name__)
self.summarized_mapping = some_summarized_mapping
@staticmethod
def normalize_confidence_level(some_mapping):
"""
:return:
"""
ndict = {}
for key in some_mapping:
confidence_level = int(key)
if confidence_level <= MIN_CONFIDENCE_THRESHOLD:
continue
ndict[key] = some_mapping[key]
return ndict
@staticmethod
def create_rev_map(some_normalized_mapping_dict):
"""
:return:
"""
revmap = {}
for key in some_normalized_mapping_dict:
for some_flow in some_normalized_mapping_dict[key]:
if some_flow not in revmap:
revmap[some_flow] = []
revmap[some_flow].append(key)
return revmap
@staticmethod
def remove_dupe_flows_by_confidence(some_revmap):
"""
:param some_revmap:
:return:
"""
for flow in some_revmap:
if len(some_revmap[flow]) < 2:
some_revmap[flow] = some_revmap[flow][0]
continue
max_level = 0
for confidence_level in some_revmap[flow]:
if confidence_level > max_level:
max_level = confidence_level
some_revmap[flow] = max_level
return some_revmap
@staticmethod
def unwind_rev_map(some_revmap):
"""
:param some_revmap:
:return:
"""
ndict = {}
for flow in some_revmap:
confidence_level = some_revmap[flow]
if confidence_level not in ndict:
ndict[confidence_level] = []
ndict[confidence_level].append(flow)
return ndict
def process(self):
"""
{
67: ['UC4_SC5_NETWORK_DRIVE'],
50: ['UC4_SC5_NETWORK_DRIVE'],
40: ['UC1_TOKEN_ISSUE_VDI_FROM_CLASS'],
0: ['UC2_SC3_GOOD_WORK_INTERPRETED_AS']}
"""
normalized_mapping_dict = self.normalize_confidence_level(self.summarized_mapping)
"""
{
67: ['UC4_SC5_NETWORK_DRIVE'],
50: ['UC4_SC5_NETWORK_DRIVE'],
40: ['UC1_TOKEN_ISSUE_VDI_FROM_CLASS']}
"""
reversed_mapping_dict = self.create_rev_map(normalized_mapping_dict)
"""
{
'UC4_SC5_NETWORK_DRIVE': [50, 67],
'UC1_TOKEN_ISSUE_VDI_FROM_CLASS': [40]}
"""
reversed_mapping_dict = self.remove_dupe_flows_by_confidence(reversed_mapping_dict)
"""
{
'UC4_SC5_NETWORK_DRIVE': 67,
'UC1_TOKEN_ISSUE_VDI_FROM_CLASS': 40}
"""
normalized_mapping_dict = self.unwind_rev_map(reversed_mapping_dict)
"""
{
67: ['UC4_SC5_NETWORK_DRIVE'],
40: ['UC1_TOKEN_ISSUE_VDI_FROM_CLASS']}
"""
return normalized_mapping_dict
```
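A usage sketch that reproduces the docstring example above; only the import path is an assumption.
```python
# Sketch using the sample mapping from the docstring; import path assumed.
from nluflows.summarize.dmo.confidence_normalizer_by_value import ConfidenceNormalizerByValue

summarized_mapping = {
    67: ['UC4_SC5_NETWORK_DRIVE'],
    50: ['UC4_SC5_NETWORK_DRIVE'],
    40: ['UC1_TOKEN_ISSUE_VDI_FROM_CLASS'],
    0: ['UC2_SC3_GOOD_WORK_INTERPRETED_AS']}

normalizer = ConfidenceNormalizerByValue(summarized_mapping)
print(normalizer.process())
# {67: ['UC4_SC5_NETWORK_DRIVE'], 40: ['UC1_TOKEN_ISSUE_VDI_FROM_CLASS']}
```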
#### File: core/dmo/evidence_extractor.py
```python
from typing import Union
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
from base import DataTypeError
from base import EvidenceStructure
from base import MandatoryParamError
from datadict import FindDimensions
class EvidenceExtractor(BaseObject):
""" Generates a Dataframe of 'Evidence' for a given Serial Number (or other Key Field)
Given this input
key-field = 1812546302
schema-name = dim
Generate this Dataframe
    +----+---------------+------------+--------------------+--------------------+------------+-----------------------+------------+
    |    | Collection    | FieldName  | NormalizedText     | OriginalText       | Schema     | Tag                   | TagType    |
    |----+---------------+------------+--------------------+--------------------+------------+-----------------------+------------|
    |  0 | ingest_badges | badge_name | Mainframe Operator | Mainframe Operator | hard skill | Communications Server | supervised |
    +----+---------------+------------+--------------------+--------------------+------------+-----------------------+------------+
    Note that the actual DataFrame may be large and span hundreds of rows,
    with column lengths for unstructured text fields (such as OriginalText and NormalizedText)
    being in excess of several hundred characters
"""
__valid_tag_dtypes = [tuple, list]
def __init__(self,
xdm_schema: str,
some_records: Union[list, dict],
is_debug: bool = False):
"""
Created:
3-May-2019
<EMAIL>
* refactored out of tag-extractor with Jupyter notebooks in mind
Updated:
25-Aug-2019
<EMAIL>
* incorporate use of tag tuples
            https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/818
Updated:
15-Oct-2019
<EMAIL>
* add work-around for tag iteration defect
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1124#issue-10473464
Updated:
17-Oct-2019
<EMAIL>
* refactor component in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1142#issuecomment-15370141
Updated:
25-Oct-2019
<EMAIL>
* minor defect fix
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1199#issue-10601102
Updated:
29-Oct-2019
<EMAIL>
* remove 'entity-schema-finder' in favor of new approach
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/916#issuecomment-15620801
* remove learning-pattern class (hasn't been used for a while)
Updated:
14-Nov-2019
<EMAIL>
* add key-field to dataframe result
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1359#issue-10828085
* add transform-records
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1359#issuecomment-16009176
:param some_records:
:param xdm_schema:
:param is_debug:
"""
BaseObject.__init__(self, __name__)
if not some_records:
raise MandatoryParamError("Records")
self._is_debug = is_debug
self._records = self._transform_records(some_records)
self._xdm_finder = FindDimensions(schema=xdm_schema)
@staticmethod
def _transform_records(some_records: Union[dict, list]) -> list:
"""
Purpose:
- Evidence Extraction is called by multiple APIs, and it's challenging to control
the data types across Cendant
- Input to this class may be a
dictionary (single record)
list of dictionaries (multiple records)
list of lists
where an inner list is composed of a list of records or
where an inner list is composed of a single record
- This function will transform all these various inputs into a flat list
:param some_records:
a variety of possibilities as noted above
:return:
a flat list
"""
if type(some_records) == dict:
return [some_records]
elif type(some_records) == list:
master = []
            # records are typically dicts, which are not orderable; preserve input order
            for record in some_records:
                if type(record) == list:
                    master += record
elif type(record) == dict:
master.append(record)
else:
raise DataTypeError
return master
raise DataTypeError
def _schema(self,
some_input: str) -> list:
if not some_input:
return []
return self._xdm_finder.find(some_input)
@staticmethod
def _field_value(a_field: dict,
a_field_name: str) -> str:
if a_field_name in a_field:
if type(a_field[a_field_name]) == list and len(a_field[a_field_name]) > 0:
return a_field[a_field_name][0]
return a_field[a_field_name]
@staticmethod
def _supervised_tags(a_field: dict) -> list:
if "supervised" in a_field["tags"] and a_field["tags"]["supervised"]:
return [x for x in a_field["tags"]["supervised"] if x]
return []
@staticmethod
def _unsupervised_tags(a_field: dict) -> list:
if "unsupervised" in a_field["tags"]:
return [x for x in a_field["tags"]["unsupervised"] if x]
return []
def _build_result(self,
field: dict,
key_field: str,
a_tag_name: str or None,
a_tag_score: float or None,
a_tag_type: str or None) -> dict:
original_text = self._field_value(field, "value")
def _normalized() -> str:
if "normalized" in field:
return self._field_value(field, "normalized")
return original_text
normalized_text = _normalized()
def _collection_name(): # 1199#issue-10601102
if type(field["collection"]) == dict:
return field["collection"]["type"]
return field["collection"]
for schema_element in self._schema(a_tag_name):
return EvidenceStructure.generate(
key_field=key_field,
collection_name=_collection_name(),
field_name=field["name"],
normalized_text=normalized_text,
original_text=original_text,
tag_name=a_tag_name,
tag_score=a_tag_score,
tag_type=a_tag_type,
schema_name=schema_element)
def _validate(self,
a_tag_tuple) -> bool:
"""
:param a_tag_tuple:
a candidate tag tuple
:return:
False if the param is an invalid tuple
this is common in older collections
"""
if not a_tag_tuple:
raise ValueError("Tag Tuple Expected")
if type(a_tag_tuple) not in self.__valid_tag_dtypes:
raise DataTypeError(f"Tuple (or List) Expected: {a_tag_tuple}")
return True
def _extract_results_from_field(self,
key_field: str,
field: dict) -> list:
if "tags" not in field:
return []
results = []
def _analyze_tags(a_tag_type: str):
tag_tuples = [x for x in field["tags"][a_tag_type]
if self._validate(x)]
for tag_tuple in tag_tuples:
results.append(self._build_result(field=field,
key_field=key_field,
a_tag_name=tag_tuple[0],
a_tag_score=tag_tuple[1],
a_tag_type=a_tag_type))
for tag_type in ["supervised", "unsupervised"]:
if field["tags"][tag_type] is not None: # 1124#issuecomment-15320883
_analyze_tags(tag_type)
return results
def process(self) -> DataFrame or None:
results = []
for record in self._records:
key_field = record["key_field"]
for field in record["fields"]:
results += self._extract_results_from_field(field=field,
key_field=key_field)
df_evidence = pd.DataFrame(results)
if self._is_debug:
self.logger.debug("\n".join([
"Evidence API Complete",
"\n{}".format(tabulate(df_evidence,
headers='keys',
tablefmt='psql'))]))
if df_evidence.empty:
self.logger.warning("No Evidence Found")
return None
return df_evidence
```
#### File: core/dmo/fuzzywuzzy_matcher.py
```python
from statistics import mean
from nltk import stem
from base import BaseObject
class FuzzyWuzzyMatcher(BaseObject):
"""
"""
stemmer = stem.PorterStemmer()
def __init__(self):
"""
Created:
21-Apr-2019
<EMAIL>
"""
BaseObject.__init__(self, __name__)
@staticmethod
def process(s1: str,
s2: str,
basic: bool,
q_ratio: bool,
w_ratio: bool,
uq_ratio: bool,
uw_ratio: bool,
partial_ratio: bool,
token_sort_ratio: bool) -> dict:
from fuzzywuzzy import fuzz
s1 = s1.lower().strip()
s2 = s2.lower().strip()
d_result = {"text": {
"s1": s1,
"s2": s2},
"ratios": {}}
def _basic() -> float:
return fuzz.ratio(s1, s2)
def _q_ratio() -> float:
return fuzz.QRatio(s1, s2,
force_ascii=True,
full_process=True)
def _w_ratio() -> float:
return fuzz.WRatio(s1, s2,
force_ascii=True,
full_process=True)
def _uq_ratio() -> float:
return fuzz.UQRatio(s1, s2,
full_process=True)
def _uw_ratio() -> float:
return fuzz.UWRatio(s1, s2,
full_process=True)
def _partial_ratio() -> float:
return fuzz.partial_ratio(s1, s2)
def _token_sort_ratio() -> float:
return fuzz.token_sort_ratio(s1, s2,
force_ascii=True,
full_process=True)
ratios = []
def _add_ratio(ratio_type: str,
ratio_value: float):
ratios.append(ratio_value)
d_result["ratios"][ratio_type] = ratio_value
if basic:
_add_ratio("basic", _basic())
if partial_ratio:
_add_ratio("partial", _partial_ratio())
if token_sort_ratio:
_add_ratio("token_sort", _token_sort_ratio())
if q_ratio:
_add_ratio("q", _q_ratio())
if w_ratio:
_add_ratio("w", _w_ratio())
if uq_ratio:
_add_ratio("uq", _uq_ratio())
if uw_ratio:
_add_ratio("uw", _uw_ratio())
if len(ratios):
d_result["ratios"]["mean"] = round(mean(ratios), 1)
return d_result
```
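A usage sketch for `FuzzyWuzzyMatcher.process`; every ratio switch is spelled out because the method declares them all as required parameters, and the import path is assumed.
```python
# Hypothetical usage sketch; import path assumed from the file layout.
from nlusvc.core.dmo.fuzzywuzzy_matcher import FuzzyWuzzyMatcher

d_result = FuzzyWuzzyMatcher.process(s1="Red Hat Enterprise Linux",
                                     s2="redhat linux",
                                     basic=True,
                                     q_ratio=True,
                                     w_ratio=True,
                                     uq_ratio=False,
                                     uw_ratio=False,
                                     partial_ratio=True,
                                     token_sort_ratio=True)
print(d_result["ratios"]["mean"])  # mean of the ratios that were switched on
```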
#### File: core/dmo/generate_noun_variations.py
```python
from pandas import DataFrame
from base import BaseObject
class GenerateNounVariations(BaseObject):
"""
Given a Part-of-Speech (POS) tagged DataFrame, generate Noun variations
"""
def __init__(self,
is_debug: bool = False):
"""
Created:
9-Jul-2019
<EMAIL>
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self.is_debug = is_debug
@staticmethod
def _extract_adj_nouns(df_part_of_speech: DataFrame) -> dict:
"""
Purpose:
Find and Return ADJ/NOUN combinations
Sample Input:
The (OTHER) Quick (ADJ) Brown (ADJ) Fox (NOUN)
Sample Output:
Quick Fox, Brown Fox, Quick Brown Fox, Brown Quick Fox
:param df_part_of_speech:
a part-of-speech tagged DataFrame
:return:
generated variations
"""
adjs = []
nouns = []
d_terms = {}
def _generate():
if len(adjs) and len(nouns):
_noun = " ".join(nouns)
_adj = " ".join(adjs)
key = "{} {}".format(_adj, _noun)
if key not in d_terms:
d_terms[key] = set()
d_terms[key].add(_noun)
d_terms[key].add("{} {}".format(" ".join(reversed(adjs)), _noun))
for adj in adjs:
d_terms[key].add("{} {}".format(adj, _noun))
for _, row in df_part_of_speech.iterrows():
meta_tag = row["PartOfSpeechMeta"]
if meta_tag == "ADJ":
adjs.append(row["Lemma"])
elif meta_tag == "NOUN":
nouns.append(row["Lemma"])
else:
_generate()
adjs = []
nouns = []
_generate()
return d_terms
@staticmethod
def _extract_verb_nouns(df: DataFrame) -> dict:
d_terms = {}
metas = list(df['PartOfSpeechMeta'])
if "VERB CONNECT NOUN" not in " ".join(metas):
return d_terms
lemmas = list(df['Lemma'])
x = metas.index("VERB")
if metas[x + 1] == "CONNECT" and metas[x + 2] == "NOUN":
key = "{} {} {}".format(lemmas[x],
lemmas[x + 1],
lemmas[x + 2])
if key not in d_terms:
d_terms[key] = set()
d_terms[key].add("{} {}".format(lemmas[x + 2], lemmas[x]))
d_terms[key].add("{} {}".format(lemmas[x], lemmas[x + 2]))
return d_terms
@staticmethod
def _seq(a: list,
n: int = 3) -> list:
"""
Extract Contiguous Numerical Sequences from a List
Sample Input:
[1, 2, 4, 7, 8, 12, 13, 14, 16, 17, 19]
Sample Output (n=2):
[[12, 13], [13, 14]]
Sample Output (n=3):
[[12, 13, 14]]
:param a:
the input list of integers
:param n:
the contiguous sequence size
:return:
the list of sequences
"""
if n == 1:
return [[x] for x in a]
seqs = []
for i in range(0, len(a)):
if i + n >= len(a):
continue
def _match():
s = set()
for j in range(0, n):
if a[i + j] + 1 == a[i + j + 1]:
s.add(a[i + j])
s.add(a[i + j + 1])
else:
s = set()
if len(s) == n:
return sorted(s)
m = _match()
if m:
seqs.append(m)
return seqs
def _extract_noun_sequences(self,
df: DataFrame,
n: int) -> set:
lemmas = list(df['Lemma'])
metas = list(df['PartOfSpeechMeta'])
indices = [i for i, x in enumerate(metas) if x == "NOUN"]
sequences = self._seq(indices, n=n)
terms = set()
for seq in sequences:
terms.add(" ".join(lemmas[seq[0]:seq[len(seq) - 1] + 1]))
return terms
def process(self,
df_part_of_speech: DataFrame) -> dict:
return {
"NN2": self._extract_noun_sequences(df_part_of_speech, n=2),
"NN3": self._extract_noun_sequences(df_part_of_speech, n=3),
"NN4": self._extract_noun_sequences(df_part_of_speech, n=4),
"ADJNN": self._extract_adj_nouns(df_part_of_speech),
"VCN": self._extract_verb_nouns(df_part_of_speech)}
```
#### File: core/svc/perform_pos_tagging.py
```python
import pandas as pd
import spacy
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
from base import DataTypeError
class PerformPosTagging(BaseObject):
"""
Perform Part-of-Speech (POS) tagging
based on spaCy
https://spacy.io/usage/linguistic-features
"""
_nlp = spacy.load('en')
def __init__(self,
is_debug: bool = False):
"""
Created:
9-Jul-2019
<EMAIL>
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self.is_debug = is_debug
@staticmethod
def _upper_pos(part_of_speech: str) -> str:
part_of_speech = part_of_speech.upper()
adjectives = ["ADJ"]
nouns = ["PROPN", "NOUN"]
phrases = ["ADP", "CCONJ"]
verbs = ["VERB"]
if part_of_speech in adjectives:
return "ADJ"
if part_of_speech in nouns:
return "NOUN"
if part_of_speech in phrases:
return "CONNECT"
if part_of_speech in verbs:
return "VERB"
return "OTHER"
@staticmethod
def _normalize_pos(lemma: str,
part_of_speech: str):
"""
Purpose:
we almost always accept spaCy's decision about part-of-speech tagging
this function will encode all exceptions
:param lemma:
the text being tagged
:param part_of_speech:
spaCy part-of-speech
:return:
a normalized part-of-speech
"""
if lemma == "®":
return "OTHER"
return part_of_speech
def _tagger(self,
input_text: str) -> DataFrame:
doc = self._nlp(input_text)
results = []
for token in doc:
_normalized_pos = self._normalize_pos(token.lemma_,
token.pos_)
_upper_pos = self._upper_pos(_normalized_pos)
results.append({
"Lemma": token.lemma_,
"PartOfSpeech": _normalized_pos,
"PartOfSpeechMeta": _upper_pos,
"Tag": token.tag_,
"Dependency": token.dep_,
"Shape": token.shape_,
"IsAlpha": token.is_alpha,
"Stopword": token.is_stop})
df = pd.DataFrame(results)
if self.is_debug:
self.logger.debug("\n".join([
"Part-of-Speech Output",
tabulate(df,
headers='keys',
tablefmt='psql')]))
return df
def _from_str(self,
input_text: str) -> DataFrame:
if type(input_text) != str:
raise DataTypeError("\n".join([
"Invalid DataType"]))
return self._tagger(input_text)
def _from_list(self,
multiple_inputs: list):
if type(multiple_inputs) != list:
raise DataTypeError("\n".join([
"Invalid DataType"]))
        results = [self._tagger(x) for x in multiple_inputs]
return results
def process(self,
some_input: str or list):
def inner():
if type(some_input) == str:
return self._from_str(some_input)
if type(some_input) == list:
return self._from_list(some_input)
self.logger.error('\n'.join([
"Unrecognized Input",
f"\tInput: {some_input}",
f"\tType: {type(some_input)}"]))
raise DataTypeError
results = inner()
return results
```
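A pipeline sketch chaining `PerformPosTagging` with the `GenerateNounVariations` class shown earlier; the import paths are assumed from the file layout and the spaCy `'en'` model must be installed.
```python
# Hypothetical pipeline sketch; import paths assumed, spaCy 'en' model required.
from nlusvc.core.svc.perform_pos_tagging import PerformPosTagging
from nlusvc.core.dmo.generate_noun_variations import GenerateNounVariations

df_pos = PerformPosTagging(is_debug=True).process(
    "tubular mitochondrial cristae and blunt lobose pseudopods")

variations = GenerateNounVariations(is_debug=True).process(df_pos)
print(variations["NN2"])    # two-noun sequences
print(variations["ADJNN"])  # adjective/noun combinations
```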
#### File: displacy/dmo/displacy_span_extractor.py
```python
import pprint
from base import BaseObject
from datadict import FindSynonym
from nlusvc.core.bp import TextAPI
from nlusvc.displacy.dto import DisplacyEntityGenerator
class DisplacySpanExtractor(BaseObject):
"""
"""
def __init__(self,
d_cluster: dict,
input_text: str,
ontology_name: str,
is_debug: bool = False):
"""
Created:
11-Oct-2019
<EMAIL>
* refactored out of 'generate-displacy-spans'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1093
Updated:
17-Oct-2019
<EMAIL>
* replace has-match function with TextAPI version
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1142#issuecomment-15377548
Updated:
13-Dec-2019
<EMAIL>
* load dictionaries by ontology name
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1582
Updated:
14-Dec-2019
<EMAIL>
* adjustment to synonym decomposition strategy
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1613#issuecomment-16624966
Updated:
13-Jan-2020
<EMAIL>
* Replace code with Coordinate Extraction service
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1722
:param d_cluster:
a dictionary that clusters the tags around the chosen dimensions
Sample Input:
{ 'activity': ['activity'],
'agent': [],
'anatomy': ['natural killer cell'],
'artifact': [],
'biology': [],
'compound': [],
'device': [],
'disease': [],
'organism': [],
'other': ['memory cd8 t cell', 'cytotoxicity'],
'pathology': [],
'situation': [],
'study': [],
'tech': []}
:param input_text:
:param ontology_name:
the ontology name used to process the text
e.g., 'biotext', 'base', etc
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._d_cluster = d_cluster
self._input_text = input_text
self._ontology_name = ontology_name
self._text_api = TextAPI(is_debug=is_debug,
ontology_name=ontology_name)
self._synonym_finder = FindSynonym(is_debug=is_debug,
ontology_name=ontology_name)
if self._is_debug:
self.logger.debug('\n'.join([
"Instantiated DisplacySpanExtractor",
f"\tInput Text: {self._input_text}",
f"\tOntology Name: {self._ontology_name}"]))
def process(self) -> list:
"""
:return:
Sample Output
[ { 'end': 26,
'label': 'ACTIVITY',
'start': 18,
'text': 'activity',
'type': 'tag'},
{ 'end': 55,
'label': 'NATURAL KILLER CELL',
'start': 30,
'text': 'natural killer (nk) cells',
'type': 'inner-syn'},
{ 'end': 17,
'label': 'CYTOTOXICITY',
'start': 8,
'text': 'cytotoxic',
'type': 'syn'},
{ 'end': 70,
'label': 'MEMORY CD8 T CELL',
'start': 4,
'text': 'the cytotoxic activity of natural killer (nk) cells and memory cd8',
'type': 'inner-syn'} ]
"""
entities = []
input_text = self._input_text.lower()
for key in self._d_cluster:
for tag in self._d_cluster[key]:
d_coords = self._text_api.coords(input_text=input_text,
entity_text=tag)
if not d_coords:
continue
entities.append(DisplacyEntityGenerator.generate(
text=tag,
entity_type='tag',
label=key,
start=d_coords['x'],
end=d_coords['y']))
if self._is_debug:
self.logger.debug('\n'.join([
"Displacy Span Extraction Complete",
f"\tTotal Spans: {len(entities)}",
f"\tOntology Name: {self._ontology_name}",
f"{pprint.pformat(entities, indent=4)}"]))
return entities
```
#### File: displacy/dmo/displacy_tag_generator.py
```python
from typing import Optional
from typing import Tuple
from base import BaseObject
from base import DataTypeError
from nlutext import MultiTextParser
class DisplacyTagGenerator(BaseObject):
""" Annotate the Input Text
Sample Input:
+----+--------------+------------------------+-----------------------+------------+----------------+
| | Confidence | InputText | NormalizedText | Ontology | Tag |
|----+--------------+------------------------+-----------------------+------------+----------------|
| 0 | 97.3 | exocrine gland red hat | exocrine_gland redhat | base | redhat |
| 1 | 98.3 | exocrine gland red hat | exocrine_gland redhat | biotech | exocrine gland |
+----+--------------+------------------------+-----------------------+------------+----------------+
Sample Output:
[ 'redhat', 'exocrine gland' ]
:return:
a list of tag tuples
[ (redhat, 97.3, base),
(exocrine gland, 98.3, biotech) ]
"""
def __init__(self,
input_text: str,
ontology_names: list,
is_debug: bool = False):
"""
Created:
14-Dec-2019
<EMAIL>
* refactored out of 'generate-display-spans' in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1594
:param input_text:
:param is_debug:
"""
BaseObject.__init__(self, __name__)
if type(ontology_names) != list:
raise DataTypeError("Ontology Names, list")
self._is_debug = is_debug
self._input_text = input_text
self._ontology_names = ontology_names
def process(self) -> Tuple[list, str]:
parser = MultiTextParser(ontology_names=self._ontology_names,
is_debug=self._is_debug)
a_df = parser.process(original_ups=self._input_text,
use_profiler=self._is_debug,
as_dataframe=True)
tags = []
for _, row in a_df.iterrows():
tags.append((row["Tag"], row["Confidence"], row["Ontology"]))
def _normalized_text() -> Optional[str]:
if not a_df.empty:
return str(a_df.iloc[0]['NormalizedText'])
return tags, _normalized_text()
```
#### File: nltk/svc/find_edit_distance.py
```python
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class FindEditDistance(BaseObject):
""" Perform Edit Distance Matching across multiple strings """
def __init__(self,
is_debug: bool = False):
"""
Created:
21-Apr-2019
<EMAIL>
Updated:
6-Feb-2020
<EMAIL>
* minor updates in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1829
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
def multiple(self,
input_text: str,
candidate_matches: list) -> DataFrame:
"""
Sample Input:
Input Text:
cristae
Candidate Matches:
'Conosa',
'Corallochytrea',
'Corbihelia',
'Crenarchaeota',
'Crista',
'Cristae',
'Cristidiscoidea',
'Cryptista',
'Cryptophyta',
'Ctenophora',
'Cutaneous amoebiasis',
'Cutosa',
'Cutosea',
'Cyanobacteria'
Sample Output:
+----+----------------------+------------+---------+
| | Candidate | Distance | Input |
|----+----------------------+------------+---------|
| 4 | crista | 0 | cristae |
| 5 | cristae | 0 | cristae |
| 7 | cryptista | 3 | cristae |
| 0 | conosa | 4 | cristae |
| 11 | cutosa | 4 | cristae |
| 12 | cutosea | 4 | cristae |
| 2 | corbihelia | 6 | cristae |
| 8 | cryptophyta | 7 | cristae |
| 9 | ctenophora | 8 | cristae |
| 3 | crenarchaeota | 9 | cristae |
| 6 | cristidiscoidea | 9 | cristae |
| 1 | corallochytrea | 10 | cristae |
| 13 | cyanobacteria | 10 | cristae |
| 10 | cutaneous amoebiasis | 13 | cristae |
+----+----------------------+------------+---------+
:param input_text:
an input string
:param candidate_matches:
a list of one-or-more matches to compare against
:return:
a measure of the distance between the two terms
"""
results = []
input_text = input_text.lower().strip()
candidate_matches = [x.lower().strip() for x in candidate_matches]
for candidate_match in candidate_matches:
result = self.single(input_text=input_text,
candidate_match=candidate_match)
results.append({
"Input": input_text,
"Candidate": candidate_match,
"Distance": result})
df = pd.DataFrame(results)
return df.sort_values(by=['Distance'], ascending=True)
def single(self,
input_text: str,
candidate_match: str) -> int:
"""
:param input_text:
an input string
:param candidate_match:
a single match to compare against
:return:
a measure of the distance between the two terms
"""
from nlusvc.nltk.dmo import NltkEditDistance
dmo = NltkEditDistance(is_debug=self._is_debug)
return dmo.process(s1=input_text,
s2=candidate_match)
```
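A usage sketch mirroring the docstring example above; the import path is assumed from the `nlusvc.nltk` layout visible in the imports.
```python
# Sketch based on the docstring example; import path assumed.
from nlusvc.nltk.svc import FindEditDistance

finder = FindEditDistance(is_debug=True)

df = finder.multiple(input_text="cristae",
                     candidate_matches=["Conosa", "Crista", "Cristae", "Cryptista"])
print(df.head())

print(finder.single(input_text="cristae", candidate_match="crista"))  # 0, per the docstring
```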
#### File: spacy/svc/perform_pos_parse.py
```python
import pandas as pd
from pandas import DataFrame
from spacy.lang.en import English
from tabulate import tabulate
from base import BaseObject
class PerformPosParse(BaseObject):
""" Perform POS (Part-of-Speech) Parse with spaCy
Sample Input:
Amoebozoa is a major taxonomic group containing about 2,400 described species of
amoeboid protists, often possessing blunt, fingerlike, lobose pseudopods and
tubular mitochondrial cristae.
Sample Output:
+----+----------+-----------+----------+---------------+-------+---------+-------+---------------+
| | Dep | IsAlpha | IsStop | Lemma | POS | Shape | Tag | Text |
|----+----------+-----------+----------+---------------+-------+---------+-------+---------------|
| 0 | nsubj | True | False | amoebozoa | PROPN | Xxxxx | NNP | Amoebozoa |
| 1 | ROOT | True | True | be | VERB | xx | VBZ | is |
| 2 | det | True | True | a | DET | x | DT | a |
| 3 | amod | True | False | major | ADJ | xxxx | JJ | major |
| 4 | amod | True | False | taxonomic | ADJ | xxxx | JJ | taxonomic |
| 5 | attr | True | False | group | NOUN | xxxx | NN | group |
| 6 | acl | True | False | contain | VERB | xxxx | VBG | containing |
| 7 | quantmod | True | True | about | ADV | xxxx | RB | about |
| 8 | nummod | False | False | 2,400 | NUM | d,ddd | CD | 2,400 |
| 9 | amod | True | False | describe | VERB | xxxx | VBN | described |
| 10 | dobj | True | False | specie | NOUN | xxxx | NNS | species |
| 11 | prep | True | True | of | ADP | xx | IN | of |
| 12 | compound | True | False | amoeboid | NOUN | xxxx | NN | amoeboid |
| 13 | pobj | True | False | protist | NOUN | xxxx | NNS | protists |
| 14 | punct | False | False | , | PUNCT | , | , | , |
| 15 | advmod | True | True | often | ADV | xxxx | RB | often |
| 16 | acl | True | False | possess | VERB | xxxx | VBG | possessing |
| 17 | dobj | True | False | blunt | ADJ | xxxx | JJ | blunt |
| 18 | punct | False | False | , | PUNCT | , | , | , |
| 19 | conj | True | False | fingerlike | NOUN | xxxx | NN | fingerlike |
| 20 | punct | False | False | , | PUNCT | , | , | , |
| 21 | amod | True | False | lobose | VERB | xxxx | VB | lobose |
| 22 | conj | True | False | pseudopod | NOUN | xxxx | NNS | pseudopods |
| 23 | cc | True | True | and | CCONJ | xxx | CC | and |
| 24 | amod | True | False | tubular | ADJ | xxxx | JJ | tubular |
| 25 | amod | True | False | mitochondrial | NOUN | xxxx | NN | mitochondrial |
| 26 | conj | True | False | cristae | VERB | xxxx | VBN | cristae |
| 27 | punct | False | False | . | PUNCT | . | . | . |
+----+----------+-----------+----------+---------------+-------+---------+-------+---------------+
Reference:
https://spacy.io/usage/linguistic-features
"""
def __init__(self,
nlp: English,
input_text: str,
is_debug: bool = False):
"""
Created:
6-Feb-2020
<EMAIL>
* in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1829
"""
BaseObject.__init__(self, __name__)
self._nlp = nlp
self._is_debug = is_debug
self._input_text = input_text
def process(self,
log_sample_size: int = 500) -> DataFrame:
"""
Purpose:
Perform spaCY pos-tagging on input text
:param log_sample_size:
:return:
a dataframe with the following columns:
Text: The original word text.
Lemma: The base form of the word.
POS: The simple part-of-speech tag.
Tag: The detailed part-of-speech tag.
Dep: Syntactic dependency, i.e. the relation between tokens.
Shape: The word shape – capitalization, punctuation, digits.
IsAlpha: Is the token an alpha character?
IsStop: Is the token part of a stop list, i.e. the most common words of the language?
"""
doc = self._nlp(self._input_text)
results = []
for token in doc:
results.append({
"Text": token.text,
"Lemma": token.lemma_,
"POS": token.pos_,
"Tag": token.tag_,
"Dep": token.dep_,
"Shape": token.shape_,
"IsAlpha": token.is_alpha,
"IsStop": token.is_stop})
df = pd.DataFrame(results)
if self._is_debug:
self.logger.debug('\n'.join([
"Part-of-Speech DataFrame Generated",
f"\tSize: {len(df)}",
tabulate(df.head(log_sample_size),
tablefmt='psql',
headers='keys')]))
return df
```
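A self-contained sketch of the token attributes `PerformPosParse.process()` collects, using spaCy directly; it assumes the `en_core_web_sm` model is installed.

```python
import pandas as pd
import spacy

nlp = spacy.load("en_core_web_sm")  # assumes the small English model is installed
doc = nlp("Amoebozoa is a major taxonomic group.")

# The same token attributes PerformPosParse.process() collects into its DataFrame
rows = [{"Text": t.text, "Lemma": t.lemma_, "POS": t.pos_, "Tag": t.tag_,
         "Dep": t.dep_, "Shape": t.shape_, "IsAlpha": t.is_alpha, "IsStop": t.is_stop}
        for t in doc]
print(pd.DataFrame(rows).head(10))
```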
#### File: nlusvc/tests/test_displacy_tag_clusterer.py
```python
import unittest
from typing import Optional
from datamongo import BaseMongoClient
IS_DEBUG = True
class TestDisplacyTagClusterer(unittest.TestCase):
__mongo_client = BaseMongoClient()
@staticmethod
def _get_label(tag_name: str,
d_result: dict) -> Optional[str]:
for k in d_result:
for tag in d_result[k]:
if tag == tag_name:
return k
def _execute_by_name(self,
a_tag_tuple: tuple,
expected_label: str,
xdm_schema: str,
ontology_name: str) -> None:
from nlusvc.displacy.dmo import DisplacyTagClusterer
d_result = DisplacyTagClusterer(tags=[a_tag_tuple],
xdm_schema=xdm_schema,
ontology_name=ontology_name,
mongo_client=self.__mongo_client,
is_debug=IS_DEBUG).process()
self.assertIsNotNone(d_result)
self.assertEqual(type(d_result), dict)
self.assertTrue(len(d_result))
actual_label = self._get_label(d_result=d_result,
tag_name=a_tag_tuple[0])
self.assertEqual(actual_label, expected_label)
def _execute_base(self,
a_tag_tuple: tuple,
expected_label: str) -> None:
self._execute_by_name(xdm_schema='supply',
ontology_name='base',
a_tag_tuple=a_tag_tuple,
expected_label=expected_label)
def _execute_biotech(self,
a_tag_tuple: tuple,
expected_label: str) -> None:
self._execute_by_name(xdm_schema='biotech',
ontology_name='biotech',
a_tag_tuple=a_tag_tuple,
expected_label=expected_label)
def test_biotech_01(self):
self._execute_biotech(('cytotoxic activity', 100), 'activity')
self._execute_biotech(('cell', 100), 'anatomy')
self._execute_biotech(('blood cell', 100), 'anatomy')
self._execute_biotech(('white blood cell', 100), 'anatomy')
self._execute_biotech(('lymphocyte', 100), 'anatomy')
self._execute_biotech(('t cell', 100), 'anatomy')
self._execute_biotech(('memory t cell', 100), 'anatomy')
def test_process(self):
self.test_biotech_01()
if __name__ == '__main__':
unittest.main()
```
#### File: nlusvc/tests/test_remove_stop_words.py
```python
import unittest
from nlusvc import RemoveStopWords
IS_DEBUG = True
class TestRemoveStopWords(unittest.TestCase):
def execute(self,
some_input: str or list,
expected_result: str or list) -> None:
rsw = RemoveStopWords(is_debug=True)
actual_result = rsw.process(input_text=some_input,
aggressive=False)
self.assertEqual(actual_result, expected_result)
def test_process(self):
self.execute(["and plan to learn"],
["plan learn"])
self.execute("and plan to learn",
"plan learn")
self.execute("and and plan to to learn",
"plan learn")
self.execute("to and and plan to to learn and to and to plan to and",
"plan learn plan")
self.execute("plan andlearn toplan",
"plan andlearn toplan")
self.execute(
"apply learn orient business_analyst which include analytics a client have plan train and no_skills_growth human_performance and delivery requirement which is follow by create plan strategy for a comprehensive approach to human_performance increase",
"learn orient business_analyst analytics client plan train no_skills_growth human_performance delivery requirement create plan strategy comprehensive approach human_performance increase")
if __name__ == '__main__':
unittest.main()
```
#### File: nlusvc/tests/test_text_string_matcher.py
```python
import unittest
class TestTextStringMatcher(unittest.TestCase):
@staticmethod
def _execute(match_token: str,
input_text: str) -> bool:
from nlusvc.core.svc import TextStringMatcher
return TextStringMatcher(is_debug=True,
a_token=match_token,
some_text=input_text).process()
def test_process(self):
match_token = "bright <PASSWORD>"
input_text = """Abstract: The current study detects different morphologies related to
prostate pathology using deep learning models; these models were evaluated on 2,121
hematoxylin and eosin (H&E) stain histology images captured using bright field microscopy,,,,which
spanned a variety of image qualities, origins (whole slide, tissue micro array, whole mount, Internet),
scanning machines, timestamps, H&E staining protocols, and institutions.""".strip()
self.assertTrue(self._execute(match_token, input_text))
if __name__ == '__main__':
unittest.main()
```
#### File: textacy/dmo/textacy_topic_model.py
```python
import pprint
import time
import spacy
from scipy.sparse.csr import csr_matrix
from textacy import Corpus
from textacy.tm import TopicModel
from textacy.vsm import Vectorizer
from base import BaseObject
from base import MandatoryParamError
class TextacyTopicModeler(BaseObject):
"""
https://chartbeat-labs.github.io/textacy/getting_started/quickstart.html#analyze-a-corpus
"""
_nlp = spacy.load("en_core_web_sm",
disable=('parser', 'tagger'))
_topic_model_types = ['nmf', 'lda', 'lsa']
def __init__(self,
some_values: list,
number_of_topics=10,
terms_per_topic=10,
is_debug=True):
"""
Created:
3-Apr-2019
<EMAIL>
"""
BaseObject.__init__(self, __name__)
if not some_values:
raise MandatoryParamError("Input Values")
self.is_debug = is_debug
self.values = some_values
self.terms_per_topic = terms_per_topic
self.number_of_topics = number_of_topics
@staticmethod
def _vectorizer() -> Vectorizer:
return Vectorizer(tf_type='linear',
apply_idf=True,
idf_type='smooth',
norm='l2',
min_df=2,
max_df=0.95)
def _doc_term_matrix(self,
vectorizer: Vectorizer,
corpus: Corpus) -> csr_matrix:
start = time.time()
doc_term_matrix = vectorizer.fit_transform((doc.to_terms_list(ngrams=1,
named_entities=True,
as_strings=True)
for doc in corpus))
if self.is_debug:
self.logger.debug("\n".join([
"Generated Document/Term Matrix",
"\trepr: {}".format(repr(doc_term_matrix)),
"\tTotal Time: {}".format(time.time() - start)
]))
return doc_term_matrix
def _topic_model(self,
doc_term_matrix: csr_matrix,
topic_model_type='nmf') -> TopicModel:
start = time.time()
if topic_model_type not in self._topic_model_types:
raise NotImplementedError("\n".join([
"Topic Model Type Not Recognized",
"\tname: {}".format(topic_model_type)
]))
model = TopicModel(topic_model_type,
n_topics=self.number_of_topics)
try:
model.fit(doc_term_matrix)
except IndexError as e:
raise ValueError("\n".join([
"Model Fit Error",
"\t{}".format(str(e)),
"\tTry decreasing topic-size and/or terms-per-topic"
]))
doc_topic_matrix = model.transform(doc_term_matrix)
if self.is_debug:
self.logger.debug("\n".join([
"Generated Topic Model",
"\tShape: {}".format(doc_topic_matrix.shape),
"\tTotal Time: {}".format(time.time() - start)
]))
return model
def _result_set(self,
vectorizer: Vectorizer,
model: TopicModel) -> list:
l_results = []
for topic_idx, top_terms in model.top_topic_terms(vectorizer.id_to_term,
top_n=self.terms_per_topic):
l_results.append({
"topic_idx": topic_idx,
"top_terms": top_terms
})
return l_results
def process(self) -> list:
from nlusvc.textacy.dmo import TextactyUtils
start = time.time()
corpus = TextactyUtils.corpus(spacy_model=self._nlp,
some_values=self.values,
is_debug=self.is_debug)
vectorizer = self._vectorizer()
doc_term_matrix = self._doc_term_matrix(vectorizer,
corpus)
model = self._topic_model(doc_term_matrix)
results = self._result_set(vectorizer,
model)
if self.is_debug:
self.logger.debug("\n".join([
"Topic Modeling Complete",
"\tTotal Time: {}".format(time.time() - start),
pprint.pformat(results)
]))
return results
```
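A hedged usage sketch of `TextacyTopicModeler`; it assumes the repo packages (`base`, `nlusvc`) are importable, a textacy version compatible with the `Corpus`/`Vectorizer` API used above, and a corpus large enough to satisfy the vectorizer's `min_df=2` setting.

```python
# Hedged usage sketch: inputs and topic counts are illustrative only.
docs = ["cloud migration strategy for enterprise workloads",
        "openshift and kubernetes container orchestration",
        "machine learning model deployment on openshift",
        "kubernetes cluster migration to the cloud",
        "deployment pipelines for machine learning workloads",
        "enterprise cloud orchestration strategy"]

modeler = TextacyTopicModeler(some_values=docs,
                              number_of_topics=2,
                              terms_per_topic=5)
for topic in modeler.process():
    print(topic["topic_idx"], topic["top_terms"])
```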
#### File: textacy/dmo/textacy_util.py
```python
import logging
import time
import spacy
import textacy
import textacy.extract
import textacy.keyterms
from textacy import Corpus
from datadict import LoadStopWords
class TextactyUtils:
"""
"""
logger = logging.getLogger(__name__)
stopwords = LoadStopWords().load()
@classmethod
def spacy_model(cls):
return spacy.load("en_core_web_sm")
@classmethod
def lines_to_doc(cls,
some_lines: list,
remove_stopwords=True) -> textacy.doc:
"""
:param some_lines:
:param remove_stopwords:
:return:
a textacy/spaCy doc built from the joined (optionally stop-word-filtered) lines
"""
_lines = []
some_lines = [x.lower().strip() for x in some_lines]
for line in some_lines:
_line = []
for token in line.split(" "):
if remove_stopwords:
if token not in cls.stopwords:
_line.append(token)
else:
_line.append(token)
_lines.append(" ".join(_line))
svcresult = " ".join(_lines)
return textacy.make_spacy_doc(svcresult)
@classmethod
def doc(cls,
spacy_model,
some_text: str,
no_numbers=True,
is_debug=False) -> textacy.doc:
""" use textacy to preprocess the text prior to creating a doc
:return:
a textacy doc
"""
original_text = some_text
some_text = textacy.preprocess_text(some_text,
# fix_unicode=True,
lowercase=True,
no_urls=True,
no_emails=True,
no_phone_numbers=True,
no_numbers=no_numbers,
no_currency_symbols=True,
no_punct=True,
no_contractions=True,
no_accents=True)
if no_numbers:
# textacy replaces numbers and years with 'numb' and 'year' respectively
# for topic modeling these are best removed
some_text = some_text.replace("number", " ")
some_text = some_text.replace("numb", " ")
some_text = some_text.replace("years", " ")
some_text = some_text.replace("year", " ")
if is_debug:
cls.logger.debug("\n".join([
"Textacy Preprocessed Text",
"\toriginal: {}".format(original_text),
"\tpreprocessed: {}".format(some_text)
]))
return textacy.make_spacy_doc(spacy_model(some_text))
@classmethod
def corpus(cls,
spacy_model,
some_values: list,
is_debug=True) -> Corpus:
""" A textacy.Corpus is an ordered collection of textacy.Docs,
all processed by the same spacy language pipeline.
:return:
a textacy corpus of docs
"""
start = time.time()
docs = []
[docs.append(cls.doc(spacy_model,
x)) for x in some_values]
corpus = textacy.Corpus(spacy_model)
corpus.docs = docs
if is_debug:
cls.logger.debug("\n".join([
"Generated Corpus",
"\tTotal Documents: {}".format(corpus.n_docs),
"\tTotal Sentences: {}".format(corpus.n_sents),
"\tTotal Tokens: {}".format(corpus.n_tokens),
"\tTotal Time: {}".format(time.time() - start)
]))
return corpus
```
#### File: core/bp/perform_deep_nlu.py
```python
from spacy.tokens import Doc
from base import BaseObject
from nlutag.core.dmo import EntityToTagGenerator
class PerformDeepNLU(BaseObject):
"""
Purpose:
Given the preprocessed UPS structure, perform
1. tagging (aka annotation) via CitiProductAnnotator
2. inference via CitiInference
"""
def __init__(self,
ontology_name: str,
is_debug: bool = False):
"""
Created:
13-Feb-2017
<EMAIL>
* extrapolated out of an increasingly complex function within pipeline_orchestrator.py
Updated:
28-Feb-2017
<EMAIL>
* replaced single dict param with individual parameters
this makes service design more consistent
Updated:
24-Mar-2017
<EMAIL>
- sending 'normalized' instead of 'original' now
- supporting <928#issuecomment-1947787> and potentially other defects
Updated:
12-Mar-2019
<EMAIL>
* migrate to text and remove dead code
Updated:
25-Apr-2019
<EMAIL>
* add strict typing and more logging statements with is-debug flag
* change instantiation strategy
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._ontology_name = ontology_name
if self._is_debug:
self.logger.debug('\n'.join([
"Instantiate PerformDeepNLU",
f"\tOntology Name: {self._ontology_name}"]))
@staticmethod
def _to_structure(some_original_ups: str,
some_normalized_ups: str,
some_tokenation_result: dict) -> dict:
"""
Purpose:
combine normalization+tokenization into a single data structure
Notes:
there's no real reason for this except that this service
was re-factored out of a messy business process and the
domain components below require this structure as input
:param some_original_ups:
the original UPS
:param some_normalized_ups:
the normalized UPS
:param some_tokenation_result:
the tokens to perform inference on
:return:
"""
return {
"original": some_original_ups,
"normalized": some_normalized_ups,
"tokens": some_tokenation_result}
def process(self,
doc: Doc,
some_original_ups: str,
some_normalized_ups: str,
some_tokenation_result: dict):
"""
:return:
normalized matches (aka entities, tags)
"""
from nlutag.core.svc import AbacusAnnotator
preprocessed = self._to_structure(some_original_ups,
some_normalized_ups,
some_tokenation_result)
# Step: Execute the annotation pipeline
annotator = AbacusAnnotator(doc=doc,
d_input=preprocessed,
is_debug=self._is_debug,
ontology_name=self._ontology_name)
the_matches = annotator.process()
the_tags = EntityToTagGenerator(self._is_debug).process(the_matches)
return None, the_tags
```
#### File: core/dmo/entity_to_tag_generator.py
```python
import os
import pprint
from base import BaseObject
from nlutag.core.dto.token_match import TokenMatches
class EntityToTagGenerator(BaseObject):
def __init__(self,
is_debug: bool = False):
"""
Created:
9-Feb-2017
<EMAIL>
* generate simple string-based tags from more complex entity matches
Updated:
21-Feb-2019
<EMAIL>
* migrated from -text
* rewritten from the ground up (simplified)
Updated:
25-Apr-2019
<EMAIL>
* changed instantiation strategy
Updated:
21-Aug-2019
<EMAIL>
* put tag-confidence-threshold into environment
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/796
"""
BaseObject.__init__(self, __name__)
self.is_debug = is_debug
self._threshold = float(os.environ['TAG_CONFIDENCE_THRESHOLD'])
if self.is_debug:
self.logger.debug("Instantiate EntityToTagGenerator¬")
@staticmethod
def _list_by_confidence(token_matches: TokenMatches) -> list:
"""
Purpose:
filter out entities with low confidence
Sample Input:
'steady state support'
may have been located via two match patterns
'steadi+state+supporting', confidence=20%
'steady state support', confidence=90%
Implementation:
- the first match pattern is a long-distance formation with low confidence
the low match will be discarded
- the second match pattern is an exact match with high confidence
the high match will be kept
Sample Output:
'steady state support' will be considered a valid entity
:return:
a list of tuple values (tag, confidence)
[ ('pmi', 100),
('plan', 100),
('rational test', 94),
...
('rational tester', 94),
('test plan', 7) ]
"""
valid_labels = []
for label in token_matches.get_keys():
entity_structure = token_matches.get_by_key(label)
max_confidence = max([x["confidence"]
for x in entity_structure["matches"]])
valid_labels.append((label, max_confidence))
return valid_labels
def process(self,
entities: TokenMatches) -> list:
result = self._list_by_confidence(entities)
if self.is_debug:
self.logger.debug("\n".join([
"Entity to Tag Generation Results",
"\t{}".format(pprint.pformat(result, indent=4))]))
return result
```
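A stand-alone sketch of the max-confidence filter applied by `_list_by_confidence()`, using a plain dict in place of the `TokenMatches` structure (its exact shape is assumed from the code above).

```python
# Plain-dict stand-in for the TokenMatches structure (shape assumed from the code above):
# each label maps to {"matches": [{"confidence": ...}, ...]}
token_matches = {
    "steady state support": {"matches": [{"confidence": 20}, {"confidence": 90}]},
    "test plan": {"matches": [{"confidence": 7}]},
}

# Keep each label with the highest confidence among its match patterns
valid_labels = [(label, max(m["confidence"] for m in entry["matches"]))
                for label, entry in token_matches.items()]
print(valid_labels)  # [('steady state support', 90), ('test plan', 7)]
```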
#### File: core/bp/multi_text_parser.py
```python
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class MultiTextParser(BaseObject):
""" Parse Incoming Text using 1..* Ontologies
"""
def __init__(self,
ontology_names: list,
is_debug: bool = False):
"""
Created:
13-Dec-2019
<EMAIL>
* https://github.ibm.com/-cdo/unstructured-analytics/issues/1590#issuecomment-16614898
:param ontology_names:
an ordered list of Ontology Names
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._ontology_names = ontology_names
if self._is_debug:
self.logger.debug('\n'.join([
"Instantiate MultiTextParser",
f"\tOntology Names: {ontology_names}"]))
@staticmethod
def _to_dict(input_text: str,
normalized_text: str,
d_supervised_tags: dict) -> dict:
"""
Sample Output:
{ 'tags':
{ 'supervised': [
('redhat', {'confidence': 97.3, 'ontology': 'base'}),
('exocrine gland', {'confidence': 98.3, 'ontology': 'biotech'})]},
'total_tags': 2,
'ups': {
'normalized': 'exocrine_gland redhat',
'original': 'exocrine gland red hat'}}
:param input_text:
:param normalized_text:
:param d_supervised_tags:
"""
def to_tuples() -> list:
results = []
for k in d_supervised_tags:
results.append((k, d_supervised_tags[k]))
return results
return {
'tags': {
'supervised': to_tuples(),
'unsupervised': {}},
'total_tags': len(d_supervised_tags),
'ups': {
'normalized': normalized_text,
'original': input_text}}
@staticmethod
def _to_dataframe(input_text: str,
normalized_text: str,
d_supervised_tags: dict) -> DataFrame:
"""
Sample Output:
+----+--------------+------------------------+-----------------------+------------+----------------+
| | Confidence | InputText | NormalizedText | Ontology | Tag |
|----+--------------+------------------------+-----------------------+------------+----------------|
| 0 | 97.3 | exocrine gland red hat | exocrine_gland redhat | base | redhat |
| 1 | 98.3 | exocrine gland red hat | exocrine_gland redhat | biotech | exocrine gland |
+----+--------------+------------------------+-----------------------+------------+----------------+
:param input_text:
:param normalized_text:
:param d_supervised_tags:
"""
results = []
for key in d_supervised_tags:
results.append({
"Tag": key,
"InputText": input_text,
"NormalizedText": normalized_text,
"Ontology": d_supervised_tags[key]["ontology"],
"Confidence": d_supervised_tags[key]["confidence"]})
return pd.DataFrame(results)
def process(self,
original_ups: str,
use_profiler: bool = False,
as_dataframe: bool = False) -> dict or DataFrame:
from nlutext.core.bp import TextParser
d_supervised_tags = {}
normalized_text = original_ups
for ontology_name in self._ontology_names:
text_parser = TextParser(is_debug=self._is_debug,
ontology_name=ontology_name)
svcresult = text_parser.process(as_dataframe=False,
use_profiler=use_profiler,
original_ups=normalized_text)
# Step: Update Normalized Text
normalized_text = svcresult["ups"]["normalized"]
# Step: Update Tag Dictionary
for tag in svcresult["tags"]["supervised"]:
if tag[0] in d_supervised_tags: # tag exists ...
if d_supervised_tags[tag[0]]["confidence"] < tag[1]: # ... and has lower confidence
d_supervised_tags[tag[0]]["confidence"] = tag[1] # update confidence
else: # tag does not exist
d_supervised_tags[tag[0]] = {"confidence": tag[1],
"ontology": ontology_name}
if not as_dataframe:
return self._to_dict(input_text=original_ups,
normalized_text=normalized_text,
d_supervised_tags=d_supervised_tags)
return self._to_dataframe(input_text=original_ups,
normalized_text=normalized_text,
d_supervised_tags=d_supervised_tags)
```
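A stand-alone sketch of the tag-merge rule inside `MultiTextParser.process()`: one entry is kept per tag, the highest confidence seen across ontologies wins, and the recorded ontology is the one that first produced the tag, mirroring the loop above.

```python
# Stand-alone sketch of the tag-merge rule (pure Python, no repo dependencies)
def merge_tags(d_supervised_tags: dict, svcresult_tags: list, ontology_name: str) -> dict:
    for tag, confidence in svcresult_tags:
        if tag in d_supervised_tags:  # tag exists ...
            if d_supervised_tags[tag]["confidence"] < confidence:  # ... with lower confidence
                d_supervised_tags[tag]["confidence"] = confidence
        else:  # tag does not exist yet
            d_supervised_tags[tag] = {"confidence": confidence, "ontology": ontology_name}
    return d_supervised_tags

merged = {}
merged = merge_tags(merged, [("redhat", 97.3)], "base")
merged = merge_tags(merged, [("redhat", 60.0), ("exocrine gland", 98.3)], "biotech")
print(merged)
# {'redhat': {'confidence': 97.3, 'ontology': 'base'},
#  'exocrine gland': {'confidence': 98.3, 'ontology': 'biotech'}}
```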
#### File: core/dmo/apostrophe_expansion.py
```python
from base import BaseObject
from base import MandatoryParamError
class ApostropheExpansion(BaseObject):
"""
"""
def __init__(self,
the_input_text: str,
is_debug: bool = False):
"""
Created:
14-Mar-2017
<EMAIL>
* used 'expand_enclictics' as a template
Updated:
19-Apr-2017
<EMAIL>
* renamed from "ExpandApostrophes"
Updated:
26-Feb-2019
<EMAIL>
* migrated to -text
Updated:
13-Dec-2019
<EMAIL>
* cleanup params in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1582#issuecomment-16611092
:param the_input_text:
"""
BaseObject.__init__(self, __name__)
if not the_input_text:
raise MandatoryParamError("Input Text")
self._is_debug = is_debug
self._input_text = the_input_text
self._apostrophes = self.get_apostrophes_dict()
@staticmethod
def get_apostrophes_dict() -> list:
from datadict import the_apostrophes_dict
return the_apostrophes_dict
def process(self) -> str:
normalized = self._input_text.lower()
for apostrophe in self._apostrophes:
the_genitive_pattern = u"{0}s".format(apostrophe)
while the_genitive_pattern in normalized:
normalized = normalized.replace(the_genitive_pattern, " has")
if self._is_debug and self._input_text != normalized:
self.logger.debug("\n".join([
"Processing Complete",
f"\tInput: {self._input_text}",
f"\tOutput: {normalized}"]))
return normalized
```
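A hedged usage sketch; it assumes the repo's `base` and `datadict` packages are importable and that `the_apostrophes_dict` contains the apostrophe character used in the input.

```python
# Hedged usage sketch: behavior depends on the contents of the_apostrophes_dict,
# which are not shown in this file.
expander = ApostropheExpansion("What's been deployed to the cluster?", is_debug=True)
print(expander.process())  # expected: "what has been deployed to the cluster?"
```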
#### File: core/dmo/compute_skipgrams.py
```python
import logging
from base import BaseObject
def to_list(results):
"""
Purpose:
Simplify the ComputeSkipGrams result set
:param results:
a ComputeSkipsGrams result set
looks like this
[(u'Problems', u'installing'), (u'Problems', u'adobe'), (u'Problems', u'acrobat'), ... ,]
:return:
a list of results
looks like this
["Problems installing", "Problems adobe", "Problems acrobat", ... ,]
"""
the_list = []
for result in list(results):
the_list.append(" ".join(list(result)))
return the_list
class ComputeSkipGrams(BaseObject):
def __init__(self):
"""
Reference:
<http://stackoverflow.com/questions/31847682/how-to-compute-skipgrams-in-python>
"""
BaseObject.__init__(self, __name__)
@staticmethod
def pad_sequence(sequence, n, pad_left=False, pad_right=False, pad_symbol=None):
from itertools import chain
if pad_left:
sequence = chain((pad_symbol,) * (n - 1), sequence)
if pad_right:
sequence = chain(sequence, (pad_symbol,) * (n - 1))
return sequence
def process(self, sequence, n, k, pad_left=False, pad_right=False, pad_symbol=None):
from itertools import combinations
sequence_length = len(sequence)
sequence = iter(sequence)
sequence = self.pad_sequence(sequence, n, pad_left, pad_right, pad_symbol)
if sequence_length + pad_left + pad_right < k:
raise Exception("The length of sentence + padding(s) < skip")
if n < k:
raise Exception("Degree of Ngrams (n) needs to be bigger than skip (k)")
history = []
nk = n + k
# Return point for recursion.
if nk < 1:
return
# If n+k is longer than the sequence, reduce k by 1 and recur
elif nk > sequence_length:
for ng in self.process(list(sequence), n, k - 1):
yield ng
while nk > 1: # Collects the first instance of n+k length history
history.append(next(sequence))
nk -= 1
# Iteratively drop the first item in history and pick up the next,
# yielding skipgrams for each iteration.
for item in sequence:
history.append(item)
current_token = history.pop(0)
# Iterate through the rest of the history and
# pick out all combinations of the (n-1)-grams
for idx in list(combinations(range(len(history)), n - 1)):
ng = [current_token]
for _id in idx:
ng.append(history[_id])
yield tuple(ng)
# Recursively yield the skipgrams for the rest of the sequence where
# len(sequence) < n+k
for ng in list(self.process(history, n, k - 1)):
yield ng
```
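A hedged usage sketch of `ComputeSkipGrams` together with the `to_list()` helper; it assumes the repo's `base` package is importable.

```python
# Hedged usage sketch: mirrors the sample in the to_list() docstring above.
tokens = "Problems installing adobe acrobat reader".split()
skip_grams = ComputeSkipGrams().process(tokens, n=2, k=2)
print(to_list(skip_grams))
# e.g. ['Problems installing', 'Problems adobe', 'Problems acrobat', ...]
```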
#### File: core/dmo/skipgram_generator.py
```python
from base import BaseObject
class SkipgramGenerator(BaseObject):
def __init__(self, some_tokens):
"""
Created:
12-Feb-2017
<EMAIL>
* refactored out of TokenizeText.py
Updated:
21-Feb-2017
<EMAIL>
* is this module still necessary?
- does long-distance tag matching evolve away from skip-gram usage?
- where are skip-grams actually used?
Updated:
3-Apr-2017
<EMAIL>
* don't log full results (they are VERY large)
:param some_tokens:
"""
BaseObject.__init__(self, __name__)
self.tokens = some_tokens
@staticmethod
def perform_semantic_tokenization(tokens):
from nlutext import PerformSemanticSegmentation
the_sem_tokens = set()
for a_token in tokens:
the_sem_tokens.add(PerformSemanticSegmentation(a_token).process())
return list(the_sem_tokens)
def compute_skipgrams(self, tokens, n, k):
from nlutext import ComputeSkipGrams
try:
return self.perform_semantic_tokenization(
ComputeSkipGrams().process(
tokens, n, k))
except Exception:
return []
def process(self):
result = {
"n2k2": self.compute_skipgrams(self.tokens, 2, 2),
"n3k2": self.compute_skipgrams(self.tokens, 3, 2),
"n3k3": self.compute_skipgrams(self.tokens, 3, 3),
"n4k3": self.compute_skipgrams(self.tokens, 4, 3)}
return result
```
#### File: core/dmo/trigram_generator.py
```python
import string
from base import BaseObject
from base import MandatoryParamError
from datadict import the_stopwords_dict
class TrigramGenerator(BaseObject):
""" perform trigram generation on unstructured text """
def __init__(self,
some_injected_stopwords: list,
some_gram_length: int = 3,
is_debug: bool = False):
"""
Created:
7-Mar-2019
<EMAIL>
* refactored out of unstructured-data-parser
:param some_injected_stopwords:
stopwords that are customized to this particular parsing instance
:param some_gram_length:
gram length
e.g. 3 = Trigrams
2 = Bigrams
1 = Unigrams
"""
BaseObject.__init__(self, __name__)
if not some_gram_length:
raise MandatoryParamError("Gram Length")
if some_gram_length < 1 or some_gram_length > 4:
raise MandatoryParamError("Mandatory Gram Length is 1-4")
self.is_debug = is_debug
self.gram_length = some_gram_length
self.stop_words = the_stopwords_dict
self.injected_stop_words = some_injected_stopwords
@staticmethod
def _is_valid(values: list) -> bool:
"""
determine if a candidate trigram is valid
:param values:
three string values to form a candidate trigram
:return:
True candidate trigram is valid
False candidate trigram is not valid
"""
def _has_num() -> bool:
for v in values:
if v.startswith("no_"):
return True
return False
def _has_digit() -> bool:
for v in values:
if v.isdigit():
return True
return False
if _has_digit() or _has_num():
return False
return True
def _tokenize(self,
input_text: str) -> list:
"""
:param input_text:
an input string of any length
:return:
a list of tokens (with invalid tokens redacted)
"""
def _is_valid(token: str):
if not token:
return False
if len(token) < 3:
return False
if len(token) > 25:
return False
if token in self.stop_words:
return False
if token in self.injected_stop_words:
return False
return True
input_text = input_text.translate(str.maketrans('', '', string.punctuation))
return [x.strip().lower() for x in input_text.split(" ") if _is_valid(x)]
def _quadgrams(self,
tokens: list,
total_tokens: int) -> list:
grams = []
for i in range(0, total_tokens):
if i + 3 < total_tokens + 1:
t0 = tokens[i]
t1 = tokens[i + 1]
t2 = tokens[i + 2]
t3 = tokens[i + 3]
if self._is_valid([t0, t1, t2, t3]):
grams.append("{} {} {} {}".format(t0, t1, t2, t3))
return grams
def _trigrams(self,
tokens: list,
total_tokens: int) -> list:
grams = []
for i in range(0, total_tokens):
if i + 2 < total_tokens + 1:
t0 = tokens[i]
t1 = tokens[i + 1]
t2 = tokens[i + 2]
if self._is_valid([t0, t1, t2]):
grams.append("{} {} {}".format(t0, t1, t2))
return grams
def _bigrams(self,
tokens: list,
total_tokens: int) -> list:
grams = []
for i in range(0, total_tokens):
if i + 1 < total_tokens + 1:
t0 = tokens[i]
t1 = tokens[i + 1]
if self._is_valid([t0, t1]):
grams.append("{} {}".format(t0, t1))
return grams
def _unigrams(self,
tokens: list,
total_tokens: int) -> list:
grams = []
for i in range(0, total_tokens):
if i < total_tokens + 1:
t0 = tokens[i]
if self._is_valid([t0]):
grams.append(t0)
return grams
def process(self,
input_text: str) -> list:
"""
:return:
a list of trigrams created from the input text
"""
tokens = self._tokenize(input_text)
total_tokens = len(tokens) - 1
def _grams():
if self.gram_length == 4:
return self._quadgrams(tokens,
total_tokens)
if self.gram_length == 3:
return self._trigrams(tokens,
total_tokens)
if self.gram_length == 2:
return self._bigrams(tokens,
total_tokens)
if self.gram_length == 1:
return self._unigrams(tokens,
total_tokens)
raise NotImplementedError("\n".join([
"Gram Length Not Implemented",
f"\tlength: {self.gram_length}"]))
grams = _grams()
if self.is_debug:
self.logger.debug("\n".join([
"Created Input Grams: ",
f"\tinput-text: {input_text}",
f"\tlength: {self.gram_length}",
f"\tInput Trigrams: {grams}"]))
return grams
```
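A hedged usage sketch of `TrigramGenerator`; it assumes the repo's `base` and `datadict` packages are importable, and the exact output depends on the stop-word dictionaries in use.

```python
# Hedged usage sketch: gram_length=2 produces bigrams over the surviving tokens.
generator = TrigramGenerator(some_injected_stopwords=[], some_gram_length=2)
print(generator.process("deploy openshift cluster on ibm cloud"))
# roughly ['deploy openshift', 'openshift cluster', 'cluster ibm', 'ibm cloud']
```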
#### File: core/svc/perform_supervised_parsing.py
```python
from spacy.lang.en import English
from base import BaseObject
from nlutag import PerformDeepNLU
class PerformSupervisedParsing(BaseObject):
""" parse manifest data using supervised techniques
supervised parsing takes a known entity
for example
Aix 5.3 Workload:
type: Software
scoped: TRUE
provenance:
- GTS
patterns:
- 'aix_5.3_workload'
- '5.3+aix+workload'
and finds these patterns in the unstructured text and if a match is found
correlates to the known entity tag 'AIX 5.3 Workload'
"""
__nlp = English()
def __init__(self,
ontology_name: str,
is_debug: bool = False):
"""
Created:
12-Mar-2019
<EMAIL>
the name of the activity within the manifest
Updated:
14-Mar-2019
<EMAIL>
* renamed from 'svc:perform-supervised-parsing'
Updated:
16-May-2019
<EMAIL>
* add 'is_debug' param
Updated:
21-Aug-2019
<EMAIL>
* eschew the use of tokenizers for spaCy doc instead
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/796
Updated:
13-Dec-2019
<EMAIL>
* pass in ontology name as a param
https://github.ibm.com/GTS-CDO/unstructured-analytics/pull/1587
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._ontology_name = ontology_name
if self._is_debug:
self.logger.debug("Instantiate PerformSupervisedParsing")
def process(self,
original_ups: str,
normalized_ups: str) -> list:
from nlutext.core.svc import PerformDeepTokenization
# tokenize the UPS
doc = self.__nlp(normalized_ups)
# perform deeper tokenization (includes spell correction)
deep_tok = PerformDeepTokenization(doc=doc,
is_debug=self._is_debug,
ontology_name=self._ontology_name)
tokenization_result = deep_tok.process()
deep_nlu = PerformDeepNLU(is_debug=self._is_debug,
ontology_name=self._ontology_name)
_, tags = deep_nlu.process(doc=doc,
some_original_ups=original_ups,
some_normalized_ups=normalized_ups,
some_tokenation_result=tokenization_result)
return [x for x in tags if x]
```
#### File: core/svc/remove_overlapping_tags.py
```python
from base import BaseObject
class RemoveOverlappingTags(BaseObject):
"""
Purpose:
Some Annotations (tags) overlap with other Annotations (tags)
Example 1:
The tag "Windows Server 2003" overlaps with:
"Windows"
"Windows Server"
"Server 2003"
Assuming the annotation model produces all four tags:
[ "Windows", "Windows Server", "Server 2003", "Windows Server 2003"]
This function will remove all the overlapped tags, to leave only:
[ "Windows Server 2003"]
Example 2:
The reality is slightly more complicated because each tag has
an associated confidence level generated by the annotation model
The actual input to this function is a list of tag tuples:
[ ('windows', 100),
('windows server', 90),
('server 2003', 85),
('windows server 2003', 70) ]
The tuple is composed of
1. the tag tuple[0]
2. the confidence level tuple[1]
The function will find overlapping tags, but only removes the smaller tag
when the confidence levels of the overlapping tags are sufficiently close
To take an extreme example, the tag
("AWS Certification for Cloud Computing", 45)
certainly overlaps with
("Cloud Computing", 95)
but the longer tag only has 45% chance of being accurate
and the shorter tag has a 95% chance of being accurate.
because the delta between these two tags is sufficiently high,
they are not considered to be overlapping
"""
__results = []
def __init__(self,
tag_tuples: list,
score_interval: int = 10,
is_debug: bool = False):
"""
Created:
21-Aug-2019
<EMAIL>
* refactored out of text-parser
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/796
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._score_interval = score_interval
self._process(tag_tuples)
def results(self) -> list:
return self.__results
def _compare_scores(self,
tuple_1: tuple,
tuple_2: tuple) -> bool:
"""
Purpose:
Compare the confidence scores of two tags
:param tuple_1:
the first tuple (tag, score)
:param tuple_2:
the second tuple (tag, score)
:return:
True if the interval is within the defined tag-delta
False if the interval is outside the defined tag-delta
"""
score_1 = tuple_1[1] # (tag, score)
score_2 = tuple_2[1] # (tag, score)
return abs(score_1 - score_2) < self._score_interval
def _process(self,
tag_tuples: list) -> None:
"""
:param tag_tuples:
a list of tag tuples
Sample Input:
[ ('aws', 100),
('aws certification', 73),
('plan', 73),
('financial plan', 73) ]
Sample Results:
[ ('aws certification', 73),
('financial plan', 73) ]
"""
subsumed = set()
for tt1 in tag_tuples:
for tt2 in tag_tuples:
if tt1[0] == tt2[0]:
continue
if tt1[0] in tt2[0] and self._compare_scores(tt1, tt2):
subsumed.add(tt1)
subsumed_tags = [x[0] for x in subsumed]
tag_tuples = [x for x in tag_tuples if x[0] not in subsumed_tags]
self.__results = tag_tuples
```
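A hedged usage sketch of `RemoveOverlappingTags`, reusing the sample input from the `_process()` docstring; it assumes the repo's `base` package is importable.

```python
# Hedged usage sketch
tag_tuples = [('aws', 100),
              ('aws certification', 73),
              ('plan', 73),
              ('financial plan', 73)]
remover = RemoveOverlappingTags(tag_tuples=tag_tuples,
                                score_interval=10,
                                is_debug=True)
print(remover.results())
# ('plan', 73) is dropped: it is subsumed by ('financial plan', 73) and the confidence
# delta is within the 10-point interval; ('aws', 100) survives because its delta
# against ('aws certification', 73) is too large to count as overlapping.
```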
#### File: core/svc/batch_task_factories.py
```python
import abc
import os
# ASSEMBLE_CHUNK_SIZE = int(os.environ['RQ_CHUNK_SIZE_ASSEMBLE'])
# BADGE_CHUNK_SIZE = int(os.environ['RQ_CHUNK_SIZE_BADGE'])
# PARSE_CHUNK_SIZE = int(os.environ['RQ_CHUNK_SIZE_PARSE'])
# XDM_CHUNK_SIZE = int(os.environ['RQ_CHUNK_SIZE_XDM'])
class StageTasks(abc.ABC):
@abc.abstractmethod
def create_tasks(self, manifest_name, manifest_activity):
def a_func(a, b):
return a+b
return [('string_for_the task_id', a_func, (1, 2))]
def opening_message(self):
return ''
def closing_message(self):
return ''
@staticmethod
def chunks(total, chunk_size):
chunks = []
first = 0
while first < total:
last = first + chunk_size - 1 if (first + chunk_size) < total else total - 1
chunks.append((str(first), str(last)))
first += chunk_size
return chunks
@staticmethod
def is_activity_under_focus(manifest_activity):
if 'RQ_FOCUS_ONLY_ON' in os.environ and \
manifest_activity.lower() not in os.environ['RQ_FOCUS_ONLY_ON'].lower():
focus_on = os.environ['RQ_FOCUS_ONLY_ON']
print(f'Skipping "{manifest_activity}" because we are focusing on "{focus_on}"')
return False
else:
return True
class IngestsTasks(StageTasks):
def __init__(self):
from dataingest import call_ingest_api
self.function = call_ingest_api
self.count = 0
def create_tasks(self, manifest_name, manifest_activity):
delay_range = 0
self.count += 1
return [
('', self.function, (manifest_name, manifest_activity, 'true', str(delay_range)))
]
def opening_message(self):
return 'Starting ingestion...'
def closing_message(self):
return 'Done with ingestion.'
class AssembleApiTasks(StageTasks):
def __init__(self):
from dataingest import call_assemble_api
self.function = call_assemble_api
self.activity_names = set()
class PreAssembleTasks(AssembleApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
return [
('flush_target',
self.function,
(manifest_name, manifest_activity, 'flush_target', '', '', ''))
]
class AssembleTasks(AssembleApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
tasks = []
task_specs = self.function(manifest_name, manifest_activity, 'get_sources', '', '', '')
for collection, count in task_specs:
if not count:
print(f'Skipping empty collection {collection}')
continue
for first, last in self.chunks(count, int(os.environ['RQ_CHUNK_SIZE_ASSEMBLE'])):
tasks.append(
(f'assemble_{collection}_{first}-{last}',
self.function,
(manifest_name, manifest_activity, 'assemble', collection, first, last))
)
return tasks
class PostAssembleTasks(AssembleApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
self.activity_names.add(manifest_activity)
return [
('index_target',
self.function,
(manifest_name, manifest_activity, 'index_target', '', '', ''))
]
def closing_message(self):
return f'Done with collection assembly.\nCompleted activities {self.activity_names}.'
class BadgeAnalysisApiTasks(StageTasks):
def __init__(self):
from cendalytics.badges.bp import call_badge_analysis_api
self.function = call_badge_analysis_api
class PreBadgeAnalysisTasks(BadgeAnalysisApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
return [
('flush_target',
self.function,
(manifest_name, manifest_activity, 'flush_target', '', ''))
]
class BadgeTaggingTasks(BadgeAnalysisApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
tasks = []
task_specs = self.function(manifest_name, manifest_activity, 'get_sources', '', '')
for collection, count in task_specs:
if not count:
print(f'Skipping empty collection {collection}')
continue
for first, last in self.chunks(count, int(os.environ['RQ_CHUNK_SIZE_BADGE'])):
tasks.append(
(f'badge_tagging_{collection}_{first}-{last}',
self.function,
(manifest_name, manifest_activity, 'analyze_per_badge', first, last))
)
return tasks
class BadgeDistributionTasks(BadgeAnalysisApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
return [
('analyze_distribution',
self.function,
(manifest_name, manifest_activity, 'analyze_distribution', '', ''))
]
def closing_message(self):
return f'Done with badges analysis'
class ParseApiTasks(StageTasks):
def __init__(self):
from dataingest import call_parse_api
self.function = call_parse_api
self.collection_set = set()
class PreParseTasks(ParseApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
if not self.is_activity_under_focus(manifest_activity):
return []
return [
('flush_target',
self.function,
(manifest_name, manifest_activity, 'flush_target', '', ''))
]
class ParseTasks(ParseApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
if not self.is_activity_under_focus(manifest_activity):
return []
to_fix = ['Patent', 'Feedback', 'JRS']
if manifest_activity in to_fix:
print("TO-DO. Fix patents/feedback/JRS parsing")
return []
tasks = []
task_specs = self.function(manifest_name, manifest_activity, 'get_sources', '', '')
for collection, count in task_specs:
if not count:
print(f'Skipping empty collection {collection}')
continue
self.collection_set.add(collection)
for first, last in self.chunks(count, int(os.environ['RQ_CHUNK_SIZE_PARSE'])):
tasks.append(
(f'parse_{collection}_{first}-{last}',
self.function,
(manifest_name, manifest_activity, 'parse', first, last))
)
return tasks
def closing_message(self):
return f'Done with collection parsing.\nCompleted tagging of {self.collection_set}.'
class PostParseTasks(ParseApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
if not self.is_activity_under_focus(manifest_activity):
return []
return [
('index_target',
self.function,
(manifest_name, manifest_activity, 'index_target', '', ''))
]
def closing_message(self):
return 'Done indexing the tagged collections'
class XdmApiTasks(StageTasks):
def __init__(self):
from cendantdim.batch.bp import call_dimensions_api
self.function = call_dimensions_api
self.collection_set = set()
class PreXdmTasks(XdmApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
if not self.is_activity_under_focus(manifest_activity):
return []
return [
('flush_target',
self.function,
(manifest_name, manifest_activity, 'flush_target', '', ''))
]
class XdmTasks(XdmApiTasks):
def create_tasks(self, manifest_name, manifest_activity):
if not self.is_activity_under_focus(manifest_activity):
return []
tasks = []
task_specs = self.function(manifest_name, manifest_activity, 'get_sources', '', '')
for collection, count in task_specs:
if not count:
print(f'Skipping empty collection {collection}')
continue
self.collection_set.add(collection)
for first, last in self.chunks(count, int(os.environ['RQ_CHUNK_SIZE_XDM'])):
tasks.append(
(f'parse_{collection}_{first}-{last}',
self.function,
(manifest_name, manifest_activity, 'parse', first, last))
)
return tasks
def closing_message(self):
return f'Done with dimension analysis.\nCompleted XDM processing of {self.collection_set}.\nDEALLOCATE THE WORKERPOOL'
class ToDB2Tasks(StageTasks):
def __init__(self):
from dataingest import run_manifest_command
self.function = run_manifest_command
def create_tasks(self, manifest_name, manifest_activity):
if not self.is_activity_under_focus(manifest_activity):
return []
params = (manifest_name, manifest_activity)
return [
('', self.function, params)
]
def closing_message(self):
return f'Done transferring collections to DB2'
```
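A small illustration of the `StageTasks.chunks()` helper used by the task factories above; the boundaries are inclusive, zero-based, string-typed offsets, matching the first/last arguments passed to the stage APIs.

```python
# chunks(total, chunk_size) splits a record count into inclusive (first, last) offsets
print(StageTasks.chunks(10, 4))  # [('0', '3'), ('4', '7'), ('8', '9')]
```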
#### File: core/svc/batch_worker_environment_vars.py
```python
import os
from base import BaseObject
from base import RedisClient
class BatchWorkerEnvironmentVars(BaseObject):
_vars = [
'RQ_CHUNK_SIZE_ASSEMBLE',
'RQ_CHUNK_SIZE_BADGE',
'RQ_CHUNK_SIZE_PARSE',
'RQ_CHUNK_SIZE_XDM',
'RQ_FOCUS_ONLY_ON',
'RQ_MAX_WIP_ASSEMBLE',
'RQ_MAX_WIP_INGEST',
'RQ_STOP_AFTER',
'RQ_TIMEOUT_MINUTES_JOB',
'RQ_WORKER_PROCESSES',
'ASSEMBLE_BADGES_BUILD', # to-do: rename this
'SUPPLY_SRC_BUILD',
'SUPPLY_TAG_BUILD',
'SUPPLY_XDM_BUILD',
'DEMAND_SRC_BUILD',
'DEMAND_TAG_BUILD',
'DEMAND_XDM_BUILD',
'LEARNING_SRC_BUILD',
'LEARNING_TAG_BUILD',
'LEARNING_XDM_BUILD',
'FEEDBACK_SRC_BUILD',
'FEEDBACK_TAG_BUILD',
'FEEDBACK_XDM_BUILD',
'PATENT_SRC_BUILD',
'PATENT_TAG_BUILD',
]
_PREFIX = 'workers_env:'
def __init__(self):
BaseObject.__init__(self, __name__)
self.redis = RedisClient(decode_responses=True).redis
def keys(self):
return self.redis.keys(pattern=f'{self._PREFIX}*')
def clean(self):
keys = self.keys()
if keys:
self.logger.debug(f'Deleting keys {keys}...')
self.redis.delete(*keys)
def push(self):
self.clean()
env_dict = {f'{self._PREFIX}{env_var}': os.environ[env_var]
for env_var in self._vars if env_var in os.environ}
if env_dict:
self.logger.debug(f'Pushing environment:\n{env_dict}')
self.redis.mset(env_dict)
else:
self.logger.warning('No environment to push to rqWorkers')
def pull(self):
keys = self.keys()
if keys:
values = self.redis.mget(*keys)
for ii in range(len(keys)):
key = keys[ii].replace(self._PREFIX, '')
self.logger.debug(f'Setting {key} to "{values[ii]}"')
os.environ[key] = values[ii]
else:
self.logger.warning('No environment pulled for rqWorkers')
```
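A hedged usage sketch of the push/pull split; it assumes a reachable Redis instance behind `base.RedisClient`.

```python
# Hedged usage sketch: the dispatcher publishes its environment, the workers consume it.
env_vars = BatchWorkerEnvironmentVars()
env_vars.push()   # dispatcher side: copy the whitelisted os.environ entries into Redis
env_vars.pull()   # worker side: read them back into os.environ before running jobs
```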
#### File: core/svc/count_mongo_collections.py
```python
import datetime
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
from datamongo import BaseMongoClient
from datamongo import CendantCollection
class CountMongoCollections(BaseObject):
""" Provide a convenient way to count the total records in MongoDB collections
"""
def __init__(self,
is_debug: bool = False):
"""
Created:
4-Oct-2019
<EMAIL>
Updated:
26-Nov-2019
<EMAIL>
* add filter-by-date functionality
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
@staticmethod
def _aggregate(names: list):
return {
"src": sorted([name for name in names if "src" in name]),
"tag": sorted([name for name in names if "tag" in name]),
"xdm": sorted([name for name in names if "xdm" in name])}
@staticmethod
def _filter(filter_name: str,
names: list) -> list:
return [name for name in names if name.startswith(filter_name)]
@staticmethod
def _filter_by_date(names: list) -> list:
mydate = datetime.datetime.now() # '2019-11-26 08:13:58.660388'
tokens = str(mydate).split(' ')[0].split('-') # ['2019', '11', '26']
pattern = f"{tokens[0]}{tokens[1]}" # '201911'
return [name for name in names if pattern in name]
def _find_names(self,
base_mongo_client: BaseMongoClient) -> dict:
"""
Purpose:
Generate a dictionary object that aggregates collections by type and stage
Sample Output:
{'demand': {'src': ['demand_src_20190913',
...
'demand_src_20190909'],
'tag': ['demand_tag_20190917',
...
'demand_tag_20191003'],
'xdm': ['demand_xdm_20190927']},
'learning': {'src': ['learning_src_20190806',
...
'learning_src_20191002'],
'tag': ['learning_tag_20190806',
...
'learning_tag_20191004'],
'xdm': []},
'supply': {'src': ['supply_src_20190801',
...
'supply_src_20190814'],
'tag': ['supply_tag_20190913',
...
'supply_tag_20190817'],
'xdm': ['supply_xdm_20190917',
...
'supply_xdm_20190807']}}
:param base_mongo_client:
an instantiated mongoDB client instance
:return:
a dictionary of collections
"""
client = base_mongo_client.client
names = sorted(dict((db, [collection for collection in client[db].collection_names()])
for db in client.database_names()).values())[0]
names = self._filter_by_date(names)
d_collections = {}
for filter_name in ["supply", "demand", "learning", "feedback", "patent", "github"]:
d_collections[filter_name] = self._aggregate(names=self._filter(filter_name, names=names))
return d_collections
def _count_sizes(self,
base_mongo_client: BaseMongoClient,
d_collections: dict) -> DataFrame:
"""
Purpose:
Count Collection Sizes and Generate DataFrame of output
Sample Output:
+----+------------+---------+-----------------------+----------+
| | Category | Count | Name | Type |
|----+------------+---------+-----------------------+----------|
| 0 | src | 207169 | supply_src_20190801 | supply |
| 1 | src | 238246 | supply_src_20190814 | supply |
...
| 40 | tag | 174660 | learning_tag_20190923 | learning |
| 41 | tag | 169517 | learning_tag_20191004 | learning |
+----+------------+---------+-----------------------+----------+
:param base_mongo_client:
an instantiated mongoDB client instance
:param d_collections:
output produced by the prior step
:return:
a DataFrame of output
"""
results = []
for collection_type in d_collections:
for collection_category in d_collections[collection_type]:
for collection_name in d_collections[collection_type][collection_category]:
total = CendantCollection(some_collection_name=collection_name,
some_base_client=base_mongo_client).count()
if self._is_debug:
self.logger.debug(f"Collection Counted "
f"(name={collection_name}, total={total})")
results.append({
"Type": collection_type,
"Category": collection_category,
"Name": collection_name,
"Count": total})
return pd.DataFrame(results)
def process(self):
base_mongo_client = BaseMongoClient(is_debug=True)
d_collections = self._find_names(base_mongo_client)
df = self._count_sizes(base_mongo_client, d_collections)
self.logger.debug('\n'.join([
"Cendant Collection Counting Completed",
tabulate(df,
headers='keys',
tablefmt='psql')]))
def main():
CountMongoCollections().process()
if __name__ == "__main__":
import plac
plac.call(main)
```
#### File: augment/svc/add_language_variability.py
```python
import pprint
from collections import Counter
from base import BaseObject
from datadict import LoadStopWords
from nlusvc import TextAPI
class AddLanguageVariability(BaseObject):
""" Service to Augment the Synonyms ('Language Variability') file with new entries
all output from this file represents net-new non-duplicated entries into KB
"""
_text_api = TextAPI(is_debug=False)
_stopwords = LoadStopWords(is_debug=True).load(refresh_cache=True)
def __init__(self,
is_debug: bool = True):
"""
Created:
25-Jul-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/493
Updated:
10-Aug-2019
<EMAIL>
* completely rewritten in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/701
* Developer's Note:
it's likely that we'll end up with multiple recipes for language variability
each recipe will be a separate domain component and all will be orchestrated via a service
Updated:
13-Jan-2020
"""
BaseObject.__init__(self, __name__)
self.is_debug = is_debug
def _count_tokens(self,
terms: list) -> Counter:
"""
Purpose:
Tokenize input and count the tokens
:param terms:
a list of unstructured text
the list may be of any size
each item in the list may be of any size
:return:
a collection.Counter instance containing a count of tokens in the list
"""
c = Counter()
for term in terms:
[c.update({x: 1}) for x in term.lower().split(' ')
if x not in self._stopwords]
return c
@staticmethod
def _filter_tokens_by_count(c: Counter,
min_threshold: int = 1) -> Counter:
"""
Purpose:
filter a collection.Counter instance by count
:param c:
a collection.Counter instance
:param min_threshold:
the minimium valid count
:return:
a new collection.Counter instance
"""
return Counter({x: c[x] for x in c if c[x] >= min_threshold})
@staticmethod
def _subsumed(c: Counter) -> set:
"""
Purpose:
Find Subsumed Tokens
Sample Input:
[ 'redhat_certified_system_administrator',
'open_stack',
'system_administrator',
'redhat' ]
Sample Output:
[ 'redhat_certified_system_administrator',
'system_administrator',
'redhat' ]
a 'subsumed' token is one that contains another known token as a sub-string
:param c:
a collection.Counter instance
:return:
a set of subsumed tokens
"""
subsumed = set()
for t1 in c:
for t2 in c:
if t1 == t2:
continue
if t1 in t2 or t2 in t1:
subsumed.add(t1)
subsumed.add(t2)
return subsumed
@staticmethod
def _patterns(delta: set,
subsumed: set) -> list:
"""
Purpose:
Create a list of patterns for token formation
:param delta:
a set of tokens that are not subsumed by any other token
Sample Input:
{ 'open_stack' }
:param subsumed:
a set of subsumed tokens (generated from the 'subsumed' function)
Sample Input:
{ 'redhat_certified_system_administrator',
'system_administrator',
'redhat' }
:return:
a list of candidate patterns
"""
s = set()
for t1 in subsumed:
for t2 in delta:
if t1 == t2:
continue
s.add(f"{t1}+{t2}")
s.add(f"{t1}_{t2}")
return sorted(s)
def process(self,
terms: list,
min_threshold: int = 2) -> list:
"""
Purpose:
Given a list of terms, create variations for the synonyms_kb.csv file
:param terms:
any list of terms or phrases
:param min_threshold:
the minimum token count that is acceptable
any token beneath this threshold is typically considered low-value
perhaps useful only for outlier and edge patterns
:return:
the variations
"""
c = self._count_tokens(terms)
if self.is_debug:
self.logger.debug('\n'.join([
"Token Count:",
pprint.pformat(c.most_common(25))]))
c = self._filter_tokens_by_count(c, min_threshold)
if self.is_debug:
self.logger.debug('\n'.join([
f"Token Filter (min-threshold={min_threshold}):",
pprint.pformat(c.most_common(25))]))
tokens = set([x for x in c])
subsumed = self._subsumed(c)
if self.is_debug:
self.logger.debug(f"Token Subsumption: {subsumed}")
delta = tokens.difference(subsumed)
if self.is_debug:
self.logger.debug(f"Token Delta: {delta}")
patterns = self._patterns(delta, subsumed)
if self.is_debug:
self.logger.debug(f"Pattern Generation: {patterns}")
return patterns
```
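A hedged usage sketch of `AddLanguageVariability.process()`; it assumes the repo packages (`base`, `datadict`, `nlusvc`) are importable, results depend on the stop-word dictionary, and the input terms are assumed to already use the compound-token form shown in the docstrings above.

```python
# Hedged usage sketch: illustrative terms only.
terms = ["redhat_certified_system_administrator open_stack",
         "system_administrator open_stack",
         "redhat open_stack"]
print(AddLanguageVariability(is_debug=True).process(terms, min_threshold=1))
# e.g. ['redhat+open_stack', 'redhat_open_stack',
#       'system_administrator+open_stack', 'system_administrator_open_stack', ...]
```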
#### File: mda/dmo/entity_dict_generator.py
```python
from nltk.stem.snowball import SnowballStemmer
from base import BaseObject
stemmer = SnowballStemmer("english", ignore_stopwords=False)
class EntityDictGenerator(BaseObject):
def __init__(self, some_df):
"""
Updated:
12-Apr-2017
<EMAIL>
* renamed from "ProductDictGenerator"
Updated:
25-May-2017
<EMAIL>
* created 'get_params' method
Updated:
2-Aug-2017
<EMAIL>
* modify entity generation using provenance as a dictionary key
<https://github.ibm.com/abacus-implementation/Abacus/issues/1721#issuecomment-3080923>
Updated:
19-Mar-2018
<EMAIL>
* moved static methods to "private class"
* stemmer should only operate on unigrams
Updated:
21-Feb-2019
<EMAIL>
* migrated from text
Updated:
26-Mar-2019
<EMAIL>
* removed 'id' from attributes (only needed as key)
:param some_df:
"""
BaseObject.__init__(self, __name__)
self.df = some_df
def generate_tuple_l2(self, tokens):
t0 = self.stem(tokens[0])
t1 = self.stem(tokens[1])
if t0 != t1:
return [(t0, t1)]
return []
def generate_tuple_l3(self, tokens):
return [(
self.stem(tokens[0]),
self.stem(tokens[1]),
self.stem(tokens[2]))]
def generate_tuple_l4(self, tokens):
return [(
self.stem(tokens[0]),
self.stem(tokens[1]),
self.stem(tokens[2]),
self.stem(tokens[3]))]
def get_variation(self, some_token):
if "+" in some_token:
tmp = [x.lower().strip() for x in some_token.split("+")]
if 2 == len(tmp):
return self.generate_tuple_l2(tmp)
elif 3 == len(tmp):
return self.generate_tuple_l3(tmp)
elif 4 == len(tmp):
return self.generate_tuple_l4(tmp)
raise ValueError(
"Unrecognized Tuple (value = {0})".format(some_token))
return [some_token]
@staticmethod
def add_to_set(the_set, the_value):
if the_value is None:
return
if not isinstance(the_value, tuple):
the_value = the_value.strip().lower()
if len(the_value) == 0:
return
the_set.add(the_value)
def get_expanded_variations(self, some_label, some_list):
if some_list.startswith('"') and some_list.endswith('"'):
some_list = some_list[1:len(some_list) - 1]
the_set = set()
if 0 == len(some_label.split(" ")):
the_variant = stemmer.stem(some_label.lower())
self.add_to_set(the_set, the_variant)
else:
self.add_to_set(the_set, some_label.lower())
for token in some_list.split(","):
try:
for variant in self.get_variation(token):
self.add_to_set(the_set, variant)
except ValueError:
continue
# return list(sorted(the_set))
return list(the_set)
@staticmethod
def get_params(row):
d = {}
param = row["param"].strip()
# default param to 'type=label'
if 0 == len(param):
key = row["type"].strip().lower()
d[key] = row["label"].strip().lower()
return d
for p in param.split(","):
tokens = [x.lower().strip() for x in p.split("=")]
d[tokens[0]] = tokens[1]
return d
def process(self):
the_master_dict = {}
prov_list = filter(lambda x: len(x) > 0, self.df.prov.unique())
for prov in prov_list:
df2 = self.df[self.df.prov == prov]
for i, row in df2.iterrows():
the_label = row["label"].strip()
the_id = self.key(the_label)
the_type = self.key(row["type"].strip())
the_params = self.get_params(row)
the_scope = row["scope"].strip()
the_variants = self.get_expanded_variations(
some_label=the_label,
some_list=row["variations"].strip())
the_master_dict[the_id] = {
# "id": the_id,
"label": the_label,
"type": the_type,
# "params": the_params,
"variations": the_variants
# "scope": the_scope
}
return the_master_dict
@staticmethod
def key(value):
return value.replace(" ", "_").lower().strip()
@staticmethod
def stem(some_token):
"""
Purpose:
perform stemming operation using Snowball
Rules:
1. Only stem unigrams
non-unigrams contain 1..* whitespace tokens
2. Do not stem patterns
patterns start with "$"
3. Do not stem pre-compounded tokens
e.g. "contract_number" is pre-compounded in a prior stage
running this through the stemmer would generate
"contract_numb"
"""
if " " in some_token:
return some_token
if "_" in some_token:
return some_token
if some_token.startswith("$"):
return some_token
# entity matching is supposed to have both “exact” and “partial” matching, but
# the partial matching may have been removed by accident, so this line is commented out to stop the stemming
# return stemmer.stem(some_token.strip())
return some_token
```
#### File: mda/dmo/entity_pattern_generator.py
```python
from rdflib import Graph
from base import BaseObject
from datagraph import OwlGraphConnector
class EntityPatternGenerator(BaseObject):
""" Generate the NLU Variations for each token into a file
that is used by the NLU Parser at runtime """
# NOTE: no variables should be placed here (ref GIT-1601)
def __init__(self,
ontology_name: str,
is_debug: bool = False):
"""
Created:
26-Mar-2019
<EMAIL>
Updated:
1-Apr-2019
<EMAIL>
* added 'see-also'
Updated:
11-Apr-2019
<EMAIL>
* change 'synonyms' from set to dictionary
and add key/value for injection into synonyms generator downstream
Updated:
8-May-2019
<EMAIL>
* added exception handling
I had a funky label in the cendant OWL file and it took
forever to trace down to this particular MDA function
Updated:
16-Jul-2019
<EMAIL>
* removed 'go-words' (the input file was empty)
* read patterns from the synonyms.csv file
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/440
Updated:
21-Nov-2019
<EMAIL>
* account for new expansion pattern developed here
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1424#issue-10910179
Updated:
13-Dec-2019
<EMAIL>
* pass in ontology name as a param
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1583#issuecomment-16612838
Updated:
14-Dec-2019
<EMAIL>
* all variables initialized in __init__ statement
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1601
"""
BaseObject.__init__(self, __name__)
from taskmda.mda.dmo import OntologyLabelGenerator
self._d_patterns = {}
self._d_synonyms = {}
self._is_debug = is_debug
self._ontology_name = ontology_name
self._label_generator = OntologyLabelGenerator(self._owlg())
self._invalid_patterns = ["for", "and", "-", "with", "of", "&"]
def _owlg(self) -> Graph:
return OwlGraphConnector(is_debug=self._is_debug,
ontology_name=self._ontology_name).process()
def _patterns(self,
label: str) -> None:
"""
Sample Input:
'Redhat Data Virtualization Certification'
Sample Output:
[ 'redhat+data+virtualization+certification',
'redhat_data_virtualization_certification']
Update Synonyms:
{ 'redhat_data_virtualization_certification': [ 'redhat data virtualization certification' ]}
Update Patterns:
{ 'redhat_data_virtualization_certification': [ 'redhat+data+virtualization+certification' ]}
:param label:
any entity input
"""
label = label.lower().strip()
label = label.replace("/", "")
tokens = [x.strip().lower() for x in label.split() if x]
tokens = [x for x in tokens if x not in self._invalid_patterns]
p1 = "+".join(tokens)
p2 = "_".join(tokens)
if p2 not in self._d_patterns:
self._d_patterns[p2] = []
if p1 not in self._d_patterns[p2]:
self._d_patterns[p2].append(p1)
if p2 not in self._d_synonyms:
self._d_synonyms[p2] = []
if label not in self._d_synonyms[p2]:
self._d_synonyms[p2].append(label)
def _patterns_from_synonyms_file(self) -> None:
"""
Purpose:
Extract explicit patterns from the Synonyms file
Sample Input:
redhat_data_virtualization_certification ~
ex450,
redhat data virtual certification,
redhat data virtualization certification,
redhat+data+certification,
redhat+virtualization+certification
Sample Output:
{ 'redhat_data_virtualization_certification': [
'redhat+data+certification',
'redhat+virtualization+certification' ]}
"""
from taskmda.mda.dmo import SynonymsKbReader
df_synonyms = SynonymsKbReader.by_name(self._ontology_name).read_csv()
for _, row in df_synonyms.iterrows():
canon = row['canon']
variants = row['variants'].replace('!', '+') # GIT-1424-10910179
variants = variants.split(',')
variants = [x.strip() for x in variants
if x and "+" in x]
if not len(variants):
continue
if canon not in self._d_patterns:
self._d_patterns[canon] = []
[self._d_patterns[canon].append(x) for x in variants
if x not in self._d_patterns[canon]]
def _patterns_from_owl_file(self) -> None:
"""
Purpose:
Generate implicit patterns from the OWL file (e.g., the Cendant Ontology)
Sample Entity:
'Redhat Data Virtualization Certification'
Sample Patterns:
[ 'redhat+data+virtualization+certification',
'redhat_data_virtualization_certification' ]
"""
owl_labels = self._label_generator.labels()
for label in owl_labels:
if "'" in label: # GIT-1829-17642326
label = label.replace("'", "")
self._patterns(label)
try:
[self._patterns(x)
for x in self._label_generator.see_also_by_label(label)]
[self._patterns(x)
for x in self._label_generator.infinitive_by_label(label)]
except Exception as e:
self.logger.error('\n'.join([
"Failed to Generate Label",
f"\tLabel: {label}"]))
self.logger.exception(e)
raise ValueError("Entity Pattern Generation Failure")
def process(self) -> dict:
self._patterns_from_owl_file()
self._patterns_from_synonyms_file()
self.logger.debug(f"Generated Variations "
f"(total={len(self._d_patterns)})")
return {
"patterns": self._d_patterns,
"synonyms": self._d_synonyms}
```
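As a quick illustration of the `_patterns` transform documented above, the following standalone sketch reproduces the tokenization on the docstring's sample label; `derive_patterns` and the hard-coded stop-token list are illustrative only and not part of the module.
```python
# illustrative sketch of EntityPatternGenerator._patterns (not the module's code)
INVALID_PATTERNS = ["for", "and", "-", "with", "of", "&"]

def derive_patterns(label: str) -> tuple:
    label = label.lower().strip().replace("/", "")
    tokens = [t for t in label.split() if t and t not in INVALID_PATTERNS]
    return "+".join(tokens), "_".join(tokens)

plus_key, underscore_key = derive_patterns("Redhat Data Virtualization Certification")
print(plus_key)        # redhat+data+virtualization+certification
print(underscore_key)  # redhat_data_virtualization_certification
```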
#### File: mda/dmo/entity_revmap_generator.py
```python
from base import BaseObject
class EntityRevmapGenerator(BaseObject):
def __init__(self, some_dict):
"""
Updated:
8-Dec-2016
<EMAIL>
* change from key:value to key:list
Updated:
12-Apr-2017
<EMAIL>
* renamed from "ProductEntityRevmapGeneratorevmapGenerator"
Updated:
2-Aug-2017
<EMAIL>
* clean up params/init section
* iterate dictionary by prov:
<https://github.ibm.com/abacus-implementation/Abacus/issues/1721#issuecomment-3080923>
"""
BaseObject.__init__(self, __name__)
if some_dict is None:
raise ValueError
self.dict = some_dict
@staticmethod
def normalize(some_revmap):
the_normalized_map = {}
for key in some_revmap:
the_normalized_map[key] = list(some_revmap[key])
return the_normalized_map
def create(self):
d = {}
for key in self.dict:
for variant in self.dict[key]["variations"]:
if variant not in d:
d[variant] = set()
d[variant].add(key)
return self.normalize(d)
def process(self):
return self.create()
```
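A small sketch of the inversion performed by `EntityRevmapGenerator.create` and `normalize`: every variation ends up pointing back to the list of keys that declare it. The sample dictionary below is invented for illustration.
```python
# invert {key: {"variations": [...]}} into {variation: [keys]}
source = {
    "redhat": {"variations": ["red hat", "rhel"]},
    "linux": {"variations": ["rhel", "gnu/linux"]},
}

revmap = {}
for key, entry in source.items():
    for variant in entry["variations"]:
        revmap.setdefault(variant, set()).add(key)

normalized = {variant: sorted(keys) for variant, keys in revmap.items()}
print(normalized)  # {'red hat': ['redhat'], 'rhel': ['linux', 'redhat'], 'gnu/linux': ['linux']}
```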
#### File: mda/dto/kb_paths.py
```python
import logging
logger = logging.getLogger(__name__)
BASE_PATH = "python/datadict/core"
class KbPaths(object):
"""
Purpose:
paths for generated dictionaries
these are output file paths used in MDA.sh process
specifies which directories the generated files are placed in
Updated:
13-Dec-2019
<EMAIL>
* add ontology-name as a param
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1583
"""
@staticmethod
def rel_implies(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_implies_kb.py"
@staticmethod
def rel_owns(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_owns_kb.py"
@staticmethod
def rel_requires(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_requires_kb.py"
@staticmethod
def rel_produces(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_produces_kb.py"
@staticmethod
def rel_runson(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_runson_kb.py"
@staticmethod
def rel_versions(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_versions_kb.py"
@staticmethod
def country_codes():
return "{0}/os/{1}".format(BASE_PATH, "country_codes.py")
@staticmethod
def city_to_region():
return "{0}/os/{1}".format(BASE_PATH, "city_region_kb.py")
@staticmethod
def country_to_region():
return "{0}/os/{1}".format(BASE_PATH, "country_region_kb.py")
@staticmethod
def rel_infinitive(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_infinitive_kb.py"
@staticmethod
def rel_similarity(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_similarity_kb.py"
@staticmethod
def rel_parts(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_parts_kb.py"
@staticmethod
def rel_defines(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/rel_defines_kb.py"
@staticmethod
def stopwords():
return "{0}/os/{1}".format(BASE_PATH, "stopwords_kb.py")
@staticmethod
def synonyms(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/synonym.py"
@staticmethod
def seealso(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/seealso.py"
@staticmethod
def dimesionality(a_name: str):
return "{0}/os/{1}".format(BASE_PATH, f"dimensionality_{a_name}_kb.py")
@staticmethod
def references():
return f"{BASE_PATH}/os/seealso.py"
@staticmethod
def jrs_lookup():
return "{0}/os/{1}".format(BASE_PATH, "jrs_lookup.py")
@staticmethod
def mapping():
return "{0}/os/{1}".format(BASE_PATH, "mapping_table.py")
@staticmethod
def mapping_rev():
return "{0}/os/{1}".format(BASE_PATH, "mapping_rev.py")
@staticmethod
def reverse_synonyms(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/reverse_synonym.py"
@staticmethod
def reverse_regex_synonyms(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/reverse_regex_synonym.py"
@staticmethod
def taxonomy():
return "{0}/os/{1}".format(BASE_PATH, "taxonomy_kb.py")
@staticmethod
def taxonomy_revmap():
return "{0}/os/{1}".format(BASE_PATH, "taxonomy_revmap_kb.py")
@staticmethod
def patterns(ontology_name:str):
return f"{BASE_PATH}/os/{ontology_name}/patterns.py"
@staticmethod
def certifications():
return "{0}/os/{1}".format(BASE_PATH, "certifications_kb.py")
@staticmethod
def certs_hierarchy():
return "{0}/os/{1}".format(BASE_PATH, "certification_hierarchy_kb.py")
@staticmethod
def labels(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/labels.py"
@staticmethod
def badges():
return "{0}/os/{1}".format(BASE_PATH, "badges.py")
@staticmethod
def ngrams(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/ngrams.py"
@staticmethod
def parents(ontology_name: str):
return f"{BASE_PATH}/os/{ontology_name}/parents.py"
@staticmethod
def badge_analytics():
return "{0}/os/{1}".format(BASE_PATH, "badge_analytics.py")
@staticmethod
def badge_distribution():
return "{0}/os/{1}".format(BASE_PATH, "badge_distribution.py")
```
#### File: mda/svc/generate_entity_ngrams.py
```python
import os
import pprint
from base import BaseObject
from base import FileIO
from base import MandatoryParamError
class GenerateEntityNgrams(BaseObject):
def __init__(self,
ontology_name: str,
some_labels: list,
some_patterns: dict,
is_debug: bool = False):
"""
Updated:
1-Aug-2017
<EMAIL>
* added output-file param so this can be controlled by orchestrating service
<https://github.ibm.com/abacus-implementation/Abacus/issues/1721#issuecomment-3069168>
Updated:
21-Feb-2019
<EMAIL>
* migrated to text
Updated:
28-Mar-2019
<EMAIL>
* updated to add labels and patterns
Updated:
13-Dec-2019
<EMAIL>
* add ontology-name as a param
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1583
"""
BaseObject.__init__(self, __name__)
if not some_labels:
raise MandatoryParamError("Labels")
if not some_patterns:
raise MandatoryParamError("Patterns")
self._is_debug = is_debug
self._labels = some_labels
self._patterns = some_patterns
self._ontology_name = ontology_name
def process(self):
from taskmda.mda.dmo import GenericTemplateAccess
from taskmda.mda.dmo import EntityNgramDictGenerator
from taskmda.mda.dto import KbNames
from taskmda.mda.dto import KbPaths
dictionary = EntityNgramDictGenerator().process(self._labels,
list(self._patterns.values()))
the_json_result = pprint.pformat(dictionary, indent=4)
the_json_result = "{0} = {{\n {1}".format(
KbNames.entity_ngrams(), the_json_result[1:])
the_template_result = GenericTemplateAccess.process()
the_template_result = the_template_result.replace(
"CONTENT", the_json_result)
path = os.path.join(os.environ["CODE_BASE"],
KbPaths.ngrams(ontology_name=self._ontology_name))
FileIO.text_to_file(path, the_template_result)
return dictionary
```
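The `pformat`-plus-template step in `GenerateEntityNgrams.process` is the same mechanism most of these generators use to emit a `*_kb.py` module: pretty-print the dictionary, drop its leading `{` so a variable assignment can be spliced in front, and substitute the result into a `CONTENT` placeholder. A rough sketch follows, with a made-up template string standing in for `GenericTemplateAccess.process()` and a made-up variable name.
```python
import pprint

# hypothetical stand-in for the template returned by GenericTemplateAccess.process()
TEMPLATE = "# generated file -- do not edit by hand\nCONTENT\n"

def render_kb(variable_name: str, dictionary: dict) -> str:
    body = pprint.pformat(dictionary, indent=4)
    # splice the assignment in front of the pretty-printed dict, dropping its leading '{'
    body = "{0} = {{\n {1}".format(variable_name, body[1:])
    return TEMPLATE.replace("CONTENT", body)

print(render_kb("the_entity_ngrams_kb", {"redhat": ["red", "hat"]}))
```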
#### File: mda/svc/generate_flow_taxonomy.py
```python
import os
import pprint
from base import BaseObject
from base import FileIO
class GenerateFlowTaxonomy(BaseObject):
def __init__(self,
is_debug: bool = False):
"""
Created:
17-Jul-2019
<EMAIL>
* based on 'taxonomy-dict-generator' from the abacus-att project
<EMAIL>:abacus-implementation/abacus-att.git
* reference
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/453
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
@staticmethod
def _lines():
"""
TODO: in the abacus-att project there was a file called
flow_taxonomy_kb.csv
this explicit taxonomy was loaded and transformed into JSON
*** WE DON'T NEED TO MAINTAIN AN EXPLICIT TAXONOMY ***
we can dynamically re-create what that file looked like based on
analyzing the current flow mapping definitions
:return:
"""
return ['CONTRACT_VALIDATE_NUMBER_NOT_RED', 'CHITCHAT_GREETING']
@staticmethod
def get_revmap(some_dict):
revmap = {}
for key in some_dict:
for value in some_dict[key]:
revmap[value] = key
return revmap
@staticmethod
def _to_file(d_result: dict,
kb_name: str,
kb_path: str):
from taskmda.mda.dmo import GenericTemplateAccess
the_json_result = pprint.pformat(d_result, indent=4)
the_json_result = "{0} = {{\n {1}".format(
kb_name, the_json_result[1:])
the_template_result = GenericTemplateAccess.process()
the_template_result = the_template_result.replace(
"CONTENT", the_json_result)
path = os.path.join(os.environ["CODE_BASE"],
kb_path)
FileIO.text_to_file(path, the_template_result)
def process(self):
from taskmda.mda.dto import KbNames
from taskmda.mda.dto import KbPaths
from taskmda.mda.dmo import TaxonomyDictGenerator
the_taxonomy_dict = TaxonomyDictGenerator(self._lines()).process()
the_taxonomy_revmap = self.get_revmap(the_taxonomy_dict)
self._to_file(the_taxonomy_dict,
KbNames.flow_taxonomy(),
KbPaths.taxonomy())
self._to_file(the_taxonomy_revmap,
KbNames.flow_taxonomy_revmap(),
KbPaths.taxonomy_revmap())
```
#### File: mda/svc/generate_metrics.py
```python
import pprint
import time
from base import BaseObject
from datadict import DictionaryLoader
from datamongo import CendantCollection
class GenerateMetrics(BaseObject):
""" Generate Metrics """
def __init__(self,
ontology_name: str,
is_debug: bool = False):
"""
Created:
4-Apr-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/60
Updated:
13-Dec-2019
<EMAIL>
* major refactoring from the ground-up in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1583#issuecomment-16612838
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._ontology_name = ontology_name
self._dict_loader = DictionaryLoader(is_debug=self._is_debug,
ontology_name=self._ontology_name)
self.collection = self._collection()
@staticmethod
def _collection():
return CendantCollection(some_db_name="cendant",
some_collection_name="metrics_mda")
@staticmethod
def _count_keys_values(d: dict) -> dict:
total_keys = len(d)
total_values = 0
for k in d:
if type(d[k]) == list:
total_values += len(d[k])
elif type(d[k]) == dict:
total_values += sum([len(d[k][x]) for x in d[k]])
return {"keys": total_keys,
"values": total_values}
@staticmethod
def _count_keys(d: dict) -> dict:
total_keys = len(d)
return {"keys": total_keys}
def process(self):
label_count = self._count_keys(self._dict_loader.taxonomy().labels())
ngram_count = self._count_keys_values(self._dict_loader.taxonomy().ngrams())
parent_count = self._count_keys_values(self._dict_loader.taxonomy().parents())
pattern_count = self._count_keys_values(self._dict_loader.taxonomy().patterns())
defines_count = self._count_keys_values(self._dict_loader.relationships().defines())
implies_count = self._count_keys_values(self._dict_loader.relationships().implies())
infinitive_count = self._count_keys_values(self._dict_loader.relationships().infinitive())
owns_count = self._count_keys_values(self._dict_loader.relationships().owns())
parts_count = self._count_keys_values(self._dict_loader.relationships().parts())
requires_count = self._count_keys_values(self._dict_loader.relationships().requires())
similarity_count = self._count_keys_values(self._dict_loader.relationships().similarity())
versions_count = self._count_keys_values(self._dict_loader.relationships().versions())
svcresult = {
"tts": str(time.time()),
"owl": self._ontology_name,
"entities": {
"labels": label_count,
"ngrams": ngram_count,
"parents": parent_count,
"patterns": pattern_count},
"rels": {
"defines": defines_count,
"implies": implies_count,
"infinitive": infinitive_count,
"owns": owns_count,
"parts": parts_count,
"requires": requires_count,
"similarity": similarity_count,
"versions": versions_count}}
self.logger.info("\n".join([
"Generated MDA Metrics",
pprint.pformat(svcresult, indent=4)]))
self._collection().save(svcresult)
```
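The two counting helpers above only size the loaded dictionaries: `_count_keys` reports the number of top-level keys, while `_count_keys_values` also sums the lengths of list- or dict-valued entries. A tiny worked example on invented data:
```python
# keys = 2; values = 3 (list entries) + 2 (lengths of the nested dict's values) = 5
d = {
    "redhat": ["rhel", "red hat", "rh"],
    "sap": {"hana": ["in-memory db"], "abap": ["abap 7.5"]},
}

total_keys = len(d)
total_values = 0
for value in d.values():
    if isinstance(value, list):
        total_values += len(value)
    elif isinstance(value, dict):
        total_values += sum(len(v) for v in value.values())

print({"keys": total_keys, "values": total_values})  # {'keys': 2, 'values': 5}
```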
#### File: mda/svc/generate_ontology_dictionaries.py
```python
from base import BaseObject
class GenerateOntologyDictionaries(BaseObject):
""" Generates all dictionaries for a given Ontology """
def __init__(self,
ontology_name: str,
syns_only: bool = False,
is_debug: bool = False):
"""
Created:
13-Dec-2019
<EMAIL>
* refactored out of 'mda-orchestrator'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1583
Updated:
14-Jan-2020
<EMAIL>
* ensure synonym generation uses see-also dictionary
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1734
:param ontology_name:
the name of the Ontology (e.g., 'base' or 'biotech')
:param syns_only:
True only generate synonym dictionaries
False generate all dictionaries
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._syns_only = syns_only
self._ontology_name = ontology_name
if self._is_debug:
self.logger.debug('\n'.join([
"Initialize Ontology Generation",
f"\tSyns Only? {self._syns_only}",
f"\tOntology Name: {self._ontology_name}"]))
def _generate_syn_dictionaries(self) -> dict:
from taskmda import GenerateSynonyms
from taskmda import GenerateSeeAlso
from taskmda import GeneratePatterns
see_also_generation = GenerateSeeAlso(is_debug=self._is_debug,
ontology_name=self._ontology_name)
pattern_generation = GeneratePatterns(is_debug=self._is_debug,
ontology_name=self._ontology_name)
synonym_generation = GenerateSynonyms(is_debug=self._is_debug,
ontology_name=self._ontology_name)
# the 'injected synonyms' dictionary
# these do NOT come from the synonyms file
d_synonyms = {} # GIT-1734-17146514
def _merge(some_dict: dict) -> None:
for k in some_dict:
if k not in d_synonyms:
d_synonyms[k] = []
[d_synonyms[k].append(x) for x in some_dict[k]]
_merge(see_also_generation.process())
_merge(pattern_generation.process()["synonyms"])
synonym_generation.process(d_synonyms)
return d_synonyms
def _generate_dictionaries(self,
d_patterns: dict) -> None:
from taskmda import GenerateRelationships
from taskmda import GenerateLabels
from taskmda import GenerateParents
from taskmda import GenerateEntityNgrams
from taskmda import GenerateMetrics
from taskmda import GenerateSeeAlso
parents_gen = GenerateParents(is_debug=self._is_debug,
ontology_name=self._ontology_name)
ontology_gen = GenerateRelationships(is_debug=self._is_debug,
ontology_name=self._ontology_name)
label_gen = GenerateLabels(is_debug=self._is_debug,
ontology_name=self._ontology_name)
metrics_gen = GenerateMetrics(is_debug=self._is_debug,
ontology_name=self._ontology_name)
see_also = GenerateSeeAlso(is_debug=self._is_debug,
ontology_name=self._ontology_name)
parents_gen.process()
ontology_gen.process()
labels = label_gen.process()
see_also.process()
GenerateEntityNgrams(some_labels=labels,
is_debug=self._is_debug,
some_patterns=d_patterns,
ontology_name=self._ontology_name).process()
metrics_gen.process()
def process(self) -> None:
"""
Purpose
Generate Dictionaries from an Ontology
"""
d_patterns = self._generate_syn_dictionaries()
if not self._syns_only:
self._generate_dictionaries(d_patterns)
```
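The `_merge` closure inside `_generate_syn_dictionaries` is a plain fold of several `{canon: [variants]}` dictionaries into one accumulator before the merged result is handed to the synonym generator. A compact sketch with invented inputs (the helper name is illustrative):
```python
# fold several {canon: [variants]} dictionaries into a single accumulator
def merge(accumulator: dict, incoming: dict) -> dict:
    for canon, variants in incoming.items():
        accumulator.setdefault(canon, []).extend(variants)
    return accumulator

d_synonyms = {}
merge(d_synonyms, {"redhat": ["red hat"]})                   # e.g. from see-also generation
merge(d_synonyms, {"redhat": ["rhel"], "sap": ["sap se"]})   # e.g. from pattern generation
print(d_synonyms)  # {'redhat': ['red hat', 'rhel'], 'sap': ['sap se']}
```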
#### File: mda/svc/generate_parents.py
```python
import os
import pprint
from base import BaseObject
from base import FileIO
class GenerateParents(BaseObject):
""" Generate the parent (type) for each term
this is the SINGLE SOURCE OF TRUTH for parents in the entire system
this generated file can also be used to find ancestry via transitive lookups
"""
def __init__(self,
ontology_name: str,
is_debug: bool = False):
"""
Created:
26-Mar-2019
<EMAIL>
Updated:
13-Dec-2019
<EMAIL>
* add ontology-name as a param
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1583
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._ontology_name = ontology_name
def process(self):
from taskmda.mda.dmo import EntityParentGenerator
from taskmda.mda.dto import KbNames
from taskmda.mda.dto import KbPaths
from taskmda.mda.dmo import GenericTemplateAccess
labels = EntityParentGenerator(ontology_name=self._ontology_name).process()
the_json_result = pprint.pformat(labels, indent=4)
the_json_result = "{0} = {{\n {1}".format(
KbNames.parents(), the_json_result[1:])
the_template_result = GenericTemplateAccess.process()
the_template_result = the_template_result.replace(
"CONTENT", the_json_result)
relative_path = KbPaths.parents(ontology_name=self._ontology_name)
absolute_path = os.path.join(os.environ["CODE_BASE"], relative_path)
FileIO.text_to_file(absolute_path, the_template_result)
```
#### File: mda/svc/generate_relationships.py
```python
import os
import pprint
from base import BaseObject
from base import FileIO
from datagraph import OwlGraphConnector
from taskmda.mda.dmo import GenericTemplateAccess
from taskmda.mda.dmo import OntologyDictGenerator
from taskmda.mda.dto import KbNames
from taskmda.mda.dto import KbPaths
class GenerateRelationships(BaseObject):
def __init__(self,
ontology_name: str,
is_debug: bool = False):
"""
Created:
21-Feb-2019
<EMAIL>
* migrated to text
Updated:
28-Mar-2019
<EMAIL>
* removed dead code
* updated logging statements
Updated:
13-Dec-2019
<EMAIL>
* add ontology-name as a param
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1583
* also renamed from 'generate-ontology-dict'
Updated:
14-Dec-2019
<EMAIL>
* major refactoring in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1606
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._ontology_name = ontology_name
self._graph = OwlGraphConnector(is_debug=self._is_debug,
ontology_name=self._ontology_name).process()
@classmethod
def _write_dictionary(cls,
results: dict,
dictionary_name: str,
dictionary_path: str):
the_json_result = pprint.pformat(results, indent=4)
the_json_result = "{0} = {{\n {1}".format(
dictionary_name, the_json_result[1:])
the_template_result = GenericTemplateAccess.process()
the_template_result = the_template_result.replace(
"CONTENT", the_json_result)
path = os.path.join(os.environ["CODE_BASE"],
dictionary_path)
FileIO.text_to_file(path, the_template_result)
def _ontology_dict_generator(self,
query_name: str) -> dict:
return OntologyDictGenerator(some_graph=self._graph,
is_debug=self._is_debug,
ontology_name=self._ontology_name,
some_query_name=query_name).process()
def _generate_implies_dictionary(self):
results = self._ontology_dict_generator("find_implies_query.txt")
self._write_dictionary(results,
KbNames.rel_implies(),
KbPaths.rel_implies(self._ontology_name))
def _generate_ownership_dictionary(self):
results = self._ontology_dict_generator("find_owners_query.txt")
self._write_dictionary(results,
KbNames.rel_owns(),
KbPaths.rel_owns(self._ontology_name))
def _generate_requires_dictionary(self):
results = self._ontology_dict_generator("find_requires_query.txt")
self._write_dictionary(results,
KbNames.rel_requires(),
KbPaths.rel_requires(self._ontology_name))
def _generate_produces_dictionary(self):
results = self._ontology_dict_generator("find_produces_query.txt")
self._write_dictionary(results,
KbNames.rel_produces(),
KbPaths.rel_produces(self._ontology_name))
def _generate_runson_dictionary(self):
results = self._ontology_dict_generator("find_runson_query.txt")
self._write_dictionary(results,
KbNames.rel_runson(),
KbPaths.rel_runson(self._ontology_name))
def _generate_version_dictionary(self):
results = self._ontology_dict_generator("find_version_query.txt")
self._write_dictionary(results,
KbNames.rel_versions(),
KbPaths.rel_versions(self._ontology_name))
def _generate_infinitive_dictionary(self):
results = self._ontology_dict_generator("find_infinitive_query.txt")
self._write_dictionary(results,
KbNames.rel_infinitive(),
KbPaths.rel_infinitive(self._ontology_name))
def _generate_similarity_dictionary(self):
results = self._ontology_dict_generator("find_similarity_query.txt")
self._write_dictionary(results,
KbNames.rel_similarity(),
KbPaths.rel_similarity(self._ontology_name))
def _generate_partof_dictionary(self):
results = self._ontology_dict_generator("find_parts_query.txt")
self._write_dictionary(results,
KbNames.rel_parts(),
KbPaths.rel_parts(self._ontology_name))
def _generate_defined_by_dictionary(self):
results = self._ontology_dict_generator("find_definedBy_query.txt")
self.logger.debug(f"Generated definedBy results (total={len(results)})")
self._write_dictionary(results,
KbNames.rel_defines(),
KbPaths.rel_defines(self._ontology_name))
def process(self):
self._generate_defined_by_dictionary()
self.logger.debug("Generated Defines Dictionary")
self._generate_implies_dictionary()
self.logger.debug("Generated Implications Dictionary")
self._generate_infinitive_dictionary()
self.logger.debug("Generated Infinitive Dictionary")
self._generate_similarity_dictionary()
self.logger.debug("Generated Similarity Dictionary")
self._generate_ownership_dictionary()
self.logger.debug("Generated Ownership Dictionary")
self._generate_partof_dictionary()
self.logger.debug("Generated Partonomy Dictionary")
self._generate_requires_dictionary()
self.logger.debug("Generated Requires Dictionary")
self._generate_produces_dictionary()
self.logger.debug("Generated Produces Dictionary")
self._generate_runson_dictionary()
self.logger.debug("Generated Runs-On Dictionary")
self._generate_version_dictionary()
self.logger.debug("Generated Versions Dictionary")
```
#### File: python/tests/test_find_dimensions.py
```python
import unittest
from datadict import FindDimensions
IS_DEBUG = True
class TestFindDimensions(unittest.TestCase):
def _execute_by_name(self,
tag_name: str,
expected_result: str,
xdm_schema: str,
ontology_name: str):
dim_finder = FindDimensions(schema=xdm_schema,
ontology_name=ontology_name,
is_debug=IS_DEBUG)
results = dim_finder.find(input_text=tag_name)
self.assertIsNotNone(results)
self.assertTrue(len(results))
actual_result = results[0]
self.assertEqual(actual_result, expected_result)
def _execute_biotech(self,
tag_name: str,
expected_result: str):
self._execute_by_name(tag_name=tag_name,
expected_result=expected_result,
xdm_schema='biotech',
ontology_name='biotech')
def test_biotech_01(self):
self._execute_biotech('anatomy', 'anatomy')
self._execute_biotech('cell', 'anatomy')
self._execute_biotech('bacteria', 'anatomy')
self._execute_biotech('blood cell', 'anatomy')
self._execute_biotech('white blood cell', 'anatomy')
self._execute_biotech('Lymphocyte', 'anatomy')
self._execute_biotech('t cell', 'anatomy')
# the actual ontology label
self._execute_biotech('memory t cell', 'anatomy')
# rdfs:seeAlso relationship to 'memory t cell'
self._execute_biotech('memory cd8 t cell', 'anatomy')
# I am surprised this works, but OK ...
self._execute_biotech('memory cd8 t_cell', 'anatomy')
def test_biotech_02(self):
self._execute_biotech('Interleukin 15', 'molecule')
def test_process(self):
self.test_biotech_01()
self.test_biotech_02()
if __name__ == '__main__':
unittest.main()
```
#### File: certifications/bp/certification_regression_api.py
```python
from base import BaseObject
from base import FileIO
from testsuite.certifications.svc import RunRegressionSuite
class CertificationRegressionAPI(BaseObject):
""" Regression API for Certifications """
def __init__(self,
config: list,
regression_name: str,
segment_by_vendor: bool = True,
is_debug: bool = False,
log_results: bool = False):
"""
Created:
13-Aug-2019
<EMAIL>
* refactored out of regression-orchestrator
:param regression_name:
a particular regression suite to run
:param segment_by_vendor:
:param is_debug:
:param log_results:
"""
BaseObject.__init__(self, __name__)
self._config = config
self._is_debug = is_debug
self._log_results = log_results
self._regression_name = regression_name
self._segment_by_vendor = segment_by_vendor
def _run_regression_suite(self,
regression: dict,
vendor_name: str = None) -> dict:
return RunRegressionSuite(
regression=regression,
vendor_name=vendor_name,
is_debug=self._is_debug,
log_results=self._log_results,
segment_by_vendor=self._segment_by_vendor).svcresult()
def all(self) -> dict:
"""
:return:
the results from a single regression suite
"""
for regression in self._config:
if regression['suite'].lower() == self._regression_name.lower():
return self._run_regression_suite(regression)
raise ValueError(f"Regression Suite Not Found "
f"(name={self._regression_name})")
def by_vendor(self,
vendor_name: str) -> dict:
"""
:param vendor_name:
a particular vendor within the regression suite to focus on
:return:
the results from a single regression suite
"""
for regression in self._config:
if regression['suite'].lower() == self._regression_name.lower():
return self._run_regression_suite(regression=regression,
vendor_name=vendor_name)
raise ValueError(f"Regression Suite Not Found "
f"(name={self._regression_name}, "
f"vendor={vendor_name})")
```
#### File: certifications/dmo/regression_analysis_summary.py
```python
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class RegressionAnalysisSummary(BaseObject):
""" An analysis of the regression results will produce two DataFrames:
1. Gold Regression Analysis
2. Standard Regression Analysis
The Summarized DataFrame may be segmented by Vendor like this:
+----+----------+--------------------+------------------+---------------+---------+----------+
| | Failed | RegressionSuite | RegressionType | SuccessRate | Total | Vendor |
|----+----------+--------------------+------------------+---------------+---------+----------|
| 0 | 3 | self_certification | Gold | 50 | 6 | SAP |
| 1 | 1 | self_certification | Gold | 66.7 | 3 | Redhat |
| 2 | 12 | self_certification | Standard | 55.6 | 27 | SAP |
| 3 | 6 | self_certification | Standard | 45.5 | 11 | Redhat |
+----+----------+--------------------+------------------+---------------+---------+----------+
Or just fully summarized across the board:
+----+----------+--------------------+------------------+---------------+---------+
| | Failed | RegressionSuite | RegressionType | SuccessRate | Total |
|----+----------+--------------------+------------------+---------------+---------|
| 0 | 4 | self_certification | Gold | 55.6 | 9 |
| 1 | 18 | self_certification | Standard | 52.6 | 38 |
+----+----------+--------------------+------------------+---------------+---------+
In either event, we know at a glance how the Gold Regression fared and how the Standard Regression fared
The vendor-segmented view helps show where immediate action should be focused
e.g., in the example above, the Gold Standard for SAP is doing poorly
"""
__df_summary = None
def __init__(self,
regression_suite_name: str,
df_gold_analysis: DataFrame,
df_standard_analysis: DataFrame,
segment_by_vendor: bool,
is_debug: bool = False):
"""
Created:
12-Aug-2019
<EMAIL>
* refactored out of 'run-regression-suite' in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/717
:param regression_suite_name:
the name of the regression suite this is for (e.g., 'synonyms' or 'self-certification')
:param segment_by_vendor:
True segment (summarize) the DataFrames by Vendor
False summarize across the board
:param df_gold_analysis:
the Gold Analysis regression results
:param df_standard_analysis:
the Standard Analysis regression results
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._process(regression_suite_name=regression_suite_name,
segment_by_vendor=segment_by_vendor,
df_gold_analysis=df_gold_analysis,
df_standard_analysis=df_standard_analysis)
def summary(self) -> DataFrame:
return self.__df_summary
@staticmethod
def _accuracy(total_records: float,
total_failed_records: float) -> float:
if total_failed_records == 0:
return float(100.0)
if total_failed_records == total_records:
return float(0.0)
f_total = float(total_records)
f_fails = float(total_failed_records)
x = float((f_total - f_fails) / f_total) * 100.0
return round(x, ndigits=1)
def _summarize_by_vendor(self,
regression_type: str,
regression_suite_name: str,
df_analysis: DataFrame) -> list:
"""
Purpose:
Perform Summarization (Segmentation) by Vendor
:param regression_type:
the type of regression analysis being summarized (e.g., Gold or Standard)
:param regression_suite_name:
the name of the regression suite (e.g., self_certification)
:param df_analysis:
the analysis dataframe (e.g., Gold Regression Analysis)
:return:
a list of results
"""
results = []
for vendor in df_analysis['Vendor'].unique():
print ("VENDOR ---> ",vendor)
df_vendor = df_analysis[df_analysis['Vendor'] == vendor]
total = 0
failed = 0
for _, row in df_vendor.iterrows():
total += row["Total"]
failed += row["Failed"]
if total == 0:
continue
success_rate = self._accuracy(total_records=total,
total_failed_records=failed)
results.append({
"Vendor": vendor,
"Total": total,
"Failed": failed,
"SuccessRate": success_rate,
"RegressionType": regression_type,
"RegressionSuite": regression_suite_name})
return results
@staticmethod
def _summarize(regression_type: str,
regression_suite_name: str,
df_analysis: DataFrame) -> list:
"""
Purpose:
Perform Summarization
:param regression_type:
the type of regression analysis being summarized (e.g., Gold or Standard)
:param regression_suite_name:
the name of the regression suite (e.g., self_certification)
:param df_analysis:
the analysis dataframe (e.g., Gold Regression Analysis)
:return:
a list of results
"""
results = []
for _, row in df_analysis[df_analysis['Result'] == 'Summary'].iterrows():
results.append({
"Total": row["Total"],
"Failed": row["Failed"],
"SuccessRate": row["SuccessRate"],
"RegressionType": regression_type,
"RegressionSuite": regression_suite_name})
return results
def _process(self,
regression_suite_name: str,
df_gold_analysis: DataFrame,
df_standard_analysis: DataFrame,
segment_by_vendor: bool) -> None:
"""
Purpose:
Summarize the Regression Analysis DataFrames
:param regression_suite_name:
the name of the regression suite (e.g., 'self_certifications')
:param df_gold_analysis:
the Gold Regression Analysis results
:param df_standard_analysis:
the Standard Regression Analysis results
:param segment_by_vendor:
True segment (summarize) the DataFrames by Vendor
False summarize across the board
:return:
a summarized DataFrame
"""
results = []
if segment_by_vendor:
results += self._summarize_by_vendor(regression_type="Gold",
regression_suite_name=regression_suite_name,
df_analysis=df_gold_analysis)
results += self._summarize_by_vendor(regression_type="Standard",
regression_suite_name=regression_suite_name,
df_analysis=df_standard_analysis)
else:
results += self._summarize(regression_type="Gold",
regression_suite_name=regression_suite_name,
df_analysis=df_gold_analysis)
results += self._summarize(regression_type="Standard",
regression_suite_name=regression_suite_name,
df_analysis=df_standard_analysis)
self.__df_summary = pd.DataFrame(results)
```
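The `SuccessRate` column in both summaries comes from the `_accuracy` helper: `round((total - failed) / total * 100, 1)`, with shortcuts for the all-pass and all-fail cases. A distilled copy, checked against the numbers in the segmented table of the class docstring:
```python
def accuracy(total_records: float, total_failed_records: float) -> float:
    if total_failed_records == 0:
        return 100.0
    if total_failed_records == total_records:
        return 0.0
    return round((total_records - total_failed_records) / total_records * 100.0, 1)

assert accuracy(6, 3) == 50.0     # Gold / SAP row
assert accuracy(3, 1) == 66.7     # Gold / Redhat row
assert accuracy(27, 12) == 55.6   # Standard / SAP row
```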
#### File: certifications/dmo/regression_result_analysis.py
```python
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
class RegressionResultAnalysis(BaseObject):
""" Analyzes Regression Result DataFrames
and provides Descriptive Statistics on results
The incoming DataFrame 'df-result' looks something like this:
+----+---------------------------------------------+-----------------------+---------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------+--------+
| | ActualResults | ExpectedResult | InputText | NormalizedText | Pass |
|----+---------------------------------------------+-----------------------+---------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------+--------|
| 0 | Certification, SAP Business One | ABAP for SAP HANA 2.0 | sap hana abap 2.0 certification from sap | sap_business_one abap 2 0 certification from sap_center | False |
| 1 | Certification, SAP Business One, Specialist | ABAP for SAP HANA 2.0 | SAP Certified Development Specialist - ABAP for SAP HANA 2.0 | sap_center certification development specialist abap for sap_business_one 2 0 | False |
| 2 | Certification, SAP Business One, Specialist | ABAP for SAP HANA 2.0 | The "SAP Certified Development Specialist - ABAP for SAP HANA 2.0" certification exam | the sap_business_one certification development specialist abap for sap_business_one 2 0 professional_certification | False |
| 3 | Certification, SAP Business One | ABAP for SAP HANA 2.0 | ABAP for SAP HANA cert | abap for sap_business_one certification | False |
| 4 | SAP Business One | ABAP for SAP HANA 2.0 | ABAP for SAP HANA 2.0. Exam Code | abap for sap_business_one 2 0 exam code | False |
| 5 | Certification, Online, SAP Business One | ABAP for SAP HANA 2.0 | SAP ABAP for HANA Certification Online Practice | sap_business_one abap for hana certification online practice | False |
| 6 | Certification, SAP Business One, Specialist | ABAP for SAP HANA 2.0 | Certified Development Specialist - ABAP for SAP HANA 2.0 | certification development specialist abap for sap_business_one 2 0 | False |
+----+---------------------------------------------+-----------------------+---------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------+--------+
This DataFrame contains the results of a regression suite run against a CSV.
This component will analyze a given DataFrame and generate descriptive statistics
into an analysis DataFrame
+----+----------+-----------------------+---------------+---------+
| | Failed | Result | SuccessRate | Total |
|----+----------+-----------------------+---------------+---------|
| 0 | 0 | ABAP for SAP HANA 2.0 | 100 | 6 |
+----+----------+-----------------------+---------------+---------+
"""
def __init__(self,
df_result: DataFrame,
is_debug: bool = False):
"""
Created:
9-Aug-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/683
:param df_result:
the results of a regression analysis; see the sample DataFrame in the class docstring for an example
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._df_results = self._process(df_result)
def results(self) -> DataFrame:
return self._df_results
@staticmethod
def _accuracy(total_records: float,
total_failed_records: float) -> float:
if total_failed_records == 0:
return float(100.0)
if total_failed_records == total_records:
return float(0.0)
f_total = float(total_records)
f_fails = float(total_failed_records)
x = float((f_total - f_fails) / f_total) * 100.0
return round(x, ndigits=1)
def _add_summary(self,
total_records: float,
total_failed_records: float) -> dict:
summarized_accuracy = self._accuracy(total_records=total_records,
total_failed_records=total_failed_records)
return {
"Result": "Summary",
"Vendor": None,
"Total": total_records,
"Failed": total_failed_records,
"SuccessRate": summarized_accuracy}
def _process(self,
df_result: DataFrame) -> DataFrame:
results = []
all_total = 0
all_failure = 0
for key in df_result['ExpectedResult'].unique():
df2 = df_result[df_result['ExpectedResult'] == key]
vendor = list(df2['Vendor'].unique())[0]
total_records = len(df2)
all_total += total_records
total_fails = len(df2[df2['Pass'] == False])
all_failure += total_fails
accuracy = self._accuracy(total_records=total_records,
total_failed_records=total_fails)
results.append({
"Result": key,
"Vendor": vendor,
"Total": total_records,
"Failed": total_fails,
"SuccessRate": accuracy})
results.append(self._add_summary(total_records=all_total,
total_failed_records=all_failure))
df_analysis = pd.DataFrame(results)
if self._is_debug:
self.logger.debug('\n'.join([
"Analysis Results",
tabulate(df_analysis,
headers='keys',
tablefmt='psql')]))
return df_analysis
```
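For comparison only: the per-`ExpectedResult` loop in `_process` above is effectively a group-by, so the same Total/Failed/SuccessRate columns could be produced with a pandas aggregation. This is a hedged alternative sketch on a made-up frame, not the module's implementation:
```python
import pandas as pd

df = pd.DataFrame({
    "ExpectedResult": ["ABAP for SAP HANA 2.0"] * 3 + ["SAP HANA 1.0"] * 2,
    "Vendor": ["SAP"] * 5,
    "Pass": [True, False, True, True, True],
})

grouped = df.groupby("ExpectedResult").agg(
    Vendor=("Vendor", "first"),
    Total=("Pass", "size"),
    Failed=("Pass", lambda s: int((~s).sum())),
).reset_index()
grouped["SuccessRate"] = ((grouped["Total"] - grouped["Failed"]) / grouped["Total"] * 100).round(1)
print(grouped)
```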
#### File: certifications/svc/analyze_regression_suite.py
```python
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class AnalyzeRegressionSuite(BaseObject):
__df_gold_analysis = None
__df_standard_analysis = None
def __init__(self,
df_results: DataFrame,
is_debug: bool = False):
"""
Created:
12-Aug-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/680
:param df_results:
the regression test results
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._process(df_results)
def results(self) -> (DataFrame, DataFrame):
return self.__df_gold_analysis, self.__df_standard_analysis
def _process(self,
df_results: DataFrame) -> None:
"""
Purpose:
Split the Regression Results into
Gold vs. Standard
and perform a summarized analysis on each
:param df_results:
the regression test results
"""
from testsuite.certifications.dmo import RegressionTestSplitter
from testsuite.certifications.dmo import RegressionResultAnalysis
df_gold, df_standard = RegressionTestSplitter(df_results).results()
def analyze_gold_regression():
if df_gold.empty:
self.logger.warning("Gold Regression is empty")
return pd.DataFrame([{
"Result": None,
"Vendor": None,
"Total": 0,
"Failed": 0,
"SuccessRate": 0}])
return RegressionResultAnalysis(df_gold,
is_debug=self._is_debug).results()
def analyze_standard_regression():
if df_standard.empty:
self.logger.warning("Standard Regression is empty")
return pd.DataFrame([{
"Result": None,
"Vendor": None,
"Total": 0,
"Failed": 0,
"SuccessRate": 0}])
return RegressionResultAnalysis(df_standard,
is_debug=self._is_debug).results()
self.__df_gold_analysis = analyze_gold_regression()
self.__df_standard_analysis = analyze_standard_regression()
```
#### File: certifications/svc/run_regression_suite.py
```python
from tabulate import tabulate
from base import BaseObject
class RunRegressionSuite(BaseObject):
def __init__(self,
regression: dict,
segment_by_vendor: bool,
vendor_name: str = None,
log_results: bool = False,
is_debug: bool = False):
"""
Created:
9-Aug-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/680
Updated:
13-Aug-2019
<EMAIL>
* add 'vendor-name' as a parameter
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/729
:param regression:
:param segment_by_vendor:
:param vendor_name:
the vendor to perform the regression on
e.g., vendor='SAP' will only run the regression on SAP Certifications
None run regression on all certifications
:param log_results:
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._log_results = log_results
self._segment_by_vendor = segment_by_vendor
self._svcresult = self._process(regression=regression,
vendor_name=vendor_name)
def svcresult(self) -> dict:
return self._svcresult
def _process(self,
regression: dict,
vendor_name: str = None) -> dict:
"""
Purpose:
Execute a Regression Suite
:param regression:
the configuration for a regression suite is a dictionary with three entries:
{ 'name': 'self-reported certifications',
'file': 'regressions/certifications.csv',
'suite': 'self_certification' }
:param vendor_name:
the vendor to perform the regression on
e.g., vendor='SAP' will only run the regression on SAP Certifications
None run regression on all certifications
:return:
a dictionary of output
{ 'results': <the results DataFrame>,
'analysis': <the analysis DataFrame> }
the 'results' DataFrame contains something like this:
+----+---------------------------------------------+-----------------------+---------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------+--------+
| | ActualResults | ExpectedResult | InputText | NormalizedText | Pass |
|----+---------------------------------------------+-----------------------+---------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------+--------|
| 0 | Certification, SAP Business One | ABAP for SAP HANA 2.0 | sap hana abap 2.0 certification from sap | sap_business_one abap 2 0 certification from sap_center | False |
| 1 | Certification, SAP Business One, Specialist | ABAP for SAP HANA 2.0 | SAP Certified Development Specialist - ABAP for SAP HANA 2.0 | sap_center certification development specialist abap for sap_business_one 2 0 | False |
| 2 | Certification, SAP Business One, Specialist | ABAP for SAP HANA 2.0 | The "SAP Certified Development Specialist - ABAP for SAP HANA 2.0" certification exam | the sap_business_one certification development specialist abap for sap_business_one 2 0 professional_certification | False |
| 3 | Certification, SAP Business One | ABAP for SAP HANA 2.0 | ABAP for SAP HANA cert | abap for sap_business_one certification | False |
| 4 | SAP Business One | ABAP for SAP HANA 2.0 | ABAP for SAP HANA 2.0. Exam Code | abap for sap_business_one 2 0 exam code | False |
| 5 | Certification, Online, SAP Business One | ABAP for SAP HANA 2.0 | SAP ABAP for HANA Certification Online Practice | sap_business_one abap for hana certification online practice | False |
| 6 | Certification, SAP Business One, Specialist | ABAP for SAP HANA 2.0 | Certified Development Specialist - ABAP for SAP HANA 2.0 | certification development specialist abap for sap_business_one 2 0 | False |
+----+---------------------------------------------+-----------------------+---------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------+--------+
the 'analysis' DataFrame contains something like this:
+----+----------+-----------------------+---------------+---------+
| | Failed | Result | SuccessRate | Total |
|----+----------+-----------------------+---------------+---------|
| 0 | 0 | ABAP for SAP HANA 2.0 | 100 | 6 |
+----+----------+-----------------------+---------------+---------+
"""
from testsuite.certifications.dmo import RegressionInputTransformer
from testsuite.certifications.dmo import RegressionAnalysisSummary
from testsuite.certifications.svc import AnalyzeRegressionSuite
from testsuite.certifications.svc import RegressionSuiteController
controller = RegressionSuiteController()
df_input = RegressionInputTransformer(
regression['file'],
vendor_name=vendor_name,
is_debug=self._is_debug).dataframe()
fn = getattr(controller, regression['suite'])
df_results = fn(df_input)
if self._log_results:
self.logger.debug('\n'.join([
f"Regression Suite Results (name={regression['suite']})",
tabulate(df_results,
headers='keys',
tablefmt='psql')]))
df_gold_analysis, df_standard_analysis = AnalyzeRegressionSuite(
df_results,
is_debug=self._is_debug).results()
df_summary = RegressionAnalysisSummary(
regression_suite_name=regression['suite'],
df_gold_analysis=df_gold_analysis,
segment_by_vendor=self._segment_by_vendor,
df_standard_analysis=df_standard_analysis,
is_debug=self._is_debug).summary()
return {
"results": df_results,
"summary": df_summary,
"gold": df_gold_analysis,
"standard": df_standard_analysis}
```
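One detail worth calling out in `_process`: the suite to run is resolved dynamically with `getattr(controller, regression['suite'])`, so the `suite` string in the configuration must match a method name on `RegressionSuiteController`. A stripped-down sketch of that dispatch pattern with a dummy controller (names invented for illustration):
```python
class DummyController:
    # stand-in for RegressionSuiteController; method names must match the 'suite' config values
    def self_certification(self, df_input):
        return f"ran self_certification on {len(df_input)} rows"

regression = {"name": "self-reported certifications",
              "file": "regressions/certifications.csv",
              "suite": "self_certification"}

controller = DummyController()
fn = getattr(controller, regression["suite"])  # raises AttributeError for an unknown suite name
print(fn(["row1", "row2"]))  # ran self_certification on 2 rows
```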
#### File: dimensions/bp/dimension_regression_api.py
```python
import os
from base import BaseObject
class DimensionRegressionAPI(BaseObject):
""" Regression API for Dimensionality """
def __init__(self,
config: list,
is_debug: bool = False,
log_results: bool = False):
"""
Created:
20-Aug-2019
<EMAIL>
:param config:
:param is_debug:
:param log_results:
"""
BaseObject.__init__(self, __name__)
self._config = config
self._is_debug = is_debug
self._log_results = log_results
self._input_file = self._find_input_file()
def _find_input_file(self) -> str:
"""
:return:
the input file for the regression suite
"""
for regression in self._config:
if regression['suite'] == 'dimensions':
file_path = regression['file']
if not os.path.exists(file_path):
raise FileNotFoundError(f"Dimemsions Input File Path Not Found: "
f"{file_path}")
return file_path
raise ValueError("Dimemsions Input File Path Not Defined")
def by_serial_number(self,
serial_number: str):
"""
Purpose:
Load the Regression Test into a DataFrame
:param serial_number:
Optionally filter the regression test by Serial Number
None load all regressions
:return:
"""
from testsuite.dimensions.svc import RunRegressionSuite
svc = RunRegressionSuite(input_file=self._input_file,
is_debug=self._is_debug)
return svc.by_serial_answer(serial_number)
``` |
{
"source": "jipp/mqttSensorApp",
"score": 2
} |
#### File: jipp/mqttSensorApp/moveBinary.py
```python
Import("env")
from shutil import copyfile
import os
def after_bin(source, target, env):
version = ""
print("source: " + str(source[0]))
print("target: " + str(target[0]))
cppdefines = env.ParseFlags(env['BUILD_FLAGS']).get("CPPDEFINES")
for arr in cppdefines:
if len(arr) == 2:
if arr[0] == "VERSION":
version = arr[1]
if version == "":
version = "mqttSensorApp"
path = "bin"
srcFile = str(target[0])
dstFile = os.path.join(path, version.strip('\\"') + ".bin")
if not os.path.exists(path):
os.makedirs(path)
print("src: " + srcFile)
print("dst: " + dstFile)
copyfile(srcFile, dstFile)
print("Current build targets", map(str, BUILD_TARGETS))
env.AddPostAction("$BUILD_DIR/firmware.bin", after_bin)
``` |
{
"source": "jippo015/Sub-Zero.bundle",
"score": 2
} |
#### File: Code/interface/advanced.py
```python
import datetime
import StringIO
import glob
import os
import traceback
import urlparse
from zipfile import ZipFile, ZIP_DEFLATED
from subzero.language import Language
from subzero.lib.io import FileIO
from subzero.constants import PREFIX, PLUGIN_IDENTIFIER
from menu_helpers import SubFolderObjectContainer, debounce, set_refresh_menu_state, ZipObject, ObjectContainer, route
from main import fatality
from support.helpers import timestamp, pad_title
from support.config import config
from support.lib import Plex
from support.storage import reset_storage, log_storage, get_subtitle_storage
from support.scheduler import scheduler
from support.items import set_mods_for_part, get_item_kind_from_rating_key
from support.i18n import _
@route(PREFIX + '/advanced')
def AdvancedMenu(randomize=None, header=None, message=None):
oc = SubFolderObjectContainer(
header=header or _("Internal stuff, pay attention!"),
message=message,
no_cache=True,
no_history=True,
replace_parent=False,
title2=_("Advanced"))
if config.lock_advanced_menu and not config.pin_correct:
oc.add(DirectoryObject(
key=Callback(
PinMenu,
randomize=timestamp(),
success_go_to=_("advanced")),
title=pad_title(_("Enter PIN")),
summary=_("The owner has restricted the access to this menu. Please enter the correct pin"),
))
return oc
oc.add(DirectoryObject(
key=Callback(TriggerRestart, randomize=timestamp()),
title=pad_title(_("Restart the plugin")),
))
oc.add(DirectoryObject(
key=Callback(GetLogsLink),
title=_("Get my logs (copy the appearing link and open it in your browser, please)"),
summary=_("Copy the appearing link and open it in your browser, please"),
))
oc.add(DirectoryObject(
key=Callback(TriggerBetterSubtitles, randomize=timestamp()),
title=pad_title(_("Trigger find better subtitles")),
))
oc.add(DirectoryObject(
key=Callback(SkipFindBetterSubtitles, randomize=timestamp()),
title=pad_title(_("Skip next find better subtitles (sets last run to now)")),
))
oc.add(DirectoryObject(
key=Callback(SkipRecentlyAddedMissing, randomize=timestamp()),
title=pad_title(_("Skip next find recently added with missing subtitles (sets last run to now)")),
))
oc.add(DirectoryObject(
key=Callback(TriggerStorageMaintenance, randomize=timestamp()),
title=pad_title(_("Trigger subtitle storage maintenance")),
))
oc.add(DirectoryObject(
key=Callback(TriggerStorageMigration, randomize=timestamp()),
title=pad_title(_("Trigger subtitle storage migration (expensive)")),
))
oc.add(DirectoryObject(
key=Callback(TriggerCacheMaintenance, randomize=timestamp()),
title=pad_title(_("Trigger cache maintenance (refiners, providers and packs/archives)")),
))
oc.add(DirectoryObject(
key=Callback(ApplyDefaultMods, randomize=timestamp()),
title=pad_title(_("Apply configured default subtitle mods to all (active) stored subtitles")),
))
oc.add(DirectoryObject(
key=Callback(ReApplyMods, randomize=timestamp()),
title=pad_title(_("Re-Apply mods of all stored subtitles")),
))
oc.add(DirectoryObject(
key=Callback(LogStorage, key="tasks", randomize=timestamp()),
title=pad_title(_("Log the plugin's scheduled tasks state storage")),
))
oc.add(DirectoryObject(
key=Callback(LogStorage, key="ignore", randomize=timestamp()),
title=pad_title(_("Log the plugin's internal ignorelist storage")),
))
oc.add(DirectoryObject(
key=Callback(LogStorage, key=None, randomize=timestamp()),
title=pad_title(_("Log the plugin's complete state storage")),
))
oc.add(DirectoryObject(
key=Callback(ResetStorage, key="tasks", randomize=timestamp()),
title=pad_title(_("Reset the plugin's scheduled tasks state storage")),
))
oc.add(DirectoryObject(
key=Callback(ResetStorage, key="ignore", randomize=timestamp()),
title=pad_title(_("Reset the plugin's internal ignorelist storage")),
))
oc.add(DirectoryObject(
key=Callback(ResetStorage, key="menu_history", randomize=timestamp()),
title=pad_title(_("Reset the plugin's menu history storage")),
))
oc.add(DirectoryObject(
key=Callback(InvalidateCache, randomize=timestamp()),
title=pad_title(_("Invalidate Sub-Zero metadata caches (subliminal)")),
))
oc.add(DirectoryObject(
key=Callback(ResetProviderThrottle, randomize=timestamp()),
title=pad_title(_("Reset provider throttle states")),
))
return oc
def DispatchRestart():
Thread.CreateTimer(1.0, Restart)
@route(PREFIX + '/advanced/restart/trigger')
@debounce
def TriggerRestart(randomize=None):
set_refresh_menu_state(_("Restarting the plugin"))
DispatchRestart()
return fatality(
header=_("Restart triggered, please wait about 5 seconds"),
force_title=" ",
only_refresh=True,
replace_parent=True,
no_history=True,
randomize=timestamp())
@route(PREFIX + '/advanced/restart/execute')
@debounce
def Restart(randomize=None):
Plex[":/plugins"].restart(PLUGIN_IDENTIFIER)
@route(PREFIX + '/storage/reset', sure=bool)
@debounce
def ResetStorage(key, randomize=None, sure=False):
if not sure:
oc = SubFolderObjectContainer(
no_history=True,
title1=_("Reset subtitle storage"),
title2=_("Are you sure?"))
oc.add(DirectoryObject(
key=Callback(
ResetStorage,
key=key,
sure=True,
randomize=timestamp()),
title=pad_title(_("Are you really sure?")),
))
return oc
reset_storage(key)
if key == "tasks":
# reinitialize the scheduler
scheduler.init_storage()
scheduler.setup_tasks()
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("Information Storage (%s) reset", key)
)
@route(PREFIX + '/storage/log')
def LogStorage(key, randomize=None):
log_storage(key)
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("Information Storage (%s) logged", key)
)
@route(PREFIX + '/triggerbetter')
@debounce
def TriggerBetterSubtitles(randomize=None):
scheduler.dispatch_task("FindBetterSubtitles")
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("FindBetterSubtitles triggered")
)
@route(PREFIX + '/skipbetter')
@debounce
def SkipFindBetterSubtitles(randomize=None):
task = scheduler.task("FindBetterSubtitles")
task.last_run = datetime.datetime.now()
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("FindBetterSubtitles skipped")
)
@route(PREFIX + '/skipram')
@debounce
def SkipRecentlyAddedMissing(randomize=None):
task = scheduler.task("SearchAllRecentlyAddedMissing")
task.last_run = datetime.datetime.now()
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("SearchAllRecentlyAddedMissing skipped")
)
@route(PREFIX + '/triggermaintenance')
@debounce
def TriggerStorageMaintenance(randomize=None):
scheduler.dispatch_task("SubtitleStorageMaintenance")
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("SubtitleStorageMaintenance triggered")
)
@route(PREFIX + '/triggerstoragemigration')
@debounce
def TriggerStorageMigration(randomize=None):
scheduler.dispatch_task("MigrateSubtitleStorage")
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("MigrateSubtitleStorage triggered")
)
@route(PREFIX + '/triggercachemaintenance')
@debounce
def TriggerCacheMaintenance(randomize=None):
scheduler.dispatch_task("CacheMaintenance")
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("TriggerCacheMaintenance triggered")
)
def apply_default_mods(reapply_current=False, scandir_generic=False):
storage = get_subtitle_storage()
subs_applied = 0
try:
for fn in storage.get_all_files(scandir_generic=scandir_generic):
data = storage.load(None, filename=fn)
if data:
video_id = data.video_id
item_type = get_item_kind_from_rating_key(video_id)
if not item_type:
continue
for part_id, part in data.parts.iteritems():
for lang, subs in part.iteritems():
current_sub = subs.get("current")
if not current_sub:
continue
sub = subs[current_sub]
if not sub.content:
continue
current_mods = sub.mods or []
if not reapply_current:
add_mods = list(set(config.default_mods).difference(set(current_mods)))
if not add_mods:
continue
else:
if not current_mods:
continue
add_mods = []
try:
set_mods_for_part(video_id, part_id, Language.fromietf(lang), item_type, add_mods, mode="add")
except:
Log.Error("Couldn't set mods for %s:%s: %s", video_id, part_id, traceback.format_exc())
continue
subs_applied += 1
except OSError:
return apply_default_mods(reapply_current=reapply_current, scandir_generic=True)
storage.destroy()
Log.Debug("Applied mods to %i items" % subs_applied)
@route(PREFIX + '/applydefaultmods')
@debounce
def ApplyDefaultMods(randomize=None):
Thread.CreateTimer(1.0, apply_default_mods)
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("This may take some time ...")
)
@route(PREFIX + '/reapplyallmods')
@debounce
def ReApplyMods(randomize=None):
Thread.CreateTimer(1.0, apply_default_mods, reapply_current=True)
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("This may take some time ...")
)
@route(PREFIX + '/get_logs_link')
def GetLogsLink():
if not config.plex_token:
oc = ObjectContainer(
title2=_("Download Logs"),
no_cache=True,
no_history=True,
header=_("Sorry, feature unavailable"),
message=_("Universal Plex token not available"))
return oc
# try getting the link base via the request in context, first, otherwise use the public ip
req_headers = Core.sandbox.context.request.headers
get_external_ip = True
link_base = ""
if "Origin" in req_headers:
link_base = req_headers["Origin"]
Log.Debug("Using origin-based link_base")
get_external_ip = False
elif "Referer" in req_headers:
parsed = urlparse.urlparse(req_headers["Referer"])
link_base = "%s://%s%s" % (parsed.scheme, parsed.hostname, (":%s" % parsed.port) if parsed.port else "")
Log.Debug("Using referer-based link_base")
get_external_ip = False
if get_external_ip or "plex.tv" in link_base:
ip = Core.networking.http_request("http://www.plexapp.com/ip.php", cacheTime=7200).content.strip()
link_base = "https://%s:32400" % ip
Log.Debug("Using ip-based fallback link_base")
logs_link = "%s%s?X-Plex-Token=%s" % (link_base, PREFIX + '/logs', config.plex_token)
oc = ObjectContainer(
title2=logs_link,
no_cache=True,
no_history=True,
header=_("Copy this link and open this in your browser, please"),
message=logs_link)
return oc
@route(PREFIX + '/logs')
def DownloadLogs():
buff = StringIO.StringIO()
zip_archive = ZipFile(buff, mode='w', compression=ZIP_DEFLATED)
logs = sorted(glob.glob(config.plugin_log_path + '*')) + [config.server_log_path]
for path in logs:
data = StringIO.StringIO()
data.write(FileIO.read(path))
zip_archive.writestr(os.path.basename(path), data.getvalue())
zip_archive.close()
return ZipObject(buff.getvalue())
@route(PREFIX + '/invalidatecache')
@debounce
def InvalidateCache(randomize=None):
from subliminal.cache import region
if config.new_style_cache:
region.backend.clear()
else:
region.invalidate()
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("Cache invalidated")
)
@route(PREFIX + '/pin')
def PinMenu(pin="", randomize=None, success_go_to="channel"):
oc = ObjectContainer(
title2=_("Enter PIN number ") + str(len(pin) + 1),
no_cache=True,
no_history=True,
skip_pin_lock=True)
if pin == config.pin:
Dict["pin_correct_time"] = datetime.datetime.now()
config.locked = False
if success_go_to == "channel":
return fatality(
force_title=_("PIN correct"),
header=_("PIN correct"),
no_history=True)
elif success_go_to == "advanced":
return AdvancedMenu(randomize=timestamp())
for i in range(10):
oc.add(DirectoryObject(
key=Callback(
PinMenu,
randomize=timestamp(),
pin=pin + str(i),
success_go_to=success_go_to),
title=pad_title(str(i)),
))
oc.add(DirectoryObject(
key=Callback(
PinMenu,
randomize=timestamp(),
success_go_to=success_go_to),
title=pad_title(_("Reset")),
))
return oc
@route(PREFIX + '/pin_lock')
def ClearPin(randomize=None):
Dict["pin_correct_time"] = None
config.locked = True
return fatality(force_title=_("Menu locked"), header=" ", no_history=True)
@route(PREFIX + '/reset_throttle')
def ResetProviderThrottle(randomize=None):
Dict["provider_throttle"] = {}
Dict.Save()
return AdvancedMenu(
randomize=timestamp(),
header=_("Success"),
message=_("Provider throttles reset")
)
```
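
The interesting part of `apply_default_mods` above is the set difference that decides which of the configured default mods still have to be applied to a subtitle. A minimal, self-contained sketch of that decision; the mod identifiers below are hypothetical, not necessarily real Sub-Zero mod names:

```python
# Standalone sketch of the add_mods decision in apply_default_mods.
default_mods = ["remove_HI", "OCR_fixes"]   # hypothetical configured defaults
current_mods = ["OCR_fixes"]                # mods already stored for the subtitle

# only add mods that aren't already applied to the subtitle
add_mods = list(set(default_mods).difference(set(current_mods)))
assert add_mods == ["remove_HI"]
```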
#### File: Code/support/data.py
```python
import traceback
def dispatch_migrate():
try:
migrate()
except:
Log.Error("Migration failed: %s" % traceback.format_exc())
del Dict["subs"]
Dict.Save()
def migrate():
"""
some Dict/Data migrations here, no need for a more in-depth migration path for now
:return:
"""
# migrate subtitle history from Dict to Data
if "history" in Dict and Dict["history"].get("history_items"):
Log.Debug("Running migration for history data")
from support.history import get_history
history = get_history()
for item in reversed(Dict["history"]["history_items"]):
history.add(item.item_title, item.rating_key, item.section_title, subtitle=item.subtitle, mode=item.mode,
time=item.time)
del Dict["history"]
history.destroy()
Dict.Save()
# migrate subtitle storage from Dict to Data
if "subs" in Dict:
from support.storage import get_subtitle_storage
from subzero.subtitle_storage import StoredSubtitle
from support.plex_media import get_item
subtitle_storage = get_subtitle_storage()
for video_id, parts in Dict["subs"].iteritems():
try:
item = get_item(video_id)
except:
continue
if not item:
continue
stored_subs = subtitle_storage.load_or_new(item)
stored_subs.version = 1
Log.Debug(u"Migrating %s" % video_id)
stored_any = False
for part_id, lang_dict in parts.iteritems():
part_id = str(part_id)
Log.Debug(u"Migrating %s, %s" % (video_id, part_id))
for lang, subs in lang_dict.iteritems():
lang = str(lang)
if "current" in subs:
current_key = subs["current"]
provider_name, subtitle_id = current_key
sub = subs.get(current_key)
if sub and sub.get("title") and sub.get("mode"): # ditch legacy data without sufficient info
stored_subs.title = sub["title"]
new_sub = StoredSubtitle(sub["score"], sub["storage"], sub["hash"], provider_name,
subtitle_id, date_added=sub["date_added"], mode=sub["mode"])
if part_id not in stored_subs.parts:
stored_subs.parts[part_id] = {}
if lang not in stored_subs.parts[part_id]:
stored_subs.parts[part_id][lang] = {}
Log.Debug(u"Migrating %s, %s, %s" % (video_id, part_id, current_key))
stored_subs.parts[part_id][lang][current_key] = new_sub
stored_subs.parts[part_id][lang]["current"] = current_key
stored_any = True
if stored_any:
subtitle_storage.save(stored_subs)
subtitle_storage.destroy()
del Dict["subs"]
Dict.Save()
```
#### File: Code/support/i18n.py
```python
import inspect
from support.config import config
core = getattr(Data, "_core")
# get original localization module in order to access its base classes later on
def get_localization_module():
cls = getattr(core.localization, "__class__")
return inspect.getmodule(cls)
plex_i18n_module = get_localization_module()
def old_style_placeholders_count(s):
# fixme: incomplete, use regex
return sum(s.count(c) for c in ["%s", "%d", "%r", "%f", "%i"])
def check_old_style_placeholders(k, args):
# replace escaped %'s?
k = k.__str__().replace("%%", "")
if "%(" in k:
Log.Error(u"%r defines named placeholders for formatting" % k)
return "NEEDS NAMED ARGUMENTS"
placeholders_found = old_style_placeholders_count(k)
if placeholders_found and not args:
Log.Error(u"%r requires a arguments for formatting" % k)
return "NEEDS FORMAT ARGUMENTS"
elif not placeholders_found and args:
Log.Error(u"%r doesn't define placeholders for formatting" % k)
return "HAS NO FORMAT ARGUMENTS"
elif placeholders_found and placeholders_found != len(args):
Log.Error(u"%r wrong amount of arguments supplied for formatting" % k)
return "WRONG FORMAT ARGUMENT COUNT"
class SmartLocalStringFormatter(plex_i18n_module.LocalStringFormatter):
"""
this allows the use of dictionaries for string formatting, also does some sanity checking on the keys and values
"""
def __init__(self, string1, string2, locale=None):
if isinstance(string2, tuple):
# dictionary passed
if len(string2) == 1 and hasattr(string2[0], "iteritems"):
string2 = string2[0]
if config.debug_i18n:
if "%(" not in string1.__str__().replace("%%", ""):
Log.Error(u"%r: dictionary for non-named format string supplied" % string1.__str__())
string1 = "%s"
string2 = "NO NAMED ARGUMENTS"
# arguments
elif len(string2) >= 1 and config.debug_i18n:
msg = check_old_style_placeholders(string1, string2)
if msg:
string1 = "%s"
string2 = msg
setattr(self, "_string1", string1)
setattr(self, "_string2", string2)
setattr(self, "_locale", locale)
def local_string_with_optional_format(key, *args, **kwargs):
if kwargs:
args = (kwargs,)
else:
args = tuple(args)
if args:
# fixme: may not be the best idea as this evaluates the string early
try:
return unicode(SmartLocalStringFormatter(plex_i18n_module.LocalString(core, key, Locale.CurrentLocale), args))
except (TypeError, ValueError):
Log.Exception("Broken translation!")
Log.Debug("EN string: %s", plex_i18n_module.LocalString(core, key, "en"))
Log.Debug("%s string: %r", Locale.CurrentLocale,
unicode(plex_i18n_module.LocalString(core, key, Locale.CurrentLocale)))
return unicode(SmartLocalStringFormatter(plex_i18n_module.LocalString(core, key, "en"), args))
# check string instances for arguments
if config.debug_i18n:
msg = check_old_style_placeholders(key, args)
if msg:
return msg
try:
return unicode(plex_i18n_module.LocalString(core, key, Locale.CurrentLocale))
except TypeError:
Log.Exception("Broken translation!")
return unicode(plex_i18n_module.LocalString(core, key, "en"))
_ = local_string_with_optional_format
def is_localized_string(s):
return hasattr(s, "localize")
```
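
The placeholder sanity checks above only run when `config.debug_i18n` is set; at their core they simply count old-style `%` placeholders in the translation key. A small, self-contained sketch of that check, independent of the Plex sandbox:

```python
# Minimal re-implementation of the placeholder count used by
# check_old_style_placeholders; escaped percent signs ("%%") are ignored.
def count_old_style_placeholders(s):
    s = s.replace("%%", "")
    return sum(s.count(c) for c in ("%s", "%d", "%r", "%f", "%i"))

assert count_old_style_placeholders("Applied mods to %i items") == 1
assert count_old_style_placeholders("100%% done") == 0
assert count_old_style_placeholders("%s of %s subtitles") == 2
```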
#### File: Code/support/ignore.py
```python
from subzero.lib.dict import DictProxy
from config import config
class ExcludeDict(DictProxy):
store = "ignore"
# single item keys returned by helpers.items.getItems mapped to their parents
translate_keys = {
"section": "sections",
"show": "series",
"movie": "videos",
"episode": "videos",
"season": "seasons",
}
# getItems types mapped to their verbose names
keys_verbose = {
"sections": "Section",
"series": "Series",
"videos": "Item",
"seasons": "Season",
}
key_order = ("sections", "series", "videos", "seasons")
def __len__(self):
try:
return sum(len(self.Dict[self.store][key]) for key in self.key_order)
except KeyError:
# old version
self.Dict[self.store] = self.setup_defaults()
return 0
def translate_key(self, name):
return self.translate_keys.get(name)
def verbose(self, name):
return self.keys_verbose.get(self.translate_key(name) or name)
def get_title_key(self, kind, key):
return "%s_%s" % (kind, key)
def add_title(self, kind, key, title):
self["titles"][self.get_title_key(kind, key)] = title
def remove_title(self, kind, key):
title_key = self.get_title_key(kind, key)
if title_key in self.titles:
del self.titles[title_key]
def get_title(self, kind, key):
title_key = self.get_title_key(kind, key)
if title_key in self.titles:
return self.titles[title_key]
def save(self):
Dict.Save()
def setup_defaults(self):
return {"sections": [], "series": [], "videos": [], "titles": {}, "seasons": []}
class IncludeDict(ExcludeDict):
store = "include"
exclude_list = ExcludeDict(Dict)
include_list = IncludeDict(Dict)
def get_decision_list():
return include_list if config.include else exclude_list
```
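
The `translate_keys`/`keys_verbose` mappings above turn the single-item kinds returned by `helpers.items.getItems` into their storage keys and display names. A plain-dict illustration of that lookup chain, runnable without the plugin:

```python
# Plain-dict stand-ins for ExcludeDict.translate_keys / keys_verbose, showing how
# a kind like "show" ends up stored under "series" and displayed as "Series".
translate_keys = {"section": "sections", "show": "series", "movie": "videos",
                  "episode": "videos", "season": "seasons"}
keys_verbose = {"sections": "Section", "series": "Series",
                "videos": "Item", "seasons": "Season"}

def verbose(name):
    # mirrors ExcludeDict.verbose(): translate first, fall back to the name itself
    return keys_verbose.get(translate_keys.get(name, name))

assert verbose("show") == "Series"
assert verbose("videos") == "Item"
```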
#### File: Code/support/scheduler.py
```python
import datetime
import logging
import traceback
from config import config
def parse_frequency(s):
if s == "never" or s is None:
return None, None
kind, num, unit = s.split()
return int(num), unit
class DefaultScheduler(object):
queue_thread = None
scheduler_thread = None
running = False
registry = None
def __init__(self):
self.queue_thread = None
self.scheduler_thread = None
self.running = False
self.registry = []
self.tasks = {}
self.init_storage()
def init_storage(self):
if "tasks" not in Dict:
Dict["tasks"] = {"queue": []}
Dict.Save()
if "queue" not in Dict["tasks"]:
Dict["tasks"]["queue"] = []
def get_task_data(self, name):
if name not in Dict["tasks"]:
raise NotImplementedError("Task missing! %s" % name)
if "data" in Dict["tasks"][name]:
return Dict["tasks"][name]["data"]
def clear_task_data(self, name=None):
if name is None:
# full clean
Log.Debug("Clearing previous task data")
if Dict["tasks"]:
for task_name in Dict["tasks"].keys():
if task_name == "queue":
Dict["tasks"][task_name] = []
continue
Dict["tasks"][task_name]["data"] = {}
Dict["tasks"][task_name]["running"] = False
Dict.Save()
return
if name not in Dict["tasks"]:
raise NotImplementedError("Task missing! %s" % name)
Dict["tasks"][name]["data"] = {}
Dict["tasks"][name]["running"] = False
Dict.Save()
Log.Debug("Task data cleared: %s", name)
def register(self, task):
self.registry.append(task)
def setup_tasks(self):
# discover tasks;
self.tasks = {}
for cls in self.registry:
task = cls()
try:
task_frequency = Prefs["scheduler.tasks.%s.frequency" % task.name]
except KeyError:
task_frequency = getattr(task, "frequency", None)
self.tasks[task.name] = {"task": task, "frequency": parse_frequency(task_frequency)}
def run(self):
self.running = True
self.scheduler_thread = Thread.Create(self.scheduler_worker)
self.queue_thread = Thread.Create(self.queue_worker)
def stop(self):
self.running = False
def task(self, name):
if name not in self.tasks:
return None
return self.tasks[name]["task"]
def is_task_running(self, name):
task = self.task(name)
if task:
return task.running
def last_run(self, task):
if task not in self.tasks:
return None
return self.tasks[task]["task"].last_run
def next_run(self, task):
if task not in self.tasks or not self.tasks[task]["task"].periodic:
return None
frequency_num, frequency_key = self.tasks[task]["frequency"]
if not frequency_num:
return None
last = self.tasks[task]["task"].last_run
use_date = last
now = datetime.datetime.now()
if not use_date:
use_date = now
return max(use_date + datetime.timedelta(**{frequency_key: frequency_num}), now)
def run_task(self, name, *args, **kwargs):
task = self.tasks[name]["task"]
if task.running:
Log.Debug("Scheduler: Not running %s, as it's currently running.", name)
return False
Log.Debug("Scheduler: Running task %s", name)
try:
task.prepare(*args, **kwargs)
task.run()
except Exception, e:
Log.Error("Scheduler: Something went wrong when running %s: %s", name, traceback.format_exc())
finally:
try:
task.post_run(Dict["tasks"][name]["data"])
except:
Log.Error("Scheduler: task.post_run failed for %s: %s", name, traceback.format_exc())
Dict.Save()
config.sync_cache()
def dispatch_task(self, *args, **kwargs):
if "queue" not in Dict["tasks"]:
Dict["tasks"]["queue"] = []
Dict["tasks"]["queue"].append((args, kwargs))
def signal(self, name, *args, **kwargs):
for task_name in self.tasks.keys():
task = self.task(task_name)
if not task:
Log.Error("Scheduler: Task %s not found (?!)" % task_name)
continue
if not task.periodic:
continue
if task.running:
Log.Debug("Scheduler: Sending signal %s to task %s (%s, %s)", name, task_name, args, kwargs)
try:
status = task.signal(name, *args, **kwargs)
except NotImplementedError:
Log.Debug("Scheduler: Signal ignored by %s", task_name)
continue
if status:
Log.Debug("Scheduler: Signal accepted by %s", task_name)
else:
Log.Debug("Scheduler: Signal not accepted by %s", task_name)
continue
Log.Debug("Scheduler: Not sending signal %s to task %s, because: not running", name, task_name)
def queue_worker(self):
Thread.Sleep(10.0)
while 1:
if not self.running:
break
# single dispatch requested?
if Dict["tasks"]["queue"]:
# work queue off
queue = Dict["tasks"]["queue"][:]
Dict["tasks"]["queue"] = []
Dict.Save()
for args, kwargs in queue:
Log.Debug("Queue: Dispatching single task: %s, %s", args, kwargs)
Thread.Create(self.run_task, True, *args, **kwargs)
Thread.Sleep(5.0)
Thread.Sleep(1)
def scheduler_worker(self):
Thread.Sleep(10.0)
while 1:
if not self.running:
break
# scheduled tasks
for name in self.tasks.keys():
now = datetime.datetime.now()
info = self.tasks.get(name)
if not info:
Log.Error("Scheduler: Task %s not found (?!)" % name)
continue
task = info["task"]
if name not in Dict["tasks"] or not task.periodic:
continue
if task.running:
continue
frequency_num, frequency_key = info["frequency"]
if not frequency_num:
continue
# run legacy SARAM once
if name == "SearchAllRecentlyAddedMissing" and ("hasRunLSARAM" not in Dict or not Dict["hasRunLSARAM"]):
task = self.tasks["LegacySearchAllRecentlyAddedMissing"]["task"]
task.last_run = None
name = "LegacySearchAllRecentlyAddedMissing"
Dict["hasRunLSARAM"] = True
Dict.Save()
if not task.last_run or (task.last_run + datetime.timedelta(**{frequency_key: frequency_num}) <= now):
# fixme: scheduled tasks run synchronously. is this the best idea?
Thread.Create(self.run_task, True, name)
#Thread.Sleep(5.0)
#self.run_task(name)
Thread.Sleep(5.0)
Thread.Sleep(1)
scheduler = DefaultScheduler()
```
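
`parse_frequency` and `next_run` above rely on the frequency preference being splittable into three words whose last two feed `datetime.timedelta` as keyword arguments. A self-contained sketch of that contract; the "every 6 hours" format is an assumption based on how the string is split:

```python
import datetime

def parse_frequency(s):
    # mirror of the DefaultScheduler helper above
    if s == "never" or s is None:
        return None, None
    kind, num, unit = s.split()
    return int(num), unit

num, unit = parse_frequency("every 6 hours")
assert (num, unit) == (6, "hours")

# this is how next_run() builds the timedelta from the parsed frequency
next_run = datetime.datetime.now() + datetime.timedelta(**{unit: num})
```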
#### File: Shared/asio/file_opener.py
```python
class FileOpener(object):
def __init__(self, file_path, parameters=None):
self.file_path = file_path
self.parameters = parameters
self.file = None
def __enter__(self):
self.file = ASIO.get_handler().open(
self.file_path,
self.parameters.handlers.get(ASIO.get_handler())
)
return self.file
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.file:
return
self.file.close()
self.file = None
```
#### File: asio/interfaces/base.py
```python
from asio.file import DEFAULT_BUFFER_SIZE
class Interface(object):
@classmethod
def open(cls, file_path, parameters=None):
raise NotImplementedError()
@classmethod
def get_size(cls, fp):
raise NotImplementedError()
@classmethod
def get_path(cls, fp):
raise NotImplementedError()
@classmethod
def seek(cls, fp, pointer, distance):
raise NotImplementedError()
@classmethod
def read(cls, fp, n=DEFAULT_BUFFER_SIZE):
raise NotImplementedError()
@classmethod
def close(cls, fp):
raise NotImplementedError()
```
#### File: interfaces/windows/__init__.py
```python
from asio.file import File, DEFAULT_BUFFER_SIZE
from asio.interfaces.base import Interface
import os
NULL = 0
if os.name == 'nt':
from asio.interfaces.windows.interop import WindowsInterop
class WindowsInterface(Interface):
@classmethod
def open(cls, file_path, parameters=None):
"""
:type file_path: str
:rtype: asio.interfaces.windows.WindowsFile
"""
if not parameters:
parameters = {}
return WindowsFile(WindowsInterop.create_file(
file_path,
parameters.get('desired_access', WindowsInterface.GenericAccess.READ),
parameters.get('share_mode', WindowsInterface.ShareMode.ALL),
parameters.get('creation_disposition', WindowsInterface.CreationDisposition.OPEN_EXISTING),
parameters.get('flags_and_attributes', NULL)
))
@classmethod
def get_size(cls, fp):
"""
:type fp: asio.interfaces.windows.WindowsFile
:rtype: int
"""
return WindowsInterop.get_file_size(fp.handle)
@classmethod
def get_path(cls, fp):
"""
:type fp: asio.interfaces.windows.WindowsFile
:rtype: str
"""
if not fp.file_map:
fp.file_map = WindowsInterop.create_file_mapping(fp.handle, WindowsInterface.Protection.READONLY)
if not fp.map_view:
fp.map_view = WindowsInterop.map_view_of_file(fp.file_map, WindowsInterface.FileMapAccess.READ, 1)
file_name = WindowsInterop.get_mapped_file_name(fp.map_view)
return file_name
@classmethod
def seek(cls, fp, offset, origin):
"""
:type fp: asio.interfaces.windows.WindowsFile
:type offset: int
:type origin: int
:rtype: int
"""
return WindowsInterop.set_file_pointer(
fp.handle,
offset,
origin
)
@classmethod
def read(cls, fp, n=DEFAULT_BUFFER_SIZE):
"""
:type fp: asio.interfaces.windows.WindowsFile
:type n: int
:rtype: str
"""
return WindowsInterop.read(fp.handle, n)
@classmethod
def read_into(cls, fp, b):
"""
:type fp: asio.interfaces.windows.WindowsFile
:type b: str
:rtype: int
"""
return WindowsInterop.read_into(fp.handle, b)
@classmethod
def close(cls, fp):
"""
:type fp: asio.interfaces.windows.WindowsFile
:rtype: bool
"""
if fp.map_view:
WindowsInterop.unmap_view_of_file(fp.map_view)
if fp.file_map:
WindowsInterop.close_handle(fp.file_map)
return bool(WindowsInterop.close_handle(fp.handle))
class GenericAccess(object):
READ = 0x80000000
WRITE = 0x40000000
EXECUTE = 0x20000000
ALL = 0x10000000
class ShareMode(object):
READ = 0x00000001
WRITE = 0x00000002
DELETE = 0x00000004
ALL = READ | WRITE | DELETE
class CreationDisposition(object):
CREATE_NEW = 1
CREATE_ALWAYS = 2
OPEN_EXISTING = 3
OPEN_ALWAYS = 4
TRUNCATE_EXISTING = 5
class Attribute(object):
READONLY = 0x00000001
HIDDEN = 0x00000002
SYSTEM = 0x00000004
DIRECTORY = 0x00000010
ARCHIVE = 0x00000020
DEVICE = 0x00000040
NORMAL = 0x00000080
TEMPORARY = 0x00000100
SPARSE_FILE = 0x00000200
REPARSE_POINT = 0x00000400
COMPRESSED = 0x00000800
OFFLINE = 0x00001000
NOT_CONTENT_INDEXED = 0x00002000
ENCRYPTED = 0x00004000
class Flag(object):
WRITE_THROUGH = 0x80000000
OVERLAPPED = 0x40000000
NO_BUFFERING = 0x20000000
RANDOM_ACCESS = 0x10000000
SEQUENTIAL_SCAN = 0x08000000
DELETE_ON_CLOSE = 0x04000000
BACKUP_SEMANTICS = 0x02000000
POSIX_SEMANTICS = 0x01000000
OPEN_REPARSE_POINT = 0x00200000
OPEN_NO_RECALL = 0x00100000
FIRST_PIPE_INSTANCE = 0x00080000
class Protection(object):
NOACCESS = 0x01
READONLY = 0x02
READWRITE = 0x04
WRITECOPY = 0x08
EXECUTE = 0x10
        EXECUTE_READ = 0x20
EXECUTE_READWRITE = 0x40
EXECUTE_WRITECOPY = 0x80
GUARD = 0x100
NOCACHE = 0x200
WRITECOMBINE = 0x400
class FileMapAccess(object):
COPY = 0x0001
WRITE = 0x0002
READ = 0x0004
ALL_ACCESS = 0x001f
EXECUTE = 0x0020
class WindowsFile(File):
platform_handler = WindowsInterface
def __init__(self, handle, *args, **kwargs):
super(WindowsFile, self).__init__(*args, **kwargs)
self.handle = handle
self.file_map = None
self.map_view = None
def readinto(self, b):
return self.get_handler().read_into(self, b)
def __str__(self):
return "<asio_windows.WindowsFile file: %s>" % self.handle
```
#### File: babelfish/converters/__init__.py
```python
import collections
from pkg_resources import iter_entry_points, EntryPoint
from ..exceptions import LanguageConvertError, LanguageReverseError
# from https://github.com/kennethreitz/requests/blob/master/requests/structures.py
class CaseInsensitiveDict(collections.MutableMapping):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive:
cid = CaseInsensitiveDict()
cid['English'] = 'eng'
cid['ENGLISH'] == 'eng' # True
list(cid) == ['English'] # True
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
class LanguageConverter(object):
"""A :class:`LanguageConverter` supports converting an alpha3 language code with an
alpha2 country code and a script code into a custom code
.. attribute:: codes
Set of possible custom codes
"""
def convert(self, alpha3, country=None, script=None):
"""Convert an alpha3 language code with an alpha2 country code and a script code
into a custom code
:param string alpha3: ISO-639-3 language code
:param country: ISO-3166 country code, if any
:type country: string or None
:param script: ISO-15924 script code, if any
:type script: string or None
:return: the corresponding custom code
:rtype: string
:raise: :class:`~babelfish.exceptions.LanguageConvertError`
"""
raise NotImplementedError
class LanguageReverseConverter(LanguageConverter):
"""A :class:`LanguageConverter` able to reverse a custom code into a alpha3
ISO-639-3 language code, alpha2 ISO-3166-1 country code and ISO-15924 script code
"""
def reverse(self, code):
"""Reverse a custom code into alpha3, country and script code
:param string code: custom code to reverse
:return: the corresponding alpha3 ISO-639-3 language code, alpha2 ISO-3166-1 country code and ISO-15924 script code
:rtype: tuple
:raise: :class:`~babelfish.exceptions.LanguageReverseError`
"""
raise NotImplementedError
class LanguageEquivalenceConverter(LanguageReverseConverter):
"""A :class:`LanguageEquivalenceConverter` is a utility class that allows you to easily define a
:class:`LanguageReverseConverter` by only specifying the dict from alpha3 to their corresponding symbols.
You must specify the dict of equivalence as a class variable named SYMBOLS.
If you also set the class variable CASE_SENSITIVE to ``True`` then the reverse conversion function will be
case-sensitive (it is case-insensitive by default).
Example::
class MyCodeConverter(babelfish.LanguageEquivalenceConverter):
CASE_SENSITIVE = True
SYMBOLS = {'fra': 'mycode1', 'eng': 'mycode2'}
"""
CASE_SENSITIVE = False
def __init__(self):
self.codes = set()
self.to_symbol = {}
if self.CASE_SENSITIVE:
self.from_symbol = {}
else:
self.from_symbol = CaseInsensitiveDict()
for alpha3, symbol in self.SYMBOLS.items():
self.to_symbol[alpha3] = symbol
self.from_symbol[symbol] = (alpha3, None, None)
self.codes.add(symbol)
def convert(self, alpha3, country=None, script=None):
try:
return self.to_symbol[alpha3]
except KeyError:
raise LanguageConvertError(alpha3, country, script)
def reverse(self, code):
try:
return self.from_symbol[code]
except KeyError:
raise LanguageReverseError(code)
class CountryConverter(object):
"""A :class:`CountryConverter` supports converting an alpha2 country code
into a custom code
.. attribute:: codes
Set of possible custom codes
"""
def convert(self, alpha2):
"""Convert an alpha2 country code into a custom code
        :param string alpha2: ISO-3166-1 country code
:return: the corresponding custom code
:rtype: string
:raise: :class:`~babelfish.exceptions.CountryConvertError`
"""
raise NotImplementedError
class CountryReverseConverter(CountryConverter):
"""A :class:`CountryConverter` able to reverse a custom code into a alpha2
ISO-3166-1 country code
"""
def reverse(self, code):
"""Reverse a custom code into alpha2 code
:param string code: custom code to reverse
:return: the corresponding alpha2 ISO-3166-1 country code
:rtype: string
:raise: :class:`~babelfish.exceptions.CountryReverseError`
"""
raise NotImplementedError
class ConverterManager(object):
"""Manager for babelfish converters behaving like a dict with lazy loading
Loading is done in this order:
* Entry point converters
* Registered converters
* Internal converters
.. attribute:: entry_point
The entry point where to look for converters
.. attribute:: internal_converters
Internal converters with entry point syntax
"""
entry_point = ''
internal_converters = []
def __init__(self):
#: Registered converters with entry point syntax
self.registered_converters = []
#: Loaded converters
self.converters = {}
def __getitem__(self, name):
"""Get a converter, lazy loading it if necessary"""
if name in self.converters:
return self.converters[name]
for ep in iter_entry_points(self.entry_point):
if ep.name == name:
self.converters[ep.name] = ep.load()()
return self.converters[ep.name]
for ep in (EntryPoint.parse(c) for c in self.registered_converters + self.internal_converters):
if ep.name == name:
# `require` argument of ep.load() is deprecated in newer versions of setuptools
if hasattr(ep, 'resolve'):
plugin = ep.resolve()
elif hasattr(ep, '_load'):
plugin = ep._load()
else:
plugin = ep.load(require=False)
self.converters[ep.name] = plugin()
return self.converters[ep.name]
raise KeyError(name)
def __setitem__(self, name, converter):
"""Load a converter"""
self.converters[name] = converter
def __delitem__(self, name):
"""Unload a converter"""
del self.converters[name]
def __iter__(self):
"""Iterator over loaded converters"""
return iter(self.converters)
def register(self, entry_point):
"""Register a converter
:param string entry_point: converter to register (entry point syntax)
:raise: ValueError if already registered
"""
if entry_point in self.registered_converters:
raise ValueError('Already registered')
self.registered_converters.insert(0, entry_point)
def unregister(self, entry_point):
"""Unregister a converter
:param string entry_point: converter to unregister (entry point syntax)
"""
self.registered_converters.remove(entry_point)
def __contains__(self, name):
return name in self.converters
```
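
Putting `LanguageEquivalenceConverter` and the `ConverterManager` together, a custom converter can be registered at runtime and used in both directions. The sketch below follows the class docstring and the `test_register_converter` test further down; the `mycode` converter name and its symbols are made up for illustration, and it assumes the bundled babelfish package is importable:

```python
from babelfish import Language, LanguageEquivalenceConverter, language_converters

class MyCodeConverter(LanguageEquivalenceConverter):
    CASE_SENSITIVE = True
    SYMBOLS = {'fra': 'mycode1', 'eng': 'mycode2'}

# loading the converter makes Language grow a .mycode property and a frommycode() classmethod
language_converters['mycode'] = MyCodeConverter()
assert Language('fra').mycode == 'mycode1'
assert Language.frommycode('mycode2') == Language('eng')
del language_converters['mycode']
```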
#### File: Shared/babelfish/script.py
```python
from collections import namedtuple
from pkg_resources import resource_stream # @UnresolvedImport
from . import basestr
#: Script code to script name mapping
SCRIPTS = {}
#: List of scripts in ISO-15924 as namedtuple of code, number, name, french_name, pva and date
SCRIPT_MATRIX = []
#: The namedtuple used in the :data:`SCRIPT_MATRIX`
IsoScript = namedtuple('IsoScript', ['code', 'number', 'name', 'french_name', 'pva', 'date'])
f = resource_stream('babelfish', 'data/iso15924-utf8-20131012.txt')
f.readline()
for l in f:
l = l.decode('utf-8').strip()
if not l or l.startswith('#'):
continue
script = IsoScript._make(l.split(';'))
SCRIPT_MATRIX.append(script)
SCRIPTS[script.code] = script.name
f.close()
class Script(object):
"""A human writing system
A script is represented by a 4-letter code from the ISO-15924 standard
:param string script: 4-letter ISO-15924 script code
"""
def __init__(self, script):
if script not in SCRIPTS:
raise ValueError('%r is not a valid script' % script)
#: ISO-15924 4-letter script code
self.code = script
@property
def name(self):
"""English name of the script"""
return SCRIPTS[self.code]
def __getstate__(self):
return self.code
def __setstate__(self, state):
self.code = state
def __hash__(self):
return hash(self.code)
def __eq__(self, other):
if isinstance(other, basestr):
return self.code == other
if not isinstance(other, Script):
return False
return self.code == other.code
def __ne__(self, other):
return not self == other
def __repr__(self):
return '<Script [%s]>' % self
def __str__(self):
return self.code
```
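
A quick usage sketch of the `Script` class above; it only needs the ISO-15924 data file that babelfish ships with:

```python
from babelfish import Script

latn = Script('Latn')
assert latn.name == 'Latin'   # English name from the ISO-15924 table
assert latn == 'Latn'         # comparison against the raw 4-letter code also works
assert str(latn) == 'Latn'
```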
#### File: Shared/babelfish/tests.py
```python
from __future__ import unicode_literals
import re
import sys
import pickle
from unittest import TestCase, TestSuite, TestLoader, TextTestRunner
from pkg_resources import resource_stream # @UnresolvedImport
from babelfish import (LANGUAGES, Language, Country, Script, language_converters, country_converters,
LanguageReverseConverter, LanguageConvertError, LanguageReverseError, CountryReverseError)
if sys.version_info[:2] <= (2, 6):
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class _Py26FixTestCase(object):
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
else:
class _Py26FixTestCase(object):
pass
class TestScript(TestCase, _Py26FixTestCase):
def test_wrong_script(self):
self.assertRaises(ValueError, lambda: Script('Azer'))
def test_eq(self):
self.assertEqual(Script('Latn'), Script('Latn'))
def test_ne(self):
self.assertNotEqual(Script('Cyrl'), Script('Latn'))
def test_hash(self):
self.assertEqual(hash(Script('Hira')), hash('Hira'))
def test_pickle(self):
self.assertEqual(pickle.loads(pickle.dumps(Script('Latn'))), Script('Latn'))
class TestCountry(TestCase, _Py26FixTestCase):
def test_wrong_country(self):
self.assertRaises(ValueError, lambda: Country('ZZ'))
def test_eq(self):
self.assertEqual(Country('US'), Country('US'))
def test_ne(self):
self.assertNotEqual(Country('GB'), Country('US'))
self.assertIsNotNone(Country('US'))
def test_hash(self):
self.assertEqual(hash(Country('US')), hash('US'))
def test_pickle(self):
for country in [Country('GB'), Country('US')]:
self.assertEqual(pickle.loads(pickle.dumps(country)), country)
def test_converter_name(self):
self.assertEqual(Country('US').name, 'UNITED STATES')
self.assertEqual(Country.fromname('UNITED STATES'), Country('US'))
self.assertEqual(Country.fromcode('UNITED STATES', 'name'), Country('US'))
self.assertRaises(CountryReverseError, lambda: Country.fromname('ZZZZZ'))
self.assertEqual(len(country_converters['name'].codes), 249)
class TestLanguage(TestCase, _Py26FixTestCase):
def test_languages(self):
self.assertEqual(len(LANGUAGES), 7874)
def test_wrong_language(self):
self.assertRaises(ValueError, lambda: Language('zzz'))
def test_unknown_language(self):
self.assertEqual(Language('zzzz', unknown='und'), Language('und'))
def test_converter_alpha2(self):
self.assertEqual(Language('eng').alpha2, 'en')
self.assertEqual(Language.fromalpha2('en'), Language('eng'))
self.assertEqual(Language.fromcode('en', 'alpha2'), Language('eng'))
self.assertRaises(LanguageReverseError, lambda: Language.fromalpha2('zz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').alpha2)
self.assertEqual(len(language_converters['alpha2'].codes), 184)
def test_converter_alpha3b(self):
self.assertEqual(Language('fra').alpha3b, 'fre')
self.assertEqual(Language.fromalpha3b('fre'), Language('fra'))
self.assertEqual(Language.fromcode('fre', 'alpha3b'), Language('fra'))
self.assertRaises(LanguageReverseError, lambda: Language.fromalpha3b('zzz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').alpha3b)
self.assertEqual(len(language_converters['alpha3b'].codes), 418)
def test_converter_alpha3t(self):
self.assertEqual(Language('fra').alpha3t, 'fra')
self.assertEqual(Language.fromalpha3t('fra'), Language('fra'))
self.assertEqual(Language.fromcode('fra', 'alpha3t'), Language('fra'))
self.assertRaises(LanguageReverseError, lambda: Language.fromalpha3t('zzz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').alpha3t)
self.assertEqual(len(language_converters['alpha3t'].codes), 418)
def test_converter_name(self):
self.assertEqual(Language('eng').name, 'English')
self.assertEqual(Language.fromname('English'), Language('eng'))
self.assertEqual(Language.fromcode('English', 'name'), Language('eng'))
self.assertRaises(LanguageReverseError, lambda: Language.fromname('Zzzzzzzzz'))
self.assertEqual(len(language_converters['name'].codes), 7874)
def test_converter_scope(self):
self.assertEqual(language_converters['scope'].codes, set(['I', 'S', 'M']))
self.assertEqual(Language('eng').scope, 'individual')
self.assertEqual(Language('und').scope, 'special')
def test_converter_type(self):
self.assertEqual(language_converters['type'].codes, set(['A', 'C', 'E', 'H', 'L', 'S']))
self.assertEqual(Language('eng').type, 'living')
self.assertEqual(Language('und').type, 'special')
def test_converter_opensubtitles(self):
self.assertEqual(Language('fra').opensubtitles, Language('fra').alpha3b)
self.assertEqual(Language('por', 'BR').opensubtitles, 'pob')
self.assertEqual(Language.fromopensubtitles('fre'), Language('fra'))
self.assertEqual(Language.fromopensubtitles('pob'), Language('por', 'BR'))
self.assertEqual(Language.fromopensubtitles('pb'), Language('por', 'BR'))
# Montenegrin is not recognized as an ISO language (yet?) but for now it is
# unofficially accepted as Serbian from Montenegro
self.assertEqual(Language.fromopensubtitles('mne'), Language('srp', 'ME'))
self.assertEqual(Language.fromcode('pob', 'opensubtitles'), Language('por', 'BR'))
self.assertRaises(LanguageReverseError, lambda: Language.fromopensubtitles('zzz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').opensubtitles)
self.assertEqual(len(language_converters['opensubtitles'].codes), 607)
# test with all the LANGUAGES from the opensubtitles api
# downloaded from: http://www.opensubtitles.org/addons/export_languages.php
f = resource_stream('babelfish', 'data/opensubtitles_languages.txt')
f.readline()
for l in f:
idlang, alpha2, _, upload_enabled, web_enabled = l.decode('utf-8').strip().split('\t')
if not int(upload_enabled) and not int(web_enabled):
# do not test LANGUAGES that are too esoteric / not widely available
continue
self.assertEqual(Language.fromopensubtitles(idlang).opensubtitles, idlang)
if alpha2:
self.assertEqual(Language.fromopensubtitles(idlang), Language.fromopensubtitles(alpha2))
f.close()
def test_converter_opensubtitles_codes(self):
for code in language_converters['opensubtitles'].from_opensubtitles.keys():
self.assertIn(code, language_converters['opensubtitles'].codes)
def test_fromietf_country_script(self):
language = Language.fromietf('fra-FR-Latn')
self.assertEqual(language.alpha3, 'fra')
self.assertEqual(language.country, Country('FR'))
self.assertEqual(language.script, Script('Latn'))
def test_fromietf_country_no_script(self):
language = Language.fromietf('fra-FR')
self.assertEqual(language.alpha3, 'fra')
self.assertEqual(language.country, Country('FR'))
self.assertIsNone(language.script)
def test_fromietf_no_country_no_script(self):
language = Language.fromietf('fra-FR')
self.assertEqual(language.alpha3, 'fra')
self.assertEqual(language.country, Country('FR'))
self.assertIsNone(language.script)
def test_fromietf_no_country_script(self):
language = Language.fromietf('fra-Latn')
self.assertEqual(language.alpha3, 'fra')
self.assertIsNone(language.country)
self.assertEqual(language.script, Script('Latn'))
def test_fromietf_alpha2_language(self):
language = Language.fromietf('fr-Latn')
self.assertEqual(language.alpha3, 'fra')
self.assertIsNone(language.country)
self.assertEqual(language.script, Script('Latn'))
def test_fromietf_wrong_language(self):
self.assertRaises(ValueError, lambda: Language.fromietf('xyz-FR'))
def test_fromietf_wrong_country(self):
self.assertRaises(ValueError, lambda: Language.fromietf('fra-YZ'))
def test_fromietf_wrong_script(self):
self.assertRaises(ValueError, lambda: Language.fromietf('fra-FR-Wxyz'))
def test_eq(self):
self.assertEqual(Language('eng'), Language('eng'))
def test_ne(self):
self.assertNotEqual(Language('fra'), Language('eng'))
self.assertIsNotNone(Language('fra'))
def test_nonzero(self):
self.assertFalse(bool(Language('und')))
self.assertTrue(bool(Language('eng')))
def test_language_hasattr(self):
self.assertTrue(hasattr(Language('fra'), 'alpha3'))
self.assertTrue(hasattr(Language('fra'), 'alpha2'))
self.assertFalse(hasattr(Language('bej'), 'alpha2'))
def test_country_hasattr(self):
self.assertTrue(hasattr(Country('US'), 'name'))
self.assertTrue(hasattr(Country('FR'), 'alpha2'))
self.assertFalse(hasattr(Country('BE'), 'none'))
def test_country(self):
self.assertEqual(Language('por', 'BR').country, Country('BR'))
self.assertEqual(Language('eng', Country('US')).country, Country('US'))
def test_eq_with_country(self):
self.assertEqual(Language('eng', 'US'), Language('eng', Country('US')))
def test_ne_with_country(self):
self.assertNotEqual(Language('eng', 'US'), Language('eng', Country('GB')))
def test_script(self):
self.assertEqual(Language('srp', script='Latn').script, Script('Latn'))
self.assertEqual(Language('srp', script=Script('Cyrl')).script, Script('Cyrl'))
def test_eq_with_script(self):
self.assertEqual(Language('srp', script='Latn'), Language('srp', script=Script('Latn')))
def test_ne_with_script(self):
self.assertNotEqual(Language('srp', script='Latn'), Language('srp', script=Script('Cyrl')))
def test_eq_with_country_and_script(self):
self.assertEqual(Language('srp', 'SR', 'Latn'), Language('srp', Country('SR'), Script('Latn')))
def test_ne_with_country_and_script(self):
self.assertNotEqual(Language('srp', 'SR', 'Latn'), Language('srp', Country('SR'), Script('Cyrl')))
def test_hash(self):
self.assertEqual(hash(Language('fra')), hash('fr'))
self.assertEqual(hash(Language('ace')), hash('ace'))
self.assertEqual(hash(Language('por', 'BR')), hash('pt-BR'))
self.assertEqual(hash(Language('srp', script='Cyrl')), hash('sr-Cyrl'))
self.assertEqual(hash(Language('eng', 'US', 'Latn')), hash('en-US-Latn'))
def test_pickle(self):
for lang in [Language('fra'),
Language('eng', 'US'),
Language('srp', script='Latn'),
Language('eng', 'US', 'Latn')]:
self.assertEqual(pickle.loads(pickle.dumps(lang)), lang)
def test_str(self):
self.assertEqual(Language.fromietf(str(Language('eng', 'US', 'Latn'))), Language('eng', 'US', 'Latn'))
self.assertEqual(Language.fromietf(str(Language('fra', 'FR'))), Language('fra', 'FR'))
self.assertEqual(Language.fromietf(str(Language('bel'))), Language('bel'))
def test_register_converter(self):
class TestConverter(LanguageReverseConverter):
def __init__(self):
self.to_test = {'fra': 'test1', 'eng': 'test2'}
self.from_test = {'test1': 'fra', 'test2': 'eng'}
def convert(self, alpha3, country=None, script=None):
if alpha3 not in self.to_test:
raise LanguageConvertError(alpha3, country, script)
return self.to_test[alpha3]
def reverse(self, test):
if test not in self.from_test:
raise LanguageReverseError(test)
return (self.from_test[test], None)
language = Language('fra')
self.assertFalse(hasattr(language, 'test'))
language_converters['test'] = TestConverter()
self.assertTrue(hasattr(language, 'test'))
self.assertIn('test', language_converters)
self.assertEqual(Language('fra').test, 'test1')
self.assertEqual(Language.fromtest('test2').alpha3, 'eng')
del language_converters['test']
self.assertNotIn('test', language_converters)
self.assertRaises(KeyError, lambda: Language.fromtest('test1'))
self.assertRaises(AttributeError, lambda: Language('fra').test)
def suite():
suite = TestSuite()
suite.addTest(TestLoader().loadTestsFromTestCase(TestScript))
suite.addTest(TestLoader().loadTestsFromTestCase(TestCountry))
suite.addTest(TestLoader().loadTestsFromTestCase(TestLanguage))
return suite
if __name__ == '__main__':
TextTestRunner().run(suite())
```
#### File: Shared/enzyme/subtitle.py
```python
from .exceptions import ReadError
from .parsers import ebml
from .mkv import MKV
import logging
import codecs
import os
import io
__all__ = ['Subtitle']
logger = logging.getLogger(__name__)
class Subtitle(object):
"""Subtitle extractor for Matroska Video File.
Currently only SRT subtitles stored without lacing are supported
"""
def __init__(self, stream):
"""Read the available subtitles from a MKV file-like object"""
self._stream = stream
#Use the MKV class to parse the META information
mkv = MKV(stream)
self._timecode_scale = mkv.info.timecode_scale
self._subtitles = mkv.get_srt_subtitles_track_by_language()
def has_subtitle(self, language):
return language in self._subtitles
def write_subtitle_to_stream(self, language):
"""Write a single subtitle to stream or return None if language not available"""
if language in self._subtitles:
            subtitle = self._subtitles[language]
            logger.info("Writing subtitle for language %s to stream", language)
            return _write_track_to_srt_stream(self._stream, subtitle.number, self._timecode_scale)
else:
logger.info("Subtitle for language %s not found",language)
def write_subtitles_to_stream(self):
"""Write all available subtitles as streams to a dictionary with language as the key"""
subtitles = dict()
for language in self._subtitles:
subtitles[language] = self.write_subtitle_to_stream(language)
return subtitles
def _write_track_to_srt_stream(mkv_stream, track, timecode_scale):
srt_stream = io.StringIO()
index = 0
for cluster in _parse_segment(mkv_stream,track):
for blockgroup in cluster.blockgroups:
index = index + 1
timeRange = _print_time_range(timecode_scale,cluster.timecode,blockgroup.block.timecode,blockgroup.duration)
srt_stream.write(str(index) + '\n')
srt_stream.write(timeRange + '\n')
srt_stream.write(codecs.decode(blockgroup.block.data.read(),'utf-8') + '\n')
srt_stream.write('\n')
return srt_stream
def _parse_segment(stream,track):
stream.seek(0)
specs = ebml.get_matroska_specs()
# Find all level 1 Cluster elements and its subelements. Speed up this process by excluding all other currently known level 1 elements
try:
segments = ebml.parse(stream, specs,include_element_names=['Segment','Cluster','BlockGroup','Timecode','Block','BlockDuration',],max_level=3)
    except ReadError:
        # no parsable segment was found; nothing to extract
        return []
clusters = []
for cluster in segments[0].data:
_parse_cluster(track, clusters, cluster)
return clusters
def _parse_cluster(track, clusters, cluster):
blockgroups = []
timecode = None
for child in cluster.data:
if child.name == 'BlockGroup':
_parse_blockgroup(track, blockgroups, child)
elif child.name == 'Timecode':
timecode = child.data
    if blockgroups and timecode is not None:
clusters.append(Cluster(timecode, blockgroups))
def _parse_blockgroup(track, blockgroups, blockgroup):
block = None
duration = None
for child in blockgroup.data:
if child.name == 'Block':
block = Block.fromelement(child)
if block.track != track:
block = None
elif child.name == 'BlockDuration':
duration = child.data
    if duration is not None and block is not None:
blockgroups.append(BlockGroup(block, duration))
def _print_time_range(timecode_scale,clusterTimecode,blockTimecode,duration):
    timecode_scale_ms = timecode_scale / 1000000  # the timecode scale is given in nanoseconds
rawTimecode = clusterTimecode + blockTimecode
startTimeMilleSeconds = (rawTimecode) * timecode_scale_ms
endTimeMilleSeconds = (rawTimecode + duration) * timecode_scale_ms
return _print_time(startTimeMilleSeconds) + " --> " + _print_time(endTimeMilleSeconds)
def _print_time(timeInMilleSeconds):
timeInSeconds, milleSeconds = divmod(timeInMilleSeconds, 1000)
timeInMinutes, seconds = divmod(timeInSeconds, 60)
hours, minutes = divmod(timeInMinutes, 60)
    return '%02d:%02d:%02d,%03d' % (hours, minutes, seconds, milleSeconds)
class Cluster(object):
    def __init__(self, timecode=None, blockgroups=None):
        self.timecode = timecode
        self.blockgroups = blockgroups if blockgroups is not None else []
class BlockGroup(object):
def __init__(self,block=None,duration=None):
self.block = block
self.duration = duration
class Block(object):
def __init__(self, track=None, timecode=None, invisible=False, lacing=None, flags=None, data=None):
self.track = track
self.timecode = timecode
self.invisible = invisible
self.lacing = lacing
self.flags = flags
self.data = data
@classmethod
def fromelement(cls,element):
stream = element.data
track = ebml.read_element_size(stream)
timecode = ebml.read_element_integer(stream,2)
flags = ord(stream.read(1))
invisible = bool(flags & 0x8)
if (flags & 0x6):
lacing = 'EBML'
elif (flags & 0x4):
lacing = 'fixed-size'
elif (flags & 0x2):
lacing = 'Xiph'
else:
lacing = None
if lacing:
raise ReadError('Laced blocks are not implemented yet')
data = ebml.read_element_binary(stream, element.size - stream.tell())
return cls(track,timecode,invisible,lacing,flags,data)
def __repr__(self):
return '<%s track=%d, timecode=%d, invisible=%d, lacing=%s>' % (self.__class__.__name__, self.track,self.timecode,self.invisible,self.lacing)
class SimpleBlock(Block):
def __init__(self, track=None, timecode=None, keyframe=False, invisible=False, lacing=None, flags=None, data=None, discardable=False):
super(SimpleBlock,self).__init__(track,timecode,invisible,lacing,flags,data)
self.keyframe = keyframe
self.discardable = discardable
    @classmethod
    def fromelement(cls, element):
simpleblock = super(SimpleBlock, cls).fromelement(element)
simpleblock.keyframe = bool(simpleblock.flags & 0x80)
simpleblock.discardable = bool(simpleblock.flags & 0x1)
return simpleblock
def __repr__(self):
return '<%s track=%d, timecode=%d, keyframe=%d, invisible=%d, lacing=%s, discardable=%d>' % (self.__class__.__name__, self.track,self.timecode,self.keyframe,self.invisible,self.lacing,self.discardable)
```
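
A hedged usage sketch of the `Subtitle` extractor above; `movie.mkv` and the `'eng'` track key are assumptions about the input file, and only unlaced SRT tracks will be found:

```python
import io
from enzyme.subtitle import Subtitle

with io.open('movie.mkv', 'rb') as stream:           # assumption: an MKV with an SRT track
    sub = Subtitle(stream)
    if sub.has_subtitle('eng'):                       # assumption: language key used by the MKV
        srt = sub.write_subtitle_to_stream('eng')     # io.StringIO holding the rendered SRT
        with io.open('movie.eng.srt', 'w', encoding='utf-8') as out:
            out.write(srt.getvalue())
```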
#### File: enzyme/tests/test_mkv.py
```python
from datetime import timedelta, datetime
from enzyme.mkv import MKV, VIDEO_TRACK, AUDIO_TRACK, SUBTITLE_TRACK
import io
import os.path
import requests
import unittest
import zipfile
# Test directory
TEST_DIR = os.path.join(os.path.dirname(__file__), os.path.splitext(__file__)[0])
def setUpModule():
if not os.path.exists(TEST_DIR):
r = requests.get('http://downloads.sourceforge.net/project/matroska/test_files/matroska_test_w1_1.zip')
with zipfile.ZipFile(io.BytesIO(r.content), 'r') as f:
f.extractall(TEST_DIR)
class MKVTestCase(unittest.TestCase):
def test_test1(self):
stream = io.open(os.path.join(TEST_DIR, 'test1.mkv'), 'rb')
mkv = MKV(stream)
# info
self.assertTrue(mkv.info.title is None)
self.assertTrue(mkv.info.duration == timedelta(minutes=1, seconds=27, milliseconds=336))
self.assertTrue(mkv.info.date_utc == datetime(2010, 8, 21, 7, 23, 3))
self.assertTrue(mkv.info.muxing_app == 'libebml2 v0.10.0 + libmatroska2 v0.10.1')
self.assertTrue(mkv.info.writing_app == 'mkclean 0.5.5 ru from libebml v1.0.0 + libmatroska v1.0.0 + mkvmerge v4.1.1 (\'Bouncin\' Back\') built on Jul 3 2010 22:54:08')
# video track
self.assertTrue(len(mkv.video_tracks) == 1)
self.assertTrue(mkv.video_tracks[0].type == VIDEO_TRACK)
self.assertTrue(mkv.video_tracks[0].number == 1)
self.assertTrue(mkv.video_tracks[0].name is None)
self.assertTrue(mkv.video_tracks[0].language == 'und')
self.assertTrue(mkv.video_tracks[0].enabled == True)
self.assertTrue(mkv.video_tracks[0].default == True)
self.assertTrue(mkv.video_tracks[0].forced == False)
self.assertTrue(mkv.video_tracks[0].lacing == False)
self.assertTrue(mkv.video_tracks[0].codec_id == 'V_MS/VFW/FOURCC')
self.assertTrue(mkv.video_tracks[0].codec_name is None)
self.assertTrue(mkv.video_tracks[0].width == 854)
self.assertTrue(mkv.video_tracks[0].height == 480)
self.assertTrue(mkv.video_tracks[0].interlaced == False)
self.assertTrue(mkv.video_tracks[0].stereo_mode is None)
self.assertTrue(mkv.video_tracks[0].crop == {})
self.assertTrue(mkv.video_tracks[0].display_width is None)
self.assertTrue(mkv.video_tracks[0].display_height is None)
self.assertTrue(mkv.video_tracks[0].display_unit is None)
self.assertTrue(mkv.video_tracks[0].aspect_ratio_type is None)
# audio track
self.assertTrue(len(mkv.audio_tracks) == 1)
self.assertTrue(mkv.audio_tracks[0].type == AUDIO_TRACK)
self.assertTrue(mkv.audio_tracks[0].number == 2)
self.assertTrue(mkv.audio_tracks[0].name is None)
self.assertTrue(mkv.audio_tracks[0].language == 'und')
self.assertTrue(mkv.audio_tracks[0].enabled == True)
self.assertTrue(mkv.audio_tracks[0].default == True)
self.assertTrue(mkv.audio_tracks[0].forced == False)
self.assertTrue(mkv.audio_tracks[0].lacing == True)
self.assertTrue(mkv.audio_tracks[0].codec_id == 'A_MPEG/L3')
self.assertTrue(mkv.audio_tracks[0].codec_name is None)
self.assertTrue(mkv.audio_tracks[0].sampling_frequency == 48000.0)
self.assertTrue(mkv.audio_tracks[0].channels == 2)
self.assertTrue(mkv.audio_tracks[0].output_sampling_frequency is None)
self.assertTrue(mkv.audio_tracks[0].bit_depth is None)
# subtitle track
self.assertTrue(len(mkv.subtitle_tracks) == 0)
# chapters
self.assertTrue(len(mkv.chapters) == 0)
# tags
self.assertTrue(len(mkv.tags) == 1)
self.assertTrue(len(mkv.tags[0].simpletags) == 3)
self.assertTrue(mkv.tags[0].simpletags[0].name == 'TITLE')
self.assertTrue(mkv.tags[0].simpletags[0].default == True)
self.assertTrue(mkv.tags[0].simpletags[0].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[0].string == 'Big Buck Bunny - test 1')
self.assertTrue(mkv.tags[0].simpletags[0].binary is None)
self.assertTrue(mkv.tags[0].simpletags[1].name == 'DATE_RELEASED')
self.assertTrue(mkv.tags[0].simpletags[1].default == True)
self.assertTrue(mkv.tags[0].simpletags[1].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[1].string == '2010')
self.assertTrue(mkv.tags[0].simpletags[1].binary is None)
self.assertTrue(mkv.tags[0].simpletags[2].name == 'COMMENT')
self.assertTrue(mkv.tags[0].simpletags[2].default == True)
self.assertTrue(mkv.tags[0].simpletags[2].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[2].string == 'Matroska Validation File1, basic MPEG4.2 and MP3 with only SimpleBlock')
self.assertTrue(mkv.tags[0].simpletags[2].binary is None)
def test_test2(self):
stream = io.open(os.path.join(TEST_DIR, 'test2.mkv'), 'rb')
mkv = MKV(stream)
# info
self.assertTrue(mkv.info.title is None)
self.assertTrue(mkv.info.duration == timedelta(seconds=47, milliseconds=509))
self.assertTrue(mkv.info.date_utc == datetime(2011, 6, 2, 12, 45, 20))
self.assertTrue(mkv.info.muxing_app == 'libebml2 v0.21.0 + libmatroska2 v0.22.1')
self.assertTrue(mkv.info.writing_app == 'mkclean 0.8.3 ru from libebml2 v0.10.0 + libmatroska2 v0.10.1 + mkclean 0.5.5 ru from libebml v1.0.0 + libmatroska v1.0.0 + mkvmerge v4.1.1 (\'Bouncin\' Back\') built on Jul 3 2010 22:54:08')
# video track
self.assertTrue(len(mkv.video_tracks) == 1)
self.assertTrue(mkv.video_tracks[0].type == VIDEO_TRACK)
self.assertTrue(mkv.video_tracks[0].number == 1)
self.assertTrue(mkv.video_tracks[0].name is None)
self.assertTrue(mkv.video_tracks[0].language == 'und')
self.assertTrue(mkv.video_tracks[0].enabled == True)
self.assertTrue(mkv.video_tracks[0].default == True)
self.assertTrue(mkv.video_tracks[0].forced == False)
self.assertTrue(mkv.video_tracks[0].lacing == False)
self.assertTrue(mkv.video_tracks[0].codec_id == 'V_MPEG4/ISO/AVC')
self.assertTrue(mkv.video_tracks[0].codec_name is None)
self.assertTrue(mkv.video_tracks[0].width == 1024)
self.assertTrue(mkv.video_tracks[0].height == 576)
self.assertTrue(mkv.video_tracks[0].interlaced == False)
self.assertTrue(mkv.video_tracks[0].stereo_mode is None)
self.assertTrue(mkv.video_tracks[0].crop == {})
self.assertTrue(mkv.video_tracks[0].display_width == 1354)
self.assertTrue(mkv.video_tracks[0].display_height is None)
self.assertTrue(mkv.video_tracks[0].display_unit is None)
self.assertTrue(mkv.video_tracks[0].aspect_ratio_type is None)
# audio track
self.assertTrue(len(mkv.audio_tracks) == 1)
self.assertTrue(mkv.audio_tracks[0].type == AUDIO_TRACK)
self.assertTrue(mkv.audio_tracks[0].number == 2)
self.assertTrue(mkv.audio_tracks[0].name is None)
self.assertTrue(mkv.audio_tracks[0].language == 'und')
self.assertTrue(mkv.audio_tracks[0].enabled == True)
self.assertTrue(mkv.audio_tracks[0].default == True)
self.assertTrue(mkv.audio_tracks[0].forced == False)
self.assertTrue(mkv.audio_tracks[0].lacing == True)
self.assertTrue(mkv.audio_tracks[0].codec_id == 'A_AAC')
self.assertTrue(mkv.audio_tracks[0].codec_name is None)
self.assertTrue(mkv.audio_tracks[0].sampling_frequency == 48000.0)
self.assertTrue(mkv.audio_tracks[0].channels == 2)
self.assertTrue(mkv.audio_tracks[0].output_sampling_frequency is None)
self.assertTrue(mkv.audio_tracks[0].bit_depth is None)
# subtitle track
self.assertTrue(len(mkv.subtitle_tracks) == 0)
# chapters
self.assertTrue(len(mkv.chapters) == 0)
# tags
self.assertTrue(len(mkv.tags) == 1)
self.assertTrue(len(mkv.tags[0].simpletags) == 3)
self.assertTrue(mkv.tags[0].simpletags[0].name == 'TITLE')
self.assertTrue(mkv.tags[0].simpletags[0].default == True)
self.assertTrue(mkv.tags[0].simpletags[0].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[0].string == 'Elephant Dream - test 2')
self.assertTrue(mkv.tags[0].simpletags[0].binary is None)
self.assertTrue(mkv.tags[0].simpletags[1].name == 'DATE_RELEASED')
self.assertTrue(mkv.tags[0].simpletags[1].default == True)
self.assertTrue(mkv.tags[0].simpletags[1].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[1].string == '2010')
self.assertTrue(mkv.tags[0].simpletags[1].binary is None)
self.assertTrue(mkv.tags[0].simpletags[2].name == 'COMMENT')
self.assertTrue(mkv.tags[0].simpletags[2].default == True)
self.assertTrue(mkv.tags[0].simpletags[2].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[2].string == 'Matroska Validation File 2, 100,000 timecode scale, odd aspect ratio, and CRC-32. Codecs are AVC and AAC')
self.assertTrue(mkv.tags[0].simpletags[2].binary is None)
def test_test3(self):
stream = io.open(os.path.join(TEST_DIR, 'test3.mkv'), 'rb')
mkv = MKV(stream)
# info
self.assertTrue(mkv.info.title is None)
self.assertTrue(mkv.info.duration == timedelta(seconds=49, milliseconds=64))
self.assertTrue(mkv.info.date_utc == datetime(2010, 8, 21, 21, 43, 25))
self.assertTrue(mkv.info.muxing_app == 'libebml2 v0.11.0 + libmatroska2 v0.10.1')
self.assertTrue(mkv.info.writing_app == 'mkclean 0.5.5 ro from libebml v1.0.0 + libmatroska v1.0.0 + mkvmerge v4.1.1 (\'Bouncin\' Back\') built on Jul 3 2010 22:54:08')
# video track
self.assertTrue(len(mkv.video_tracks) == 1)
self.assertTrue(mkv.video_tracks[0].type == VIDEO_TRACK)
self.assertTrue(mkv.video_tracks[0].number == 1)
self.assertTrue(mkv.video_tracks[0].name is None)
self.assertTrue(mkv.video_tracks[0].language == 'und')
self.assertTrue(mkv.video_tracks[0].enabled == True)
self.assertTrue(mkv.video_tracks[0].default == True)
self.assertTrue(mkv.video_tracks[0].forced == False)
self.assertTrue(mkv.video_tracks[0].lacing == False)
self.assertTrue(mkv.video_tracks[0].codec_id == 'V_MPEG4/ISO/AVC')
self.assertTrue(mkv.video_tracks[0].codec_name is None)
self.assertTrue(mkv.video_tracks[0].width == 1024)
self.assertTrue(mkv.video_tracks[0].height == 576)
self.assertTrue(mkv.video_tracks[0].interlaced == False)
self.assertTrue(mkv.video_tracks[0].stereo_mode is None)
self.assertTrue(mkv.video_tracks[0].crop == {})
self.assertTrue(mkv.video_tracks[0].display_width is None)
self.assertTrue(mkv.video_tracks[0].display_height is None)
self.assertTrue(mkv.video_tracks[0].display_unit is None)
self.assertTrue(mkv.video_tracks[0].aspect_ratio_type is None)
# audio track
self.assertTrue(len(mkv.audio_tracks) == 1)
self.assertTrue(mkv.audio_tracks[0].type == AUDIO_TRACK)
self.assertTrue(mkv.audio_tracks[0].number == 2)
self.assertTrue(mkv.audio_tracks[0].name is None)
self.assertTrue(mkv.audio_tracks[0].language == 'eng')
self.assertTrue(mkv.audio_tracks[0].enabled == True)
self.assertTrue(mkv.audio_tracks[0].default == True)
self.assertTrue(mkv.audio_tracks[0].forced == False)
self.assertTrue(mkv.audio_tracks[0].lacing == True)
self.assertTrue(mkv.audio_tracks[0].codec_id == 'A_MPEG/L3')
self.assertTrue(mkv.audio_tracks[0].codec_name is None)
self.assertTrue(mkv.audio_tracks[0].sampling_frequency == 48000.0)
self.assertTrue(mkv.audio_tracks[0].channels == 2)
self.assertTrue(mkv.audio_tracks[0].output_sampling_frequency is None)
self.assertTrue(mkv.audio_tracks[0].bit_depth is None)
# subtitle track
self.assertTrue(len(mkv.subtitle_tracks) == 0)
# chapters
self.assertTrue(len(mkv.chapters) == 0)
# tags
self.assertTrue(len(mkv.tags) == 1)
self.assertTrue(len(mkv.tags[0].simpletags) == 3)
self.assertTrue(mkv.tags[0].simpletags[0].name == 'TITLE')
self.assertTrue(mkv.tags[0].simpletags[0].default == True)
self.assertTrue(mkv.tags[0].simpletags[0].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[0].string == 'Elephant Dream - test 3')
self.assertTrue(mkv.tags[0].simpletags[0].binary is None)
self.assertTrue(mkv.tags[0].simpletags[1].name == 'DATE_RELEASED')
self.assertTrue(mkv.tags[0].simpletags[1].default == True)
self.assertTrue(mkv.tags[0].simpletags[1].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[1].string == '2010')
self.assertTrue(mkv.tags[0].simpletags[1].binary is None)
self.assertTrue(mkv.tags[0].simpletags[2].name == 'COMMENT')
self.assertTrue(mkv.tags[0].simpletags[2].default == True)
self.assertTrue(mkv.tags[0].simpletags[2].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[2].string == 'Matroska Validation File 3, header stripping on the video track and no SimpleBlock')
self.assertTrue(mkv.tags[0].simpletags[2].binary is None)
def test_test5(self):
stream = io.open(os.path.join(TEST_DIR, 'test5.mkv'), 'rb')
mkv = MKV(stream)
# info
self.assertTrue(mkv.info.title is None)
self.assertTrue(mkv.info.duration == timedelta(seconds=46, milliseconds=665))
self.assertTrue(mkv.info.date_utc == datetime(2010, 8, 21, 18, 6, 43))
self.assertTrue(mkv.info.muxing_app == 'libebml v1.0.0 + libmatroska v1.0.0')
self.assertTrue(mkv.info.writing_app == 'mkvmerge v4.0.0 (\'The Stars were mine\') built on Jun 6 2010 16:18:42')
# video track
self.assertTrue(len(mkv.video_tracks) == 1)
self.assertTrue(mkv.video_tracks[0].type == VIDEO_TRACK)
self.assertTrue(mkv.video_tracks[0].number == 1)
self.assertTrue(mkv.video_tracks[0].name is None)
self.assertTrue(mkv.video_tracks[0].language == 'und')
self.assertTrue(mkv.video_tracks[0].enabled == True)
self.assertTrue(mkv.video_tracks[0].default == True)
self.assertTrue(mkv.video_tracks[0].forced == False)
self.assertTrue(mkv.video_tracks[0].lacing == False)
self.assertTrue(mkv.video_tracks[0].codec_id == 'V_MPEG4/ISO/AVC')
self.assertTrue(mkv.video_tracks[0].codec_name is None)
self.assertTrue(mkv.video_tracks[0].width == 1024)
self.assertTrue(mkv.video_tracks[0].height == 576)
self.assertTrue(mkv.video_tracks[0].interlaced == False)
self.assertTrue(mkv.video_tracks[0].stereo_mode is None)
self.assertTrue(mkv.video_tracks[0].crop == {})
self.assertTrue(mkv.video_tracks[0].display_width == 1024)
self.assertTrue(mkv.video_tracks[0].display_height == 576)
self.assertTrue(mkv.video_tracks[0].display_unit is None)
self.assertTrue(mkv.video_tracks[0].aspect_ratio_type is None)
# audio tracks
self.assertTrue(len(mkv.audio_tracks) == 2)
self.assertTrue(mkv.audio_tracks[0].type == AUDIO_TRACK)
self.assertTrue(mkv.audio_tracks[0].number == 2)
self.assertTrue(mkv.audio_tracks[0].name is None)
self.assertTrue(mkv.audio_tracks[0].language == 'und')
self.assertTrue(mkv.audio_tracks[0].enabled == True)
self.assertTrue(mkv.audio_tracks[0].default == True)
self.assertTrue(mkv.audio_tracks[0].forced == False)
self.assertTrue(mkv.audio_tracks[0].lacing == True)
self.assertTrue(mkv.audio_tracks[0].codec_id == 'A_AAC')
self.assertTrue(mkv.audio_tracks[0].codec_name is None)
self.assertTrue(mkv.audio_tracks[0].sampling_frequency == 48000.0)
self.assertTrue(mkv.audio_tracks[0].channels == 2)
self.assertTrue(mkv.audio_tracks[0].output_sampling_frequency is None)
self.assertTrue(mkv.audio_tracks[0].bit_depth is None)
self.assertTrue(mkv.audio_tracks[1].type == AUDIO_TRACK)
self.assertTrue(mkv.audio_tracks[1].number == 10)
self.assertTrue(mkv.audio_tracks[1].name == 'Commentary')
self.assertTrue(mkv.audio_tracks[1].language == 'eng')
self.assertTrue(mkv.audio_tracks[1].enabled == True)
self.assertTrue(mkv.audio_tracks[1].default == False)
self.assertTrue(mkv.audio_tracks[1].forced == False)
self.assertTrue(mkv.audio_tracks[1].lacing == True)
self.assertTrue(mkv.audio_tracks[1].codec_id == 'A_AAC')
self.assertTrue(mkv.audio_tracks[1].codec_name is None)
self.assertTrue(mkv.audio_tracks[1].sampling_frequency == 22050.0)
self.assertTrue(mkv.audio_tracks[1].channels == 1)
self.assertTrue(mkv.audio_tracks[1].output_sampling_frequency == 44100.0)
self.assertTrue(mkv.audio_tracks[1].bit_depth is None)
# subtitle track
self.assertTrue(len(mkv.subtitle_tracks) == 8)
self.assertTrue(mkv.subtitle_tracks[0].type == SUBTITLE_TRACK)
self.assertTrue(mkv.subtitle_tracks[0].number == 3)
self.assertTrue(mkv.subtitle_tracks[0].name is None)
self.assertTrue(mkv.subtitle_tracks[0].language == 'eng')
self.assertTrue(mkv.subtitle_tracks[0].enabled == True)
self.assertTrue(mkv.subtitle_tracks[0].default == True)
self.assertTrue(mkv.subtitle_tracks[0].forced == False)
self.assertTrue(mkv.subtitle_tracks[0].lacing == False)
self.assertTrue(mkv.subtitle_tracks[0].codec_id == 'S_TEXT/UTF8')
self.assertTrue(mkv.subtitle_tracks[0].codec_name is None)
self.assertTrue(mkv.subtitle_tracks[1].type == SUBTITLE_TRACK)
self.assertTrue(mkv.subtitle_tracks[1].number == 4)
self.assertTrue(mkv.subtitle_tracks[1].name is None)
self.assertTrue(mkv.subtitle_tracks[1].language == 'hun')
self.assertTrue(mkv.subtitle_tracks[1].enabled == True)
self.assertTrue(mkv.subtitle_tracks[1].default == False)
self.assertTrue(mkv.subtitle_tracks[1].forced == False)
self.assertTrue(mkv.subtitle_tracks[1].lacing == False)
self.assertTrue(mkv.subtitle_tracks[1].codec_id == 'S_TEXT/UTF8')
self.assertTrue(mkv.subtitle_tracks[1].codec_name is None)
self.assertTrue(mkv.subtitle_tracks[2].type == SUBTITLE_TRACK)
self.assertTrue(mkv.subtitle_tracks[2].number == 5)
self.assertTrue(mkv.subtitle_tracks[2].name is None)
self.assertTrue(mkv.subtitle_tracks[2].language == 'ger')
self.assertTrue(mkv.subtitle_tracks[2].enabled == True)
self.assertTrue(mkv.subtitle_tracks[2].default == False)
self.assertTrue(mkv.subtitle_tracks[2].forced == False)
self.assertTrue(mkv.subtitle_tracks[2].lacing == False)
self.assertTrue(mkv.subtitle_tracks[2].codec_id == 'S_TEXT/UTF8')
self.assertTrue(mkv.subtitle_tracks[2].codec_name is None)
self.assertTrue(mkv.subtitle_tracks[3].type == SUBTITLE_TRACK)
self.assertTrue(mkv.subtitle_tracks[3].number == 6)
self.assertTrue(mkv.subtitle_tracks[3].name is None)
self.assertTrue(mkv.subtitle_tracks[3].language == 'fre')
self.assertTrue(mkv.subtitle_tracks[3].enabled == True)
self.assertTrue(mkv.subtitle_tracks[3].default == False)
self.assertTrue(mkv.subtitle_tracks[3].forced == False)
self.assertTrue(mkv.subtitle_tracks[3].lacing == False)
self.assertTrue(mkv.subtitle_tracks[3].codec_id == 'S_TEXT/UTF8')
self.assertTrue(mkv.subtitle_tracks[3].codec_name is None)
self.assertTrue(mkv.subtitle_tracks[4].type == SUBTITLE_TRACK)
self.assertTrue(mkv.subtitle_tracks[4].number == 8)
self.assertTrue(mkv.subtitle_tracks[4].name is None)
self.assertTrue(mkv.subtitle_tracks[4].language == 'spa')
self.assertTrue(mkv.subtitle_tracks[4].enabled == True)
self.assertTrue(mkv.subtitle_tracks[4].default == False)
self.assertTrue(mkv.subtitle_tracks[4].forced == False)
self.assertTrue(mkv.subtitle_tracks[4].lacing == False)
self.assertTrue(mkv.subtitle_tracks[4].codec_id == 'S_TEXT/UTF8')
self.assertTrue(mkv.subtitle_tracks[4].codec_name is None)
self.assertTrue(mkv.subtitle_tracks[5].type == SUBTITLE_TRACK)
self.assertTrue(mkv.subtitle_tracks[5].number == 9)
self.assertTrue(mkv.subtitle_tracks[5].name is None)
self.assertTrue(mkv.subtitle_tracks[5].language == 'ita')
self.assertTrue(mkv.subtitle_tracks[5].enabled == True)
self.assertTrue(mkv.subtitle_tracks[5].default == False)
self.assertTrue(mkv.subtitle_tracks[5].forced == False)
self.assertTrue(mkv.subtitle_tracks[5].lacing == False)
self.assertTrue(mkv.subtitle_tracks[5].codec_id == 'S_TEXT/UTF8')
self.assertTrue(mkv.subtitle_tracks[5].codec_name is None)
self.assertTrue(mkv.subtitle_tracks[6].type == SUBTITLE_TRACK)
self.assertTrue(mkv.subtitle_tracks[6].number == 11)
self.assertTrue(mkv.subtitle_tracks[6].name is None)
self.assertTrue(mkv.subtitle_tracks[6].language == 'jpn')
self.assertTrue(mkv.subtitle_tracks[6].enabled == True)
self.assertTrue(mkv.subtitle_tracks[6].default == False)
self.assertTrue(mkv.subtitle_tracks[6].forced == False)
self.assertTrue(mkv.subtitle_tracks[6].lacing == False)
self.assertTrue(mkv.subtitle_tracks[6].codec_id == 'S_TEXT/UTF8')
self.assertTrue(mkv.subtitle_tracks[6].codec_name is None)
self.assertTrue(mkv.subtitle_tracks[7].type == SUBTITLE_TRACK)
self.assertTrue(mkv.subtitle_tracks[7].number == 7)
self.assertTrue(mkv.subtitle_tracks[7].name is None)
self.assertTrue(mkv.subtitle_tracks[7].language == 'und')
self.assertTrue(mkv.subtitle_tracks[7].enabled == True)
self.assertTrue(mkv.subtitle_tracks[7].default == False)
self.assertTrue(mkv.subtitle_tracks[7].forced == False)
self.assertTrue(mkv.subtitle_tracks[7].lacing == False)
self.assertTrue(mkv.subtitle_tracks[7].codec_id == 'S_TEXT/UTF8')
self.assertTrue(mkv.subtitle_tracks[7].codec_name is None)
# chapters
self.assertTrue(len(mkv.chapters) == 0)
# tags
self.assertTrue(len(mkv.tags) == 1)
self.assertTrue(len(mkv.tags[0].simpletags) == 3)
self.assertTrue(mkv.tags[0].simpletags[0].name == 'TITLE')
self.assertTrue(mkv.tags[0].simpletags[0].default == True)
self.assertTrue(mkv.tags[0].simpletags[0].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[0].string == 'Big Buck Bunny - test 8')
self.assertTrue(mkv.tags[0].simpletags[0].binary is None)
self.assertTrue(mkv.tags[0].simpletags[1].name == 'DATE_RELEASED')
self.assertTrue(mkv.tags[0].simpletags[1].default == True)
self.assertTrue(mkv.tags[0].simpletags[1].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[1].string == '2010')
self.assertTrue(mkv.tags[0].simpletags[1].binary is None)
self.assertTrue(mkv.tags[0].simpletags[2].name == 'COMMENT')
self.assertTrue(mkv.tags[0].simpletags[2].default == True)
self.assertTrue(mkv.tags[0].simpletags[2].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[2].string == 'Matroska Validation File 8, secondary audio commentary track, misc subtitle tracks')
self.assertTrue(mkv.tags[0].simpletags[2].binary is None)
def test_test6(self):
stream = io.open(os.path.join(TEST_DIR, 'test6.mkv'), 'rb')
mkv = MKV(stream)
# info
self.assertTrue(mkv.info.title is None)
self.assertTrue(mkv.info.duration == timedelta(seconds=87, milliseconds=336))
self.assertTrue(mkv.info.date_utc == datetime(2010, 8, 21, 16, 31, 55))
self.assertTrue(mkv.info.muxing_app == 'libebml2 v0.10.1 + libmatroska2 v0.10.1')
self.assertTrue(mkv.info.writing_app == 'mkclean 0.5.5 r from libebml v1.0.0 + libmatroska v1.0.0 + mkvmerge v4.0.0 (\'The Stars were mine\') built on Jun 6 2010 16:18:42')
# video track
self.assertTrue(len(mkv.video_tracks) == 1)
self.assertTrue(mkv.video_tracks[0].type == VIDEO_TRACK)
self.assertTrue(mkv.video_tracks[0].number == 1)
self.assertTrue(mkv.video_tracks[0].name is None)
self.assertTrue(mkv.video_tracks[0].language == 'und')
self.assertTrue(mkv.video_tracks[0].enabled == True)
self.assertTrue(mkv.video_tracks[0].default == False)
self.assertTrue(mkv.video_tracks[0].forced == False)
self.assertTrue(mkv.video_tracks[0].lacing == False)
self.assertTrue(mkv.video_tracks[0].codec_id == 'V_MS/VFW/FOURCC')
self.assertTrue(mkv.video_tracks[0].codec_name is None)
self.assertTrue(mkv.video_tracks[0].width == 854)
self.assertTrue(mkv.video_tracks[0].height == 480)
self.assertTrue(mkv.video_tracks[0].interlaced == False)
self.assertTrue(mkv.video_tracks[0].stereo_mode is None)
self.assertTrue(mkv.video_tracks[0].crop == {})
self.assertTrue(mkv.video_tracks[0].display_width is None)
self.assertTrue(mkv.video_tracks[0].display_height is None)
self.assertTrue(mkv.video_tracks[0].display_unit is None)
self.assertTrue(mkv.video_tracks[0].aspect_ratio_type is None)
# audio track
self.assertTrue(len(mkv.audio_tracks) == 1)
self.assertTrue(mkv.audio_tracks[0].type == AUDIO_TRACK)
self.assertTrue(mkv.audio_tracks[0].number == 2)
self.assertTrue(mkv.audio_tracks[0].name is None)
self.assertTrue(mkv.audio_tracks[0].language == 'und')
self.assertTrue(mkv.audio_tracks[0].enabled == True)
self.assertTrue(mkv.audio_tracks[0].default == False)
self.assertTrue(mkv.audio_tracks[0].forced == False)
self.assertTrue(mkv.audio_tracks[0].lacing == True)
self.assertTrue(mkv.audio_tracks[0].codec_id == 'A_MPEG/L3')
self.assertTrue(mkv.audio_tracks[0].codec_name is None)
self.assertTrue(mkv.audio_tracks[0].sampling_frequency == 48000.0)
self.assertTrue(mkv.audio_tracks[0].channels == 2)
self.assertTrue(mkv.audio_tracks[0].output_sampling_frequency is None)
self.assertTrue(mkv.audio_tracks[0].bit_depth is None)
# subtitle track
self.assertTrue(len(mkv.subtitle_tracks) == 0)
# chapters
self.assertTrue(len(mkv.chapters) == 0)
# tags
self.assertTrue(len(mkv.tags) == 1)
self.assertTrue(len(mkv.tags[0].simpletags) == 3)
self.assertTrue(mkv.tags[0].simpletags[0].name == 'TITLE')
self.assertTrue(mkv.tags[0].simpletags[0].default == True)
self.assertTrue(mkv.tags[0].simpletags[0].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[0].string == 'Big Buck Bunny - test 6')
self.assertTrue(mkv.tags[0].simpletags[0].binary is None)
self.assertTrue(mkv.tags[0].simpletags[1].name == 'DATE_RELEASED')
self.assertTrue(mkv.tags[0].simpletags[1].default == True)
self.assertTrue(mkv.tags[0].simpletags[1].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[1].string == '2010')
self.assertTrue(mkv.tags[0].simpletags[1].binary is None)
self.assertTrue(mkv.tags[0].simpletags[2].name == 'COMMENT')
self.assertTrue(mkv.tags[0].simpletags[2].default == True)
self.assertTrue(mkv.tags[0].simpletags[2].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[2].string == 'Matroska Validation File 6, random length to code the size of Clusters and Blocks, no Cues for seeking')
self.assertTrue(mkv.tags[0].simpletags[2].binary is None)
def test_test7(self):
stream = io.open(os.path.join(TEST_DIR, 'test7.mkv'), 'rb')
mkv = MKV(stream)
# info
self.assertTrue(mkv.info.title is None)
self.assertTrue(mkv.info.duration == timedelta(seconds=37, milliseconds=43))
self.assertTrue(mkv.info.date_utc == datetime(2010, 8, 21, 17, 0, 23))
self.assertTrue(mkv.info.muxing_app == 'libebml2 v0.10.1 + libmatroska2 v0.10.1')
self.assertTrue(mkv.info.writing_app == 'mkclean 0.5.5 r from libebml v1.0.0 + libmatroska v1.0.0 + mkvmerge v4.0.0 (\'The Stars were mine\') built on Jun 6 2010 16:18:42')
# video track
self.assertTrue(len(mkv.video_tracks) == 1)
self.assertTrue(mkv.video_tracks[0].type == VIDEO_TRACK)
self.assertTrue(mkv.video_tracks[0].number == 1)
self.assertTrue(mkv.video_tracks[0].name is None)
self.assertTrue(mkv.video_tracks[0].language == 'und')
self.assertTrue(mkv.video_tracks[0].enabled == True)
self.assertTrue(mkv.video_tracks[0].default == False)
self.assertTrue(mkv.video_tracks[0].forced == False)
self.assertTrue(mkv.video_tracks[0].lacing == False)
self.assertTrue(mkv.video_tracks[0].codec_id == 'V_MPEG4/ISO/AVC')
self.assertTrue(mkv.video_tracks[0].codec_name is None)
self.assertTrue(mkv.video_tracks[0].width == 1024)
self.assertTrue(mkv.video_tracks[0].height == 576)
self.assertTrue(mkv.video_tracks[0].interlaced == False)
self.assertTrue(mkv.video_tracks[0].stereo_mode is None)
self.assertTrue(mkv.video_tracks[0].crop == {})
self.assertTrue(mkv.video_tracks[0].display_width is None)
self.assertTrue(mkv.video_tracks[0].display_height is None)
self.assertTrue(mkv.video_tracks[0].display_unit is None)
self.assertTrue(mkv.video_tracks[0].aspect_ratio_type is None)
# audio track
self.assertTrue(len(mkv.audio_tracks) == 1)
self.assertTrue(mkv.audio_tracks[0].type == AUDIO_TRACK)
self.assertTrue(mkv.audio_tracks[0].number == 2)
self.assertTrue(mkv.audio_tracks[0].name is None)
self.assertTrue(mkv.audio_tracks[0].language == 'und')
self.assertTrue(mkv.audio_tracks[0].enabled == True)
self.assertTrue(mkv.audio_tracks[0].default == False)
self.assertTrue(mkv.audio_tracks[0].forced == False)
self.assertTrue(mkv.audio_tracks[0].lacing == True)
self.assertTrue(mkv.audio_tracks[0].codec_id == 'A_AAC')
self.assertTrue(mkv.audio_tracks[0].codec_name is None)
self.assertTrue(mkv.audio_tracks[0].sampling_frequency == 48000.0)
self.assertTrue(mkv.audio_tracks[0].channels == 2)
self.assertTrue(mkv.audio_tracks[0].output_sampling_frequency is None)
self.assertTrue(mkv.audio_tracks[0].bit_depth is None)
# subtitle track
self.assertTrue(len(mkv.subtitle_tracks) == 0)
# chapters
self.assertTrue(len(mkv.chapters) == 0)
# tags
self.assertTrue(len(mkv.tags) == 1)
self.assertTrue(len(mkv.tags[0].simpletags) == 3)
self.assertTrue(mkv.tags[0].simpletags[0].name == 'TITLE')
self.assertTrue(mkv.tags[0].simpletags[0].default == True)
self.assertTrue(mkv.tags[0].simpletags[0].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[0].string == 'Big Buck Bunny - test 7')
self.assertTrue(mkv.tags[0].simpletags[0].binary is None)
self.assertTrue(mkv.tags[0].simpletags[1].name == 'DATE_RELEASED')
self.assertTrue(mkv.tags[0].simpletags[1].default == True)
self.assertTrue(mkv.tags[0].simpletags[1].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[1].string == '2010')
self.assertTrue(mkv.tags[0].simpletags[1].binary is None)
self.assertTrue(mkv.tags[0].simpletags[2].name == 'COMMENT')
self.assertTrue(mkv.tags[0].simpletags[2].default == True)
self.assertTrue(mkv.tags[0].simpletags[2].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[2].string == 'Matroska Validation File 7, junk elements are present at the beggining or end of clusters, the parser should skip it. There is also a damaged element at 451418')
self.assertTrue(mkv.tags[0].simpletags[2].binary is None)
def test_test8(self):
stream = io.open(os.path.join(TEST_DIR, 'test8.mkv'), 'rb')
mkv = MKV(stream)
# info
self.assertTrue(mkv.info.title is None)
self.assertTrue(mkv.info.duration == timedelta(seconds=47, milliseconds=341))
self.assertTrue(mkv.info.date_utc == datetime(2010, 8, 21, 17, 22, 14))
self.assertTrue(mkv.info.muxing_app == 'libebml2 v0.10.1 + libmatroska2 v0.10.1')
self.assertTrue(mkv.info.writing_app == 'mkclean 0.5.5 r from libebml v1.0.0 + libmatroska v1.0.0 + mkvmerge v4.0.0 (\'The Stars were mine\') built on Jun 6 2010 16:18:42')
# video track
self.assertTrue(len(mkv.video_tracks) == 1)
self.assertTrue(mkv.video_tracks[0].type == VIDEO_TRACK)
self.assertTrue(mkv.video_tracks[0].number == 1)
self.assertTrue(mkv.video_tracks[0].name is None)
self.assertTrue(mkv.video_tracks[0].language == 'und')
self.assertTrue(mkv.video_tracks[0].enabled == True)
self.assertTrue(mkv.video_tracks[0].default == False)
self.assertTrue(mkv.video_tracks[0].forced == False)
self.assertTrue(mkv.video_tracks[0].lacing == False)
self.assertTrue(mkv.video_tracks[0].codec_id == 'V_MPEG4/ISO/AVC')
self.assertTrue(mkv.video_tracks[0].codec_name is None)
self.assertTrue(mkv.video_tracks[0].width == 1024)
self.assertTrue(mkv.video_tracks[0].height == 576)
self.assertTrue(mkv.video_tracks[0].interlaced == False)
self.assertTrue(mkv.video_tracks[0].stereo_mode is None)
self.assertTrue(mkv.video_tracks[0].crop == {})
self.assertTrue(mkv.video_tracks[0].display_width is None)
self.assertTrue(mkv.video_tracks[0].display_height is None)
self.assertTrue(mkv.video_tracks[0].display_unit is None)
self.assertTrue(mkv.video_tracks[0].aspect_ratio_type is None)
# audio track
self.assertTrue(len(mkv.audio_tracks) == 1)
self.assertTrue(mkv.audio_tracks[0].type == AUDIO_TRACK)
self.assertTrue(mkv.audio_tracks[0].number == 2)
self.assertTrue(mkv.audio_tracks[0].name is None)
self.assertTrue(mkv.audio_tracks[0].language == 'und')
self.assertTrue(mkv.audio_tracks[0].enabled == True)
self.assertTrue(mkv.audio_tracks[0].default == False)
self.assertTrue(mkv.audio_tracks[0].forced == False)
self.assertTrue(mkv.audio_tracks[0].lacing == True)
self.assertTrue(mkv.audio_tracks[0].codec_id == 'A_AAC')
self.assertTrue(mkv.audio_tracks[0].codec_name is None)
self.assertTrue(mkv.audio_tracks[0].sampling_frequency == 48000.0)
self.assertTrue(mkv.audio_tracks[0].channels == 2)
self.assertTrue(mkv.audio_tracks[0].output_sampling_frequency is None)
self.assertTrue(mkv.audio_tracks[0].bit_depth is None)
# subtitle track
self.assertTrue(len(mkv.subtitle_tracks) == 0)
# chapters
self.assertTrue(len(mkv.chapters) == 0)
# tags
self.assertTrue(len(mkv.tags) == 1)
self.assertTrue(len(mkv.tags[0].simpletags) == 3)
self.assertTrue(mkv.tags[0].simpletags[0].name == 'TITLE')
self.assertTrue(mkv.tags[0].simpletags[0].default == True)
self.assertTrue(mkv.tags[0].simpletags[0].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[0].string == 'Big Buck Bunny - test 8')
self.assertTrue(mkv.tags[0].simpletags[0].binary is None)
self.assertTrue(mkv.tags[0].simpletags[1].name == 'DATE_RELEASED')
self.assertTrue(mkv.tags[0].simpletags[1].default == True)
self.assertTrue(mkv.tags[0].simpletags[1].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[1].string == '2010')
self.assertTrue(mkv.tags[0].simpletags[1].binary is None)
self.assertTrue(mkv.tags[0].simpletags[2].name == 'COMMENT')
self.assertTrue(mkv.tags[0].simpletags[2].default == True)
self.assertTrue(mkv.tags[0].simpletags[2].language == 'und')
self.assertTrue(mkv.tags[0].simpletags[2].string == 'Matroska Validation File 8, audio missing between timecodes 6.019s and 6.360s')
self.assertTrue(mkv.tags[0].simpletags[2].binary is None)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(MKVTestCase))
return suite
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
```
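The suite above exercises the parser against the official Matroska validation files. For reference, a minimal standalone sketch of the same MKV API outside unittest, assuming the `enzyme` package that provides the `MKV` class used above and a local `test1.mkv`:

```python
# Hypothetical standalone usage of the MKV parser exercised by the tests above.
# Assumes `enzyme` exposes MKV at package level and that 'test1.mkv' exists locally.
import io

from enzyme import MKV

with io.open('test1.mkv', 'rb') as stream:
    mkv = MKV(stream)

print(mkv.info.duration)        # a datetime.timedelta, as asserted in the tests
for video in mkv.video_tracks:
    print(video.codec_id, video.width, video.height)
for audio in mkv.audio_tracks:
    print(audio.codec_id, audio.sampling_frequency, audio.channels)
```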
#### File: Shared/guessit/options.py
```python
import json
import os
import pkgutil
import shlex
from argparse import ArgumentParser
import six
def build_argument_parser():
"""
Builds the argument parser
:return: the argument parser
:rtype: ArgumentParser
"""
opts = ArgumentParser()
opts.add_argument(dest='filename', help='Filename or release name to guess', nargs='*')
naming_opts = opts.add_argument_group("Naming")
naming_opts.add_argument('-t', '--type', dest='type', default=None,
help='The suggested file type: movie, episode. If undefined, type will be guessed.')
naming_opts.add_argument('-n', '--name-only', dest='name_only', action='store_true', default=None,
help='Parse files as name only, considering "/" and "\\" like other separators.')
naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None,
help='If short date is found, consider the first digits as the year.')
naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None,
help='If short date is found, consider the second digits as the day.')
naming_opts.add_argument('-L', '--allowed-languages', action='append', dest='allowed_languages', default=None,
help='Allowed language (can be used multiple times)')
naming_opts.add_argument('-C', '--allowed-countries', action='append', dest='allowed_countries', default=None,
help='Allowed country (can be used multiple times)')
naming_opts.add_argument('-E', '--episode-prefer-number', action='store_true', dest='episode_prefer_number',
default=None,
help='Guess "serie.213.avi" as the episode 213. Without this option, '
'it will be guessed as season 2, episode 13')
naming_opts.add_argument('-T', '--expected-title', action='append', dest='expected_title', default=None,
help='Expected title to parse (can be used multiple times)')
naming_opts.add_argument('-G', '--expected-group', action='append', dest='expected_group', default=None,
help='Expected release group (can be used multiple times)')
input_opts = opts.add_argument_group("Input")
input_opts.add_argument('-f', '--input-file', dest='input_file', default=None,
help='Read filenames from an input text file. File should use UTF-8 charset.')
output_opts = opts.add_argument_group("Output")
output_opts.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=None,
help='Display debug output')
output_opts.add_argument('-P', '--show-property', dest='show_property', default=None,
help='Display the value of a single property (title, series, video_codec, year, ...)')
output_opts.add_argument('-a', '--advanced', dest='advanced', action='store_true', default=None,
help='Display advanced information for filename guesses, as json output')
output_opts.add_argument('-s', '--single-value', dest='single_value', action='store_true', default=None,
help='Keep only first value found for each property')
output_opts.add_argument('-l', '--enforce-list', dest='enforce_list', action='store_true', default=None,
help='Wrap each found value in a list even when property has a single value')
output_opts.add_argument('-j', '--json', dest='json', action='store_true', default=None,
help='Display information for filename guesses as json output')
output_opts.add_argument('-y', '--yaml', dest='yaml', action='store_true', default=None,
help='Display information for filename guesses as yaml output')
conf_opts = opts.add_argument_group("Configuration")
conf_opts.add_argument('-c', '--config', dest='config', action='append', default=None,
help='Filepath to the configuration file. Configuration contains the same options as '
'those command line options, but option names have "-" characters replaced with "_". '
'If not defined, guessit tries to read a default configuration file at '
'~/.guessit/options.(json|yml|yaml) and ~/.config/guessit/options.(json|yml|yaml). '
'Set to "false" to disable default configuration file loading.')
conf_opts.add_argument('--no-embedded-config', dest='no_embedded_config', action='store_true',
default=None,
help='Disable default configuration.')
information_opts = opts.add_argument_group("Information")
information_opts.add_argument('-p', '--properties', dest='properties', action='store_true', default=None,
help='Display properties that can be guessed.')
information_opts.add_argument('-V', '--values', dest='values', action='store_true', default=None,
help='Display property values that can be guessed.')
information_opts.add_argument('--version', dest='version', action='store_true', default=None,
help='Display the guessit version.')
return opts
def parse_options(options=None, api=False):
"""
Parse the given option string, argument list or dict into an options dict.
:param options: option string, argument list, options dict or None
:type options: str or list or dict or None
:param api: True when called from the API; a None value then yields an empty dict instead of reading sys.argv
:type api: bool
:return: options dict
:rtype: dict
"""
if isinstance(options, six.string_types):
args = shlex.split(options)
options = vars(argument_parser.parse_args(args))
elif options is None:
if api:
options = {}
else:
options = vars(argument_parser.parse_args())
elif not isinstance(options, dict):
options = vars(argument_parser.parse_args(options))
return options
argument_parser = build_argument_parser()
class ConfigurationException(Exception):
"""
Exception related to configuration file.
"""
pass
def load_config(options):
"""
Load configuration from configuration files, if any, and merge it into the given options.
:param options: options dict, typically the result of parse_options()
:type options: dict
:return: merged options dict
:rtype: dict
"""
config_files_enabled = True
custom_config_files = None
if options.get('config') is not None:
custom_config_files = options.get('config')
if not custom_config_files \
or not custom_config_files[0] \
or custom_config_files[0].lower() in ['0', 'no', 'false', 'disabled']:
config_files_enabled = False
configurations = []
if config_files_enabled:
home_directory = os.path.expanduser("~")
cwd = os.getcwd()
yaml_supported = False
try:
import yaml # pylint: disable=unused-variable
yaml_supported = True
except ImportError:
pass
config_file_locations = get_config_file_locations(home_directory, cwd, yaml_supported)
config_files = [f for f in config_file_locations if os.path.exists(f)]
if custom_config_files:
config_files = config_files + custom_config_files
for config_file in config_files:
config_file_options = load_config_file(config_file)
if config_file_options:
configurations.append(config_file_options)
if not options.get('no_embedded_config'):
embedded_options_data = pkgutil.get_data('guessit', 'config/options.json').decode("utf-8")
embedded_options = json.loads(embedded_options_data)
configurations.append(embedded_options)
if configurations:
configurations.append(options)
return merge_configurations(*configurations)
return options
def merge_configurations(*configurations):
"""
Merge configurations into a single options dict.
:param configurations:
:type configurations:
:return:
:rtype:
"""
merged = {}
for options in configurations:
pristine = options.get('pristine')
if pristine:
if pristine is True:
merged = {}
else:
for to_reset in pristine:
if to_reset in merged:
del merged[to_reset]
for (option, value) in options.items():
if value is not None and option != 'pristine':
if option in merged.keys() and isinstance(merged[option], list):
merged[option].extend(value)
elif isinstance(value, list):
merged[option] = list(value)
else:
merged[option] = value
return merged
def load_config_file(filepath):
"""
Load a configuration as an options dict.
Format of the file is given with filepath extension.
:param filepath:
:type filepath:
:return:
:rtype:
"""
if filepath.endswith('.json'):
with open(filepath) as config_file_data:
return json.load(config_file_data)
if filepath.endswith('.yaml') or filepath.endswith('.yml'):
try:
import yaml
with open(filepath) as config_file_data:
return yaml.load(config_file_data)
except ImportError: # pragma: no cover
raise ConfigurationException('Configuration file extension is not supported. '
'PyYAML should be installed to support "%s" file' % (
filepath,))
raise ConfigurationException('Configuration file extension is not supported for "%s" file.' % (filepath,))
def get_config_file_locations(homedir, cwd, yaml_supported=False):
"""
Get all possible locations for configuration file.
:param homedir: user home directory
:type homedir: basestring
:param cwd: current working directory
:type cwd: basestring
:param yaml_supported: True if YAML configuration files should be listed as well
:type yaml_supported: bool
:return: candidate configuration file paths
:rtype: list
"""
locations = []
configdirs = [(os.path.join(homedir, '.guessit'), 'options'),
(os.path.join(homedir, '.config', 'guessit'), 'options'),
(cwd, 'guessit.options')]
configexts = ['json']
if yaml_supported:
configexts.append('yaml')
configexts.append('yml')
for configdir in configdirs:
for configext in configexts:
locations.append(os.path.join(configdir[0], configdir[1] + '.' + configext))
return locations
```
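A short sketch of how `parse_options` and `load_config` compose, assuming the module is importable as `guessit.options`; the release name is made up and the printed values are illustrative:

```python
# Hypothetical usage of the helpers defined above.
from guessit.options import load_config, parse_options

# Parse a CLI-style option string into an options dict...
options = parse_options('-t episode -L en "Show.S01E03.720p.HDTV.x264-GRP.mkv"')
# ...then merge it with configuration files and the embedded defaults.
options = load_config(options)

print(options.get('type'))               # 'episode'
print(options.get('allowed_languages'))  # ['en'] (plus any values merged from config files)
```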
#### File: rules/common/numeral.py
```python
from rebulk.remodule import re
digital_numeral = r'\d{1,4}'
roman_numeral = r'(?=[MCDLXVI]+)M{0,4}(?:CM|CD|D?C{0,3})(?:XC|XL|L?X{0,3})(?:IX|IV|V?I{0,3})'
english_word_numeral_list = [
'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten',
'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty'
]
french_word_numeral_list = [
'zéro', 'un', 'deux', 'trois', 'quatre', 'cinq', 'six', 'sept', 'huit', 'neuf', 'dix',
'onze', 'douze', 'treize', 'quatorze', 'quinze', 'seize', 'dix-sept', 'dix-huit', 'dix-neuf', 'vingt'
]
french_alt_word_numeral_list = [
'zero', 'une', 'deux', 'trois', 'quatre', 'cinq', 'six', 'sept', 'huit', 'neuf', 'dix',
'onze', 'douze', 'treize', 'quatorze', 'quinze', 'seize', 'dixsept', 'dixhuit', 'dixneuf', 'vingt'
]
def __build_word_numeral(*args):
"""
Build word numeral regexp from list.
:param args: word lists to include in the regexp
:type args: list
:return: regexp string matching any word from the given lists
:rtype: str
"""
re_ = None
for word_list in args:
for word in word_list:
if not re_:
re_ = r'(?:(?=\w+)'
else:
re_ += '|'
re_ += word
re_ += ')'
return re_
word_numeral = __build_word_numeral(english_word_numeral_list, french_word_numeral_list, french_alt_word_numeral_list)
numeral = '(?:' + digital_numeral + '|' + roman_numeral + '|' + word_numeral + ')'
__romanNumeralMap = (
('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1)
)
__romanNumeralPattern = re.compile('^' + roman_numeral + '$')
def __parse_roman(value):
"""
Convert a Roman numeral to an integer
:param value: Value to parse
:type value: string
:return:
:rtype:
"""
if not __romanNumeralPattern.search(value):
raise ValueError('Invalid Roman numeral: %s' % value)
result = 0
index = 0
for num, integer in __romanNumeralMap:
while value[index:index + len(num)] == num:
result += integer
index += len(num)
return result
def __parse_word(value):
"""
Convert a word numeral to an integer
:param value: Value to parse
:type value: string
:return:
:rtype:
"""
for word_list in [english_word_numeral_list, french_word_numeral_list, french_alt_word_numeral_list]:
try:
return word_list.index(value.lower())
except ValueError:
pass
raise ValueError # pragma: no cover
_clean_re = re.compile(r'[^\d]*(\d+)[^\d]*')
def parse_numeral(value, int_enabled=True, roman_enabled=True, word_enabled=True, clean=True):
"""
Parse a numeric value into integer.
:param value: Value to parse. Can be an integer, roman numeral or word.
:type value: string
:param int_enabled:
:type int_enabled:
:param roman_enabled:
:type roman_enabled:
:param word_enabled:
:type word_enabled:
:param clean:
:type clean:
:return: Numeric value
:rtype: int
:raises ValueError: if the value cannot be parsed
"""
# pylint: disable=too-many-branches
if int_enabled:
try:
if clean:
match = _clean_re.match(value)
if match:
clean_value = match.group(1)
return int(clean_value)
return int(value)
except ValueError:
pass
if roman_enabled:
try:
if clean:
for word in value.split():
try:
return __parse_roman(word.upper())
except ValueError:
pass
return __parse_roman(value)
except ValueError:
pass
if word_enabled:
try:
if clean:
for word in value.split():
try:
return __parse_word(word)
except ValueError: # pragma: no cover
pass
return __parse_word(value) # pragma: no cover
except ValueError: # pragma: no cover
pass
raise ValueError('Invalid numeral: ' + value) # pragma: no cover
```
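A quick sketch of `parse_numeral` with the three supported notations, assuming the module lives at `guessit.rules.common.numeral`:

```python
# Hypothetical usage of parse_numeral defined above.
from guessit.rules.common.numeral import parse_numeral

print(parse_numeral('13'))      # 13 (digits)
print(parse_numeral('XIV'))     # 14 (roman numeral)
print(parse_numeral('twelve'))  # 12 (english word numeral)
```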
#### File: guessit/rules/__init__.py
```python
from rebulk import Rebulk
from .markers.path import path
from .markers.groups import groups
from .properties.episodes import episodes
from .properties.container import container
from .properties.format import format_
from .properties.video_codec import video_codec
from .properties.audio_codec import audio_codec
from .properties.screen_size import screen_size
from .properties.website import website
from .properties.date import date
from .properties.title import title
from .properties.episode_title import episode_title
from .properties.language import language
from .properties.country import country
from .properties.release_group import release_group
from .properties.streaming_service import streaming_service
from .properties.other import other
from .properties.size import size
from .properties.edition import edition
from .properties.cds import cds
from .properties.bonus import bonus
from .properties.film import film
from .properties.part import part
from .properties.crc import crc
from .properties.mimetype import mimetype
from .properties.type import type_
from .processors import processors
def rebulk_builder():
"""
Default builder for main Rebulk object used by api.
:return: Main Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk()
rebulk.rebulk(path())
rebulk.rebulk(groups())
rebulk.rebulk(episodes())
rebulk.rebulk(container())
rebulk.rebulk(format_())
rebulk.rebulk(video_codec())
rebulk.rebulk(audio_codec())
rebulk.rebulk(screen_size())
rebulk.rebulk(website())
rebulk.rebulk(date())
rebulk.rebulk(title())
rebulk.rebulk(episode_title())
rebulk.rebulk(language())
rebulk.rebulk(country())
rebulk.rebulk(release_group())
rebulk.rebulk(streaming_service())
rebulk.rebulk(other())
rebulk.rebulk(size())
rebulk.rebulk(edition())
rebulk.rebulk(cds())
rebulk.rebulk(bonus())
rebulk.rebulk(film())
rebulk.rebulk(part())
rebulk.rebulk(crc())
rebulk.rebulk(processors())
rebulk.rebulk(mimetype())
rebulk.rebulk(type_())
def customize_properties(properties):
"""
Customize default rebulk properties
"""
count = properties['count']
del properties['count']
properties['season_count'] = count
properties['episode_count'] = count
return properties
rebulk.customize_properties = customize_properties
return rebulk
```
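The builder above is what the public `guessit()` API instantiates internally; a sketch of driving it directly through rebulk, with an illustrative release name:

```python
# Hypothetical direct use of the default rule set built above.
from guessit.rules import rebulk_builder

rebulk = rebulk_builder()
matches = rebulk.matches('Show.S02E13.720p.HDTV.x264-GRP.mkv', {})
for match in matches:
    print(match.name, match.value)  # e.g. season 2, episode 13, screen_size 720p, ...
```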
#### File: rules/properties/audio_codec.py
```python
from rebulk.remodule import re
from rebulk import Rebulk, Rule, RemoveMatch
from ..common import dash
from ..common.validators import seps_before, seps_after
audio_properties = ['audio_codec', 'audio_profile', 'audio_channels']
def audio_codec():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True)
def audio_codec_priority(match1, match2):
"""
Gives priority to audio_codec
:param match1:
:type match1:
:param match2:
:type match2:
:return:
:rtype:
"""
if match1.name == 'audio_codec' and match2.name in ['audio_profile', 'audio_channels']:
return match2
if match1.name in ['audio_profile', 'audio_channels'] and match2.name == 'audio_codec':
return match1
return '__default__'
rebulk.defaults(name="audio_codec", conflict_solver=audio_codec_priority)
rebulk.regex("MP3", "LAME", r"LAME(?:\d)+-?(?:\d)+", value="MP3")
rebulk.regex('Dolby', 'DolbyDigital', 'Dolby-Digital', 'DD', 'AC3D?', value='AC3')
rebulk.regex("DolbyAtmos", "Dolby-Atmos", "Atmos", value="DolbyAtmos")
rebulk.string("AAC", value="AAC")
rebulk.string('EAC3', 'DDP', 'DD+', value="EAC3")
rebulk.string("Flac", value="FLAC")
rebulk.string("DTS", value="DTS")
rebulk.regex("True-?HD", value="TrueHD")
rebulk.defaults(name="audio_profile")
rebulk.string("HD", value="HD", tags="DTS")
rebulk.regex("HD-?MA", value="HDMA", tags="DTS")
rebulk.string("HE", value="HE", tags="AAC")
rebulk.string("LC", value="LC", tags="AAC")
rebulk.string("HQ", value="HQ", tags="AC3")
rebulk.defaults(name="audio_channels")
rebulk.regex(r'(7[\W_][01](?:ch)?)(?:[^\d]|$)', value='7.1', children=True)
rebulk.regex(r'(5[\W_][01](?:ch)?)(?:[^\d]|$)', value='5.1', children=True)
rebulk.regex(r'(2[\W_]0(?:ch)?)(?:[^\d]|$)', value='2.0', children=True)
rebulk.regex('7[01]', value='7.1', validator=seps_after, tags='weak-audio_channels')
rebulk.regex('5[01]', value='5.1', validator=seps_after, tags='weak-audio_channels')
rebulk.string('20', value='2.0', validator=seps_after, tags='weak-audio_channels')
rebulk.string('7ch', '8ch', value='7.1')
rebulk.string('5ch', '6ch', value='5.1')
rebulk.string('2ch', 'stereo', value='2.0')
rebulk.string('1ch', 'mono', value='1.0')
rebulk.rules(DtsRule, AacRule, Ac3Rule, AudioValidatorRule, HqConflictRule, AudioChannelsValidatorRule)
return rebulk
class AudioValidatorRule(Rule):
"""
Remove audio property matches that are neither surrounded by separators nor adjacent to another audio property
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
ret = []
audio_list = matches.range(predicate=lambda match: match.name in audio_properties)
for audio in audio_list:
if not seps_before(audio):
valid_before = matches.range(audio.start - 1, audio.start,
lambda match: match.name in audio_properties)
if not valid_before:
ret.append(audio)
continue
if not seps_after(audio):
valid_after = matches.range(audio.end, audio.end + 1,
lambda match: match.name in audio_properties)
if not valid_after:
ret.append(audio)
continue
return ret
class AudioProfileRule(Rule):
"""
Abstract rule to validate audio profiles
"""
priority = 64
dependency = AudioValidatorRule
consequence = RemoveMatch
def __init__(self, codec):
super(AudioProfileRule, self).__init__()
self.codec = codec
def when(self, matches, context):
profile_list = matches.named('audio_profile', lambda match: self.codec in match.tags)
ret = []
for profile in profile_list:
codec = matches.previous(profile, lambda match: match.name == 'audio_codec' and match.value == self.codec)
if not codec:
codec = matches.next(profile, lambda match: match.name == 'audio_codec' and match.value == self.codec)
if not codec:
ret.append(profile)
return ret
class DtsRule(AudioProfileRule):
"""
Rule to validate DTS profile
"""
def __init__(self):
super(DtsRule, self).__init__("DTS")
class AacRule(AudioProfileRule):
"""
Rule to validate AAC profile
"""
def __init__(self):
super(AacRule, self).__init__("AAC")
class Ac3Rule(AudioProfileRule):
"""
Rule to validate AC3 profile
"""
def __init__(self):
super(Ac3Rule, self).__init__("AC3")
class HqConflictRule(Rule):
"""
Solve conflict between HQ from other property and from audio_profile.
"""
dependency = [DtsRule, AacRule, Ac3Rule]
consequence = RemoveMatch
def when(self, matches, context):
hq_audio = matches.named('audio_profile', lambda match: match.value == 'HQ')
hq_audio_spans = [match.span for match in hq_audio]
hq_other = matches.named('other', lambda match: match.span in hq_audio_spans)
if hq_other:
return hq_other
class AudioChannelsValidatorRule(Rule):
"""
Remove weak audio_channels matches when the previous match is not an audio codec.
"""
priority = 128
consequence = RemoveMatch
def when(self, matches, context):
ret = []
for audio_channel in matches.tagged('weak-audio_channels'):
valid_before = matches.range(audio_channel.start - 1, audio_channel.start,
lambda match: match.name == 'audio_codec')
if not valid_before:
ret.append(audio_channel)
return ret
```
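Tying the patterns and rules above together through the public API; the release name is illustrative and the expected values follow the value names declared above:

```python
# Hypothetical end-to-end check of the audio properties declared above.
from guessit import guessit

result = guessit('Movie.2016.1080p.BluRay.DTS-HD.MA.7.1.x264-GRP.mkv')
print(result.get('audio_codec'))     # 'DTS'
print(result.get('audio_profile'))   # 'HDMA'
print(result.get('audio_channels'))  # '7.1'
```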
#### File: rules/properties/country.py
```python
import babelfish
from rebulk import Rebulk
from ..common.words import COMMON_WORDS, iter_words
def country():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().defaults(name='country')
rebulk.functional(find_countries,
# Prefer language and any other property over country if not US or GB.
conflict_solver=lambda match, other: match
if other.name != 'language' or match.value not in [babelfish.Country('US'),
babelfish.Country('GB')]
else other,
properties={'country': [None]})
return rebulk
COUNTRIES_SYN = {'ES': ['españa'],
'GB': ['UK'],
'BR': ['brazilian', 'bra'],
'CA': ['québec', 'quebec', 'qc'],
# FIXME: this one is a bit of a stretch, not sure how to do it properly, though...
'MX': ['Latinoamérica', 'latin america']}
class GuessitCountryConverter(babelfish.CountryReverseConverter): # pylint: disable=missing-docstring
def __init__(self):
self.guessit_exceptions = {}
for alpha2, synlist in COUNTRIES_SYN.items():
for syn in synlist:
self.guessit_exceptions[syn.lower()] = alpha2
@property
def codes(self): # pylint: disable=missing-docstring
return (babelfish.country_converters['name'].codes |
frozenset(babelfish.COUNTRIES.values()) |
frozenset(self.guessit_exceptions.keys()))
def convert(self, alpha2):
if alpha2 == 'GB':
return 'UK'
return str(babelfish.Country(alpha2))
def reverse(self, name): # pylint:disable=arguments-differ
# exceptions come first, as they need to override a potential match
# with any of the other guessers
try:
return self.guessit_exceptions[name.lower()]
except KeyError:
pass
try:
return babelfish.Country(name.upper()).alpha2
except ValueError:
pass
for conv in [babelfish.Country.fromname]:
try:
return conv(name).alpha2
except babelfish.CountryReverseError:
pass
raise babelfish.CountryReverseError(name)
babelfish.country_converters['guessit'] = GuessitCountryConverter()
def is_allowed_country(country_object, context=None):
"""
Check if country is allowed.
"""
if context and context.get('allowed_countries'):
allowed_countries = context.get('allowed_countries')
return country_object.name.lower() in allowed_countries or country_object.alpha2.lower() in allowed_countries
return True
def find_countries(string, context=None):
"""
Find countries in given string.
"""
ret = []
for word_match in iter_words(string.strip().lower()):
word = word_match.value
if word.lower() in COMMON_WORDS:
continue
try:
country_object = babelfish.Country.fromguessit(word)
if is_allowed_country(country_object, context):
ret.append((word_match.span[0], word_match.span[1], {'value': country_object}))
except babelfish.Error:
continue
return ret
```
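Because importing the module registers the 'guessit' converter with babelfish, the synonyms from COUNTRIES_SYN resolve directly; a small sketch, assuming the module path `guessit.rules.properties.country`:

```python
# Hypothetical direct use of the converter and finder defined above.
import babelfish

from guessit.rules.properties.country import find_countries

# The synonym table maps 'UK' to the GB country code.
print(babelfish.Country.fromguessit('UK').alpha2)  # 'GB'

# find_countries() yields (start, end, {'value': Country}) tuples for rebulk.
print(find_countries('The.Office.UK.S01E01.avi'))
```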
#### File: rules/properties/crc.py
```python
from rebulk.remodule import re
from rebulk import Rebulk
from ..common.validators import seps_surround
def crc():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE)
rebulk.defaults(validator=seps_surround)
rebulk.regex('(?:[a-fA-F]|[0-9]){8}', name='crc32',
conflict_solver=lambda match, other: match
if other.name in ['episode', 'season']
else '__default__')
rebulk.functional(guess_idnumber, name='uuid',
conflict_solver=lambda match, other: match
if other.name in ['episode', 'season']
else '__default__')
return rebulk
_DIGIT = 0
_LETTER = 1
_OTHER = 2
_idnum = re.compile(r'(?P<uuid>[a-zA-Z0-9-]{20,})') # 1.0, (0, 0))
def guess_idnumber(string):
"""
Guess id number function
:param string:
:type string:
:return:
:rtype:
"""
# pylint:disable=invalid-name
ret = []
matches = list(_idnum.finditer(string))
for match in matches:
result = match.groupdict()
switch_count = 0
switch_letter_count = 0
letter_count = 0
last_letter = None
last = _LETTER
for c in result['uuid']:
if c in '0123456789':
ci = _DIGIT
elif c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
ci = _LETTER
if c != last_letter:
switch_letter_count += 1
last_letter = c
letter_count += 1
else:
ci = _OTHER
if ci != last:
switch_count += 1
last = ci
# only return the result as probable if we alternate often between
# char type (more likely for hash values than for common words)
switch_ratio = float(switch_count) / len(result['uuid'])
letters_ratio = (float(switch_letter_count) / letter_count) if letter_count > 0 else 1
if switch_ratio > 0.4 and letters_ratio > 0.4:
ret.append(match.span())
return ret
```
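The uuid heuristic keeps only candidates that switch often between digits and letters, which is typical of hashes but not of ordinary words; a quick sketch with made-up strings, assuming the module path `guessit.rules.properties.crc`:

```python
# Hypothetical check of the heuristic above (both strings are made up).
from guessit.rules.properties.crc import guess_idnumber

# A hash-like token (>= 20 chars, frequent digit/letter switches) is reported...
print(guess_idnumber('1a2b3c4d5e6f7a8b9c0d1e2f'))   # [(0, 24)]

# ...while a long plain word is rejected.
print(guess_idnumber('averyveryverylongwordhere'))  # []
```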
#### File: rules/properties/edition.py
```python
from rebulk.remodule import re
from rebulk import Rebulk
from ..common import dash
from ..common.validators import seps_surround
def edition():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True)
rebulk.defaults(name='edition', validator=seps_surround)
rebulk.regex('collector', 'collector-edition', 'edition-collector', value='Collector Edition')
rebulk.regex('special-edition', 'edition-special', value='Special Edition',
conflict_solver=lambda match, other: other
if other.name == 'episode_details' and other.value == 'Special'
else '__default__')
rebulk.string('se', value='Special Edition', tags='has-neighbor')
rebulk.regex('criterion-edition', 'edition-criterion', value='Criterion Edition')
rebulk.regex('deluxe', 'deluxe-edition', 'edition-deluxe', value='Deluxe Edition')
rebulk.regex('limited', 'limited-edition', value='Limited Edition', tags=['has-neighbor', 'release-group-prefix'])
rebulk.regex(r'theatrical-cut', r'theatrical-edition', r'theatrical', value='Theatrical Edition')
rebulk.regex(r"director'?s?-cut", r"director'?s?-cut-edition", r"edition-director'?s?-cut", 'DC',
value="Director's Cut")
rebulk.regex('extended', 'extended-?cut', 'extended-?version',
value='Extended', tags=['has-neighbor', 'release-group-prefix'])
rebulk.regex('alternat(e|ive)(?:-?Cut)?', value='Alternative Cut', tags=['has-neighbor', 'release-group-prefix'])
for value in ('Remastered', 'Uncensored', 'Uncut', 'Unrated'):
rebulk.string(value, value=value, tags=['has-neighbor', 'release-group-prefix'])
rebulk.string('Festival', value='Festival', tags=['has-neighbor-before', 'has-neighbor-after'])
return rebulk
```
#### File: rules/properties/film.py
```python
from rebulk import Rebulk, AppendMatch, Rule
from rebulk.remodule import re
from ..common.formatters import cleanup
from ..common.validators import seps_surround
def film():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, validate_all=True, validator={'__parent__': seps_surround})
rebulk.regex(r'f(\d{1,2})', name='film', private_parent=True, children=True, formatter=int)
rebulk.rules(FilmTitleRule)
return rebulk
class FilmTitleRule(Rule):
"""
Rule to find the film_title (the hole between the start of the path and the film property).
"""
consequence = AppendMatch
properties = {'film_title': [None]}
def when(self, matches, context):
bonus_number = matches.named('film', lambda match: not match.private, index=0)
if bonus_number:
filepath = matches.markers.at_match(bonus_number, lambda marker: marker.name == 'path', 0)
hole = matches.holes(filepath.start, bonus_number.start + 1, formatter=cleanup, index=0)
if hole and hole.value:
hole.name = 'film_title'
return hole
```
#### File: rules/properties/video_codec.py
```python
from rebulk.remodule import re
from rebulk import Rebulk, Rule, RemoveMatch
from ..common import dash
from ..common.validators import seps_after, seps_before, seps_surround
def video_codec():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True)
rebulk.defaults(name="video_codec", tags=['format-suffix', 'streaming_service.suffix'])
rebulk.regex(r"Rv\d{2}", value="Real")
rebulk.regex("Mpeg2", value="Mpeg2")
rebulk.regex("DVDivX", "DivX", value="DivX")
rebulk.regex("XviD", value="XviD")
rebulk.regex("[hx]-?264(?:-?AVC(HD)?)?", "MPEG-?4(?:-?AVC(HD)?)", "AVC(?:HD)?", value="h264")
rebulk.regex("[hx]-?265(?:-?HEVC)?", "HEVC", value="h265")
rebulk.regex('(?P<video_codec>hevc)(?P<video_profile>10)', value={'video_codec': 'h265', 'video_profile': '10bit'},
tags=['video-codec-suffix'], children=True)
# http://blog.mediacoderhq.com/h264-profiles-and-levels/
# http://fr.wikipedia.org/wiki/H.264
rebulk.defaults(name="video_profile", validator=seps_surround)
rebulk.regex('10.?bits?', 'Hi10P?', 'YUV420P10', value='10bit')
rebulk.regex('8.?bits?', value='8bit')
rebulk.string('BP', value='BP', tags='video_profile.rule')
rebulk.string('XP', 'EP', value='XP', tags='video_profile.rule')
rebulk.string('MP', value='MP', tags='video_profile.rule')
rebulk.string('HP', 'HiP', value='HP', tags='video_profile.rule')
rebulk.regex('Hi422P', value='Hi422P', tags='video_profile.rule')
rebulk.regex('Hi444PP', value='Hi444PP', tags='video_profile.rule')
rebulk.string('DXVA', value='DXVA', name='video_api')
rebulk.rules(ValidateVideoCodec, VideoProfileRule)
return rebulk
class ValidateVideoCodec(Rule):
"""
Validate video_codec matches: they must be surrounded by separators or adjacent to a match tagged as a codec prefix/suffix
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
ret = []
for codec in matches.named('video_codec'):
if not seps_before(codec) and \
not matches.at_index(codec.start - 1, lambda match: 'video-codec-prefix' in match.tags):
ret.append(codec)
continue
if not seps_after(codec) and \
not matches.at_index(codec.end + 1, lambda match: 'video-codec-suffix' in match.tags):
ret.append(codec)
continue
return ret
class VideoProfileRule(Rule):
"""
Rule to validate video_profile
"""
consequence = RemoveMatch
def when(self, matches, context):
profile_list = matches.named('video_profile', lambda match: 'video_profile.rule' in match.tags)
ret = []
for profile in profile_list:
codec = matches.previous(profile, lambda match: match.name == 'video_codec')
if not codec:
codec = matches.next(profile, lambda match: match.name == 'video_codec')
if not codec:
ret.append(profile)
return ret
```
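And the net effect through the public API, with an illustrative release name (expected values follow the ones declared above):

```python
# Hypothetical end-to-end check of the video properties declared above.
from guessit import guessit

result = guessit('Anime.Ep01.720p.Hi10P.x264-GRP.mkv')
print(result.get('video_codec'))    # 'h264'
print(result.get('video_profile'))  # '10bit'
```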
#### File: guessit/test/test_api_unicode_literals.py
```python
from __future__ import unicode_literals
import os
import pytest
import six
from ..api import guessit, properties, GuessitException
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def test_default():
ret = guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
assert ret and 'title' in ret
def test_forced_unicode():
ret = guessit(u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
assert ret and 'title' in ret and isinstance(ret['title'], six.text_type)
def test_forced_binary():
ret = guessit(b'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
assert ret and 'title' in ret and isinstance(ret['title'], six.binary_type)
def test_unicode_japanese():
ret = guessit('[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi')
assert ret and 'title' in ret
def test_unicode_japanese_options():
ret = guessit("[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": ["阿维达"]})
assert ret and 'title' in ret and ret['title'] == "阿维达"
def test_forced_unicode_japanese_options():
ret = guessit(u"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [u"阿维达"]})
assert ret and 'title' in ret and ret['title'] == u"阿维达"
# TODO: This doesn't compile on python 3, but should be tested on python 2.
"""
if six.PY2:
def test_forced_binary_japanese_options():
ret = guessit(b"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [b"阿维达"]})
assert ret and 'title' in ret and ret['title'] == b"阿维达"
"""
def test_properties():
props = properties()
assert 'video_codec' in props.keys()
def test_exception():
with pytest.raises(GuessitException) as excinfo:
guessit(object())
assert "An internal error has occured in guessit" in str(excinfo.value)
assert "Guessit Exception Report" in str(excinfo.value)
assert "Please report at https://github.com/guessit-io/guessit/issues" in str(excinfo.value)
```
#### File: Shared/json_tricks/decoders.py
```python
from datetime import datetime, date, time, timedelta
from fractions import Fraction
from importlib import import_module
from collections import OrderedDict
from decimal import Decimal
from logging import warning
from json_tricks import NoPandasException, NoNumpyException
class DuplicateJsonKeyException(Exception):
""" Trying to load a json map which contains duplicate keys, but allow_duplicates is False """
class TricksPairHook(object):
"""
Hook that converts json maps to the appropriate python type (dict or OrderedDict)
and then runs any number of hooks on the individual maps.
"""
def __init__(self, ordered=True, obj_pairs_hooks=None, allow_duplicates=True):
"""
:param ordered: True if maps should retain their ordering.
:param obj_pairs_hooks: An iterable of hooks to apply to elements.
:param allow_duplicates: True if duplicate keys are allowed (the last value wins); otherwise DuplicateJsonKeyException is raised.
"""
self.map_type = OrderedDict
if not ordered:
self.map_type = dict
self.obj_pairs_hooks = []
if obj_pairs_hooks:
self.obj_pairs_hooks = list(obj_pairs_hooks)
self.allow_duplicates = allow_duplicates
def __call__(self, pairs):
if not self.allow_duplicates:
known = set()
for key, value in pairs:
if key in known:
raise DuplicateJsonKeyException(('Trying to load a json map which contains a' +
' duplicate key "{0:}" (but allow_duplicates is False)').format(key))
known.add(key)
map = self.map_type(pairs)
for hook in self.obj_pairs_hooks:
map = hook(map)
return map
def json_date_time_hook(dct):
"""
Convert an encoded date, time, datetime or timedelta to its python representation, including optional timezone.
:param dct: (dict) json encoded date, time, datetime or timedelta
:return: (date/time/datetime/timedelta obj) python representation of the above
"""
def get_tz(dct):
if not 'tzinfo' in dct:
return None
try:
import pytz
except ImportError as err:
raise ImportError(('Tried to load a json object which has a timezone-aware (date)time. '
'However, `pytz` could not be imported, so the object could not be loaded. '
'Error: {0:}').format(str(err)))
return pytz.timezone(dct['tzinfo'])
if isinstance(dct, dict):
if '__date__' in dct:
return date(year=dct.get('year', 0), month=dct.get('month', 0), day=dct.get('day', 0))
elif '__time__' in dct:
tzinfo = get_tz(dct)
return time(hour=dct.get('hour', 0), minute=dct.get('minute', 0), second=dct.get('second', 0),
microsecond=dct.get('microsecond', 0), tzinfo=tzinfo)
elif '__datetime__' in dct:
tzinfo = get_tz(dct)
return datetime(year=dct.get('year', 0), month=dct.get('month', 0), day=dct.get('day', 0),
hour=dct.get('hour', 0), minute=dct.get('minute', 0), second=dct.get('second', 0),
microsecond=dct.get('microsecond', 0), tzinfo=tzinfo)
elif '__timedelta__' in dct:
return timedelta(days=dct.get('days', 0), seconds=dct.get('seconds', 0),
microseconds=dct.get('microseconds', 0))
return dct
def json_complex_hook(dct):
"""
Convert an encoded complex number to its Python representation.
:param dct: (dict) json encoded complex number (__complex__)
:return: python complex number
"""
if isinstance(dct, dict):
if '__complex__' in dct:
parts = dct['__complex__']
assert len(parts) == 2
return parts[0] + parts[1] * 1j
return dct
def numeric_types_hook(dct):
if isinstance(dct, dict):
if '__decimal__' in dct:
return Decimal(dct['__decimal__'])
if '__fraction__' in dct:
return Fraction(numerator=dct['numerator'], denominator=dct['denominator'])
return dct
class ClassInstanceHook(object):
"""
This hook tries to convert json encoded by class_instance_encoder back to its original instance.
It only works if the environment is the same, e.g. the class is similarly importable and hasn't changed.
"""
def __init__(self, cls_lookup_map=None):
self.cls_lookup_map = cls_lookup_map or {}
def __call__(self, dct):
if isinstance(dct, dict) and '__instance_type__' in dct:
mod, name = dct['__instance_type__']
attrs = dct['attributes']
if mod is None:
try:
Cls = getattr((__import__('__main__')), name)
except (ImportError, AttributeError) as err:
if not name in self.cls_lookup_map:
raise ImportError(('class {0:s} seems to have been exported from the main file, which means '
'it has no module/import path set; you need to provide cls_lookup_map which maps names '
'to classes').format(name))
Cls = self.cls_lookup_map[name]
else:
imp_err = None
try:
module = import_module('{0:}'.format(mod))
except ImportError as err:
imp_err = ('encountered import error "{0:}" while importing "{1:}" to decode a json file; perhaps '
'it was encoded in a different environment where {1:}.{2:} was available').format(err, mod, name)
else:
if not hasattr(module, name):
imp_err = 'imported "{0:}" but could not find "{1:}" inside while decoding a json file (found {2:})'.format(
module, name, ', '.join(attr for attr in dir(module) if not attr.startswith('_')))
Cls = getattr(module, name)
if imp_err:
if name in self.cls_lookup_map:
Cls = self.cls_lookup_map[name]
else:
raise ImportError(imp_err)
try:
obj = Cls.__new__(Cls)
except TypeError:
raise TypeError(('problem while decoding instance of "{0:s}"; this instance has a special '
'__new__ method and can\'t be restored').format(name))
if hasattr(obj, '__json_decode__'):
obj.__json_decode__(**attrs)
else:
obj.__dict__ = dict(attrs)
return obj
return dct
def json_set_hook(dct):
"""
Convert an encoded set to its Python representation.
"""
if isinstance(dct, dict):
if '__set__' in dct:
return set((tuple(item) if isinstance(item, list) else item) for item in dct['__set__'])
return dct
def pandas_hook(dct):
if '__pandas_dataframe__' in dct or '__pandas_series__' in dct:
# todo: this is experimental
if not getattr(pandas_hook, '_warned', False):
pandas_hook._warned = True
warning('Pandas loading support in json-tricks is experimental and may change in future versions.')
if '__pandas_dataframe__' in dct:
try:
from pandas import DataFrame
except ImportError:
raise NoPandasException('Trying to decode a map which appears to represent a pandas data structure, but pandas appears not to be installed.')
from numpy import dtype, array
meta = dct.pop('__pandas_dataframe__')
indx = dct.pop('index') if 'index' in dct else None
dtypes = dict((colname, dtype(tp)) for colname, tp in zip(meta['column_order'], meta['types']))
data = OrderedDict()
for name, col in dct.items():
data[name] = array(col, dtype=dtypes[name])
return DataFrame(
data=data,
index=indx,
columns=meta['column_order'],
# mixed `dtypes` argument not supported, so use dict of numpy arrays
)
elif '__pandas_series__' in dct:
from pandas import Series
from numpy import dtype, array
meta = dct.pop('__pandas_series__')
indx = dct.pop('index') if 'index' in dct else None
return Series(
data=dct['data'],
index=indx,
name=meta['name'],
dtype=dtype(meta['type']),
)
return dct
def nopandas_hook(dct):
if isinstance(dct, dict) and ('__pandas_dataframe__' in dct or '__pandas_series__' in dct):
raise NoPandasException(('Trying to decode a map which appears to represent a pandas '
'data structure, but pandas support is not enabled, perhaps it is not installed.'))
return dct
def json_numpy_obj_hook(dct):
"""
Restore any numpy arrays previously encoded by NumpyEncoder to their proper
shape, data type and data.
:param dct: (dict) json encoded ndarray
:return: (ndarray) if input was an encoded ndarray
"""
if isinstance(dct, dict) and '__ndarray__' in dct:
try:
from numpy import asarray
import numpy as nptypes
except ImportError:
raise NoNumpyException('Trying to decode a map which appears to represent a numpy '
'array, but numpy appears not to be installed.')
order = 'A'
if 'Corder' in dct:
order = 'C' if dct['Corder'] else 'F'
if dct['shape']:
return asarray(dct['__ndarray__'], dtype=dct['dtype'], order=order)
else:
dtype = getattr(nptypes, dct['dtype'])
return dtype(dct['__ndarray__'])
return dct
def json_nonumpy_obj_hook(dct):
"""
This hook has no effect except to check if you're trying to decode numpy arrays without support, and give you a useful message.
"""
if isinstance(dct, dict) and '__ndarray__' in dct:
raise NoNumpyException(('Trying to decode a map which appears to represent a numpy array, '
'but numpy support is not enabled, perhaps it is not installed.'))
return dct
```
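A minimal usage sketch for the hooks above, wired into the standard `json` module; the encoded layout (a marker key next to flat fields) is inferred from the decoder code itself rather than taken from json_tricks' documented wire format.
```python
import json

# Chain the date/time and complex hooks behind an ordered pair hook.
hook = TricksPairHook(ordered=True, obj_pairs_hooks=[json_date_time_hook, json_complex_hook])
doc = ('{"when": {"__date__": null, "year": 2020, "month": 5, "day": 17},'
       ' "z": {"__complex__": [1.0, 2.0]}}')
data = json.loads(doc, object_pairs_hook=hook)
print(type(data).__name__, data['when'], data['z'])   # OrderedDict 2020-05-17 (1+2j)
```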
#### File: plex_activity/sources/base.py
```python
from pyemitter import Emitter
from threading import Thread
import logging
log = logging.getLogger(__name__)
class Source(Emitter):
name = None
def __init__(self):
self.thread = Thread(target=self._run_wrapper)
def start(self):
self.thread.start()
def run(self):
pass
def _run_wrapper(self):
try:
self.run()
except Exception as ex:
log.error('Exception raised in "%s" activity source: %s', self.name, ex, exc_info=True)
```
#### File: s_logging/parsers/base.py
```python
from plex.lib.six.moves import urllib_parse as urlparse
from plex_activity.core.helpers import str_format
from pyemitter import Emitter
import logging
import re
log = logging.getLogger(__name__)
LOG_PATTERN = r'^.*?\[\w+\]\s\w+\s-\s{message}$'
REQUEST_HEADER_PATTERN = str_format(LOG_PATTERN, message=r"Request: (\[(?P<address>.*?):(?P<port>\d+)[^]]*\]\s)?{method} {path}.*?")
IGNORE_PATTERNS = [
r'error parsing allowedNetworks.*?',
r'Comparing request from.*?',
r'(Auth: )?We found auth token (.*?), enabling token-based authentication\.',
r'(Auth: )?Came in with a super-token, authorization succeeded\.',
r'(Auth: )?Refreshing tokens inside the token-based authentication filter\.',
r'\[Now\] Updated play state for .*?',
r'Play progress on .*? - got played .*? ms by account .*?!',
r'(Statistics: )?\(.*?\) Reporting active playback in state \d+ of type \d+ \(.*?\) for account \d+',
r'Request: \[.*?\] (GET|PUT) /video/:/transcode/.*?',
r'Received transcode session ping for session .*?'
]
IGNORE_REGEX = re.compile(str_format(LOG_PATTERN, message='(%s)' % ('|'.join('(%s)' % x for x in IGNORE_PATTERNS))), re.IGNORECASE)
PARAM_REGEX = re.compile(str_format(LOG_PATTERN, message=r' \* (?P<key>.*?) =\> (?P<value>.*?)'), re.IGNORECASE)
class Parser(Emitter):
def __init__(self, core):
self.core = core
def read_parameters(self, *match_functions):
match_functions = [self.parameter_match] + list(match_functions)
info = {}
while True:
line = self.core.read_line_retry(timeout=5)
if not line:
log.info('Unable to read log file')
return {}
# Run through each match function to find a result
match = None
for func in match_functions:
match = func(line)
if match is not None:
break
# Update info dict with result, otherwise finish reading
if match:
info.update(match)
elif match is None and IGNORE_REGEX.match(line.strip()) is None:
log.debug('break on "%s"', line.strip())
break
return info
def process(self, line):
raise NotImplementedError()
@staticmethod
def parameter_match(line):
match = PARAM_REGEX.match(line.strip())
if not match:
return None
match = match.groupdict()
return {match['key']: match['value']}
@staticmethod
def regex_match(regex, line):
match = regex.match(line.strip())
if not match:
return None
return match.groupdict()
@staticmethod
def query(match, value):
if not value:
return
try:
parameters = urlparse.parse_qsl(value, strict_parsing=True)
except ValueError:
return
for key, value in parameters:
match.setdefault(key, value)
```
#### File: plex/core/idict.py
```python
from plex.lib.six import string_types
class idict(dict):
def __init__(self, initial=None):
if initial:
self.update(initial)
def get(self, k, d=None):
if isinstance(k, string_types):
k = k.lower()
if super(idict, self).__contains__(k):
return self[k]
return d
def update(self, E=None, **F):
if E:
if hasattr(E, 'keys'):
# Update with `E` dictionary
for k in E:
self[k] = E[k]
else:
# Update with `E` items
for (k, v) in E:
self[k] = v
# Update with `F` dictionary
for k in F:
self[k] = F[k]
def __contains__(self, k):
if isinstance(k, string_types):
k = k.lower()
return super(idict, self).__contains__(k)
def __delitem__(self, k):
if isinstance(k, string_types):
k = k.lower()
super(idict, self).__delitem__(k)
def __getitem__(self, k):
if isinstance(k, string_types):
k = k.lower()
return super(idict, self).__getitem__(k)
def __setitem__(self, k, value):
if isinstance(k, string_types):
k = k.lower()
super(idict, self).__setitem__(k, value)
```
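A quick sketch of the case-insensitive dict above; keys are lower-cased both on write and on lookup.
```python
headers = idict({'Content-Type': 'application/json'})
assert headers['content-type'] == 'application/json'
assert 'CONTENT-TYPE' in headers

headers.update({'X-Token': 'abc'}, accept='text/html')
assert headers.get('Accept') == 'text/html'
```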
#### File: plex/objects/detail.py
```python
from plex.objects.core.base import Descriptor, Property
from plex.objects.container import Container
class Detail(Container):
myplex = Property(resolver=lambda: Detail.construct_myplex)
transcoder = Property(resolver=lambda: Detail.construct_transcoder)
friendly_name = Property('friendlyName')
machine_identifier = Property('machineIdentifier')
version = Property
platform = Property
platform_version = Property('platformVersion')
allow_camera_upload = Property('allowCameraUpload', [int, bool])
allow_channel_access = Property('allowChannelAccess', [int, bool])
allow_sync = Property('allowSync', [int, bool])
certificate = Property(type=[int, bool])
multiuser = Property(type=[int, bool])
sync = Property(type=[int, bool])
start_state = Property('startState')
silverlight = Property('silverlightInstalled', [int, bool])
soundflower = Property('soundflowerInstalled', [int, bool])
flash = Property('flashInstalled', [int, bool])
webkit = Property(type=[int, bool])
cookie_parameters = Property('requestParametersInCookie', [int, bool])
@staticmethod
def construct_myplex(client, node):
return MyPlexDetail.construct(client, node, child=True)
@staticmethod
def construct_transcoder(client, node):
return TranscoderDetail.construct(client, node, child=True)
class MyPlexDetail(Descriptor):
enabled = Property('myPlex', type=bool)
username = Property('myPlexUsername')
mapping_state = Property('myPlexMappingState')
signin_state = Property('myPlexSigninState')
subscription = Property('myPlexSubscription', [int, bool])
class TranscoderDetail(Descriptor):
audio = Property('transcoderAudio', [int, bool])
video = Property('transcoderVideo', [int, bool])
video_bitrates = Property('transcoderVideoBitrates')
video_qualities = Property('transcoderVideoQualities')
video_resolutions = Property('transcoderVideoResolutions')
active_video_sessions = Property('transcoderActiveVideoSessions', int)
```
#### File: library/extra/director.py
```python
from plex.objects.core.base import Descriptor, Property
class Director(Descriptor):
id = Property(type=int)
tag = Property
@classmethod
def from_node(cls, client, node):
return cls.construct(client, cls.helpers.find(node, 'Director'), child=True)
```
#### File: library/metadata/album.py
```python
from plex.objects.core.base import Property
from plex.objects.directory import Directory
from plex.objects.library.container import ChildrenContainer
from plex.objects.library.extra.genre import Genre
from plex.objects.library.metadata.base import Metadata
from plex.objects.library.metadata.artist import Artist
from plex.objects.mixins.rate import RateMixin
class Album(Directory, Metadata, RateMixin):
artist = Property(resolver=lambda: Album.construct_artist)
genres = Property(resolver=lambda: Genre.from_node)
index = Property(type=int)
year = Property(type=int)
originally_available_at = Property('originallyAvailableAt')
track_count = Property('leafCount', int)
viewed_track_count = Property('viewedLeafCount', int)
def children(self):
return self.client['library/metadata'].children(self.rating_key)
@staticmethod
def construct_artist(client, node):
attribute_map = {
'key': 'parentKey',
'ratingKey': 'parentRatingKey',
'title': 'parentTitle',
'thumb': 'parentThumb'
}
return Artist.construct(client, node, attribute_map, child=True)
class AlbumChildrenContainer(ChildrenContainer):
artist = Property(resolver=lambda: AlbumChildrenContainer.construct_artist)
album = Property(resolver=lambda: AlbumChildrenContainer.construct_album)
key = Property
@staticmethod
def construct_artist(client, node):
attribute_map = {
'title': 'grandparentTitle'
}
return Artist.construct(client, node, attribute_map, child=True)
@staticmethod
def construct_album(client, node):
attribute_map = {
'index': 'parentIndex',
'title': 'parentTitle',
'year' : 'parentYear'
}
return Album.construct(client, node, attribute_map, child=True)
def __iter__(self):
for item in super(ChildrenContainer, self).__iter__():
item.artist = self.artist
item.album = self.album
yield item
```
#### File: objects/library/video.py
```python
from plex.objects.core.base import Property
from plex.objects.directory import Directory
from plex.objects.library.extra.director import Director
from plex.objects.library.extra.writer import Writer
from plex.objects.library.media import Media
from plex.objects.mixins.session import SessionMixin
class Video(Directory, SessionMixin):
director = Property(resolver=lambda: Director.from_node)
media = Property(resolver=lambda: Media.from_node)
writers = Property(resolver=lambda: Writer.from_node)
view_count = Property('viewCount', type=int)
view_offset = Property('viewOffset', type=int)
chapter_source = Property('chapterSource')
duration = Property(type=int)
@property
def seen(self):
return self.view_count and self.view_count >= 1
```
#### File: Shared/pyga/__init__.py
```python
from pyga.requests import Q
def shutdown():
'''
Fire all stored GIF requests One by One.
You should call this if you set Config.queue_requests = True
'''
for func in Q.REQ_ARRAY:
    func()
```
#### File: Shared/pysrt/srtitem.py
```python
from pysrt.srtexc import InvalidItem, InvalidIndex
from pysrt.srttime import SubRipTime
from pysrt.comparablemixin import ComparableMixin
from pysrt.compat import str, is_py2
import re
class SubRipItem(ComparableMixin):
"""
SubRipItem(index, start, end, text, position)
index -> int: index of item in file. 0 by default.
start, end -> SubRipTime or coercible.
text -> unicode: text content for item.
position -> unicode: raw srt/vtt "display coordinates" string
"""
ITEM_PATTERN = str('%s\n%s --> %s%s\n%s\n')
TIMESTAMP_SEPARATOR = '-->'
def __init__(self, index=0, start=None, end=None, text='', position=''):
try:
self.index = int(index)
except (TypeError, ValueError): # try to cast as int, but it's not mandatory
self.index = index
self.start = SubRipTime.coerce(start or 0)
self.end = SubRipTime.coerce(end or 0)
self.position = str(position)
self.text = str(text)
@property
def duration(self):
return self.end - self.start
@property
def text_without_tags(self):
RE_TAG = re.compile(r'<[^>]*?>')
return RE_TAG.sub('', self.text)
@property
def characters_per_second(self):
characters_count = len(self.text_without_tags.replace('\n', ''))
try:
return characters_count / (self.duration.ordinal / 1000.0)
except ZeroDivisionError:
return 0.0
def __str__(self):
position = ' %s' % self.position if self.position.strip() else ''
return self.ITEM_PATTERN % (self.index, self.start, self.end,
position, self.text)
if is_py2:
__unicode__ = __str__
def __str__(self):
raise NotImplementedError('Use unicode() instead!')
def _cmpkey(self):
return (self.start, self.end)
def shift(self, *args, **kwargs):
"""
shift(hours, minutes, seconds, milliseconds, ratio)
Add given values to start and end attributes.
All arguments are optional and have a default value of 0.
"""
self.start.shift(*args, **kwargs)
self.end.shift(*args, **kwargs)
@classmethod
def from_string(cls, source):
return cls.from_lines(source.splitlines(True))
@classmethod
def from_lines(cls, lines):
if len(lines) < 2:
raise InvalidItem()
lines = [l.rstrip() for l in lines]
index = None
if cls.TIMESTAMP_SEPARATOR not in lines[0]:
index = lines.pop(0)
start, end, position = cls.split_timestamps(lines[0])
body = '\n'.join(lines[1:])
return cls(index, start, end, body, position)
@classmethod
def split_timestamps(cls, line):
timestamps = line.split(cls.TIMESTAMP_SEPARATOR)
if len(timestamps) != 2:
raise InvalidItem()
start, end_and_position = timestamps
end_and_position = end_and_position.lstrip().split(' ', 1)
end = end_and_position[0]
position = end_and_position[1] if len(end_and_position) > 1 else ''
return (s.strip() for s in (start, end, position))
```
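A small parsing sketch for the class above; it assumes SubRipTime (imported above but not shown here) coerces "HH:MM:SS,mmm" strings and supports subtraction and `.ordinal`, as pysrt's implementation does.
```python
raw = '1\n00:00:01,500 --> 00:00:04,000 X1:100 Y1:120\nHello <i>world</i>!\n'
item = SubRipItem.from_string(raw)
print(item.index, item.position)      # 1 X1:100 Y1:120
print(item.text_without_tags)         # Hello world!
print(item.characters_per_second)     # 12 characters over 2.5 s -> 4.8
```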
#### File: Shared/rebulk/chain.py
```python
import itertools
from .loose import call, set_defaults
from .match import Match, Matches
from .pattern import Pattern, filter_match_kwargs
from .remodule import re
class _InvalidChainException(Exception):
"""
Internal exception raised when a chain is not valid
"""
pass
class Chain(Pattern):
"""
Definition of a pattern chain to search for.
"""
def __init__(self, rebulk, chain_breaker=None, **kwargs):
call(super(Chain, self).__init__, **kwargs)
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
self._defaults = {}
self._regex_defaults = {}
self._string_defaults = {}
self._functional_defaults = {}
if callable(chain_breaker):
self.chain_breaker = chain_breaker
else:
self.chain_breaker = None
self.rebulk = rebulk
self.parts = []
def defaults(self, **kwargs):
"""
Define default keyword arguments for all patterns
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
self._defaults = kwargs
return self
def regex_defaults(self, **kwargs):
"""
Define default keyword arguments for regex patterns.
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
self._regex_defaults = kwargs
return self
def string_defaults(self, **kwargs):
"""
Define default keyword arguments for string patterns.
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
self._string_defaults = kwargs
return self
def functional_defaults(self, **kwargs):
"""
Define default keyword arguments for functional patterns.
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
self._functional_defaults = kwargs
return self
def chain(self):
"""
Add patterns chain, using configuration from this chain
:return:
:rtype:
"""
# pylint: disable=protected-access
chain = self.rebulk.chain(**self._kwargs)
chain._defaults = dict(self._defaults)
chain._regex_defaults = dict(self._regex_defaults)
chain._functional_defaults = dict(self._functional_defaults)
chain._string_defaults = dict(self._string_defaults)
return chain
def regex(self, *pattern, **kwargs):
"""
Add re pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
set_defaults(self._kwargs, kwargs)
set_defaults(self._regex_defaults, kwargs)
set_defaults(self._defaults, kwargs)
pattern = self.rebulk.build_re(*pattern, **kwargs)
part = ChainPart(self, pattern)
self.parts.append(part)
return part
def functional(self, *pattern, **kwargs):
"""
Add functional pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
set_defaults(self._kwargs, kwargs)
set_defaults(self._functional_defaults, kwargs)
set_defaults(self._defaults, kwargs)
pattern = self.rebulk.build_functional(*pattern, **kwargs)
part = ChainPart(self, pattern)
self.parts.append(part)
return part
def string(self, *pattern, **kwargs):
"""
Add string pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
set_defaults(self._kwargs, kwargs)
set_defaults(self._string_defaults, kwargs)
set_defaults(self._defaults, kwargs)
pattern = self.rebulk.build_string(*pattern, **kwargs)
part = ChainPart(self, pattern)
self.parts.append(part)
return part
def close(self):
"""
Close the chain builder to continue registering other patterns
:return:
:rtype:
"""
return self.rebulk
def _match(self, pattern, input_string, context=None):
# pylint: disable=too-many-locals,too-many-nested-blocks
chain_matches = []
chain_input_string = input_string
offset = 0
while offset < len(input_string):
chain_found = False
current_chain_matches = []
valid_chain = True
is_chain_start = True
for chain_part in self.parts:
try:
chain_part_matches, raw_chain_part_matches = Chain._match_chain_part(is_chain_start, chain_part,
chain_input_string,
context)
Chain._fix_matches_offset(chain_part_matches, input_string, offset)
Chain._fix_matches_offset(raw_chain_part_matches, input_string, offset)
if raw_chain_part_matches:
grouped_matches_dict = dict()
for match_index, match in itertools.groupby(chain_part_matches,
lambda m: m.match_index):
grouped_matches_dict[match_index] = list(match)
grouped_raw_matches_dict = dict()
for match_index, raw_match in itertools.groupby(raw_chain_part_matches,
lambda m: m.match_index):
grouped_raw_matches_dict[match_index] = list(raw_match)
for match_index, grouped_raw_matches in grouped_raw_matches_dict.items():
chain_found = True
offset = grouped_raw_matches[-1].raw_end
chain_input_string = input_string[offset:]
if not chain_part.is_hidden:
grouped_matches = grouped_matches_dict.get(match_index, [])
if self._chain_breaker_eval(current_chain_matches + grouped_matches):
current_chain_matches.extend(grouped_matches)
except _InvalidChainException:
valid_chain = False
if current_chain_matches:
offset = current_chain_matches[0].raw_end
break
is_chain_start = False
if not chain_found:
break
if current_chain_matches and valid_chain:
match = self._build_chain_match(current_chain_matches, input_string)
chain_matches.append(match)
return chain_matches
def _match_parent(self, match, yield_parent):
"""
Handle a parent match
:param match:
:type match:
:param yield_parent:
:type yield_parent:
:return:
:rtype:
"""
ret = super(Chain, self)._match_parent(match, yield_parent)
original_children = Matches(match.children)
original_end = match.end
while not ret and match.children:
last_pattern = match.children[-1].pattern
last_pattern_children = [child for child in match.children if child.pattern == last_pattern]
last_pattern_groups_iter = itertools.groupby(last_pattern_children, lambda child: child.match_index)
last_pattern_groups = {}
for index, matches in last_pattern_groups_iter:
last_pattern_groups[index] = list(matches)
for index in reversed(list(last_pattern_groups)):
last_matches = list(last_pattern_groups[index])
for last_match in last_matches:
match.children.remove(last_match)
match.end = match.children[-1].end if match.children else match.start
ret = super(Chain, self)._match_parent(match, yield_parent)
if ret:
return True
match.children = original_children
match.end = original_end
return ret
def _build_chain_match(self, current_chain_matches, input_string):
start = None
end = None
for match in current_chain_matches:
if start is None or start > match.start:
start = match.start
if end is None or end < match.end:
end = match.end
match = call(Match, start, end, pattern=self, input_string=input_string, **self._match_kwargs)
for chain_match in current_chain_matches:
if chain_match.children:
for child in chain_match.children:
match.children.append(child)
if chain_match not in match.children:
match.children.append(chain_match)
chain_match.parent = match
return match
def _chain_breaker_eval(self, matches):
return not self.chain_breaker or not self.chain_breaker(Matches(matches))
@staticmethod
def _fix_matches_offset(chain_part_matches, input_string, offset):
for chain_part_match in chain_part_matches:
if chain_part_match.input_string != input_string:
chain_part_match.input_string = input_string
chain_part_match.end += offset
chain_part_match.start += offset
if chain_part_match.children:
Chain._fix_matches_offset(chain_part_match.children, input_string, offset)
@staticmethod
def _match_chain_part(is_chain_start, chain_part, chain_input_string, context):
chain_part_matches, raw_chain_part_matches = chain_part.pattern.matches(chain_input_string, context,
with_raw_matches=True)
chain_part_matches = Chain._truncate_chain_part_matches(is_chain_start, chain_part_matches, chain_part,
chain_input_string)
raw_chain_part_matches = Chain._truncate_chain_part_matches(is_chain_start, raw_chain_part_matches, chain_part,
chain_input_string)
Chain._validate_chain_part_matches(raw_chain_part_matches, chain_part)
return chain_part_matches, raw_chain_part_matches
@staticmethod
def _truncate_chain_part_matches(is_chain_start, chain_part_matches, chain_part, chain_input_string):
if not chain_part_matches:
return chain_part_matches
if not is_chain_start:
separator = chain_input_string[0:chain_part_matches[0].initiator.raw_start]
if separator:
return []
j = 1
for i in range(0, len(chain_part_matches) - 1):
separator = chain_input_string[chain_part_matches[i].initiator.raw_end:
chain_part_matches[i + 1].initiator.raw_start]
if separator:
break
j += 1
truncated = chain_part_matches[:j]
if chain_part.repeater_end is not None:
truncated = [m for m in truncated if m.match_index < chain_part.repeater_end]
return truncated
@staticmethod
def _validate_chain_part_matches(chain_part_matches, chain_part):
max_match_index = -1
if chain_part_matches:
max_match_index = max([m.match_index for m in chain_part_matches])
if max_match_index + 1 < chain_part.repeater_start:
raise _InvalidChainException
@property
def match_options(self):
return {}
@property
def patterns(self):
return [self]
def __repr__(self):
defined = ""
if self.defined_at:
defined = "@%s" % (self.defined_at,)
return "<%s%s:%s>" % (self.__class__.__name__, defined, self.parts)
class ChainPart(object):
"""
Part of a pattern chain.
"""
def __init__(self, chain, pattern):
self._chain = chain
self.pattern = pattern
self.repeater_start = 1
self.repeater_end = 1
self._hidden = False
def chain(self):
"""
Add patterns chain, using configuration from this chain
:return:
:rtype:
"""
return self._chain.chain()
def hidden(self, hidden=True):
"""
Hide chain part results from global chain result
:param hidden:
:type hidden:
:return:
:rtype:
"""
self._hidden = hidden
return self
@property
def is_hidden(self):
"""
Check if the chain part is hidden
:return:
:rtype:
"""
return self._hidden
def regex(self, *pattern, **kwargs):
"""
Add re pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
return self._chain.regex(*pattern, **kwargs)
def functional(self, *pattern, **kwargs):
"""
Add functional pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
return self._chain.functional(*pattern, **kwargs)
def string(self, *pattern, **kwargs):
"""
Add string pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
return self._chain.string(*pattern, **kwargs)
def close(self):
"""
Close the chain builder to continue registering other patterns
:return:
:rtype:
"""
return self._chain.close()
def repeater(self, value):
"""
Define the repeater of the current chain part.
:param value:
:type value:
:return:
:rtype:
"""
try:
value = int(value)
self.repeater_start = value
self.repeater_end = value
return self
except ValueError:
pass
if value == '+':
self.repeater_start = 1
self.repeater_end = None
elif value == '*':
self.repeater_start = 0
self.repeater_end = None
elif value == '?':
self.repeater_start = 0
self.repeater_end = 1
else:
match = re.match(r'\{\s*(\d*)\s*,?\s*(\d*)\s*\}', value)
if match:
start = match.group(1)
end = match.group(2)
if start or end:
self.repeater_start = int(start) if start else 0
self.repeater_end = int(end) if end else None
return self
def __repr__(self):
return "%s({%s,%s})" % (self.pattern, self.repeater_start, self.repeater_end)
```
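A sketch of how a chain might be declared through the builder API; it assumes the Rebulk class (not shown here) exposes the `chain()` and `matches()` entry points this module relies on. The chain below is roughly equivalent to the regex `S\d{2}(E\d{2})+`.
```python
from rebulk import Rebulk

rebulk = Rebulk()
rebulk.chain(name='episode_marker') \
    .regex(r'S\d{2}') \
    .regex(r'E\d{2}').repeater('+') \
    .close()

for match in rebulk.matches('Show.Name.S01E02E03.720p.mkv'):
    print(match.name, match.value)    # episode_marker S01E02E03
```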
#### File: Shared/rebulk/debug.py
```python
import inspect
import logging
import os
from collections import namedtuple
DEBUG = False
LOG_LEVEL = logging.DEBUG
class Frame(namedtuple('Frame', ['lineno', 'package', 'name', 'filename'])):
"""
Stack frame representation.
"""
__slots__ = ()
def __repr__(self):
return "%s#L%s" % (os.path.basename(self.filename), self.lineno)
def defined_at():
"""
Get definition location of a pattern or a match (outside of rebulk package).
:return:
:rtype:
"""
if DEBUG:
frame = inspect.currentframe()
while frame:
try:
if frame.f_globals['__package__'] != __package__:
break
except KeyError: # pragma:no cover
# If package is missing, consider we are in. Workaround for python 3.3.
break
frame = frame.f_back
ret = Frame(frame.f_lineno,
frame.f_globals.get('__package__'),
frame.f_globals.get('__name__'),
frame.f_code.co_filename)
del frame
return ret
```
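With DEBUG left at False, defined_at() returns None; enabling it records the first stack frame outside the rebulk package. A small sketch, assuming the module is importable as `rebulk.debug`:
```python
from rebulk import debug

debug.DEBUG = True
print(debug.defined_at())   # e.g. my_script.py#L5 -- where the call originated
```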
#### File: Shared/rebulk/pattern.py
```python
from abc import ABCMeta, abstractmethod, abstractproperty
import six
from . import debug
from .loose import call, ensure_list, ensure_dict
from .match import Match
from .remodule import re, REGEX_AVAILABLE
from .utils import find_all, is_iterable, get_first_defined
@six.add_metaclass(ABCMeta)
class Pattern(object):
"""
Definition of a particular pattern to search for.
"""
def __init__(self, name=None, tags=None, formatter=None, value=None, validator=None, children=False, every=False,
private_parent=False, private_children=False, private=False, private_names=None, ignore_names=None,
marker=False, format_all=False, validate_all=False, disabled=lambda context: False, log_level=None,
properties=None, post_processor=None, **kwargs):
"""
:param name: Name of this pattern
:type name: str
:param tags: List of tags related to this pattern
:type tags: list[str]
:param formatter: dict (name, func) of formatter to use with this pattern. name is the match name to support,
and func a function(input_string) that returns the formatted string. A single formatter function can also be
passed as a shortcut for {None: formatter}. The returned formatted string will be set in the Match.value property.
:type formatter: dict[str, func] || func
:param value: dict (name, value) of value to use with this pattern. name is the match name to support,
and value an object for the match value. A single object value can also be
passed as a shortcut for {None: value}. The value will be set in the Match.value property.
:type value: dict[str, object] || object
:param validator: dict (name, func) of validator to use with this pattern. name is the match name to support,
and func a function(match) that returns a boolean. A single validator function can also be
passed as a shortcut for {None: validator}. If return value is False, match will be ignored.
:param children: generates children instead of parent
:type children: bool
:param every: generates both parent and children.
:type every: bool
:param private: flag this pattern as being private.
:type private: bool
:param private_parent: force return of parent and flag parent matches as private.
:type private_parent: bool
:param private_children: force return of children and flag children matches as private.
:type private_children: bool
:param private_names: force return of named matches as private.
:type private_names: bool
:param ignore_names: drop some named matches after validation.
:type ignore_names: bool
:param marker: flag this pattern as being a marker.
:type marker: bool
:param format_all: if True, pattern will format every match in the hierarchy (even matches not yielded).
:type format_all: bool
:param validate_all: if True, pattern will validate every match in the hierarchy (even matches not yielded).
:type validate_all: bool
:param disabled: if True, this pattern is disabled. Can also be a function(context).
:type disabled: bool|function
:param log_level: Log level associated with this pattern
:type log_level: int
:param post_processor: Post processing function
:type post_processor: func
"""
# pylint:disable=too-many-locals,unused-argument
self.name = name
self.tags = ensure_list(tags)
self.formatters, self._default_formatter = ensure_dict(formatter, lambda x: x)
self.values, self._default_value = ensure_dict(value, None)
self.validators, self._default_validator = ensure_dict(validator, lambda match: True)
self.every = every
self.children = children
self.private = private
self.private_names = private_names if private_names else []
self.ignore_names = ignore_names if ignore_names else []
self.private_parent = private_parent
self.private_children = private_children
self.marker = marker
self.format_all = format_all
self.validate_all = validate_all
if not callable(disabled):
self.disabled = lambda context: disabled
else:
self.disabled = disabled
self._log_level = log_level
self._properties = properties
self.defined_at = debug.defined_at()
if not callable(post_processor):
self.post_processor = None
else:
self.post_processor = post_processor
@property
def log_level(self):
"""
Log level for this pattern.
:return:
:rtype:
"""
return self._log_level if self._log_level is not None else debug.LOG_LEVEL
def _yield_children(self, match):
"""
Does this match have children?
:param match:
:type match:
:return:
:rtype:
"""
return match.children and (self.children or self.every)
def _yield_parent(self):
"""
Should the parent match be yielded (instead of only its children)?
:return:
:rtype:
"""
return not self.children or self.every
def _match_parent(self, match, yield_parent):
"""
Handle a parent match
:param match:
:type match:
:param yield_parent:
:type yield_parent:
:return:
:rtype:
"""
if not match or match.value == "":
return False
pattern_value = get_first_defined(self.values, [match.name, '__parent__', None],
self._default_value)
if pattern_value:
match.value = pattern_value
if yield_parent or self.format_all:
match.formatter = get_first_defined(self.formatters, [match.name, '__parent__', None],
self._default_formatter)
if yield_parent or self.validate_all:
validator = get_first_defined(self.validators, [match.name, '__parent__', None],
self._default_validator)
if validator and not validator(match):
return False
return True
def _match_child(self, child, yield_children):
"""
Handle a children match
:param child:
:type child:
:param yield_children:
:type yield_children:
:return:
:rtype:
"""
if not child or child.value == "":
return False
pattern_value = get_first_defined(self.values, [child.name, '__children__', None],
self._default_value)
if pattern_value:
child.value = pattern_value
if yield_children or self.format_all:
child.formatter = get_first_defined(self.formatters, [child.name, '__children__', None],
self._default_formatter)
if yield_children or self.validate_all:
validator = get_first_defined(self.validators, [child.name, '__children__', None],
self._default_validator)
if validator and not validator(child):
return False
return True
def matches(self, input_string, context=None, with_raw_matches=False):
"""
Computes all matches for a given input
:param input_string: the string to parse
:type input_string: str
:param context: the context
:type context: dict
:param with_raw_matches: if True, also return the raw (unfiltered) matches
:type with_raw_matches: bool
:return: matches based on input_string for this pattern
:rtype: iterator[Match]
"""
# pylint: disable=too-many-branches
matches = []
raw_matches = []
for pattern in self.patterns:
yield_parent = self._yield_parent()
match_index = -1
for match in self._match(pattern, input_string, context):
match_index += 1
match.match_index = match_index
raw_matches.append(match)
yield_children = self._yield_children(match)
if not self._match_parent(match, yield_parent):
continue
validated = True
for child in match.children:
if not self._match_child(child, yield_children):
validated = False
break
if validated:
if self.private_parent:
match.private = True
if self.private_children:
for child in match.children:
child.private = True
if yield_parent or self.private_parent:
matches.append(match)
if yield_children or self.private_children:
for child in match.children:
child.match_index = match_index
matches.append(child)
matches = self._matches_post_process(matches)
self._matches_privatize(matches)
self._matches_ignore(matches)
if with_raw_matches:
return matches, raw_matches
return matches
def _matches_post_process(self, matches):
"""
Post process matches with user defined function
:param matches:
:type matches:
:return:
:rtype:
"""
if self.post_processor:
return self.post_processor(matches, self)
return matches
def _matches_privatize(self, matches):
"""
Mark matches included in private_names with private flag.
:param matches:
:type matches:
:return:
:rtype:
"""
if self.private_names:
for match in matches:
if match.name in self.private_names:
match.private = True
def _matches_ignore(self, matches):
"""
Ignore matches included in ignore_names.
:param matches:
:type matches:
:return:
:rtype:
"""
if self.ignore_names:
for match in list(matches):
if match.name in self.ignore_names:
matches.remove(match)
@abstractproperty
def patterns(self): # pragma: no cover
"""
List of base patterns defined
:return: A list of base patterns
:rtype: list
"""
pass
@property
def properties(self):
"""
Property names and values that can be retrieved by this pattern.
:return:
:rtype:
"""
if self._properties:
return self._properties
return {}
@abstractproperty
def match_options(self): # pragma: no cover
"""
dict of default options for generated Match objects
:return: **options to pass to Match constructor
:rtype: dict
"""
pass
@abstractmethod
def _match(self, pattern, input_string, context=None): # pragma: no cover
"""
Computes all matches for a given pattern and input
:param pattern: the pattern to use
:param input_string: the string to parse
:type input_string: str
:param context: the context
:type context: dict
:return: matches based on input_string for this pattern
:rtype: iterator[Match]
"""
pass
def __repr__(self):
defined = ""
if self.defined_at:
defined = "@%s" % (self.defined_at,)
return "<%s%s:%s>" % (self.__class__.__name__, defined, self.__repr__patterns__)
@property
def __repr__patterns__(self):
return self.patterns
class StringPattern(Pattern):
"""
Definition of one or many strings to search for.
"""
def __init__(self, *patterns, **kwargs):
super(StringPattern, self).__init__(**kwargs)
self._patterns = patterns
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
@property
def patterns(self):
return self._patterns
@property
def match_options(self):
return self._match_kwargs
def _match(self, pattern, input_string, context=None):
for index in find_all(input_string, pattern, **self._kwargs):
yield Match(index, index + len(pattern), pattern=self, input_string=input_string, **self._match_kwargs)
class RePattern(Pattern):
"""
Definition of one or many regular expression pattern to search for.
"""
def __init__(self, *patterns, **kwargs):
super(RePattern, self).__init__(**kwargs)
self.repeated_captures = REGEX_AVAILABLE
if 'repeated_captures' in kwargs:
self.repeated_captures = kwargs.get('repeated_captures')
if self.repeated_captures and not REGEX_AVAILABLE: # pragma: no cover
raise NotImplementedError("repeated_capture is available only with regex module.")
self.abbreviations = kwargs.get('abbreviations', [])
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
self._children_match_kwargs = filter_match_kwargs(kwargs, children=True)
self._patterns = []
for pattern in patterns:
if isinstance(pattern, six.string_types):
if self.abbreviations and pattern:
for key, replacement in self.abbreviations:
pattern = pattern.replace(key, replacement)
pattern = call(re.compile, pattern, **self._kwargs)
elif isinstance(pattern, dict):
if self.abbreviations and 'pattern' in pattern:
for key, replacement in self.abbreviations:
pattern['pattern'] = pattern['pattern'].replace(key, replacement)
pattern = re.compile(**pattern)
elif hasattr(pattern, '__iter__'):
pattern = re.compile(*pattern)
self._patterns.append(pattern)
@property
def patterns(self):
return self._patterns
@property
def __repr__patterns__(self):
return [pattern.pattern for pattern in self.patterns]
@property
def match_options(self):
return self._match_kwargs
def _match(self, pattern, input_string, context=None):
names = dict((v, k) for k, v in pattern.groupindex.items())
for match_object in pattern.finditer(input_string):
start = match_object.start()
end = match_object.end()
main_match = Match(start, end, pattern=self, input_string=input_string, **self._match_kwargs)
if pattern.groups:
for i in range(1, pattern.groups + 1):
name = names.get(i, main_match.name)
if self.repeated_captures:
for start, end in match_object.spans(i):
child_match = Match(start, end, name=name, parent=main_match, pattern=self,
input_string=input_string, **self._children_match_kwargs)
main_match.children.append(child_match)
else:
start, end = match_object.span(i)
if start > -1 and end > -1:
child_match = Match(start, end, name=name, parent=main_match, pattern=self,
input_string=input_string, **self._children_match_kwargs)
main_match.children.append(child_match)
yield main_match
class FunctionalPattern(Pattern):
"""
Definition of one or many functional pattern to search for.
"""
def __init__(self, *patterns, **kwargs):
super(FunctionalPattern, self).__init__(**kwargs)
self._patterns = patterns
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
@property
def patterns(self):
return self._patterns
@property
def match_options(self):
return self._match_kwargs
def _match(self, pattern, input_string, context=None):
ret = call(pattern, input_string, context, **self._kwargs)
if ret:
if not is_iterable(ret) or isinstance(ret, dict) \
or (is_iterable(ret) and hasattr(ret, '__getitem__') and isinstance(ret[0], int)):
args_iterable = [ret]
else:
args_iterable = ret
for args in args_iterable:
if isinstance(args, dict):
options = args
options.pop('input_string', None)
options.pop('pattern', None)
if self._match_kwargs:
options = self._match_kwargs.copy()
options.update(args)
yield Match(pattern=self, input_string=input_string, **options)
else:
kwargs = self._match_kwargs
if isinstance(args[-1], dict):
kwargs = dict(kwargs)
kwargs.update(args[-1])
args = args[:-1]
yield Match(*args, pattern=self, input_string=input_string, **kwargs)
def filter_match_kwargs(kwargs, children=False):
"""
Filters out kwargs for Match construction
:param kwargs:
:type kwargs: dict
:param children:
:type children: Flag to filter children matches
:return: A filtered dict
:rtype: dict
"""
kwargs = kwargs.copy()
for key in ('pattern', 'start', 'end', 'parent', 'formatter', 'value'):
if key in kwargs:
del kwargs[key]
if children:
for key in ('name',):
if key in kwargs:
del kwargs[key]
return kwargs
```
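A brief sketch of the two simplest concrete patterns above; it assumes the Match class (from `.match`, not shown here) exposes `start`/`end`/`name`/`value` as used throughout this module.
```python
from rebulk.pattern import StringPattern, RePattern

words = StringPattern('quick', 'fox', name='word')
for match in words.matches('The quick brown fox'):
    print(match.start, match.end, match.value)   # 4 9 quick, then 16 19 fox

numbers = RePattern(r'(?P<number>\d+)', children=True)
for match in numbers.matches('abc 123 def 45'):
    print(match.name, match.value)               # number 123, then number 45
```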
#### File: rebulk/test/test_introspector.py
```python
from ..rebulk import Rebulk
from .. import introspector
from .default_rules_module import RuleAppend2, RuleAppend3
def test_string_introspector():
rebulk = Rebulk().string('One', 'Two', 'Three', name='first').string('1', '2', '3', name='second')
introspected = introspector.introspect(rebulk, None)
assert len(introspected.patterns) == 2
first_properties = introspected.patterns[0].properties
assert len(first_properties) == 1
assert first_properties['first'] == ['One', 'Two', 'Three']
second_properties = introspected.patterns[1].properties
assert len(second_properties) == 1
assert second_properties['second'] == ['1', '2', '3']
properties = introspected.properties
assert len(properties) == 2
assert properties['first'] == first_properties['first']
assert properties['second'] == second_properties['second']
def test_string_properties():
rebulk = Rebulk()\
.string('One', 'Two', 'Three', name='first', properties={'custom': ['One']})\
.string('1', '2', '3', name='second', properties={'custom': [1]})
introspected = introspector.introspect(rebulk, None)
assert len(introspected.patterns) == 2
assert len(introspected.rules) == 2
first_properties = introspected.patterns[0].properties
assert len(first_properties) == 1
assert first_properties['custom'] == ['One']
second_properties = introspected.patterns[1].properties
assert len(second_properties) == 1
assert second_properties['custom'] == [1]
properties = introspected.properties
assert len(properties) == 1
assert properties['custom'] == ['One', 1]
def test_various_pattern():
rebulk = Rebulk()\
.regex('One', 'Two', 'Three', name='first', value="string") \
.string('1', '2', '3', name='second', value="digit") \
.string('4', '5', '6', name='third') \
.string('private', private=True) \
.functional(lambda string: (0, 5), name='func', value='test') \
.regex('One', 'Two', 'Three', name='regex_name') \
.regex('(?P<one>One)(?P<two>Two)(?P<three>Three)') \
.functional(lambda string: (6, 10), name='func2') \
.string('7', name='third')
introspected = introspector.introspect(rebulk, None)
assert len(introspected.patterns) == 8
assert len(introspected.rules) == 2
first_properties = introspected.patterns[0].properties
assert len(first_properties) == 1
assert first_properties['first'] == ['string']
second_properties = introspected.patterns[1].properties
assert len(second_properties) == 1
assert second_properties['second'] == ['digit']
third_properties = introspected.patterns[2].properties
assert len(third_properties) == 1
assert third_properties['third'] == ['4', '5', '6']
func_properties = introspected.patterns[3].properties
assert len(func_properties) == 1
assert func_properties['func'] == ['test']
regex_name_properties = introspected.patterns[4].properties
assert len(regex_name_properties) == 1
assert regex_name_properties['regex_name'] == [None]
regex_groups_properties = introspected.patterns[5].properties
assert len(regex_groups_properties) == 3
assert regex_groups_properties['one'] == [None]
assert regex_groups_properties['two'] == [None]
assert regex_groups_properties['three'] == [None]
func2_properties = introspected.patterns[6].properties
assert len(func2_properties) == 1
assert func2_properties['func2'] == [None]
append_third_properties = introspected.patterns[7].properties
assert len(append_third_properties) == 1
append_third_properties['third'] == [None]
properties = introspected.properties
assert len(properties) == 9
assert properties['first'] == first_properties['first']
assert properties['second'] == second_properties['second']
assert properties['third'] == third_properties['third'] + append_third_properties['third']
assert properties['func'] == func_properties['func']
assert properties['regex_name'] == regex_name_properties['regex_name']
assert properties['one'] == regex_groups_properties['one']
assert properties['two'] == regex_groups_properties['two']
assert properties['three'] == regex_groups_properties['three']
assert properties['func2'] == func2_properties['func2']
def test_rule_properties():
rebulk = Rebulk(default_rules=False).rules(RuleAppend2, RuleAppend3)
introspected = introspector.introspect(rebulk, None)
assert len(introspected.rules) == 2
assert len(introspected.patterns) == 0
rule_properties = introspected.rules[0].properties
assert len(rule_properties) == 1
assert rule_properties['renamed'] == [None]
rule_properties = introspected.rules[1].properties
assert len(rule_properties) == 1
assert rule_properties['renamed'] == [None]
properties = introspected.properties
assert len(properties) == 1
assert properties['renamed'] == [None]
```
#### File: Shared/rebulk/utils.py
```python
try:
    from collections.abc import MutableSet
except ImportError:  # Python 2 fallback
    from collections import MutableSet
from types import GeneratorType
def find_all(string, sub, start=None, end=None, ignore_case=False, **kwargs):
"""
Return all indices in string s where substring sub is
found, such that sub is contained in the slice s[start:end].
>>> list(find_all('The quick brown fox jumps over the lazy dog', 'fox'))
[16]
>>> list(find_all('The quick brown fox jumps over the lazy dog', 'mountain'))
[]
>>> list(find_all('The quick brown fox jumps over the lazy dog', 'The'))
[0]
>>> list(find_all(
... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person',
... 'an'))
[44, 51, 70]
>>> list(find_all(
... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person',
... 'an',
... 50,
... 60))
[51]
:param string: the input string
:type string: str
:param sub: the substring
:type sub: str
:return: all indices in the input string
:rtype: __generator[int]
"""
#pylint: disable=unused-argument
if ignore_case:
sub = sub.lower()
string = string.lower()
while True:
start = string.find(sub, start, end)
if start == -1:
return
yield start
start += len(sub)
def get_first_defined(data, keys, default_value=None):
"""
Get the first defined key in data.
:param data:
:type data:
:param keys:
:type keys:
:param default_value:
:type default_value:
:return:
:rtype:
"""
for key in keys:
if key in data:
return data[key]
return default_value
def is_iterable(obj):
"""
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
"""
# pylint: disable=consider-using-ternary
return hasattr(obj, '__iter__') and not isinstance(obj, str) or isinstance(obj, GeneratorType)
def extend_safe(target, source):
"""
Extends target list with the elements of source list that don't already exist in target.
:param target:
:type target: list
:param source:
:type source: list
"""
for elt in source:
if elt not in target:
target.append(elt)
class _Ref(object):
"""
Reference for IdentitySet
"""
def __init__(self, value):
self.value = value
def __eq__(self, other):
return self.value is other.value
def __hash__(self):
return id(self.value)
class IdentitySet(MutableSet): # pragma: no cover
"""
Set based on identity
"""
def __init__(self, items=None): # pylint: disable=super-init-not-called
if items is None:
items = []
self.refs = set(map(_Ref, items))
def __contains__(self, elem):
return _Ref(elem) in self.refs
def __iter__(self):
return (ref.value for ref in self.refs)
def __len__(self):
return len(self.refs)
def add(self, value):
self.refs.add(_Ref(value))
def discard(self, value):
self.refs.discard(_Ref(value))
def update(self, iterable):
"""
Update set with iterable
:param iterable:
:type iterable:
:return:
:rtype:
"""
for elem in iterable:
self.add(elem)
def __repr__(self): # pragma: no cover
return "%s(%s)" % (type(self).__name__, list(self))
```
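The set above compares members by identity rather than equality, so two equal but distinct objects are both kept; extend_safe() deduplicates by equality instead. A small sketch:
```python
a, b = [1, 2], [1, 2]
ids = IdentitySet([a, b])
print(len(ids))                  # 2 -- both lists kept, despite comparing equal
print(a in ids, [1, 2] in ids)   # True False -- membership is by identity

target = [1, 2]
extend_safe(target, [2, 3])
print(target)                    # [1, 2, 3]
```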
#### File: Shared/retry/compat.py
```python
import functools
try:
from decorator import decorator
except ImportError:
def decorator(caller):
""" Turns caller into a decorator.
Unlike decorator module, function signature is not preserved.
:param caller: caller(f, *args, **kwargs)
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return caller(f, *args, **kwargs)
return wrapper
return decor
```
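A sketch of the fallback decorator in action: retry_once is a caller that simply retries the wrapped function a second time when the first call raises.
```python
def retry_once(f, *args, **kwargs):
    try:
        return f(*args, **kwargs)
    except Exception:
        return f(*args, **kwargs)

calls = {'count': 0}

@decorator(retry_once)
def flaky():
    calls['count'] += 1
    if calls['count'] == 1:
        raise RuntimeError('transient failure')
    return 'ok'

print(flaky())   # 'ok' -- the second attempt succeeds
```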
#### File: stevedore/tests/test_hook.py
```python
from stevedore import hook
from stevedore.tests import utils
class TestHook(utils.TestCase):
def test_hook(self):
em = hook.HookManager(
'stevedore.test.extension',
't1',
invoke_on_load=True,
invoke_args=('a',),
invoke_kwds={'b': 'B'},
)
self.assertEqual(len(em.extensions), 1)
self.assertEqual(em.names(), ['t1'])
def test_get_by_name(self):
em = hook.HookManager(
'stevedore.test.extension',
't1',
invoke_on_load=True,
invoke_args=('a',),
invoke_kwds={'b': 'B'},
)
e_list = em['t1']
self.assertEqual(len(e_list), 1)
e = e_list[0]
self.assertEqual(e.name, 't1')
def test_get_by_name_missing(self):
em = hook.HookManager(
'stevedore.test.extension',
't1',
invoke_on_load=True,
invoke_args=('a',),
invoke_kwds={'b': 'B'},
)
try:
em['t2']
except KeyError:
pass
else:
assert False, 'Failed to raise KeyError'
```
#### File: subliminal_patch/providers/legendastv.py
```python
import logging
import rarfile
import os
from subliminal.exceptions import ConfigurationError
from subliminal.providers.legendastv import LegendasTVSubtitle as _LegendasTVSubtitle, \
LegendasTVProvider as _LegendasTVProvider, Episode, Movie, guess_matches, guessit, sanitize, region, type_map, \
raise_for_status, json, SHOW_EXPIRATION_TIME, title_re, season_re, datetime, pytz, NO_VALUE, releases_key, \
SUBTITLE_EXTENSIONS, language_converters
from subzero.language import Language
logger = logging.getLogger(__name__)
class LegendasTVSubtitle(_LegendasTVSubtitle):
def __init__(self, language, type, title, year, imdb_id, season, archive, name):
super(LegendasTVSubtitle, self).__init__(language, type, title, year, imdb_id, season, archive, name)
self.archive.content = None
self.release_info = archive.name
self.page_link = archive.link
def make_picklable(self):
self.archive.content = None
return self
def get_matches(self, video, hearing_impaired=False):
matches = set()
# episode
if isinstance(video, Episode) and self.type == 'episode':
# series
if video.series and (sanitize(self.title) in (
sanitize(name) for name in [video.series] + video.alternative_series)):
matches.add('series')
# year
if video.original_series and self.year is None or video.year and video.year == self.year:
matches.add('year')
# imdb_id
if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
matches.add('series_imdb_id')
# movie
elif isinstance(video, Movie) and self.type == 'movie':
# title
if video.title and (sanitize(self.title) in (
sanitize(name) for name in [video.title] + video.alternative_titles)):
matches.add('title')
# year
if video.year and self.year == video.year:
matches.add('year')
# imdb_id
if video.imdb_id and self.imdb_id == video.imdb_id:
matches.add('imdb_id')
# name
matches |= guess_matches(video, guessit(self.name, {'type': self.type, 'single_value': True}))
return matches
class LegendasTVProvider(_LegendasTVProvider):
languages = {Language(*l) for l in language_converters['legendastv'].to_legendastv.keys()}
subtitle_class = LegendasTVSubtitle
def __init__(self, username=None, password=None):
# Provider needs UNRAR installed. If not available raise ConfigurationError
try:
rarfile.custom_check([rarfile.UNRAR_TOOL], True)
except rarfile.RarExecError:
raise ConfigurationError('UNRAR tool not available')
if any((username, password)) and not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.username = username
self.password = password
self.logged_in = False
self.session = None
@staticmethod
def is_valid_title(title, title_id, sanitized_title, season, year, imdb_id):
"""Check if is a valid title."""
if title["imdb_id"] and title["imdb_id"] == imdb_id:
logger.debug(u'Matched title "%s" as IMDB ID %s', sanitized_title, title["imdb_id"])
return True
if title["title2"] and sanitize(title['title2']) == sanitized_title:
logger.debug(u'Matched title "%s" as "%s"', sanitized_title, title["title2"])
return True
return _LegendasTVProvider.is_valid_title(title, title_id, sanitized_title, season, year)
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
def search_titles(self, title, season, title_year, imdb_id):
"""Search for titles matching the `title`.
For episodes, each season has its own title
:param str title: the title to search for.
:param int season: season of the title
:param int title_year: year of the title
:return: found titles.
:rtype: dict
"""
titles = {}
sanitized_titles = [sanitize(title)]
ignore_characters = {'\'', '.'}
if any(c in title for c in ignore_characters):
sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters))
for sanitized_title in sanitized_titles:
# make the query
if season:
logger.info('Searching episode title %r for season %r', sanitized_title, season)
else:
logger.info('Searching movie title %r', sanitized_title)
r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10)
raise_for_status(r)
results = json.loads(r.text)
# loop over results
for result in results:
source = result['_source']
# extract id
title_id = int(source['id_filme'])
# extract type
title = {'type': type_map[source['tipo']], 'title2': None, 'imdb_id': None}
# extract title, year and country
name, year, country = title_re.match(source['dsc_nome']).groups()
title['title'] = name
if "dsc_nome_br" in source:
name2, _, _ = title_re.match(source['dsc_nome_br']).groups()
title['title2'] = name2
# extract imdb_id
if source['id_imdb'] != '0':
if not source['id_imdb'].startswith('tt'):
title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
else:
title['imdb_id'] = source['id_imdb']
# extract season
if title['type'] == 'episode':
if source['temporada'] and source['temporada'].isdigit():
title['season'] = int(source['temporada'])
else:
match = season_re.search(source['dsc_nome_br'])
if match:
title['season'] = int(match.group('season'))
else:
logger.debug('No season detected for title %d (%s)', title_id, name)
# extract year
if year:
title['year'] = int(year)
elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
# year is based on season air date hence the adjustment
title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1
# add title only if is valid
# Check against title without ignored chars
if self.is_valid_title(title, title_id, sanitized_titles[0], season, title_year, imdb_id):
logger.debug(u'Found title: %s', title)
titles[title_id] = title
logger.debug('Found %d titles', len(titles))
return titles
def query(self, language, title, season=None, episode=None, year=None, imdb_id=None):
# search for titles
titles = self.search_titles(title, season, year, imdb_id)
subtitles = []
# iterate over titles
for title_id, t in titles.items():
logger.info('Getting archives for title %d and language %d', title_id, language.legendastv)
archives = self.get_archives(title_id, language.legendastv, t['type'], season, episode)
if not archives:
logger.info('No archives found for title %d and language %d', title_id, language.legendastv)
# iterate over title's archives
for a in archives:
# compute an expiration time based on the archive timestamp
expiration_time = (datetime.utcnow().replace(tzinfo=pytz.utc) - a.timestamp).total_seconds()
# attempt to get the releases from the cache
cache_key = releases_key.format(archive_id=a.id, archive_name=a.name)
releases = region.get(cache_key, expiration_time=expiration_time)
# the releases are not in cache or cache is expired
if releases == NO_VALUE:
logger.info('Releases not found in cache')
# download archive
self.download_archive(a)
# extract the releases
releases = []
for name in a.content.namelist():
# discard the legendastv file
if name.startswith('Legendas.tv'):
continue
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
releases.append(name)
# cache the releases
region.set(cache_key, releases)
# iterate over releases
for r in releases:
subtitle = self.subtitle_class(language, t['type'], t['title'], t.get('year'), t.get('imdb_id'),
t.get('season'), a, r)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def list_subtitles(self, video, languages):
season = episode = None
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
season = video.season
episode = video.episode
else:
titles = [video.title] + video.alternative_titles
for title in titles:
subtitles = [s for l in languages for s in
self.query(l, title, season=season, episode=episode, year=video.year, imdb_id=video.imdb_id)]
if subtitles:
return subtitles
return []
def download_subtitle(self, subtitle):
super(LegendasTVProvider, self).download_subtitle(subtitle)
subtitle.archive.content = None
def get_archives(self, title_id, language_code, title_type, season, episode):
return super(LegendasTVProvider, self).get_archives.original(self, title_id, language_code, title_type,
season, episode)
```
#### File: subliminal_patch/providers/shooter.py
```python
from subliminal.providers.shooter import ShooterProvider as _ShooterProvider, ShooterSubtitle as _ShooterSubtitle
class ShooterSubtitle(_ShooterSubtitle):
def __init__(self, language, hash, download_link):
super(ShooterSubtitle, self).__init__(language, hash, download_link)
self.release_info = hash
self.page_link = download_link
class ShooterProvider(_ShooterProvider):
subtitle_class = ShooterSubtitle
```
#### File: subliminal_patch/refiners/common.py
```python
import logging
import os
from guessit import guessit
from subliminal import Episode
from subliminal_patch.core import remove_crap_from_fn
logger = logging.getLogger(__name__)
def update_video(video, fn):
guess_from = remove_crap_from_fn(fn)
logger.debug(u"Got original filename: %s", guess_from)
# guess
hints = {
"single_value": True,
"type": "episode" if isinstance(video, Episode) else "movie",
}
guess = guessit(guess_from, options=hints)
for attr in ("release_group", "format",):
if attr in guess:
value = guess.get(attr)
logger.debug(u"%s: Filling attribute %s: %s", video.name, attr, value)
setattr(video, attr, value)
video.original_name = os.path.basename(guess_from)
```
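A hypothetical usage sketch of `update_video()`; the file name and title below are made up, and the `subliminal_patch` package is assumed to be importable under that module path:
```python
# Illustrative only: shows how update_video() fills guessed attributes on a Movie.
from subliminal.video import Movie
from subliminal_patch.refiners.common import update_video

fn = '/downloads/Some.Movie.2016.1080p.BluRay.x264-GRP.mkv'  # hypothetical path
video = Movie(fn, 'Some Movie')
update_video(video, fn)
# release_group and format are now filled from the guess (e.g. 'GRP', 'BluRay'),
# and video.original_name holds the basename of the cleaned file name.
```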
#### File: subliminal_patch/refiners/omdb.py
```python
import os
import subliminal
import base64
import zlib
from subliminal import __short_version__
from subliminal.refiners.omdb import OMDBClient, refine
class SZOMDBClient(OMDBClient):
def __init__(self, version=1, session=None, headers=None, timeout=10):
super(SZOMDBClient, self).__init__(version=version, session=session, headers=headers, timeout=timeout)
def get_params(self, params):
self.session.params['apikey'] = \
zlib.decompress(base64.b16decode(os.environ['U1pfT01EQl9LRVk']))\
.decode('cm90MTM=\n'.decode("base64")) \
.decode('YmFzZTY0\n'.decode("base64")).split("x")[0]
return dict(self.session.params, **params)
def get(self, id=None, title=None, type=None, year=None, plot='short', tomatoes=False):
# build the params
params = {}
if id:
params['i'] = id
if title:
params['t'] = title
if not params:
raise ValueError('At least id or title is required')
params['type'] = type
params['y'] = year
params['plot'] = plot
params['tomatoes'] = tomatoes
# perform the request
r = self.session.get(self.base_url, params=self.get_params(params))
r.raise_for_status()
# get the response as json
j = r.json()
# check response status
if j['Response'] == 'False':
return None
return j
def search(self, title, type=None, year=None, page=1):
# build the params
params = {'s': title, 'type': type, 'y': year, 'page': page}
# perform the request
r = self.session.get(self.base_url, params=self.get_params(params))
r.raise_for_status()
# get the response as json
j = r.json()
# check response status
if j['Response'] == 'False':
return None
return j
omdb_client = SZOMDBClient(headers={'User-Agent': 'Subliminal/%s' % __short_version__})
subliminal.refiners.omdb.omdb_client = omdb_client
```
#### File: subliminal/providers/addic7ed.py
```python
import logging
import re
from babelfish import Language, language_converters
from guessit import guessit
from requests import Session
from . import ParserBeautifulSoup, Provider
from .. import __short_version__
from ..cache import SHOW_EXPIRATION_TIME, region
from ..exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded
from ..score import get_equivalent_release_groups
from ..subtitle import Subtitle, fix_line_ending, guess_matches
from ..utils import sanitize, sanitize_release_group
from ..video import Episode
logger = logging.getLogger(__name__)
language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter')
# Series cell matching regex
show_cells_re = re.compile(b'<td class="version">.*?</td>', re.DOTALL)
#: Series header parsing regex
series_year_re = re.compile(r'^(?P<series>[ \w\'.:(),*&!?-]+?)(?: \((?P<year>\d{4})\))?$')
class Addic7edSubtitle(Subtitle):
"""Addic7ed Subtitle."""
provider_name = 'addic7ed'
def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version,
download_link):
super(Addic7edSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link)
self.series = series
self.season = season
self.episode = episode
self.title = title
self.year = year
self.version = version
self.download_link = download_link
@property
def id(self):
return self.download_link
def get_matches(self, video):
matches = set()
# series name
if video.series and sanitize(self.series) in (
sanitize(name) for name in [video.series] + video.alternative_series):
matches.add('series')
# season
if video.season and self.season == video.season:
matches.add('season')
# episode
if video.episode and self.episode == video.episode:
matches.add('episode')
# title of the episode
if video.title and sanitize(self.title) == sanitize(video.title):
matches.add('title')
# year
if video.original_series and self.year is None or video.year and video.year == self.year:
matches.add('year')
# release_group
if (video.release_group and self.version and
any(r in sanitize_release_group(self.version)
for r in get_equivalent_release_groups(sanitize_release_group(video.release_group)))):
matches.add('release_group')
# resolution
if video.resolution and self.version and video.resolution in self.version.lower():
matches.add('resolution')
# format
if video.format and self.version and video.format.lower() in self.version.lower():
matches.add('format')
# other properties
matches |= guess_matches(video, guessit(self.version), partial=True)
return matches
class Addic7edProvider(Provider):
"""Addic7ed Provider."""
languages = {Language('por', 'BR')} | {Language(l) for l in [
'ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', 'fin', 'fra', 'glg',
'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', 'nld', 'nor', 'pol', 'por', 'ron', 'rus',
'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', 'tur', 'ukr', 'vie', 'zho'
]}
video_types = (Episode,)
server_url = 'http://www.addic7ed.com/'
subtitle_class = Addic7edSubtitle
def __init__(self, username=None, password=None):
if any((username, password)) and not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.username = username
self.password = password
self.logged_in = False
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__
# login
if self.username and self.password:
logger.info('Logging in')
data = {'username': self.username, 'password': self.password, 'Submit': 'Log in'}
r = self.session.post(self.server_url + 'dologin.php', data, allow_redirects=False, timeout=10)
if r.status_code != 302:
raise AuthenticationError(self.username)
logger.debug('Logged in')
self.logged_in = True
def terminate(self):
# logout
if self.logged_in:
logger.info('Logging out')
r = self.session.get(self.server_url + 'logout.php', timeout=10)
r.raise_for_status()
logger.debug('Logged out')
self.logged_in = False
self.session.close()
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
def _get_show_ids(self):
"""Get the ``dict`` of show ids per series by querying the `shows.php` page.
:return: show id per series, lower case and without quotes.
:rtype: dict
"""
# get the show page
logger.info('Getting show ids')
r = self.session.get(self.server_url + 'shows.php', timeout=10)
r.raise_for_status()
# LXML parser seems to fail when parsing Addic7ed.com HTML markup.
# Last known version to work properly is 3.6.4 (next version, 3.7.0, fails)
# Assuming the site's markup is bad, and stripping it down to only contain what's needed.
show_cells = re.findall(show_cells_re, r.content)
if show_cells:
soup = ParserBeautifulSoup(b''.join(show_cells), ['lxml', 'html.parser'])
else:
# If RegEx fails, fall back to original r.content and use 'html.parser'
soup = ParserBeautifulSoup(r.content, ['html.parser'])
# populate the show ids
show_ids = {}
for show in soup.select('td.version > h3 > a[href^="/show/"]'):
show_ids[sanitize(show.text)] = int(show['href'][6:])
logger.debug('Found %d show ids', len(show_ids))
return show_ids
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
def _search_show_id(self, series, year=None):
"""Search the show id from the `series` and `year`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:return: the show id, if found.
:rtype: int
"""
# addic7ed doesn't support search with quotes
series = series.replace('\'', ' ')
# build the params
series_year = '%s %d' % (series, year) if year is not None else series
params = {'search': series_year, 'Submit': 'Search'}
# make the search
logger.info('Searching show ids with %r', params)
r = self.session.get(self.server_url + 'search.php', params=params, timeout=10)
r.raise_for_status()
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# get the suggestion
suggestion = soup.select('span.titulo > a[href^="/show/"]')
if not suggestion:
logger.warning('Show id not found: no suggestion')
return None
if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year):
logger.warning('Show id not found: suggestion does not match')
return None
show_id = int(suggestion[0]['href'][6:])
logger.debug('Found show id %d', show_id)
return show_id
def get_show_id(self, series, year=None, country_code=None):
"""Get the best matching show id for `series`, `year` and `country_code`.
First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:param country_code: country code of the series, if any.
:type country_code: str
:return: the show id, if found.
:rtype: int
"""
series_sanitized = sanitize(series).lower()
show_ids = self._get_show_ids()
show_id = None
# attempt with country
if not show_id and country_code:
logger.debug('Getting show id with country')
show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower()))
# attempt with year
if not show_id and year:
logger.debug('Getting show id with year')
show_id = show_ids.get('%s %d' % (series_sanitized, year))
# attempt clean
if not show_id:
logger.debug('Getting show id')
show_id = show_ids.get(series_sanitized)
# search as last resort
if not show_id:
logger.warning('Series %s not found in show ids', series)
show_id = self._search_show_id(series)
return show_id
def query(self, show_id, series, season, year=None, country=None):
# get the page of the season of the show
logger.info('Getting the page of show id %d, season %d', show_id, season)
r = self.session.get(self.server_url + 'show/%d' % show_id, params={'season': season}, timeout=10)
r.raise_for_status()
if not r.content:
# Provider returns a status of 304 Not Modified with an empty content
# raise_for_status won't raise exception for that status code
logger.debug('No data returned from provider')
return []
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# loop over subtitle rows
match = series_year_re.match(soup.select('#header font')[0].text.strip()[:-10])
series = match.group('series')
year = int(match.group('year')) if match.group('year') else None
subtitles = []
for row in soup.select('tr.epeven'):
cells = row('td')
# ignore incomplete subtitles
status = cells[5].text
if status != 'Completed':
logger.debug('Ignoring subtitle with status %s', status)
continue
# read the item
language = Language.fromaddic7ed(cells[3].text)
hearing_impaired = bool(cells[6].text)
page_link = self.server_url + cells[2].a['href'][1:]
season = int(cells[0].text)
episode = int(cells[1].text)
title = cells[2].text
version = cells[4].text
download_link = cells[9].a['href'][1:]
subtitle = self.subtitle_class(language, hearing_impaired, page_link, series, season, episode, title, year,
version, download_link)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def list_subtitles(self, video, languages):
# lookup show_id
titles = [video.series] + video.alternative_series
show_id = None
for title in titles:
show_id = self.get_show_id(title, video.year)
if show_id is not None:
break
# query for subtitles with the show_id
if show_id is not None:
subtitles = [s for s in self.query(show_id, title, video.season, video.year)
if s.language in languages and s.episode == video.episode]
if subtitles:
return subtitles
else:
logger.error('No show id found for %r (%r)', video.series, {'year': video.year})
return []
def download_subtitle(self, subtitle):
# download the subtitle
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link},
timeout=10)
r.raise_for_status()
if not r.content:
# Provider returns a status of 304 Not Modified with an empty content
# raise_for_status won't raise exception for that status code
logger.debug('Unable to download subtitle. No data returned from provider')
return
# detect download limit exceeded
if r.headers['Content-Type'] == 'text/html':
raise DownloadLimitExceeded
subtitle.content = fix_line_ending(r.content)
```
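A minimal sketch of how this provider's lifecycle is typically driven; normally subliminal's high-level API does this, the episode below is hypothetical, and network access plus optional credentials are assumed:
```python
from babelfish import Language
from subliminal.providers.addic7ed import Addic7edProvider
from subliminal.video import Episode

video = Episode('Some.Show.S03E07.720p.HDTV.x264-GRP.mkv', 'Some Show', 3, 7)
provider = Addic7edProvider()  # or Addic7edProvider(username, password)
provider.initialize()
try:
    subtitles = provider.list_subtitles(video, {Language('eng')})
    if subtitles:
        provider.download_subtitle(subtitles[0])
finally:
    provider.terminate()
```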
#### File: subliminal/providers/napiprojekt.py
```python
import logging
from babelfish import Language
from requests import Session
from . import Provider
from .. import __short_version__
from ..subtitle import Subtitle
logger = logging.getLogger(__name__)
def get_subhash(hash):
"""Get a second hash based on napiprojekt's hash.
:param str hash: napiprojekt's hash.
:return: the subhash.
:rtype: str
"""
idx = [0xe, 0x3, 0x6, 0x8, 0x2]
mul = [2, 2, 5, 4, 3]
add = [0, 0xd, 0x10, 0xb, 0x5]
b = []
for i in range(len(idx)):
a = add[i]
m = mul[i]
i = idx[i]
t = a + int(hash[i], 16)
v = int(hash[t:t + 2], 16)
b.append(('%x' % (v * m))[-1])
return ''.join(b)
class NapiProjektSubtitle(Subtitle):
"""NapiProjekt Subtitle."""
provider_name = 'napiprojekt'
def __init__(self, language, hash):
super(NapiProjektSubtitle, self).__init__(language)
self.hash = hash
self.content = None
@property
def id(self):
return self.hash
def get_matches(self, video):
matches = set()
# hash
if 'napiprojekt' in video.hashes and video.hashes['napiprojekt'] == self.hash:
matches.add('hash')
return matches
class NapiProjektProvider(Provider):
"""NapiProjekt Provider."""
languages = {Language.fromalpha2(l) for l in ['pl']}
required_hash = 'napiprojekt'
server_url = 'http://napiprojekt.pl/unit_napisy/dl.php'
subtitle_class = NapiProjektSubtitle
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__
def terminate(self):
self.session.close()
def query(self, language, hash):
params = {
'v': 'dreambox',
'kolejka': 'false',
'nick': '',
'pass': '',
'napios': 'Linux',
'l': language.alpha2.upper(),
'f': hash,
't': get_subhash(hash)}
logger.info('Searching subtitle %r', params)
r = self.session.get(self.server_url, params=params, timeout=10)
r.raise_for_status()
# handle subtitles not found and errors
if r.content[:4] == b'NPc0':
logger.debug('No subtitles found')
return None
subtitle = self.subtitle_class(language, hash)
subtitle.content = r.content
logger.debug('Found subtitle %r', subtitle)
return subtitle
def list_subtitles(self, video, languages):
return [s for s in [self.query(l, video.hashes['napiprojekt']) for l in languages] if s is not None]
def download_subtitle(self, subtitle):
# there is no download step, content is already filled from listing subtitles
pass
```
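For illustration, the provider pairs a 32-character napiprojekt file hash with the 5-character subhash computed by `get_subhash()`; the hash value below is made up:
```python
from subliminal.providers.napiprojekt import get_subhash

video_hash = '9e107d9d372bb6826bd81d3542a419d6'  # hypothetical 32-char hex hash
print(get_subhash(video_hash))  # 5 hex characters, sent as the 't' parameter in query()
```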
#### File: subliminal/providers/podnapisi.py
```python
import io
import logging
import re
from babelfish import Language, language_converters
from guessit import guessit
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from requests import Session
from zipfile import ZipFile
from . import Provider
from .. import __short_version__
from ..exceptions import ProviderError
from ..subtitle import Subtitle, fix_line_ending, guess_matches
from ..utils import sanitize
from ..video import Episode, Movie
logger = logging.getLogger(__name__)
class PodnapisiSubtitle(Subtitle):
"""Podnapisi Subtitle."""
provider_name = 'podnapisi'
def __init__(self, language, hearing_impaired, page_link, pid, releases, title, season=None, episode=None,
year=None):
super(PodnapisiSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link)
self.pid = pid
self.releases = releases
self.title = title
self.season = season
self.episode = episode
self.year = year
@property
def id(self):
return self.pid
def get_matches(self, video):
matches = set()
# episode
if isinstance(video, Episode):
# series
if video.series and (sanitize(self.title) in (
sanitize(name) for name in [video.series] + video.alternative_series)):
matches.add('series')
# year
if video.original_series and self.year is None or video.year and video.year == self.year:
matches.add('year')
# season
if video.season and self.season == video.season:
matches.add('season')
# episode
if video.episode and self.episode == video.episode:
matches.add('episode')
# guess
for release in self.releases:
matches |= guess_matches(video, guessit(release, {'type': 'episode'}))
# movie
elif isinstance(video, Movie):
# title
if video.title and (sanitize(self.title) in (
sanitize(name) for name in [video.title] + video.alternative_titles)):
matches.add('title')
# year
if video.year and self.year == video.year:
matches.add('year')
# guess
for release in self.releases:
matches |= guess_matches(video, guessit(release, {'type': 'movie'}))
return matches
class PodnapisiProvider(Provider):
"""Podnapisi Provider."""
languages = ({Language('por', 'BR'), Language('srp', script='Latn')} |
{Language.fromalpha2(l) for l in language_converters['alpha2'].codes})
server_url = 'https://www.podnapisi.net/subtitles/'
subtitle_class = PodnapisiSubtitle
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__
def terminate(self):
self.session.close()
def query(self, language, keyword, season=None, episode=None, year=None):
# set parameters, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164#p212652
params = {'sXML': 1, 'sL': str(language), 'sK': keyword}
is_episode = False
if season and episode:
is_episode = True
params['sTS'] = season
params['sTE'] = episode
if year:
params['sY'] = year
# loop over paginated results
logger.info('Searching subtitles %r', params)
subtitles = []
pids = set()
while True:
# query the server
r = self.session.get(self.server_url + 'search/old', params=params, timeout=10)
r.raise_for_status()
xml = etree.fromstring(r.content)
# exit if no results
if not int(xml.find('pagination/results').text):
logger.debug('No subtitles found')
break
# loop over subtitles
for subtitle_xml in xml.findall('subtitle'):
# read xml elements
pid = subtitle_xml.find('pid').text
# ignore duplicates, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164&start=10#p213321
if pid in pids:
continue
language = Language.fromietf(subtitle_xml.find('language').text)
hearing_impaired = 'n' in (subtitle_xml.find('flags').text or '')
page_link = subtitle_xml.find('url').text
releases = []
if subtitle_xml.find('release').text:
for release in subtitle_xml.find('release').text.split():
release = re.sub(r'\.+$', '', release) # remove trailing dots
release = ''.join(filter(lambda x: ord(x) < 128, release)) # remove non-ascii characters
releases.append(release)
title = subtitle_xml.find('title').text
season = int(subtitle_xml.find('tvSeason').text)
episode = int(subtitle_xml.find('tvEpisode').text)
year = int(subtitle_xml.find('year').text)
if is_episode:
subtitle = self.subtitle_class(language, hearing_impaired, page_link, pid, releases, title,
season=season, episode=episode, year=year)
else:
subtitle = self.subtitle_class(language, hearing_impaired, page_link, pid, releases, title,
year=year)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
pids.add(pid)
# stop on last page
if int(xml.find('pagination/current').text) >= int(xml.find('pagination/count').text):
break
# increment current page
params['page'] = int(xml.find('pagination/current').text) + 1
logger.debug('Getting page %d', params['page'])
return subtitles
def list_subtitles(self, video, languages):
season = episode = None
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
season = video.season
episode = video.episode
else:
titles = [video.title] + video.alternative_titles
for title in titles:
subtitles = [s for l in languages for s in
self.query(l, title, season=season, episode=episode, year=video.year)]
if subtitles:
return subtitles
return []
def download_subtitle(self, subtitle):
# download as a zip
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(self.server_url + subtitle.pid + '/download', params={'container': 'zip'}, timeout=10)
r.raise_for_status()
# open the zip
with ZipFile(io.BytesIO(r.content)) as zf:
if len(zf.namelist()) > 1:
raise ProviderError('More than one file to unzip')
subtitle.content = fix_line_ending(zf.read(zf.namelist()[0]))
```
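A hypothetical direct call into the provider's paginated search (keyword, season, episode and year are made up; subliminal normally drives this through `list_subtitles`):
```python
from babelfish import Language
from subliminal.providers.podnapisi import PodnapisiProvider

provider = PodnapisiProvider()
provider.initialize()
try:
    subs = provider.query(Language('eng'), 'Some Show', season=3, episode=10, year=2011)
    print('%d subtitles found' % len(subs))
finally:
    provider.terminate()
```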
#### File: Shared/subzero/analytics.py
```python
import struct
import binascii
from pyga.requests import Event, Page, Tracker, Session, Visitor, Config
def track_event(category=None, action=None, label=None, value=None, identifier=None, first_use=None, add=None,
noninteraction=True):
anonymousConfig = Config()
anonymousConfig.anonimize_ip_address = True
tracker = Tracker('UA-86466078-1', 'none', conf=anonymousConfig)
visitor = Visitor()
    # convert the last 8 hex characters (4 bytes) of the machine identifier to an integer to get a "unique" user
visitor.unique_id = struct.unpack("!I", binascii.unhexlify(identifier[32:]))[0]/2
if add:
# add visitor's ip address (will be anonymized)
visitor.ip_address = add
if first_use:
visitor.first_visit_time = first_use
session = Session()
event = Event(category=category, action=action, label=label, value=value, noninteraction=noninteraction)
path = u"/" + u"/".join([category, action, label])
page = Page(path.lower())
tracker.track_event(event, session, visitor)
tracker.track_pageview(page, session, visitor)
```
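A standalone illustration of how `track_event` turns the tail of a hex machine identifier into a numeric visitor id; the identifier below is a made-up 40-character hex string:
```python
import binascii
import struct

identifier = '0123456789abcdef0123456789abcdef01234567'  # hypothetical
# identifier[32:] is the last 8 hex characters, i.e. 4 bytes, unpacked as an unsigned int.
unique_id = struct.unpack("!I", binascii.unhexlify(identifier[32:]))[0] / 2
print(unique_id)
```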
#### File: Shared/subzero/history_storage.py
```python
import datetime
import logging
import traceback
import types
from subzero.language import Language
from constants import mode_map
logger = logging.getLogger(__name__)
class SubtitleHistoryItem(object):
item_title = None
section_title = None
rating_key = None
provider_name = None
lang_name = None
lang_data = None
score = None
thumb = None
time = None
mode = "a"
def __init__(self, item_title, rating_key, section_title=None, subtitle=None, thumb=None, mode="a", time=None):
self.item_title = item_title
self.section_title = section_title
self.rating_key = str(rating_key)
self.provider_name = subtitle.provider_name
self.lang_name = str(subtitle.language.name)
self.lang_data = str(subtitle.language.alpha3), \
str(subtitle.language.country) if subtitle.language.country else None, \
str(subtitle.language.script) if subtitle.language.script else None
self.score = subtitle.score
self.thumb = thumb
self.time = time or datetime.datetime.now()
self.mode = mode
@property
def title(self):
return u"%s: %s" % (self.section_title, self.item_title)
@property
def language(self):
if self.lang_data:
lang_data = [s if s != "None" else None for s in self.lang_data]
if lang_data[0]:
return Language(lang_data[0], country=lang_data[1], script=lang_data[2])
@property
def mode_verbose(self):
return mode_map.get(self.mode, "Unknown")
def __repr__(self):
return unicode(self)
def __unicode__(self):
return u"%s (Score: %s)" % (unicode(self.item_title), self.score)
def __str__(self):
return str(self.rating_key)
def __hash__(self):
return hash((self.rating_key, self.score))
def __eq__(self, other):
return (self.rating_key, self.score) == (other.rating_key, other.score)
def __ne__(self, other):
# Not strictly necessary, but to avoid having both x==y and x!=y
# True at the same time
return not (self == other)
class SubtitleHistory(object):
size = 100
storage = None
threadkit = None
def __init__(self, storage, threadkit, size=100):
self.size = size
self.storage = storage
self.threadkit = threadkit
def add(self, item_title, rating_key, section_title=None, subtitle=None, thumb=None, mode="a", time=None):
with self.threadkit.Lock(key="sub_history_add"):
items = self.items
item = SubtitleHistoryItem(item_title, rating_key, section_title=section_title, subtitle=subtitle,
thumb=thumb, mode=mode, time=time)
# insert item
items.insert(0, item)
# clamp item amount
items = items[:self.size]
# store items
self.storage.SaveObject("subtitle_history", items)
@property
def items(self):
try:
items = self.storage.LoadObject("subtitle_history") or []
except:
items = []
logger.error("Failed to load history storage: %s" % traceback.format_exc())
if not isinstance(items, types.ListType):
items = []
else:
items = items[:]
return items
def destroy(self):
self.storage = None
self.threadkit = None
```
#### File: subzero/lib/dict.py
```python
class DictProxy(object):
store = None
def __init__(self, d):
self.Dict = d
super(DictProxy, self).__init__()
if self.store not in self.Dict or not self.Dict[self.store]:
self.Dict[self.store] = self.setup_defaults()
self.save()
self.__initialized = True
def __getattr__(self, name):
if name in self.Dict[self.store]:
return self.Dict[self.store][name]
return getattr(super(DictProxy, self), name)
def __setattr__(self, name, value):
if not self.__dict__.has_key(
'_DictProxy__initialized'): # this test allows attributes to be set in the __init__ method
return object.__setattr__(self, name, value)
elif self.__dict__.has_key(name): # any normal attributes are handled normally
object.__setattr__(self, name, value)
else:
if name in self.Dict[self.store]:
self.Dict[self.store][name] = value
return
object.__setattr__(self, name, value)
def __cmp__(self, d):
return cmp(self.Dict[self.store], d)
def __contains__(self, item):
return item in self.Dict[self.store]
def __setitem__(self, key, item):
self.Dict[self.store][key] = item
self.Dict.Save()
def __iter__(self):
return iter(self.Dict[self.store])
def __getitem__(self, key):
if key in self.Dict[self.store]:
return self.Dict[self.store][key]
def __repr__(self):
return repr(self.Dict[self.store])
def __str__(self):
return str(self.Dict[self.store])
def __len__(self):
return len(self.Dict[self.store].keys())
def __delitem__(self, key):
del self.Dict[self.store][key]
def save(self):
self.Dict.Save()
def clear(self):
del self.Dict[self.store]
return None
def copy(self):
return self.Dict[self.store].copy()
def has_key(self, k):
return k in self.Dict[self.store]
def pop(self, k, d=None):
return self.Dict[self.store].pop(k, d)
def update(self, *args, **kwargs):
return self.Dict[self.store].update(*args, **kwargs)
def keys(self):
return self.Dict[self.store].keys()
def values(self):
return self.Dict[self.store].values()
def items(self):
return self.Dict[self.store].items()
def __unicode__(self):
return unicode(repr(self.Dict[self.store]))
def setup_defaults(self):
raise NotImplementedError
class Dicked(object):
"""
mirrors a dictionary; readonly
"""
_entries = None
def __init__(self, **entries):
self._entries = entries or None
for key, value in entries.iteritems():
self.__dict__[key] = (Dicked(**value) if isinstance(value, dict) else value)
def __repr__(self):
return str(self)
def __unicode__(self):
return unicode(self.__digged__)
def __str__(self):
return str(self.__digged__)
def __lt__(self, d):
return self._entries < d
def __le__(self, d):
return self._entries <= d
def __eq__(self, d):
if d is None and not self._entries:
return True
return self._entries == d
def __ne__(self, d):
return self._entries != d
def __gt__(self, d):
return self._entries > d
def __ge__(self, d):
return self._entries >= d
def __getattr__(self, name):
# fixme: this might be wildly stupid; maybe implement stuff like .iteritems() directly
return getattr(self._entries, name, Dicked())
@property
def __digged__(self):
return {key: value for key, value in self.__dict__.iteritems() if key != "_entries"}
def __len__(self):
return len(self.__digged__)
def __nonzero__(self):
return bool(self.__digged__)
def __iter__(self):
return iter(self.__digged__)
def __hash__(self):
return hash(self.__digged__)
def __getitem__(self, name):
if name in self._entries:
return getattr(self, name)
raise KeyError(name)
```
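A small usage sketch of `Dicked` (Python 2, like the module above, because of `iteritems`/`unicode`); the configuration values are made up and the import path is assumed from the file location:
```python
from subzero.lib.dict import Dicked  # assumed import path

cfg = Dicked(**{"general": {"debug": True, "name": "sz"}, "count": 3})
print(cfg.general.debug)   # True - nested dicts become nested Dicked instances
print(cfg.count)           # 3
print(bool(cfg.missing))   # False - unknown keys yield an empty, falsy Dicked
```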
#### File: modification/processors/__init__.py
```python
class Processor(object):
"""
Processor base class
"""
name = None
parent = None
supported = None
enabled = True
def __init__(self, name=None, parent=None, supported=None):
self.name = name
self.parent = parent
self.supported = supported if supported else lambda parent: True
@property
def info(self):
return self.name
def process(self, content, debug=False, **kwargs):
return content
def __repr__(self):
return "Processor <%s %s>" % (self.__class__.__name__, self.info)
def __str__(self):
return repr(self)
def __unicode__(self):
return unicode(repr(self))
class FuncProcessor(Processor):
func = None
def __init__(self, func, name=None, parent=None, supported=None):
super(FuncProcessor, self).__init__(name=name, supported=supported)
self.func = func
def process(self, content, debug=False, **kwargs):
return self.func(content)
```
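A minimal sketch of wrapping a plain function in a `FuncProcessor`; the processor name and content are made up and the import path is assumed from the file location:
```python
from subzero.modification.processors import FuncProcessor  # assumed import path

strip_ws = FuncProcessor(lambda content: content.strip(), name="strip_whitespace")
print(strip_ws.process("  some subtitle line  "))  # -> 'some subtitle line'
print(repr(strip_ws))                              # Processor <FuncProcessor strip_whitespace>
```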
#### File: wraptor/context/throttle.py
```python
import time
from wraptor.context import maybe
class throttle(maybe):
def __init__(self, seconds=1):
self.seconds = seconds
self.last_run = 0
def predicate():
now = time.time()
if now > self.last_run + self.seconds:
self.last_run = now
return True
return False
maybe.__init__(self, predicate)
```
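The predicate built in `__init__` lets at most one pass through per `seconds` interval. As a standalone illustration of that gating logic (this sketch reimplements only the predicate; it does not rely on the behaviour of the `maybe` base class, which is not shown here):
```python
import time

def make_throttle_predicate(seconds=1):
    state = {'last_run': 0}
    def predicate():
        now = time.time()
        if now > state['last_run'] + seconds:
            state['last_run'] = now
            return True
        return False
    return predicate

allow = make_throttle_predicate(seconds=1)
print(allow())  # True: the first call always passes
print(allow())  # False: still inside the 1-second window
```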
#### File: decorators/test/test_exception_catcher.py
```python
from wraptor.decorators import exception_catcher
import threading
import pytest
def test_basic():
@exception_catcher
def work():
raise Exception()
t = threading.Thread(target=work)
t.start()
t.join()
with pytest.raises(Exception):
work.check()
```
#### File: Shared/xdg/MenuEditor.py
```python
from xdg.Menu import *
from xdg.BaseDirectory import *
from xdg.Exceptions import *
from xdg.DesktopEntry import *
from xdg.Config import *
import xml.dom.minidom
import os
import re
# XML-Cleanups: Move / Exclude
# FIXME: proper revert/delete
# FIXME: pass AppDirs/DirectoryDirs around in the edit/move functions
# FIXME: catch Exceptions
# FIXME: copy functions
# FIXME: More Layout stuff
# FIXME: undo/redo function / remove menu...
# FIXME: Advanced MenuEditing Stuff: LegacyDir/MergeFile
# Complex Rules/Deleted/OnlyAllocated/AppDirs/DirectoryDirs
class MenuEditor:
def __init__(self, menu=None, filename=None, root=False):
self.menu = None
self.filename = None
self.doc = None
self.parse(menu, filename, root)
# fix for creating two menus with the same name on the fly
self.filenames = []
def parse(self, menu=None, filename=None, root=False):
if root == True:
setRootMode(True)
if isinstance(menu, Menu):
self.menu = menu
elif menu:
self.menu = parse(menu)
else:
self.menu = parse()
if root == True:
self.filename = self.menu.Filename
elif filename:
self.filename = filename
else:
self.filename = os.path.join(xdg_config_dirs[0], "menus", os.path.split(self.menu.Filename)[1])
try:
self.doc = xml.dom.minidom.parse(self.filename)
except IOError:
self.doc = xml.dom.minidom.parseString('<!DOCTYPE Menu PUBLIC "-//freedesktop//DTD Menu 1.0//EN" "http://standards.freedesktop.org/menu-spec/menu-1.0.dtd"><Menu><Name>Applications</Name><MergeFile type="parent">'+self.menu.Filename+'</MergeFile></Menu>')
except xml.parsers.expat.ExpatError:
raise ParsingError('Not a valid .menu file', self.filename)
self.__remove_whilespace_nodes(self.doc)
def save(self):
self.__saveEntries(self.menu)
self.__saveMenu()
def createMenuEntry(self, parent, name, command=None, genericname=None, comment=None, icon=None, terminal=None, after=None, before=None):
menuentry = MenuEntry(self.__getFileName(name, ".desktop"))
menuentry = self.editMenuEntry(menuentry, name, genericname, comment, command, icon, terminal)
self.__addEntry(parent, menuentry, after, before)
sort(self.menu)
return menuentry
def createMenu(self, parent, name, genericname=None, comment=None, icon=None, after=None, before=None):
menu = Menu()
menu.Parent = parent
menu.Depth = parent.Depth + 1
menu.Layout = parent.DefaultLayout
menu.DefaultLayout = parent.DefaultLayout
menu = self.editMenu(menu, name, genericname, comment, icon)
self.__addEntry(parent, menu, after, before)
sort(self.menu)
return menu
def createSeparator(self, parent, after=None, before=None):
separator = Separator(parent)
self.__addEntry(parent, separator, after, before)
sort(self.menu)
return separator
def moveMenuEntry(self, menuentry, oldparent, newparent, after=None, before=None):
self.__deleteEntry(oldparent, menuentry, after, before)
self.__addEntry(newparent, menuentry, after, before)
sort(self.menu)
return menuentry
def moveMenu(self, menu, oldparent, newparent, after=None, before=None):
self.__deleteEntry(oldparent, menu, after, before)
self.__addEntry(newparent, menu, after, before)
root_menu = self.__getXmlMenu(self.menu.Name)
if oldparent.getPath(True) != newparent.getPath(True):
self.__addXmlMove(root_menu, os.path.join(oldparent.getPath(True), menu.Name), os.path.join(newparent.getPath(True), menu.Name))
sort(self.menu)
return menu
def moveSeparator(self, separator, parent, after=None, before=None):
self.__deleteEntry(parent, separator, after, before)
self.__addEntry(parent, separator, after, before)
sort(self.menu)
return separator
def copyMenuEntry(self, menuentry, oldparent, newparent, after=None, before=None):
self.__addEntry(newparent, menuentry, after, before)
sort(self.menu)
return menuentry
def editMenuEntry(self, menuentry, name=None, genericname=None, comment=None, command=None, icon=None, terminal=None, nodisplay=None, hidden=None):
deskentry = menuentry.DesktopEntry
if name:
if not deskentry.hasKey("Name"):
deskentry.set("Name", name)
deskentry.set("Name", name, locale = True)
if comment:
if not deskentry.hasKey("Comment"):
deskentry.set("Comment", comment)
deskentry.set("Comment", comment, locale = True)
if genericname:
if not deskentry.hasKey("GnericNe"):
deskentry.set("GenericName", genericname)
deskentry.set("GenericName", genericname, locale = True)
if command:
deskentry.set("Exec", command)
if icon:
deskentry.set("Icon", icon)
if terminal == True:
deskentry.set("Terminal", "true")
elif terminal == False:
deskentry.set("Terminal", "false")
if nodisplay == True:
deskentry.set("NoDisplay", "true")
elif nodisplay == False:
deskentry.set("NoDisplay", "false")
if hidden == True:
deskentry.set("Hidden", "true")
elif hidden == False:
deskentry.set("Hidden", "false")
menuentry.updateAttributes()
if len(menuentry.Parents) > 0:
sort(self.menu)
return menuentry
def editMenu(self, menu, name=None, genericname=None, comment=None, icon=None, nodisplay=None, hidden=None):
# Hack for legacy dirs
if isinstance(menu.Directory, MenuEntry) and menu.Directory.Filename == ".directory":
xml_menu = self.__getXmlMenu(menu.getPath(True, True))
self.__addXmlTextElement(xml_menu, 'Directory', menu.Name + ".directory")
menu.Directory.setAttributes(menu.Name + ".directory")
# Hack for New Entries
elif not isinstance(menu.Directory, MenuEntry):
if not name:
name = menu.Name
filename = self.__getFileName(name, ".directory").replace("/", "")
if not menu.Name:
menu.Name = filename.replace(".directory", "")
xml_menu = self.__getXmlMenu(menu.getPath(True, True))
self.__addXmlTextElement(xml_menu, 'Directory', filename)
menu.Directory = MenuEntry(filename)
deskentry = menu.Directory.DesktopEntry
if name:
if not deskentry.hasKey("Name"):
deskentry.set("Name", name)
deskentry.set("Name", name, locale = True)
if genericname:
if not deskentry.hasKey("GenericName"):
deskentry.set("GenericName", genericname)
deskentry.set("GenericName", genericname, locale = True)
if comment:
if not deskentry.hasKey("Comment"):
deskentry.set("Comment", comment)
deskentry.set("Comment", comment, locale = True)
if icon:
deskentry.set("Icon", icon)
if nodisplay == True:
deskentry.set("NoDisplay", "true")
elif nodisplay == False:
deskentry.set("NoDisplay", "false")
if hidden == True:
deskentry.set("Hidden", "true")
elif hidden == False:
deskentry.set("Hidden", "false")
menu.Directory.updateAttributes()
if isinstance(menu.Parent, Menu):
sort(self.menu)
return menu
def hideMenuEntry(self, menuentry):
self.editMenuEntry(menuentry, nodisplay = True)
def unhideMenuEntry(self, menuentry):
self.editMenuEntry(menuentry, nodisplay = False, hidden = False)
def hideMenu(self, menu):
self.editMenu(menu, nodisplay = True)
def unhideMenu(self, menu):
self.editMenu(menu, nodisplay = False, hidden = False)
xml_menu = self.__getXmlMenu(menu.getPath(True,True), False)
for node in self.__getXmlNodesByName(["Deleted", "NotDeleted"], xml_menu):
node.parentNode.removeChild(node)
def deleteMenuEntry(self, menuentry):
if self.getAction(menuentry) == "delete":
self.__deleteFile(menuentry.DesktopEntry.filename)
for parent in menuentry.Parents:
self.__deleteEntry(parent, menuentry)
sort(self.menu)
return menuentry
def revertMenuEntry(self, menuentry):
if self.getAction(menuentry) == "revert":
self.__deleteFile(menuentry.DesktopEntry.filename)
menuentry.Original.Parents = []
for parent in menuentry.Parents:
index = parent.Entries.index(menuentry)
parent.Entries[index] = menuentry.Original
index = parent.MenuEntries.index(menuentry)
parent.MenuEntries[index] = menuentry.Original
menuentry.Original.Parents.append(parent)
sort(self.menu)
return menuentry
def deleteMenu(self, menu):
if self.getAction(menu) == "delete":
self.__deleteFile(menu.Directory.DesktopEntry.filename)
self.__deleteEntry(menu.Parent, menu)
xml_menu = self.__getXmlMenu(menu.getPath(True, True))
xml_menu.parentNode.removeChild(xml_menu)
sort(self.menu)
return menu
def revertMenu(self, menu):
if self.getAction(menu) == "revert":
self.__deleteFile(menu.Directory.DesktopEntry.filename)
menu.Directory = menu.Directory.Original
sort(self.menu)
return menu
def deleteSeparator(self, separator):
self.__deleteEntry(separator.Parent, separator, after=True)
sort(self.menu)
return separator
""" Private Stuff """
def getAction(self, entry):
if isinstance(entry, Menu):
if not isinstance(entry.Directory, MenuEntry):
return "none"
elif entry.Directory.getType() == "Both":
return "revert"
elif entry.Directory.getType() == "User" \
and (len(entry.Submenus) + len(entry.MenuEntries)) == 0:
return "delete"
elif isinstance(entry, MenuEntry):
if entry.getType() == "Both":
return "revert"
elif entry.getType() == "User":
return "delete"
else:
return "none"
return "none"
def __saveEntries(self, menu):
if not menu:
menu = self.menu
if isinstance(menu.Directory, MenuEntry):
menu.Directory.save()
for entry in menu.getEntries(hidden=True):
if isinstance(entry, MenuEntry):
entry.save()
elif isinstance(entry, Menu):
self.__saveEntries(entry)
def __saveMenu(self):
if not os.path.isdir(os.path.dirname(self.filename)):
os.makedirs(os.path.dirname(self.filename))
fd = open(self.filename, 'w')
fd.write(re.sub("\n[\s]*([^\n<]*)\n[\s]*</", "\\1</", self.doc.toprettyxml().replace('<?xml version="1.0" ?>\n', '')))
fd.close()
def __getFileName(self, name, extension):
postfix = 0
while 1:
if postfix == 0:
filename = name + extension
else:
filename = name + "-" + str(postfix) + extension
if extension == ".desktop":
dir = "applications"
elif extension == ".directory":
dir = "desktop-directories"
if not filename in self.filenames and not \
os.path.isfile(os.path.join(xdg_data_dirs[0], dir, filename)):
self.filenames.append(filename)
break
else:
postfix += 1
return filename
def __getXmlMenu(self, path, create=True, element=None):
if not element:
element = self.doc
if "/" in path:
(name, path) = path.split("/", 1)
else:
name = path
path = ""
found = None
for node in self.__getXmlNodesByName("Menu", element):
for child in self.__getXmlNodesByName("Name", node):
if child.childNodes[0].nodeValue == name:
if path:
found = self.__getXmlMenu(path, create, node)
else:
found = node
break
if found:
break
if not found and create == True:
node = self.__addXmlMenuElement(element, name)
if path:
found = self.__getXmlMenu(path, create, node)
else:
found = node
return found
def __addXmlMenuElement(self, element, name):
node = self.doc.createElement('Menu')
self.__addXmlTextElement(node, 'Name', name)
return element.appendChild(node)
def __addXmlTextElement(self, element, name, text):
node = self.doc.createElement(name)
text = self.doc.createTextNode(text)
node.appendChild(text)
return element.appendChild(node)
def __addXmlFilename(self, element, filename, type = "Include"):
# remove old filenames
for node in self.__getXmlNodesByName(["Include", "Exclude"], element):
if node.childNodes[0].nodeName == "Filename" and node.childNodes[0].childNodes[0].nodeValue == filename:
element.removeChild(node)
# add new filename
node = self.doc.createElement(type)
node.appendChild(self.__addXmlTextElement(node, 'Filename', filename))
return element.appendChild(node)
def __addXmlMove(self, element, old, new):
node = self.doc.createElement("Move")
node.appendChild(self.__addXmlTextElement(node, 'Old', old))
node.appendChild(self.__addXmlTextElement(node, 'New', new))
return element.appendChild(node)
def __addXmlLayout(self, element, layout):
# remove old layout
for node in self.__getXmlNodesByName("Layout", element):
element.removeChild(node)
# add new layout
node = self.doc.createElement("Layout")
for order in layout.order:
if order[0] == "Separator":
child = self.doc.createElement("Separator")
node.appendChild(child)
elif order[0] == "Filename":
child = self.__addXmlTextElement(node, "Filename", order[1])
elif order[0] == "Menuname":
child = self.__addXmlTextElement(node, "Menuname", order[1])
elif order[0] == "Merge":
child = self.doc.createElement("Merge")
child.setAttribute("type", order[1])
node.appendChild(child)
return element.appendChild(node)
def __getXmlNodesByName(self, name, element):
for child in element.childNodes:
if child.nodeType == xml.dom.Node.ELEMENT_NODE and child.nodeName in name:
yield child
def __addLayout(self, parent):
layout = Layout()
layout.order = []
layout.show_empty = parent.Layout.show_empty
layout.inline = parent.Layout.inline
layout.inline_header = parent.Layout.inline_header
layout.inline_alias = parent.Layout.inline_alias
layout.inline_limit = parent.Layout.inline_limit
layout.order.append(["Merge", "menus"])
for entry in parent.Entries:
if isinstance(entry, Menu):
layout.parseMenuname(entry.Name)
elif isinstance(entry, MenuEntry):
layout.parseFilename(entry.DesktopFileID)
elif isinstance(entry, Separator):
layout.parseSeparator()
layout.order.append(["Merge", "files"])
parent.Layout = layout
return layout
def __addEntry(self, parent, entry, after=None, before=None):
if after or before:
if after:
index = parent.Entries.index(after) + 1
elif before:
index = parent.Entries.index(before)
parent.Entries.insert(index, entry)
else:
parent.Entries.append(entry)
xml_parent = self.__getXmlMenu(parent.getPath(True, True))
if isinstance(entry, MenuEntry):
parent.MenuEntries.append(entry)
entry.Parents.append(parent)
self.__addXmlFilename(xml_parent, entry.DesktopFileID, "Include")
elif isinstance(entry, Menu):
parent.addSubmenu(entry)
if after or before:
self.__addLayout(parent)
self.__addXmlLayout(xml_parent, parent.Layout)
def __deleteEntry(self, parent, entry, after=None, before=None):
parent.Entries.remove(entry)
xml_parent = self.__getXmlMenu(parent.getPath(True, True))
if isinstance(entry, MenuEntry):
entry.Parents.remove(parent)
parent.MenuEntries.remove(entry)
self.__addXmlFilename(xml_parent, entry.DesktopFileID, "Exclude")
elif isinstance(entry, Menu):
parent.Submenus.remove(entry)
if after or before:
self.__addLayout(parent)
self.__addXmlLayout(xml_parent, parent.Layout)
def __deleteFile(self, filename):
try:
os.remove(filename)
except OSError:
pass
try:
self.filenames.remove(filename)
except ValueError:
pass
def __remove_whilespace_nodes(self, node):
remove_list = []
for child in node.childNodes:
if child.nodeType == xml.dom.minidom.Node.TEXT_NODE:
child.data = child.data.strip()
if not child.data.strip():
remove_list.append(child)
elif child.hasChildNodes():
self.__remove_whilespace_nodes(child)
for node in remove_list:
node.parentNode.removeChild(node)
``` |
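A hypothetical usage sketch, assuming a standard XDG menu setup is present on the system; the entry name and command are made up:
```python
from xdg.MenuEditor import MenuEditor

editor = MenuEditor()                      # parses the default applications menu
root = editor.menu                         # the root Menu object
entry = editor.createMenuEntry(root, "My Tool", command="mytool",
                               comment="Does things", terminal=False)
editor.save()                              # writes the user's .menu override and .desktop file
```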
{
"source": "jipp/theEye",
"score": 2
} |
#### File: RaspberryPi/systemd/main.py
```python
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import json
import logging
import time
import sys
import ConfigParser
import paramiko
import picamera
import platform
#logging.basicConfig(level=logging.WARNING)
logging.basicConfig(level=logging.INFO)
#logging.basicConfig(level=logging.DEBUG)
logging.getLogger("paramiko.transport").setLevel(logging.INFO)
#paramiko.util.log_to_file('/tmp/paramiko.log')
try:
camera = picamera.PiCamera()
except Exception as e:
print(e)
sys.exit('interrupted')
id = platform.node()
client = mqtt.Client()
config = ConfigParser.ConfigParser()
config.read('/data/theEye/RaspberryPi/theEye.ini')
try:
local_folder = config.get('local', 'folder')
local_description = config.get('local', 'description')
mqtt_host = config.get('mqtt', 'host')
mqtt_username = config.get('mqtt', 'username')
mqtt_password = config.get('mqtt', 'password')
mqtt_nodes = config.get('mqtt', 'nodes').split(',')
mqtt_status = config.get('mqtt', 'status')
camera.rotation = config.get('camera', 'rotation')
camera.hflip = config.getboolean('camera', 'hflip')
camera.vflip = config.getboolean('camera', 'vflip')
camera.resolution = (config.getint('camera', 'width'), config.getint('camera', 'height'))
camera.led = config.getboolean('camera', 'led')
remote_enabled = config.getboolean('remote', 'enable')
remote_host = config.get('remote', 'host')
remote_username = config.get('remote', 'username')
remote_password = config.get('remote', 'password')
remote_folder = config.get('remote', 'folder')
except Exception as e:
print(e)
sys.exit('interrupted')
def takePhoto(picture, sensor, trigger):
output = "{0}/{1}".format(local_folder, picture)
camera.capture(output)
logging.info('saving output: ' + output)
publish.single(mqtt_status + "/status", "{\"id\":" + id + ",\"description\":" + local_description + ",\"sensor\":" + sensor + ",\"trigger\":" + trigger + ",\"picture\":" + picture + ",\"upload\":" + str(remote_enabled) + "}", hostname = mqtt_host, auth = {'username': mqtt_username, 'password': <PASSWORD>})
if (remote_enabled):
upload(picture)
def upload(picture):
localfile = "{0}/{1}".format(local_folder, picture)
remotefile = "{0}/{1}".format(remote_folder, picture)
transport = paramiko.Transport((remote_host, 22))
    transport.connect(username = remote_username, password = remote_password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put(localfile, remotefile)
except Exception as e:
print(e)
print('host: ' + remote_host + '; username: ' + remote_username + '; password: ' + remote_password)
print(localfile + ' -> ' + remotefile)
sftp.close()
transport.close()
def get_picture_name():
timestr = time.strftime("%Y%m%d-%H%M%S")
picture = "{0}-{1}.jpg".format(id, timestr)
return picture
def on_connect(client, userdata, flags, rc):
logging.info('on_connect rc: ' + str(rc))
client.subscribe(id + "/value")
for node in mqtt_nodes:
client.subscribe(node + "/value")
def on_disconnect(client, userdata, rc):
logging.info('on_disconnect rc: ' + str(rc))
def on_message(client, userdata, message):
logging.info('topic: ' + message.topic + ', qos: ' + str(message.qos) + ', payload: ' + str(message.payload))
try:
payload = json.loads(message.payload)
sensor = message.topic.split("/")[0]
for key in payload:
if key != 'vcc':
if payload[key]:
logging.info(key + ' triggered')
picture = get_picture_name()
takePhoto(picture, sensor, key)
except Exception as e:
print(e)
def on_subscribe(client, userdata, mid, granted_qos):
logging.info('mid: ' + str(mid) + ', granted_qos: ' + str(granted_qos))
def on_log(client, userdata, level, buf):
logging.debug(buf)
def main():
# client = mqtt.Client()
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
client.on_subscribe = on_subscribe
client.on_log = on_log
    client.username_pw_set(username=mqtt_username, password=mqtt_password)
client.connect(mqtt_host)
client.loop_forever()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
camera.close()
client.disconnect()
sys.exit('interrupted')
``` |
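For illustration, a trigger message of the shape `on_message` expects could be published like this; the broker host and credentials are made up, and any truthy key other than `vcc` in the payload causes a photo:
```python
import json
import paho.mqtt.publish as publish

publish.single("sensor01/value",                      # <node>/value, as subscribed above
               json.dumps({"motion": 1, "vcc": 3.3}),
               hostname="mqtt.example.org",            # hypothetical broker
               auth={"username": "user", "password": "secret"})
```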
{
"source": "JiPRA/openlierox",
"score": 2
} |
#### File: gamedir/scripts/pwn0meter.py
```python
import sys, time, cgi, os, random, traceback, re
f = open("pwn0meter.txt","r")
w = open("pwn0meter.html","w")
#w = sys.stdout
w.write("<html><HEAD>\n")
w.write("<META HTTP-EQUIV=\"content-type\" CONTENT=\"text/html; charset=utf-8\">\n")
w.write("<TITLE>Pwn0meter</TITLE>\n")
w.write("</HEAD>\n<BODY>\n<H2>Pwn0meter</H2>\n")
w.write("<p>updated on %s</p>\n" % time.asctime())
# make random chat quotes
# doesn't matter if it fails, so surround by try/catch
try:
# really hacky way to get latest logfile (assume that ls sorts by name)
lastlogfile = "logs/" + os.popen("ls logs").read().splitlines()[-1]
chatlogmark = "n: CHAT: "
chatlines = os.popen("tail -n 10000 \"" + lastlogfile + "\" | grep \"" + chatlogmark + "\"").read().splitlines()
chatstr = "<h3>Random chat quotes</h3><p>"
rndstart = random.randint(0, len(chatlines) - 5)
for i in xrange(rndstart, rndstart + 5):
chatstr += cgi.escape(chatlines[i].replace(chatlogmark, "")) + "<br>"
chatstr += "</p>"
w.write(chatstr)
except:
print "Unexpected error:", traceback.format_exc()
pass
killers = {}
deaders = {}
clan_killers = {}
clan_deaders = {}
clans = {}
re_clans = (
re.compile("^\[\[\[(?P<clan>.+)\]\]\].+$"),
re.compile("^\[(?P<clan>.+)\].+$"),
re.compile("^.+\[(?P<clan>.+)\]$"),
re.compile("^\((?P<clan>.+)\).+$"),
re.compile("^.+\((?P<clan>.+)\)$"),
re.compile("^-=(?P<clan>.+)=-.+$"),
re.compile("^-(?P<clan>.+)-.+$"),
re.compile("^\<(?P<clan>.+)\>.+$"),
re.compile("^\{(?P<clan>.+)\}.+$"),
re.compile("^.+\{(?P<clan>.+)\}$"),
re.compile("^\|(?P<clan>.+)\|.+$"),
re.compile("^.+\[(?P<clan>.+)\]$"),
re.compile("^\|(?P<clan>.+)\|.+$"),
re.compile("^.+\[(?P<clan>.+)\]$"),
)
def clan_of(name):
for m in [r.match(name) for r in re_clans]:
if m:
return m.group("clan")
return "Clanfree"
for l in f.readlines():
l = l.strip()
if l == "":
continue
try:
( time, deader, killer ) = l.split("\t")
except:
continue
if killer.find("[CPU]") >= 0:
continue
if killer.find("The Third") >= 0:
continue
if killer.find("OpenLieroXor") >= 0:
continue
if not killer in killers:
killers[killer] = {}
if not deader in deaders:
deaders[deader] = {}
killers[killer][deader] = killers[killer].get(deader,0) + 1
deaders[deader][killer] = deaders[deader].get(killer,0) + 1
clankiller = clan_of(killer)
clandeader = clan_of(deader)
#if clankiller == clandeader: continue # ignore that
if not clankiller in clan_killers:
clan_killers[clankiller] = {}
if not clandeader in clan_deaders:
clan_deaders[clandeader] = {}
clan_killers[clankiller][clandeader] = clan_killers[clankiller].get(clandeader,0) + 1
clan_deaders[clandeader][clankiller] = clan_deaders[clandeader].get(clankiller,0) + 1
if not clankiller in clans:
clans[clankiller] = set()
clans[clankiller].add(killer)
f.close()
#print killers
def printRanks(killers, deaders):
sorted = killers.keys()
def sortFunc(s1, s2):
kills1 = sum(killers[s1].itervalues()) - killers[s1].get(s1,0)
kills2 = sum(killers[s2].itervalues()) - killers[s2].get(s2,0)
if kills1 < kills2: return 1
if kills1 > kills2: return -1
try:
deaths1 = sum(deaders[s1].itervalues())
except:
deaths1 = 0
try:
deaths2 = sum(deaders[s2].itervalues())
except:
deaths2 = 0
if deaths1 < deaths2: return -1
if deaths1 > deaths2: return 1
return 0
sorted.sort(cmp=sortFunc)
i = 1
for k in sorted:
kills = sum(killers[k].itervalues())
try:
deaths = sum(deaders[k].itervalues())
except:
            deaths = 0
suicides = killers[k].get(k,0)
kills -= suicides
deaths -= suicides
w.write("%i. <B>%s</B>: %i kills %i deaths %i suicides, killed:" %
( i, cgi.escape(k), kills, deaths, suicides ))
# Ugly killer sorting
killedMax = {}
for f in killers[k]:
if not killers[k][f] in killedMax:
killedMax[killers[k][f]] = []
if killedMax[killers[k][f]].count(f) == 0:
killedMax[killers[k][f]].append(f)
killedMax1 = killedMax.keys()
killedMax1.sort(reverse=True)
count = 0
for f in killedMax1:
for f1 in killedMax[f]:
if f1 == k: # Don't write suicides
continue
count += 1
if count >= 5:
break
if count != 1:
w.write(",")
w.write(" %s - %i" % ( cgi.escape(f1), f ) )
w.write("<BR>\n")
i += 1
w.write("<a href=\"#players\">go directly to player ranks</a>")
w.write("<h2>Clans</h2>\n")
printRanks(clan_killers, clan_deaders)
w.write("<h2>Players</h2>")
w.write("<a name=\"players\"></a>\n")
printRanks(killers, deaders)
w.write("<h2>Clan members</h2>\n")
for c in clans.iterkeys():
if c == "Clanfree": continue # ignore
w.write("<b>%s</b>: " % cgi.escape(c))
w.write("%s<br>\n" % cgi.escape(", ".join(list(clans[c]))))
w.write("</BODY>\n</html>\n")
w.close()
```
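For illustration, this is what the clan-tag extraction above does for a few made-up nicknames (the script runs its report at import time, so this check re-uses two of its patterns instead of importing it):
```python
import re

re_bracket = re.compile(r"^\[(?P<clan>.+)\].+$")   # [CLAN]Name
re_equals = re.compile(r"^-=(?P<clan>.+)=-.+$")    # -=CLAN=-Name

for nick in ("[LX]Worm", "-=Pwn=-Dude", "LoneWorm"):
    for pattern in (re_bracket, re_equals):
        m = pattern.match(nick)
        if m:
            print("%s -> %s" % (nick, m.group("clan")))
            break
    else:
        print("%s -> Clanfree" % nick)
```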
#### File: gamedir/scripts/simulator.py
```python
import sys, os, re
if len(sys.argv) < 2:
print "usage:", sys.argv[0], "<script>"
exit(1)
script = sys.argv[1]
sin, sout = os.popen2(script)
def olxdir():
try:
from win32com.shell import shellcon, shell
homedir = shell.SHGetFolderPath(0, shellcon.CSIDL_MYDOCUMENTS, 0, 0)
except ImportError:
homedir = os.path.expanduser("~")
if os.name == "nt":
p = "OpenLieroX"
elif os.name == "mac":
p = "Library/Application Support/OpenLieroX"
else:
p = ".OpenLieroX"
return homedir + "/" + p
def getwritefullfilename(fn):
return olxdir() + "/" + fn
def getvar(var):
var = var.lower()
if var == "gameoptions.network.forceminversion":
return "OpenLieroX/0.58_rc1"
# no special handling yet; assume it's a string and return an empty string
return ""
def handle(cmd, params):
if cmd == "getwritefullfilename" or cmd == "getfullfilename":
return getwritefullfilename(params[0])
if cmd == "listmaps":
# some scripts need some output here
return ["CastleStrike.lxl"]
if cmd == "getvar":
return [ getvar(params[0]) ]
if cmd == "nextsignal":
while True:
ret = re.findall("[^ \t\"]+", raw_input("Enter signal: ").strip())
if len(ret) > 0: break
return ret
# unknown/not handled, just return empty list
return []
while True:
l = sout.readline().strip()
print "Script:", l
cmd = re.findall("[^ \t\"]+", l)
if len(cmd) > 0:
ret = []
try:
ret = handle(cmd[0].lower(), cmd[1:])
except:
print "Error while handling", cmd
print sys.exc_info()
for rl in ret:
sin.write(":" + str(rl) + "\n")
sin.write(".\n")
sin.flush()
else:
break
```
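A minimal sketch of the response framing used above: every value the simulator returns to the dedicated-server script is written as a ":"-prefixed line, terminated by a single "." line. The in-memory buffer stands in for the script's stdin (`sin` in the code above).
```python
import io

def reply(sin, values):
    # Same framing as the main loop above: one ":"-prefixed line per
    # value, then a lone "." to mark the end of the reply.
    for v in values:
        sin.write(":" + str(v) + "\n")
    sin.write(".\n")
    sin.flush()

buf = io.StringIO()
reply(buf, ["CastleStrike.lxl"])   # e.g. the answer to a "listmaps" command
print(buf.getvalue())
# :CastleStrike.lxl
# .
```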
#### File: DedicatedServerVideo/gdata/client.py
```python
__author__ = '<EMAIL> (<NAME>)'
import re
import atom.client
import atom.core
import atom.http_core
import gdata.gauth
import gdata.data
class Error(Exception):
pass
class RequestError(Error):
status = None
reason = None
body = None
headers = None
class RedirectError(RequestError):
pass
class CaptchaChallenge(RequestError):
captcha_url = None
captcha_token = None
class ClientLoginTokenMissing(Error):
pass
class MissingOAuthParameters(Error):
pass
class ClientLoginFailed(RequestError):
pass
class UnableToUpgradeToken(RequestError):
pass
class Unauthorized(Error):
pass
class BadAuthenticationServiceURL(RedirectError):
pass
class BadAuthentication(RequestError):
pass
class NotModified(RequestError):
pass
def error_from_response(message, http_response, error_class,
response_body=None):
"""Creates a new exception and sets the HTTP information in the error.
Args:
message: str human readable message to be displayed if the exception is
not caught.
http_response: The response from the server, contains error information.
error_class: The exception to be instantiated and populated with
information from the http_response
response_body: str (optional) the body of the response, if it has already
been read from the http_response object.
"""
if response_body is None:
body = http_response.read()
else:
body = response_body
error = error_class('%s: %i, %s' % (message, http_response.status, body))
error.status = http_response.status
error.reason = http_response.reason
error.body = body
error.headers = http_response.getheaders()
return error
def get_xml_version(version):
"""Determines which XML schema to use based on the client API version.
Args:
version: string which is converted to an int. The version string is in
the form 'Major.Minor.x.y.z' and only the major version number
is considered. If None is provided assume version 1.
"""
if version is None:
return 1
return int(version.split('.')[0])
class GDClient(atom.client.AtomPubClient):
"""Communicates with Google Data servers to perform CRUD operations.
This class is currently experimental and may change in backwards
incompatible ways.
This class exists to simplify the following three areas involved in using
the Google Data APIs.
CRUD Operations:
The client provides a generic 'request' method for making HTTP requests.
There are a number of convenience methods which are built on top of
request, which include get_feed, get_entry, get_next, post, update, and
delete. These methods contact the Google Data servers.
Auth:
Reading user-specific private data requires authorization from the user as
do any changes to user data. An auth_token object can be passed into any
of the HTTP requests to set the Authorization header in the request.
You may also want to set the auth_token member to an object which can
use modify_request to set the Authorization header in the HTTP request.
If you are authenticating using the email address and password, you can
use the client_login method to obtain an auth token and set the
auth_token member.
If you are using browser redirects, specifically AuthSub, you will want
to use gdata.gauth.AuthSubToken.from_url to obtain the token after the
redirect, and you will probably want to upgrade this single-use token
to a multiple-use (session) token using the upgrade_token method.
API Versions:
This client is multi-version capable and can be used with Google Data API
version 1 and version 2. The version should be specified by setting the
api_version member to a string, either '1' or '2'.
"""
# The gsessionid is used by Google Calendar to prevent redirects.
__gsessionid = None
api_version = None
# Name of the Google Data service when making a ClientLogin request.
auth_service = None
# URL prefixes which should be requested for AuthSub and OAuth.
auth_scopes = None
def request(self, method=None, uri=None, auth_token=None,
http_request=None, converter=None, desired_class=None,
redirects_remaining=4, **kwargs):
"""Make an HTTP request to the server.
See also documentation for atom.client.AtomPubClient.request.
If a 302 redirect is sent from the server to the client, this client
assumes that the redirect is in the form used by the Google Calendar API.
The same request URI and method will be used as in the original request,
but a gsessionid URL parameter will be added to the request URI with
the value provided in the server's 302 redirect response. If the 302
redirect is not in the format specified by the Google Calendar API, a
RedirectError will be raised containing the body of the server's
response.
The method calls the client's modify_request method to make any changes
required by the client before the request is made. For example, a
version 2 client could add a GData-Version: 2 header to the request in
its modify_request method.
Args:
method: str The HTTP verb for this request, usually 'GET', 'POST',
'PUT', or 'DELETE'
uri: atom.http_core.Uri, str, or unicode The URL being requested.
auth_token: An object which sets the Authorization HTTP header in its
modify_request method. Recommended classes include
gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
among others.
http_request: (optional) atom.http_core.HttpRequest
converter: function which takes the body of the response as its only
argument and returns the desired object.
desired_class: class descended from atom.core.XmlElement to which a
successful response should be converted. If there is no
converter function specified (converter=None) then the
desired_class will be used in calling the
atom.core.parse function. If neither
the desired_class nor the converter is specified, an
HTTP response object will be returned.
redirects_remaining: (optional) int, if this number is 0 and the
server sends a 302 redirect, the request method
will raise an exception. This parameter is used in
recursive request calls to avoid an infinite loop.
Any additional arguments are passed through to
atom.client.AtomPubClient.request.
Returns:
An HTTP response object (see atom.http_core.HttpResponse for a
description of the object's interface) if no converter was
specified and no desired_class was specified. If a converter function
was provided, the results of calling the converter are returned. If no
converter was specified but a desired_class was provided, the response
body will be converted to the class using
atom.core.parse.
"""
if isinstance(uri, (str, unicode)):
uri = atom.http_core.Uri.parse_uri(uri)
# Add the gsession ID to the URL to prevent further redirects.
# TODO: If different sessions are using the same client, there will be a
# multitude of redirects and session ID shuffling.
# If the gsession ID is in the URL, adopt it as the standard location.
if uri is not None and uri.query is not None and 'gsessionid' in uri.query:
self.__gsessionid = uri.query['gsessionid']
# The gsession ID could also be in the HTTP request.
elif (http_request is not None and http_request.uri is not None
and http_request.uri.query is not None
and 'gsessionid' in http_request.uri.query):
self.__gsessionid = http_request.uri.query['gsessionid']
# If the gsession ID is stored in the client, and was not present in the
# URI then add it to the URI.
elif self.__gsessionid is not None:
uri.query['gsessionid'] = self.__gsessionid
# The AtomPubClient should call this class' modify_request before
# performing the HTTP request.
#http_request = self.modify_request(http_request)
response = atom.client.AtomPubClient.request(self, method=method,
uri=uri, auth_token=auth_token, http_request=http_request, **kwargs)
# On success, convert the response body using the desired converter
# function if present.
if response is None:
return None
if response.status == 200 or response.status == 201:
if converter is not None:
return converter(response)
elif desired_class is not None:
if self.api_version is not None:
return atom.core.parse(response.read(), desired_class,
version=get_xml_version(self.api_version))
else:
# No API version was specified, so allow parse to
# use the default version.
return atom.core.parse(response.read(), desired_class)
else:
return response
# TODO: move the redirect logic into the Google Calendar client once it
# exists since the redirects are only used in the calendar API.
elif response.status == 302:
if redirects_remaining > 0:
location = (response.getheader('Location')
or response.getheader('location'))
if location is not None:
m = re.compile('[\?\&]gsessionid=(\w*)').search(location)
if m is not None:
self.__gsessionid = m.group(1)
# Make a recursive call with the gsession ID in the URI to follow
# the redirect.
return self.request(method=method, uri=uri, auth_token=auth_token,
http_request=http_request, converter=converter,
desired_class=desired_class,
redirects_remaining=redirects_remaining-1,
**kwargs)
else:
raise error_from_response('302 received without Location header',
response, RedirectError)
else:
raise error_from_response('Too many redirects from server',
response, RedirectError)
elif response.status == 401:
raise error_from_response('Unauthorized - Server responded with',
response, Unauthorized)
elif response.status == 304:
raise error_from_response('Entry Not Modified - Server responded with',
response, NotModified)
# If the server's response was not a 200, 201, 302, or 401, raise an
# exception.
else:
raise error_from_response('Server responded with', response,
RequestError)
Request = request
def request_client_login_token(
self, email, password, source, service=None,
account_type='HOSTED_OR_GOOGLE',
auth_url=atom.http_core.Uri.parse_uri(
'https://www.google.com/accounts/ClientLogin'),
captcha_token=None, captcha_response=None):
service = service or self.auth_service
# Set the target URL.
http_request = atom.http_core.HttpRequest(uri=auth_url, method='POST')
http_request.add_body_part(
gdata.gauth.generate_client_login_request_body(email=email,
password=password, service=service, source=source,
account_type=account_type, captcha_token=captcha_token,
captcha_response=captcha_response),
'application/x-www-form-urlencoded')
# Use the underlying http_client to make the request.
response = self.http_client.request(http_request)
response_body = response.read()
if response.status == 200:
token_string = gdata.gauth.get_client_login_token_string(response_body)
if token_string is not None:
return gdata.gauth.ClientLoginToken(token_string)
else:
raise ClientLoginTokenMissing(
'Received a 200 response to client login request,'
' but no token was present. %s' % (response_body,))
elif response.status == 403:
captcha_challenge = gdata.gauth.get_captcha_challenge(response_body)
if captcha_challenge:
challenge = CaptchaChallenge('CAPTCHA required')
challenge.captcha_url = captcha_challenge['url']
challenge.captcha_token = captcha_challenge['token']
raise challenge
elif response_body.splitlines()[0] == 'Error=BadAuthentication':
raise BadAuthentication('Incorrect username or password')
else:
raise error_from_response('Server responded with a 403 code',
response, RequestError, response_body)
elif response.status == 302:
# Google tries to redirect all bad URLs back to
# http://www.google.<locale>. If a redirect
# attempt is made, assume the user has supplied an incorrect
# authentication URL
raise error_from_response('Server responded with a redirect',
response, BadAuthenticationServiceURL,
response_body)
else:
raise error_from_response('Server responded to ClientLogin request',
response, ClientLoginFailed, response_body)
RequestClientLoginToken = request_client_login_token
def client_login(self, email, password, source, service=None,
account_type='HOSTED_OR_GOOGLE',
auth_url=atom.http_core.Uri.parse_uri(
'https://www.google.com/accounts/ClientLogin'),
captcha_token=None, captcha_response=None):
"""Performs an auth request using the user's email address and password.
In order to modify user specific data and read user private data, your
application must be authorized by the user. One way to demonstrate
authorization is by including a Client Login token in the Authorization
HTTP header of all requests. This method requests the Client Login token
by sending the user's email address, password, the name of the
application, and the service code for the service which will be accessed
by the application. If the username and password are correct, the server
will respond with the client login code and a new ClientLoginToken
object will be set in the client's auth_token member. With the auth_token
set, future requests from this client will include the Client Login
token.
For a list of service names, see
http://code.google.com/apis/gdata/faq.html#clientlogin
For more information on Client Login, see:
http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html
Args:
email: str The user's email address or username.
password: str The password for the user's account.
source: str The name of your application. This can be anything you
like but should give some indication of which app is
making the request.
service: str The service code for the service you would like to access.
For example, 'cp' for contacts, 'cl' for calendar. For a full
list see
http://code.google.com/apis/gdata/faq.html#clientlogin
If you are using a subclass of the gdata.client.GDClient, the
service will usually be filled in for you so you do not need
to specify it. For example see BloggerClient,
SpreadsheetsClient, etc.
account_type: str (optional) The type of account which is being
authenticated. This can be either 'GOOGLE' for a Google
Account, 'HOSTED' for a Google Apps Account, or the
default 'HOSTED_OR_GOOGLE' which will select the Google
Apps Account if the same email address is used for both
a Google Account and a Google Apps Account.
auth_url: str (optional) The URL to which the login request should be
sent.
captcha_token: str (optional) If a previous login attempt was responded
to with a CAPTCHA challenge, this is the token which
identifies the challenge (from the CAPTCHA's URL).
captcha_response: str (optional) If a previous login attempt was
responded to with a CAPTCHA challenge, this is the
response text which was contained in the challenge.
Returns:
None
Raises:
A RequestError or one of its subclasses: BadAuthentication,
BadAuthenticationServiceURL, ClientLoginFailed,
ClientLoginTokenMissing, or CaptchaChallenge
"""
service = service or self.auth_service
self.auth_token = self.request_client_login_token(email, password,
source, service=service, account_type=account_type, auth_url=auth_url,
captcha_token=captcha_token, captcha_response=captcha_response)
ClientLogin = client_login
def upgrade_token(self, token=None, url=atom.http_core.Uri.parse_uri(
'https://www.google.com/accounts/AuthSubSessionToken')):
"""Asks the Google auth server for a multi-use AuthSub token.
For details on AuthSub, see:
http://code.google.com/apis/accounts/docs/AuthSub.html
Args:
token: gdata.gauth.AuthSubToken or gdata.gauth.SecureAuthSubToken
(optional) If no token is passed in, the client's auth_token member
is used to request the new token. The token object will be modified
to contain the new session token string.
url: str or atom.http_core.Uri (optional) The URL to which the token
upgrade request should be sent. Defaults to:
https://www.google.com/accounts/AuthSubSessionToken
Returns:
The upgraded gdata.gauth.AuthSubToken object.
"""
# Default to using the auth_token member if no token is provided.
if token is None:
token = self.auth_token
# We cannot upgrade a None token.
if token is None:
raise UnableToUpgradeToken('No token was provided.')
if not isinstance(token, gdata.gauth.AuthSubToken):
raise UnableToUpgradeToken(
'Cannot upgrade the token because it is not an AuthSubToken object.')
http_request = atom.http_core.HttpRequest(uri=url, method='GET')
token.modify_request(http_request)
# Use the lower level HttpClient to make the request.
response = self.http_client.request(http_request)
if response.status == 200:
token._upgrade_token(response.read())
return token
else:
raise UnableToUpgradeToken(
'Server responded to token upgrade request with %s: %s' % (
response.status, response.read()))
UpgradeToken = upgrade_token
def get_oauth_token(self, scopes, next, consumer_key, consumer_secret=None,
rsa_private_key=None,
url=gdata.gauth.REQUEST_TOKEN_URL):
"""Obtains an OAuth request token to allow the user to authorize this app.
Once this client has a request token, the user can authorize the request
token by visiting the authorization URL in their browser. After being
redirected back to this app at the 'next' URL, this app can then exchange
the authorized request token for an access token.
For more information see the documentation on Google Accounts with OAuth:
http://code.google.com/apis/accounts/docs/OAuth.html#AuthProcess
Args:
scopes: list of strings or atom.http_core.Uri objects which specify the
URL prefixes which this app will be accessing. For example, to access
the Google Calendar API, you would want to use scopes:
['https://www.google.com/calendar/feeds/',
'http://www.google.com/calendar/feeds/']
next: str or atom.http_core.Uri object, The URL which the user's browser
should be sent to after they authorize access to their data. This
should be a URL in your application which will read the token
information from the URL and upgrade the request token to an access
token.
consumer_key: str This is the identifier for this application which you
should have received when you registered your application with Google
to use OAuth.
consumer_secret: str (optional) The shared secret between your app and
Google which provides evidence that this request is coming from your
application and not another app. If present, this library assumes
you want to use an HMAC signature to verify requests. Keep this data
a secret.
rsa_private_key: str (optional) The RSA private key which is used to
generate a digital signature which is checked by Google's server. If
present, this library assumes that you want to use an RSA signature
to verify requests. Keep this data a secret.
url: The URL to which a request for a token should be made. The default
is Google's OAuth request token provider.
"""
http_request = None
if rsa_private_key is not None:
http_request = gdata.gauth.generate_request_for_request_token(
consumer_key, gdata.gauth.RSA_SHA1, scopes,
rsa_key=rsa_private_key, auth_server_url=url, next=next)
elif consumer_secret is not None:
http_request = gdata.gauth.generate_request_for_request_token(
consumer_key, gdata.gauth.HMAC_SHA1, scopes,
consumer_secret=consumer_secret, auth_server_url=url, next=next)
else:
raise MissingOAuthParameters(
'To request an OAuth token, you must provide your consumer secret'
' or your private RSA key.')
response = self.http_client.request(http_request)
response_body = response.read()
if response.status != 200:
raise error_from_response('Unable to obtain OAuth request token',
response, RequestError, response_body)
if rsa_private_key is not None:
return gdata.gauth.rsa_token_from_body(response_body, consumer_key,
rsa_private_key,
gdata.gauth.REQUEST_TOKEN)
elif consumer_secret is not None:
return gdata.gauth.hmac_token_from_body(response_body, consumer_key,
consumer_secret,
gdata.gauth.REQUEST_TOKEN)
GetOAuthToken = get_oauth_token
def get_access_token(self, request_token,
url=gdata.gauth.ACCESS_TOKEN_URL):
"""Exchanges an authorized OAuth request token for an access token.
Contacts the Google OAuth server to upgrade a previously authorized
request token. Once the request token is upgraded to an access token,
the access token may be used to access the user's data.
For more details, see the Google Accounts OAuth documentation:
http://code.google.com/apis/accounts/docs/OAuth.html#AccessToken
Args:
request_token: An OAuth token which has been authorized by the user.
url: (optional) The URL to which the upgrade request should be sent.
Defaults to: https://www.google.com/accounts/OAuthAuthorizeToken
"""
http_request = gdata.gauth.generate_request_for_access_token(
request_token, auth_server_url=url)
response = self.http_client.request(http_request)
response_body = response.read()
if response.status != 200:
raise error_from_response(
'Unable to upgrade OAuth request token to access token',
response, RequestError, response_body)
return gdata.gauth.upgrade_to_access_token(request_token, response_body)
GetAccessToken = get_access_token
def modify_request(self, http_request):
"""Adds or changes request before making the HTTP request.
This client will add the API version if it is specified.
Subclasses may override this method to add their own request
modifications before the request is made.
"""
http_request = atom.client.AtomPubClient.modify_request(self,
http_request)
if self.api_version is not None:
http_request.headers['GData-Version'] = self.api_version
return http_request
ModifyRequest = modify_request
def get_feed(self, uri, auth_token=None, converter=None,
desired_class=gdata.data.GDFeed, **kwargs):
return self.request(method='GET', uri=uri, auth_token=auth_token,
converter=converter, desired_class=desired_class,
**kwargs)
GetFeed = get_feed
def get_entry(self, uri, auth_token=None, converter=None,
desired_class=gdata.data.GDEntry, etag=None, **kwargs):
http_request = atom.http_core.HttpRequest()
# Conditional retrieval
if etag is not None:
http_request.headers['If-None-Match'] = etag
return self.request(method='GET', uri=uri, auth_token=auth_token,
http_request=http_request, converter=converter,
desired_class=desired_class, **kwargs)
GetEntry = get_entry
def get_next(self, feed, auth_token=None, converter=None,
desired_class=None, **kwargs):
"""Fetches the next set of results from the feed.
When requesting a feed, the number of entries returned is capped at a
service specific default limit (often 25 entries). You can specify your
own entry-count cap using the max-results URL query parameter. If there
are more results than could fit under max-results, the feed will contain
a next link. This method performs a GET against this next results URL.
Returns:
A new feed object containing the next set of entries in this feed.
"""
if converter is None and desired_class is None:
desired_class = feed.__class__
return self.get_feed(feed.find_next_link(), auth_token=auth_token,
converter=converter, desired_class=desired_class,
**kwargs)
GetNext = get_next
# TODO: add a refresh method to re-fetch the entry/feed from the server
# if it has been updated.
def post(self, entry, uri, auth_token=None, converter=None,
desired_class=None, **kwargs):
if converter is None and desired_class is None:
desired_class = entry.__class__
http_request = atom.http_core.HttpRequest()
http_request.add_body_part(
entry.to_string(get_xml_version(self.api_version)),
'application/atom+xml')
return self.request(method='POST', uri=uri, auth_token=auth_token,
http_request=http_request, converter=converter,
desired_class=desired_class, **kwargs)
Post = post
def update(self, entry, auth_token=None, force=False, **kwargs):
"""Edits the entry on the server by sending the XML for this entry.
Performs a PUT and converts the response to a new entry object with a
matching class to the entry passed in.
Args:
entry:
auth_token:
force: boolean stating whether an update should be forced. Defaults to
False. Normally, if a change has been made since the passed in
entry was obtained, the server will not overwrite the entry since
the changes were based on an obsolete version of the entry.
Setting force to True will cause the update to silently
overwrite whatever version is present.
Returns:
A new Entry object of a matching type to the entry which was passed in.
"""
http_request = atom.http_core.HttpRequest()
http_request.add_body_part(
entry.to_string(get_xml_version(self.api_version)),
'application/atom+xml')
# Include the ETag in the request if present.
if force:
http_request.headers['If-Match'] = '*'
elif hasattr(entry, 'etag') and entry.etag:
http_request.headers['If-Match'] = entry.etag
return self.request(method='PUT', uri=entry.find_edit_link(),
auth_token=auth_token, http_request=http_request,
desired_class=entry.__class__, **kwargs)
Update = update
def delete(self, entry_or_uri, auth_token=None, force=False, **kwargs):
http_request = atom.http_core.HttpRequest()
# Include the ETag in the request if present.
if force:
http_request.headers['If-Match'] = '*'
elif hasattr(entry_or_uri, 'etag') and entry_or_uri.etag:
http_request.headers['If-Match'] = entry_or_uri.etag
# If the user passes in a URL, just delete it directly; this may not work,
# as the service might require an ETag.
if isinstance(entry_or_uri, (str, unicode, atom.http_core.Uri)):
return self.request(method='DELETE', uri=entry_or_uri,
http_request=http_request, auth_token=auth_token,
**kwargs)
return self.request(method='DELETE', uri=entry_or_uri.find_edit_link(),
http_request=http_request, auth_token=auth_token,
**kwargs)
Delete = delete
#TODO: implement batch requests.
#def batch(feed, uri, auth_token=None, converter=None, **kwargs):
# pass
# TODO: add a refresh method to request a conditional update to an entry
# or feed.
def _add_query_param(param_string, value, http_request):
if value:
http_request.uri.query[param_string] = value
class Query(object):
def __init__(self, text_query=None, categories=None, author=None, alt=None,
updated_min=None, updated_max=None, pretty_print=False,
published_min=None, published_max=None, start_index=None,
max_results=None, strict=False):
"""Constructs a Google Data Query to filter feed contents serverside.
Args:
text_query: Full text search str (optional)
categories: list of strings (optional). Each string is a required
category. To include an 'or' query, put a | in the string between
terms. For example, to find everything in the Fitz category and
the Laurie or Jane category (Fitz and (Laurie or Jane)) you would
set categories to ['Fitz', 'Laurie|Jane'].
author: str (optional) The service returns entries where the author
name and/or email address match your query string.
alt: str (optional) for the Alternative representation type you'd like
the feed in. If you don't specify an alt parameter, the service
returns an Atom feed. This is equivalent to alt='atom'.
alt='rss' returns an RSS 2.0 result feed.
alt='json' returns a JSON representation of the feed.
alt='json-in-script' Requests a response that wraps JSON in a script
tag.
alt='atom-in-script' Requests an Atom response that wraps an XML
string in a script tag.
alt='rss-in-script' Requests an RSS response that wraps an XML
string in a script tag.
updated_min: str (optional), RFC 3339 timestamp format, lower bounds.
For example: 2005-08-09T10:57:00-08:00
updated_max: str (optional) updated time must be earlier than timestamp.
pretty_print: boolean (optional) If True the server's XML response will
be indented to make it more human readable. Defaults to False.
published_min: str (optional), Similar to updated_min but for published
time.
published_max: str (optional), Similar to updated_max but for published
time.
start_index: int or str (optional) 1-based index of the first result to
be retrieved. Note that this isn't a general cursoring mechanism.
If you first send a query with ?start-index=1&max-results=10 and
then send another query with ?start-index=11&max-results=10, the
service cannot guarantee that the results are equivalent to
?start-index=1&max-results=20, because insertions and deletions
could have taken place in between the two queries.
max_results: int or str (optional) Maximum number of results to be
retrieved. Each service has a default max (usually 25) which can
vary from service to service. There is also a service-specific
limit to the max_results you can fetch in a request.
strict: boolean (optional) If True, the server will return an error if
the server does not recognize any of the parameters in the request
URL. Defaults to False.
"""
self.text_query = text_query
self.categories = categories or []
self.author = author
self.alt = alt
self.updated_min = updated_min
self.updated_max = updated_max
self.pretty_print = pretty_print
self.published_min = published_min
self.published_max = published_max
self.start_index = start_index
self.max_results = max_results
self.strict = strict
def modify_request(self, http_request):
_add_query_param('q', self.text_query, http_request)
if self.categories:
http_request.uri.query['categories'] = ','.join(self.categories)
_add_query_param('author', self.author, http_request)
_add_query_param('alt', self.alt, http_request)
_add_query_param('updated-min', self.updated_min, http_request)
_add_query_param('updated-max', self.updated_max, http_request)
if self.pretty_print:
http_request.uri.query['prettyprint'] = 'true'
_add_query_param('published-min', self.published_min, http_request)
_add_query_param('published-max', self.published_max, http_request)
if self.start_index is not None:
http_request.uri.query['start-index'] = str(self.start_index)
if self.max_results is not None:
http_request.uri.query['max-results'] = str(self.max_results)
if self.strict:
http_request.uri.query['strict'] = 'true'
ModifyRequest = modify_request
class GDQuery(atom.http_core.Uri):
def _get_text_query(self):
return self.query['q']
def _set_text_query(self, value):
self.query['q'] = value
text_query = property(_get_text_query, _set_text_query,
doc='The q parameter for searching for an exact text match on content')
```
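A minimal usage sketch for the client above, assuming the legacy Python 2 gdata/atom packages are importable and that the (long-deprecated) ClientLogin endpoint still answers. The service code, feed URL and credentials are placeholders rather than values taken from this file.
```python
import gdata.client
import gdata.data

client = gdata.client.GDClient()
client.api_version = '2'   # picked up by modify_request as the GData-Version header

try:
    client.client_login('user@example.com', 'app-password',
                        source='example-app', service='cl')
    feed = client.get_feed(
        'https://www.google.com/calendar/feeds/default/private/full',
        desired_class=gdata.data.GDFeed)
    for entry in feed.entry:
        print(entry.title.text)
except gdata.client.RequestError as e:
    print('request failed: %s' % e)
```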
#### File: tools/OLXLogAnalyser/logins.py
```python
import sys,os,re
opts = [ opt for opt in sys.argv[1:] if not os.path.exists(opt) ]
files = [ f for f in sys.argv[1:] if f not in opts ]
def Stream(files):
for fn in files:
for l in open(fn, "r"):
yield l.strip("\n")
reJoin = re.compile("^H: Worm joined: (?P<name>.*) \\(id (?P<id>[0-9]+), from (?P<ip>[0-9.]+):[0-9]+\\((?P<version>.*)\\)\\)$")
def Joins(stream):
for l in stream:
m = reJoin.match(l)
if m: yield m.groupdict()
ips = dict()
for j in Joins(Stream(files)):
ip = j["ip"]
name = j["name"]
if not ip in ips: ips[ip] = dict() #names
ips[ip][name] = j
print "Total IPs:", len(ips)
for i in range(2,100):
ipsfilter = [ d for d in ips.itervalues() if len(d) > i ]
print "IPs used by more than", i, "nicks:", len(ipsfilter)
if len(ipsfilter) == 0: break
N = 4
print "IPs used by more than", N, "nicks:"
ipsfilter = [ d for d in ips.iteritems() if len(d[1]) > N ]
for d in ipsfilter:
#print " ", d[0], ":"
print "-------------"
for name in d[1]:
print " ", name
``` |
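A minimal sketch of what the reJoin pattern extracts; the log line is a made-up example in the format the pattern expects.
```python
import re

reJoin = re.compile("^H: Worm joined: (?P<name>.*) \\(id (?P<id>[0-9]+), "
                    "from (?P<ip>[0-9.]+):[0-9]+\\((?P<version>.*)\\)\\)$")

line = "H: Worm joined: SomePlayer (id 3, from 192.168.0.1:23400(OpenLieroX/0.58_rc1))"
m = reJoin.match(line)
print(m.groupdict())
# {'name': 'SomePlayer', 'id': '3', 'ip': '192.168.0.1', 'version': 'OpenLieroX/0.58_rc1'}
```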
{
"source": "jiptool/jip",
"score": 2
} |
#### File: jip/jip/commands.py
```python
import os
import sys
import shutil
import stat
import inspect
import warnings
from string import Template
from jip import repos_manager, index_manager, logger,\
__version__, __path__, pool, cache_manager
from jip.maven import Pom, Artifact
from jip.util import get_lib_path, get_virtual_home
## command dictionary {name: function}
commands = {}
## options [(name, nargs, description, option_type), ...]
def command(register=True, options=[]):
def _command(func):
## init default repos before running command
def wrapper(*args, **kwargs):
repos_manager.init_repos()
index_manager.initialize()
func(*args, **kwargs)
index_manager.finalize()
## register in command dictionary
if register:
commands[func.__name__.replace('_','-')] = wrapper
wrapper.__doc__ = inspect.getdoc(func)
wrapper.__raw__ = func
### inspect arguments
args = inspect.getargspec(func)
defaults = list(args[3]) if args[3] else []
wrapper.args = []
for argidx in range(len(args[0])-1, -1, -1):
if args[0][argidx] != 'options':
default_value = defaults.pop() if defaults else None
wrapper.args.append((args[0][argidx], default_value))
wrapper.args.reverse()
else:
## options should have default value {}
## so remove this one for counter
defaults.pop()
### additional options
wrapper.options = options
return wrapper
return _command
def _find_pom(artifact, verify=True):
""" find pom and repos contains pom """
## lookup cache first
if cache_manager.is_artifact_in_cache(artifact):
pom = cache_manager.get_artifact_pom(artifact)
return (pom, cache_manager.as_repos())
else:
for repos in repos_manager.repos:
pom = repos.download_pom(artifact, verify)
## find the artifact
if pom is not None:
cache_manager.put_artifact_pom(artifact, pom)
return (pom, repos)
return None
@command(options=[
("dry-run", 0, "perform a command without actual download", bool),
("copy-pom", 0, "copy pom to library directory", bool),
("exclude", "+", "exclude artifacts in install, for instance, 'junit:junit'", str),
("insecure", 0, "do not verify the server's TLS certificate", bool)
])
def install(artifact_id, options={}):
""" Install a package identified by "groupId:artifactId:version" """
artifact = Artifact.from_id(artifact_id)
_install([artifact], options=options)
def _resolve_artifacts(artifacts, exclusions=[], verify=True):
## download queue
download_list = []
## dependency_set contains artifact objects to resolve
dependency_set = set()
for a in artifacts:
dependency_set.add(a)
while len(dependency_set) > 0:
artifact = dependency_set.pop()
## to prevent multiple versions of the same artifact being installed
## TODO we need a better strategy to resolve this
if index_manager.is_same_installed(artifact)\
and artifact not in download_list:
continue
pominfo = _find_pom(artifact, verify)
if pominfo is None:
logger.error("[Error] Artifact not found: %s", artifact)
sys.exit(1)
if not index_manager.is_installed(artifact):
pom, repos = pominfo
# repos.download_jar(artifact, get_lib_path())
artifact.repos = repos
# skip excluded artifact
if not any(map(artifact.is_same_artifact, exclusions)):
download_list.append(artifact)
index_manager.add_artifact(artifact)
pom_obj = Pom(pom)
for r in pom_obj.get_repositories():
repos_manager.add_repos(*r)
more_dependencies = pom_obj.get_dependencies(verify)
for d in more_dependencies:
d.exclusions.extend(artifact.exclusions)
if not index_manager.is_same_installed(d):
dependency_set.add(d)
return download_list
def _install(artifacts, exclusions=[], options={}):
dryrun = options.get("dry-run", False)
verify = not options.get("insecure", True)
if not verify:
warnings.filterwarnings("ignore", category=Warning)
_exclusions = options.get('exclude', [])
copy_pom = options.get('copy-pom', False)
if _exclusions:
_exclusions = map(lambda x: Artifact(*(x.split(":"))), _exclusions)
exclusions.extend(_exclusions)
download_list = _resolve_artifacts(artifacts, exclusions, verify)
if not dryrun:
## download to cache first
for artifact in download_list:
if artifact.repos != cache_manager.as_repos():
artifact.repos.download_jar(artifact,
cache_manager.get_jar_path(artifact), verify)
pool.join()
for artifact in download_list:
cache_manager.get_artifact_jar(artifact, get_lib_path())
if copy_pom:
cache_manager.get_artifact_pom(artifact, get_lib_path())
index_manager.commit()
logger.info("[Finished] dependencies resolved")
else:
logger.info("[Install] Artifacts to install:")
for artifact in download_list:
logger.info(artifact)
@command()
def clean():
""" Remove all downloaded packages """
logger.info("[Deleting] remove java libs in %s" % get_lib_path())
shutil.rmtree(get_lib_path())
index_manager.remove_all()
index_manager.commit()
logger.info("[Finished] all downloaded files erased")
## another resolve task: allow jip to resolve dependencies from a pom file.
@command(options=[
("dry-run", 0, "perform a command without actual download", bool)
])
def resolve(pomfile, options={}):
""" Resolve and download dependencies in pom file """
pomfile = open(pomfile, 'r')
pomstring = pomfile.read()
pom = Pom(pomstring)
## custom defined repositories
repositories = pom.get_repositories()
for repos in repositories:
repos_manager.add_repos(*repos)
dependencies = pom.get_dependencies()
_install(dependencies, options=options)
@command()
def update(artifact_id):
""" Update a snapshot artifact, check for new version """
artifact = Artifact.from_id(artifact_id)
artifact = index_manager.get_artifact(artifact)
if artifact is None:
logger.error('[Error] Can not update %s, please install it first' % artifact_id)
sys.exit(1)
if artifact.is_snapshot():
selected_repos = artifact.repos
installed_file = os.path.join(get_lib_path(), artifact.to_jip_name())
if os.path.exists(installed_file):
lm = os.stat(installed_file)[stat.ST_MTIME]
## find the repository contains the new release
ts = selected_repos.last_modified(artifact)
if ts is not None and ts > lm :
## download new jar
selected_repos.download_jar(artifact, get_lib_path())
## try to update dependencies
pomstring = selected_repos.download_pom(artifact)
pom = Pom(pomstring)
dependencies = pom.get_dependencies()
_install(dependencies)
else:
logger.error('[Error] Artifact not installed: %s' % artifact)
sys.exit(1)
else:
logger.error('[Error] Can not update non-snapshot artifact')
return
@command()
def version():
""" Display jip version """
logger.info('[Version] jip %s, jython %s' % (__version__, sys.version))
@command(options=[
("dry-run", 0, "perform a command without actual download", bool)
])
def deps(artifact_id, options={}):
""" Install dependencies for a given artifact coordinator """
artifact = Artifact.from_id(artifact_id)
pominfo = _find_pom(artifact)
if pominfo is not None:
pom = Pom(pominfo[0])
_install(pom.get_dependencies(), options=options)
else:
logger.error('[Error] artifact %s not found in any repository' % artifact_id)
sys.exit(1)
@command(options=[
('group', '?', "group name", str),
('artifact', '?', "artifact name", str)
])
def search(query="", options={}):
""" Search maven central repository with keywords"""
from .search import searcher
if query is not None and len(query) > 0:
logger.info('[Searching] "%s" in Maven central repository...' % query)
results = searcher.search(query)
else:
g = options.get('group', '')
a = options.get('artifact', '')
logger.info('[Searching] "%s:%s" in Maven central repository...' % (g,a))
results = searcher.search_group_artifact(g, a)
if len(results) > 0:
for item in results:
g,a,v,p = item
logger.info("%s-%s (%s)\n\t%s:%s:%s" % (a,v,p,g,a,v))
else:
logger.info('[Finished] nothing returned by criteria "%s"' % query)
@command()
def list():
""" List current installed artifacts """
index_manager.keep_consistent()
for a in index_manager.installed:
logger.info("%s" % a)
@command()
def remove(artifact_id):
""" Remove an artifact from library path """
logger.info('[Checking] %s in library index' % artifact_id)
artifact = Artifact.from_id(artifact_id)
artifact_path = os.path.join(get_lib_path(), artifact.to_jip_name())
if index_manager.is_installed(artifact) and os.path.exists(artifact_path):
os.remove(artifact_path)
index_manager.remove_artifact(artifact)
index_manager.commit()
logger.info('[Finished] %s removed from library path' % artifact_id)
else:
logger.error('[Error] %s not installed' % artifact_id)
sys.exit(1)
@command()
def freeze():
""" Dump current configuration to a pom file """
dependencies = index_manager.to_pom()
repositories = repos_manager.to_pom()
template = Template(open(os.path.join(__path__[0], '../data/pom.tpl'), 'r').read())
logger.info( template.substitute({'dependencies': dependencies,
'repositories': repositories}))
```
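A minimal dispatch sketch for the command registry above, assuming a working jip environment (Jython or CPython 2 with jip installed and a writable virtual home). The artifact coordinates are placeholders.
```python
from jip.commands import commands

# Every @command-decorated function is registered under its dashed name.
print(sorted(commands.keys()))
# ['clean', 'deps', 'freeze', 'install', 'list', 'remove',
#  'resolve', 'search', 'update', 'version']

# A dry-run install resolves the dependency tree without downloading jars.
commands['install']('junit:junit:4.12', options={'dry-run': True})
```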
#### File: jip/jip/util.py
```python
import os
import sys
import time
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
try:
import queue
except ImportError:
import Queue as queue
import threading
from jip import __version__, logger
JIP_USER_AGENT = 'jip/%s' % __version__
BUF_SIZE = 4096
class DownloadException(Exception):
pass
def download(url, target, non_blocking=False, close_target=False, quiet=True, verify=True):
import requests
### download file to target (target is a file-like object)
if non_blocking:
pool.submit(url, target, verify)
else:
try:
t0 = time.time()
source = requests.get(url, verify=verify, headers={'User-Agent': JIP_USER_AGENT})
source.raise_for_status()
size = source.headers['Content-Length']
if not quiet:
logger.info('[Downloading] %s %s bytes to download' % (url, size))
for buf in source.iter_content(BUF_SIZE):
target.write(buf)
source.close()
if close_target:
target.close()
t1 = time.time()
if not quiet:
logger.info('[Downloading] Download %s completed in %f secs' % (url, (t1-t0)))
except requests.exceptions.RequestException:
_, e, _ = sys.exc_info()
raise DownloadException(url, e)
def download_string(url, verify=True):
import requests
try:
response = requests.get(url, verify=verify, headers={'User-Agent': JIP_USER_AGENT})
response.raise_for_status()
return response.text
except requests.exceptions.RequestException:
_, e, _ = sys.exc_info()
raise DownloadException(url, e)
class DownloadThreadPool(object):
def __init__(self, size=3):
self.queue = queue.Queue()
self.workers = [threading.Thread(target=self._do_work) for _ in range(size)]
self.initialized = False
def init_threads(self):
for worker in self.workers:
worker.setDaemon(True)
worker.start()
self.initialized = True
def _do_work(self):
while True:
url, target, verify = self.queue.get()
download(url, target, close_target=True, quiet=False, verify=verify)
self.queue.task_done()
def join(self):
self.queue.join()
def submit(self, url, target, verify=True):
if not self.initialized:
self.init_threads()
self.queue.put((url, target, verify))
pool = DownloadThreadPool(3)
def get_virtual_home():
if 'VIRTUAL_ENV' in os.environ:
JYTHON_HOME = os.environ['VIRTUAL_ENV']
else:
## fall back to using the current directory
JYTHON_HOME = os.getcwd()
return JYTHON_HOME
def get_lib_path():
JYTHON_HOME = get_virtual_home()
DEFAULT_JAVA_LIB_PATH = os.path.join(JYTHON_HOME, 'javalib')
if not os.path.exists(DEFAULT_JAVA_LIB_PATH):
os.mkdir(DEFAULT_JAVA_LIB_PATH)
return DEFAULT_JAVA_LIB_PATH
``` |
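A minimal sketch of the download helpers above, assuming requests is installed; the Maven Central URLs are illustrative placeholders.
```python
from jip.util import download, download_string, pool

base = "https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12"

# Blocking download into any binary file-like target.
with open("junit-4.12.pom", "wb") as target:
    download(base + ".pom", target, quiet=False)

# Text download, e.g. for poms and repository metadata.
pom_text = download_string(base + ".pom")

# Non-blocking download goes through the module-level DownloadThreadPool;
# the worker thread closes the target when it is done.
jar = open("junit-4.12.jar", "wb")
download(base + ".jar", jar, non_blocking=True)
pool.join()
```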
{
"source": "JIQ1314an/nlp-case",
"score": 3
} |
#### File: nlp-case/sample/helpers.py
```python
from collections import Counter
from . import core
import pandas as pd
def read(path):
corpus = []
with open(path) as f:
for line in f.readlines():
corpus.append(line.rstrip('\n'))
# print(corpus)
return corpus
def split_sentence(corpus: list):
"""split corpus
:param corpus: list type sentence
:return: word_list: two-dimensional list
"""
word_list = list()
for i in range(len(corpus)):
word_list.append(corpus[i].split(' '))
return word_list
def count_word_num(corpus: list):
"""Count words
:param corpus: two-dimensional list
:return: list, the element is Count type
"""
word_list = split_sentence(corpus)
count_list = []
unrepeated_word_list = []
for i in range(len(word_list)):
# The Counter type is similar to a dict, returns 0 for nonexistent keys
count_list.append(Counter(word_list[i]))
unrepeated_word_list.extend(word_list[i])
return pd.Series(unrepeated_word_list).unique(), count_list
def get_text_vector(corpus: list):
text_vector = {}
# Get words and statistics for all articles
unrepeated_word_list, count_list = count_word_num(corpus)
# print(unrepeated_word_list)
for i, count in enumerate(count_list): # Get words and statistics for the current article
print("TF-IDF statistics for Document {}".format(i + 1))
scores = {word: core.tf_idf(word, count, count_list) for word in count}
# print(scores.keys())
# Sort in descending order by value
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
for word, score in sorted_words:
print("word: {}, TF-IDF: {}\t".format(word, round(score, 5)))
# get text vector
for w in unrepeated_word_list:
if w in scores.keys():
fill_text_vector(text_vector, w, round(scores[w], 5))
else:
fill_text_vector(text_vector, w, 0)
# print(text_vector)
print()
return pd.DataFrame(text_vector).values
def fill_text_vector(text_vector, w, score):
temp_list = text_vector.get(w)  # returns None if the key w is not present yet
if temp_list:
temp_list.append(score)
text_vector[w] = temp_list
else:
text_vector[w] = [score]
``` |
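A minimal usage sketch, assuming the package's core module (imported at the top of helpers.py) provides the tf_idf function used above; the two-sentence corpus is made up.
```python
from sample.helpers import get_text_vector

corpus = ["the cat sat on the mat",
          "the dog sat on the log"]

# One row per document, one column per unique word across the corpus,
# holding the rounded TF-IDF score (0 where the word is absent).
vectors = get_text_vector(corpus)
print(vectors.shape)
```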
{
"source": "JiQi535/pymatgen-analysis-diffusion",
"score": 2
} |
#### File: aimd/tests/test_pathway.py
```python
__author__ = "<NAME>"
__date__ = "01/16"
import json
import os
import unittest
import numpy as np
from pymatgen.core import Structure
from pymatgen.io.vasp import Chgcar
from pymatgen.analysis.diffusion.aimd.pathway import ProbabilityDensityAnalysis, SiteOccupancyAnalyzer
from pymatgen.analysis.diffusion.analyzer import DiffusionAnalyzer
tests_dir = os.path.dirname(os.path.abspath(__file__))
class ProbabilityDensityTest(unittest.TestCase):
def test_probability(self):
traj_file = os.path.join(tests_dir, "cNa3PS4_trajectories.npy")
struc_file = os.path.join(tests_dir, "cNa3PS4.cif")
trajectories = np.load(traj_file)
structure = Structure.from_file(struc_file, False)
# ProbabilityDensityAnalysis object
pda = ProbabilityDensityAnalysis(structure, trajectories, interval=0.5)
dV = pda.structure.lattice.volume / pda.lens[0] / pda.lens[1] / pda.lens[2]
Pr_tot = np.sum(pda.Pr) * dV
self.assertAlmostEqual(pda.Pr.max(), 0.030735573102, 12)
self.assertAlmostEqual(pda.Pr.min(), 0.0, 12)
self.assertAlmostEqual(Pr_tot, 1.0, 12)
def test_probability_classmethod(self):
file = os.path.join(tests_dir, "cNa3PS4_pda.json")
data = json.load(open(file))
diff_analyzer = DiffusionAnalyzer.from_dict(data)
# ProbabilityDensityAnalysis object
pda = ProbabilityDensityAnalysis.from_diffusion_analyzer(diffusion_analyzer=diff_analyzer, interval=0.5)
dV = pda.structure.lattice.volume / pda.lens[0] / pda.lens[1] / pda.lens[2]
Pr_tot = np.sum(pda.Pr) * dV
self.assertAlmostEqual(pda.Pr.max(), 0.0361594977596, 8)
self.assertAlmostEqual(pda.Pr.min(), 0.0, 12)
self.assertAlmostEqual(Pr_tot, 1.0, 12)
def test_generate_stable_sites(self):
file = os.path.join(tests_dir, "cNa3PS4_pda.json")
data = json.load(open(file))
diff_analyzer = DiffusionAnalyzer.from_dict(data)
# ProbabilityDensityAnalysis object
pda = ProbabilityDensityAnalysis.from_diffusion_analyzer(diffusion_analyzer=diff_analyzer, interval=0.1)
pda.generate_stable_sites(p_ratio=0.25, d_cutoff=1.5)
self.assertEqual(len(pda.stable_sites), 50)
self.assertAlmostEqual(pda.stable_sites[1][2], 0.24113475177304966, 8)
self.assertAlmostEqual(pda.stable_sites[7][1], 0.5193661971830985, 8)
s = pda.get_full_structure()
self.assertEqual(s.num_sites, 178)
self.assertEqual(s.composition["Na"], 48)
self.assertEqual(s.composition["X"], 50)
self.assertAlmostEqual(s[177].frac_coords[2], 0.57446809)
def test_to_chgcar(self):
file = os.path.join(tests_dir, "cNa3PS4_pda.json")
data = json.load(open(file))
diff_analyzer = DiffusionAnalyzer.from_dict(data)
# ProbabilityDensityAnalysis object
pda = ProbabilityDensityAnalysis.from_diffusion_analyzer(diffusion_analyzer=diff_analyzer, interval=0.1)
pda.to_chgcar("CHGCAR.PDA")
chgcar = Chgcar.from_file("CHGCAR.PDA")
self.assertEqual(pda.structure.species, chgcar.structure.species)
os.remove("CHGCAR.PDA")
class SiteOccupancyTest(unittest.TestCase):
def test_site_occupancy(self):
traj_file = os.path.join(tests_dir, "cNa3PS4_trajectories.npy")
struc_file = os.path.join(tests_dir, "cNa3PS4.cif")
trajectories = np.load(traj_file)
structure = Structure.from_file(struc_file, False)
coords_ref = [ss.frac_coords for ss in structure if ss.specie.symbol == "Na"]
# SiteOccupancyAnalyzer object
socc = SiteOccupancyAnalyzer(structure, coords_ref, trajectories, species=("Li", "Na"))
site_occ = socc.site_occ
self.assertAlmostEqual(np.sum(site_occ), len(coords_ref), 12)
self.assertAlmostEqual(site_occ[11], 0.98, 12)
self.assertAlmostEqual(site_occ[15], 0.875, 12)
self.assertEqual(len(coords_ref), 48)
def test_site_occupancy_classmethod(self):
file = os.path.join(tests_dir, "cNa3PS4_pda.json")
data = json.load(open(file))
diff_analyzer = DiffusionAnalyzer.from_dict(data)
structure = diff_analyzer.structure
coords_ref = [ss.frac_coords for ss in structure if ss.specie.symbol == "Na"]
# SiteOccupancyAnalyzer object
socc = SiteOccupancyAnalyzer.from_diffusion_analyzer(coords_ref, diffusion_analyzer=diff_analyzer)
site_occ = socc.site_occ
self.assertAlmostEqual(np.sum(site_occ), len(coords_ref), 12)
self.assertAlmostEqual(site_occ[1], 0.98, 12)
self.assertAlmostEqual(site_occ[26], 0.97, 12)
self.assertEqual(len(coords_ref), 48)
if __name__ == "__main__":
unittest.main()
```
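The same ProbabilityDensityAnalysis workflow outside unittest, as a minimal sketch; the JSON file name is a placeholder for a serialized DiffusionAnalyzer like the one used in the tests above.
```python
import json

from pymatgen.analysis.diffusion.aimd.pathway import ProbabilityDensityAnalysis
from pymatgen.analysis.diffusion.analyzer import DiffusionAnalyzer

with open("my_diffusion_analyzer.json") as f:
    da = DiffusionAnalyzer.from_dict(json.load(f))

pda = ProbabilityDensityAnalysis.from_diffusion_analyzer(diffusion_analyzer=da, interval=0.5)
pda.to_chgcar("CHGCAR_Na_density")   # probability density in CHGCAR format for visualization
```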
#### File: diffusion/tests/test_analyzer.py
```python
import csv
import json
import os
import random
import unittest
import numpy as np
import scipy.constants as const
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.diffusion.analyzer import DiffusionAnalyzer, fit_arrhenius, get_conversion_factor
module_dir = os.path.dirname(os.path.abspath(__file__))
class FuncTest(PymatgenTest):
def test_get_conversion_factor(self):
s = PymatgenTest.get_structure("LiFePO4")
# large tolerance because scipy constants changed between 0.16.1 and 0.17
self.assertAlmostEqual(41370704.343540139, get_conversion_factor(s, "Li", 600), delta=20)
def test_fit_arrhenius(self):
Ea = 0.5
k = const.k / const.e
c = 12
temps = np.array([300, 1000, 500])
diffusivities = c * np.exp(-Ea / (k * temps))
diffusivities *= np.array([1.00601834013, 1.00803236262, 0.98609720824])
r = fit_arrhenius(temps, diffusivities)
self.assertAlmostEqual(r[0], Ea)
self.assertAlmostEqual(r[1], c)
self.assertAlmostEqual(r[2], 0.000895566)
# when not enough values for error estimate
r2 = fit_arrhenius([1, 2], [10, 10])
self.assertAlmostEqual(r2[0], 0)
self.assertAlmostEqual(r2[1], 10)
self.assertEqual(r2[2], None)
class DiffusionAnalyzerTest(PymatgenTest):
def test_init(self):
# Diffusion vasprun.xmls are rather large. We are only going to use a
# very small preprocessed run for testing. Note that the results are
# unreliable for short runs.
with open(os.path.join(module_dir, "DiffusionAnalyzer.json")) as f:
dd = json.load(f)
d = DiffusionAnalyzer.from_dict(dd)
# large tolerance because scipy constants changed between 0.16.1 and 0.17
self.assertAlmostEqual(d.conductivity, 74.165372613735684, 4)
self.assertAlmostEqual(d.chg_conductivity, 232.8278799754324, 4)
self.assertAlmostEqual(d.diffusivity, 1.16083658794e-06, 7)
self.assertAlmostEqual(d.chg_diffusivity, 3.64565578208e-06, 7)
self.assertAlmostEqual(d.conductivity_std_dev, 0.0097244677795984488, 7)
self.assertAlmostEqual(d.diffusivity_std_dev, 9.1013023085561779e-09, 7)
self.assertAlmostEqual(d.chg_diffusivity_std_dev, 7.20911399729e-10, 5)
self.assertAlmostEqual(d.haven_ratio, 0.31854161048867402, 7)
self.assertArrayAlmostEqual(d.conductivity_components, [45.7903694, 26.1651956, 150.5406140], 3)
self.assertArrayAlmostEqual(
d.diffusivity_components,
[7.49601236e-07, 4.90254273e-07, 2.24649255e-06],
)
self.assertArrayAlmostEqual(d.conductivity_components_std_dev, [0.0063566, 0.0180854, 0.0217918])
self.assertArrayAlmostEqual(
d.diffusivity_components_std_dev,
[8.9465670e-09, 2.4931224e-08, 2.2636384e-08],
)
self.assertArrayAlmostEqual(d.mscd[0:4], [0.69131064, 0.71794072, 0.74315283, 0.76703961])
self.assertArrayAlmostEqual(
d.max_ion_displacements,
[
1.4620659693989553,
1.2787303484445025,
3.419618540097756,
2.340104469126246,
2.6080973517594233,
1.3928579365672844,
1.3561505956708932,
1.6699242923686253,
1.0352389639563648,
1.1662520093955808,
1.2322019205885841,
0.8094210554832534,
1.9917808504954169,
1.2684148391206396,
2.392633794162402,
2.566313049232671,
1.3175030435622759,
1.4628945430952793,
1.0984921286753002,
1.2864482076554093,
0.655567027815413,
0.5986961164605746,
0.5639091444309045,
0.6166004192954059,
0.5997911580422605,
0.4374606277579815,
1.1865683960470783,
0.9017064371676591,
0.6644840367853767,
1.0346375380664645,
0.6177630142863979,
0.7952002051914302,
0.7342686123054011,
0.7858047956905577,
0.5570732369065661,
1.0942937746885417,
0.6509372395308788,
1.0876687380413455,
0.7058162184725,
0.8298306317598585,
0.7813913747621343,
0.7337655232056153,
0.9057161616236746,
0.5979093093186919,
0.6830333586985015,
0.7926500894084628,
0.6765180009988608,
0.8555866032968998,
0.713087091642237,
0.7621007695790749,
],
)
self.assertEqual(d.sq_disp_ions.shape, (50, 206))
self.assertEqual(d.lattices.shape, (1, 3, 3))
self.assertEqual(d.mscd.shape, (206,))
self.assertEqual(d.mscd.shape, d.msd.shape)
self.assertAlmostEqual(d.max_framework_displacement, 1.18656839605)
ss = list(d.get_drift_corrected_structures(10, 1000, 20))
self.assertEqual(len(ss), 50)
n = random.randint(0, 49)
n_orig = n * 20 + 10
self.assertArrayAlmostEqual(
ss[n].cart_coords - d.structure.cart_coords + d.drift[:, n_orig, :],
d.disp[:, n_orig, :],
)
d = DiffusionAnalyzer.from_dict(d.as_dict())
self.assertIsInstance(d, DiffusionAnalyzer)
# Ensure summary dict is json serializable.
json.dumps(d.get_summary_dict(include_msd_t=True))
d = DiffusionAnalyzer(
d.structure,
d.disp,
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed="max",
)
self.assertAlmostEqual(d.conductivity, 74.165372613735684, 4)
self.assertAlmostEqual(d.diffusivity, 1.14606446822e-06, 7)
self.assertAlmostEqual(d.haven_ratio, 0.318541610489, 6)
self.assertAlmostEqual(d.chg_conductivity, 232.8278799754324, 4)
self.assertAlmostEqual(d.chg_diffusivity, 3.64565578208e-06, 7)
d = DiffusionAnalyzer(
d.structure,
d.disp,
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed=False,
)
self.assertAlmostEqual(d.conductivity, 27.20479170406027, 4)
self.assertAlmostEqual(d.diffusivity, 4.25976905436e-07, 7)
self.assertAlmostEqual(d.chg_diffusivity, 1.6666666666666667e-17, 3)
d = DiffusionAnalyzer(
d.structure,
d.disp,
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed="constant",
avg_nsteps=100,
)
self.assertAlmostEqual(d.conductivity, 47.404056230438741, 4)
self.assertAlmostEqual(d.diffusivity, 7.4226016496716148e-07, 7)
self.assertAlmostEqual(d.chg_conductivity, 1.06440821953e-09, 4)
# Can't average over 2000 steps because this is a 1000-step run.
self.assertRaises(
ValueError,
DiffusionAnalyzer,
d.structure,
d.disp,
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed="constant",
avg_nsteps=2000,
)
d = DiffusionAnalyzer.from_structures(
list(d.get_drift_corrected_structures()),
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed=d.smoothed,
avg_nsteps=100,
)
self.assertAlmostEqual(d.conductivity, 47.404056230438741, 4)
self.assertAlmostEqual(d.diffusivity, 7.4226016496716148e-07, 7)
d.export_msdt("test.csv")
with open("test.csv") as f:
data = []
for row in csv.reader(f):
if row:
data.append(row)
data.pop(0)
data = np.array(data, dtype=np.float64)
self.assertArrayAlmostEqual(data[:, 1], d.msd)
self.assertArrayAlmostEqual(data[:, -1], d.mscd)
os.remove("test.csv")
def test_init_npt(self):
# Diffusion vasprun.xmls are rather large. We are only going to use a
# very small preprocessed run for testing. Note that the results are
# unreliable for short runs.
with open(os.path.join(module_dir, "DiffusionAnalyzer_NPT.json")) as f:
dd = json.load(f)
d = DiffusionAnalyzer.from_dict(dd)
# large tolerance because scipy constants changed between 0.16.1 and 0.17
self.assertAlmostEqual(d.conductivity, 499.1504129387108, 4)
self.assertAlmostEqual(d.chg_conductivity, 1219.5959181678043, 4)
self.assertAlmostEqual(d.diffusivity, 8.40265434771e-06, 7)
self.assertAlmostEqual(d.chg_diffusivity, 2.05305709033e-05, 6)
self.assertAlmostEqual(d.conductivity_std_dev, 0.10368477696021029, 7)
self.assertAlmostEqual(d.diffusivity_std_dev, 9.1013023085561779e-09, 7)
self.assertAlmostEqual(d.chg_diffusivity_std_dev, 1.20834853646e-08, 6)
self.assertAlmostEqual(d.haven_ratio, 0.409275240679, 7)
self.assertArrayAlmostEqual(d.conductivity_components, [455.178101, 602.252644, 440.0210014], 3)
self.assertArrayAlmostEqual(
d.diffusivity_components,
[7.66242570e-06, 1.01382648e-05, 7.40727250e-06],
)
self.assertArrayAlmostEqual(d.conductivity_components_std_dev, [0.1196577, 0.0973347, 0.1525400])
self.assertArrayAlmostEqual(
d.diffusivity_components_std_dev,
[2.0143072e-09, 1.6385239e-09, 2.5678445e-09],
)
self.assertArrayAlmostEqual(
d.max_ion_displacements,
[
1.13147881,
0.79899554,
1.04153733,
0.96061850,
0.83039864,
0.70246715,
0.61365911,
0.67965179,
1.91973907,
1.69127386,
1.60568746,
1.35587641,
1.03280378,
0.99202692,
2.03359655,
1.03760269,
1.40228350,
1.36315080,
1.27414979,
1.26742035,
0.88199589,
0.97700804,
1.11323184,
1.00139511,
2.94164403,
0.89438909,
1.41508334,
1.23660358,
0.39322939,
0.54264064,
1.25291806,
0.62869809,
0.40846708,
1.43415505,
0.88891241,
0.56259128,
0.81712740,
0.52700441,
0.51011733,
0.55557882,
0.49131002,
0.66740277,
0.57798671,
0.63521025,
0.50277142,
0.52878021,
0.67803443,
0.81161269,
0.46486345,
0.47132761,
0.74301293,
0.79285519,
0.48789600,
0.61776836,
0.60695847,
0.67767756,
0.70972268,
1.08232442,
0.87871177,
0.84674206,
0.45694693,
0.60417985,
0.61652272,
0.66444583,
0.52211986,
0.56544134,
0.43311443,
0.43027547,
1.10730439,
0.59829728,
0.52270635,
0.72327608,
1.02919775,
0.84423208,
0.61694764,
0.72795752,
0.72957755,
0.55491631,
0.68507454,
0.76745343,
0.96346584,
0.66672645,
1.06810107,
0.65705843,
],
)
self.assertEqual(d.sq_disp_ions.shape, (84, 217))
self.assertEqual(d.lattices.shape, (1001, 3, 3))
self.assertEqual(d.mscd.shape, (217,))
self.assertEqual(d.mscd.shape, d.msd.shape)
self.assertAlmostEqual(d.max_framework_displacement, 1.43415505156)
ss = list(d.get_drift_corrected_structures(10, 1000, 20))
self.assertEqual(len(ss), 50)
n = random.randint(0, 49)
n_orig = n * 20 + 10
self.assertArrayAlmostEqual(
ss[n].cart_coords - d.structure.cart_coords + d.drift[:, n_orig, :],
d.disp[:, n_orig, :],
)
d = DiffusionAnalyzer.from_dict(d.as_dict())
self.assertIsInstance(d, DiffusionAnalyzer)
# Ensure summary dict is json serializable.
json.dumps(d.get_summary_dict(include_msd_t=True))
d = DiffusionAnalyzer(
d.structure,
d.disp,
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed="max",
)
self.assertAlmostEqual(d.conductivity, 499.1504129387108, 4)
self.assertAlmostEqual(d.diffusivity, 8.40265434771e-06, 7)
self.assertAlmostEqual(d.haven_ratio, 0.409275240679, 7)
self.assertAlmostEqual(d.chg_diffusivity, 2.05305709033e-05, 7)
d = DiffusionAnalyzer(
d.structure,
d.disp,
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed=False,
)
self.assertAlmostEqual(d.conductivity, 406.5964019770787, 4)
self.assertAlmostEqual(d.diffusivity, 6.8446082e-06, 7)
self.assertAlmostEqual(d.chg_diffusivity, 1.03585877962e-05, 6)
self.assertAlmostEqual(d.haven_ratio, 0.6607665413, 6)
d = DiffusionAnalyzer(
d.structure,
d.disp,
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed="constant",
avg_nsteps=100,
)
self.assertAlmostEqual(d.conductivity, 425.77884571149525, 4)
self.assertAlmostEqual(d.diffusivity, 7.167523809142514e-06, 7)
self.assertAlmostEqual(d.chg_diffusivity, 9.33480892187e-06, 6)
self.assertAlmostEqual(d.haven_ratio, 0.767827586952, 6)
self.assertAlmostEqual(d.chg_conductivity, 554.5240271992852, 6)
# Can't average over 2000 steps because this is a 1000-step run.
self.assertRaises(
ValueError,
DiffusionAnalyzer,
d.structure,
d.disp,
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed="constant",
avg_nsteps=2000,
)
d = DiffusionAnalyzer.from_structures(
list(d.get_drift_corrected_structures()),
d.specie,
d.temperature,
d.time_step,
d.step_skip,
smoothed=d.smoothed,
avg_nsteps=100,
)
self.assertAlmostEqual(d.conductivity, 425.7788457114952, 4)
self.assertAlmostEqual(d.diffusivity, 7.1675238091425148e-06, 7)
self.assertAlmostEqual(d.haven_ratio, 0.767827586952, 7)
self.assertAlmostEqual(d.chg_conductivity, 554.5240271992852, 6)
d.export_msdt("test.csv")
with open("test.csv") as f:
data = []
for row in csv.reader(f):
if row:
data.append(row)
data.pop(0)
data = np.array(data, dtype=np.float64)
self.assertArrayAlmostEqual(data[:, 1], d.msd)
self.assertArrayAlmostEqual(data[:, -1], d.mscd)
os.remove("test.csv")
def test_from_structure_NPT(self):
coords1 = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
coords2 = np.array([[0.0, 0.0, 0.0], [0.6, 0.6, 0.6]])
coords3 = np.array([[0.0, 0.0, 0.0], [0.7, 0.7, 0.7]])
lattice1 = Lattice.from_parameters(a=2.0, b=2.0, c=2.0, alpha=90, beta=90, gamma=90)
lattice2 = Lattice.from_parameters(a=2.1, b=2.1, c=2.1, alpha=90, beta=90, gamma=90)
lattice3 = Lattice.from_parameters(a=2.0, b=2.0, c=2.0, alpha=90, beta=90, gamma=90)
s1 = Structure(coords=coords1, lattice=lattice1, species=["F", "Li"])
s2 = Structure(coords=coords2, lattice=lattice2, species=["F", "Li"])
s3 = Structure(coords=coords3, lattice=lattice3, species=["F", "Li"])
structures = [s1, s2, s3]
d = DiffusionAnalyzer.from_structures(
structures,
specie="Li",
temperature=500.0,
time_step=2.0,
step_skip=1,
smoothed=None,
)
self.assertArrayAlmostEqual(
d.disp[1],
np.array([[0.0, 0.0, 0.0], [0.21, 0.21, 0.21], [0.40, 0.40, 0.40]]),
)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JiQi535/pymatgen",
"score": 2
} |
#### File: gb/tests/test_grain.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__email__ = "<EMAIL>"
__date__ = "07/30/18"
import os
import unittest
import warnings
import numpy as np
from pymatgen import Structure
from pymatgen.analysis.gb.grain import GrainBoundary, GrainBoundaryGenerator
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "..", "test_files", "grain_boundary"
)
class Test_GrainBoundary(PymatgenTest):
@classmethod
def setUpClass(cls):
warnings.filterwarnings("ignore")
cls.Cu_conv = Structure.from_file(
os.path.join(test_dir, "Cu_mp-30_conventional_standard.cif")
)
GB_Cu_conv = GrainBoundaryGenerator(cls.Cu_conv)
cls.Cu_GB1 = GB_Cu_conv.gb_from_parameters(
[1, 2, 3],
123.74898859588858,
expand_times=4,
vacuum_thickness=1.5,
ab_shift=[0.0, 0.0],
plane=[1, 3, 1],
rm_ratio=0.0,
)
cls.Cu_GB2 = GB_Cu_conv.gb_from_parameters(
[1, 2, 3],
123.74898859588858,
expand_times=4,
vacuum_thickness=1.5,
ab_shift=[0.2, 0.2],
rm_ratio=0.0,
)
@classmethod
def tearDownClass(cls):
warnings.simplefilter("default")
def test_init(self):
self.assertAlmostEqual(self.Cu_GB1.rotation_angle, 123.74898859588858)
self.assertAlmostEqual(self.Cu_GB1.vacuum_thickness, 1.5)
self.assertListEqual(self.Cu_GB2.rotation_axis, [1, 2, 3])
self.assertArrayAlmostEqual(
np.array(self.Cu_GB1.ab_shift), np.array([0.0, 0.0])
)
self.assertArrayAlmostEqual(
np.array(self.Cu_GB2.ab_shift), np.array([0.2, 0.2])
)
self.assertListEqual(self.Cu_GB1.gb_plane, [1, 3, 1])
self.assertListEqual(self.Cu_GB2.gb_plane, [1, 2, 3])
self.assertArrayAlmostEqual(
np.array(self.Cu_GB1.init_cell.lattice.matrix),
np.array(self.Cu_conv.lattice.matrix),
)
def test_copy(self):
Cu_GB1_copy = self.Cu_GB1.copy()
self.assertAlmostEqual(Cu_GB1_copy.sigma, self.Cu_GB1.sigma)
self.assertAlmostEqual(Cu_GB1_copy.rotation_angle, self.Cu_GB1.rotation_angle)
self.assertListEqual(Cu_GB1_copy.rotation_axis, self.Cu_GB1.rotation_axis)
self.assertListEqual(Cu_GB1_copy.gb_plane, self.Cu_GB1.gb_plane)
self.assertArrayAlmostEqual(
Cu_GB1_copy.init_cell.lattice.matrix, self.Cu_GB1.init_cell.lattice.matrix
)
self.assertArrayAlmostEqual(
Cu_GB1_copy.oriented_unit_cell.lattice.matrix,
self.Cu_GB1.oriented_unit_cell.lattice.matrix,
)
self.assertArrayAlmostEqual(
Cu_GB1_copy.lattice.matrix, self.Cu_GB1.lattice.matrix
)
def test_sigma(self):
self.assertAlmostEqual(self.Cu_GB1.sigma, 9)
self.assertAlmostEqual(self.Cu_GB2.sigma, 9)
def test_top_grain(self):
self.assertAlmostEqual(
self.Cu_GB1.num_sites, self.Cu_GB1.top_grain.num_sites * 2
)
self.assertArrayAlmostEqual(
self.Cu_GB1.lattice.matrix, self.Cu_GB1.top_grain.lattice.matrix
)
def test_bottom_grain(self):
self.assertAlmostEqual(
self.Cu_GB1.num_sites, self.Cu_GB1.bottom_grain.num_sites * 2
)
self.assertArrayAlmostEqual(
self.Cu_GB1.lattice.matrix, self.Cu_GB1.bottom_grain.lattice.matrix
)
def test_coincidents(self):
self.assertAlmostEqual(
self.Cu_GB1.num_sites / self.Cu_GB1.sigma, len(self.Cu_GB1.coincidents)
)
self.assertAlmostEqual(
self.Cu_GB2.num_sites / self.Cu_GB2.sigma, len(self.Cu_GB2.coincidents)
)
def test_as_dict_and_from_dict(self):
d1 = self.Cu_GB1.as_dict()
d2 = self.Cu_GB2.as_dict()
Cu_GB1_new = GrainBoundary.from_dict(d1)
Cu_GB2_new = GrainBoundary.from_dict(d2)
self.assertAlmostEqual(Cu_GB1_new.sigma, self.Cu_GB1.sigma)
self.assertAlmostEqual(Cu_GB1_new.rotation_angle, self.Cu_GB1.rotation_angle)
self.assertListEqual(Cu_GB1_new.rotation_axis, self.Cu_GB1.rotation_axis)
self.assertListEqual(Cu_GB1_new.gb_plane, self.Cu_GB1.gb_plane)
self.assertArrayAlmostEqual(
Cu_GB1_new.init_cell.lattice.matrix, self.Cu_GB1.init_cell.lattice.matrix
)
self.assertArrayAlmostEqual(
Cu_GB1_new.oriented_unit_cell.lattice.matrix,
self.Cu_GB1.oriented_unit_cell.lattice.matrix,
)
self.assertArrayAlmostEqual(
Cu_GB1_new.lattice.matrix, self.Cu_GB1.lattice.matrix
)
self.assertAlmostEqual(Cu_GB2_new.sigma, self.Cu_GB2.sigma)
self.assertAlmostEqual(Cu_GB2_new.rotation_angle, self.Cu_GB2.rotation_angle)
self.assertListEqual(Cu_GB2_new.rotation_axis, self.Cu_GB2.rotation_axis)
self.assertListEqual(Cu_GB2_new.gb_plane, self.Cu_GB2.gb_plane)
self.assertArrayAlmostEqual(
Cu_GB2_new.init_cell.lattice.matrix, self.Cu_GB2.init_cell.lattice.matrix
)
self.assertArrayAlmostEqual(
Cu_GB2_new.oriented_unit_cell.lattice.matrix,
self.Cu_GB2.oriented_unit_cell.lattice.matrix,
)
self.assertArrayAlmostEqual(
Cu_GB2_new.lattice.matrix, self.Cu_GB2.lattice.matrix
)
class GrainBoundaryGeneratorTest(PymatgenTest):
@classmethod
def setUpClass(cls):
warnings.filterwarnings("ignore")
cls.Cu_prim = Structure.from_file(
os.path.join(test_dir, "Cu_mp-30_primitive.cif")
)
cls.GB_Cu_prim = GrainBoundaryGenerator(cls.Cu_prim)
cls.Cu_conv = Structure.from_file(
os.path.join(test_dir, "Cu_mp-30_conventional_standard.cif")
)
cls.GB_Cu_conv = GrainBoundaryGenerator(cls.Cu_conv)
cls.Be = Structure.from_file(
os.path.join(test_dir, "Be_mp-87_conventional_standard.cif")
)
cls.GB_Be = GrainBoundaryGenerator(cls.Be)
cls.Pa = Structure.from_file(
os.path.join(test_dir, "Pa_mp-62_conventional_standard.cif")
)
cls.GB_Pa = GrainBoundaryGenerator(cls.Pa)
cls.Br = Structure.from_file(
os.path.join(test_dir, "Br_mp-23154_conventional_standard.cif")
)
cls.GB_Br = GrainBoundaryGenerator(cls.Br)
cls.Bi = Structure.from_file(
os.path.join(test_dir, "Bi_mp-23152_primitive.cif")
)
cls.GB_Bi = GrainBoundaryGenerator(cls.Bi)
@classmethod
def tearDownClass(cls):
warnings.simplefilter("default")
def test_gb_from_parameters(self):
# from fcc primitive cell,axis[1,2,3],sigma 9.
gb_cu_123_prim1 = self.GB_Cu_prim.gb_from_parameters(
[1, 2, 3], 123.74898859588858, expand_times=2
)
lat_mat1 = gb_cu_123_prim1.lattice.matrix
c_vec1 = np.cross(lat_mat1[0], lat_mat1[1]) / np.linalg.norm(
np.cross(lat_mat1[0], lat_mat1[1])
)
c_len1 = np.dot(lat_mat1[2], c_vec1)
vol_ratio = gb_cu_123_prim1.volume / self.Cu_prim.volume
self.assertAlmostEqual(vol_ratio, 9 * 2 * 2, 8)
# test expand_times and vacuum layer
gb_cu_123_prim2 = self.GB_Cu_prim.gb_from_parameters(
[1, 2, 3], 123.74898859588858, expand_times=4, vacuum_thickness=1.5
)
lat_mat2 = gb_cu_123_prim2.lattice.matrix
c_vec2 = np.cross(lat_mat2[0], lat_mat2[1]) / np.linalg.norm(
np.cross(lat_mat2[0], lat_mat2[1])
)
c_len2 = np.dot(lat_mat2[2], c_vec2)
self.assertAlmostEqual((c_len2 - 1.5 * 2) / c_len1, 2)
# test normal
gb_cu_123_prim3 = self.GB_Cu_prim.gb_from_parameters(
[1, 2, 3], 123.74898859588858, expand_times=2, normal=True
)
lat_mat3 = gb_cu_123_prim3.lattice.matrix
c_vec3 = np.cross(lat_mat3[0], lat_mat3[1]) / np.linalg.norm(
np.cross(lat_mat3[0], lat_mat3[1])
)
ab_len3 = np.linalg.norm(np.cross(lat_mat3[2], c_vec3))
self.assertAlmostEqual(ab_len3, 0)
# test normal in tilt boundary
        # 'np.finfo(np.float32).eps' is the float32 machine epsilon (the gap between 1.0 and the
        # next representable value); it is used because comparing against exactly zero or one
        # made the test fail due to rounding errors.
gb_cu_010_conv1 = self.GB_Cu_conv.gb_from_parameters(
rotation_axis=[0, 1, 0],
rotation_angle=36.8698976458,
expand_times=1,
vacuum_thickness=1.0,
ab_shift=[0.0, 0.0],
rm_ratio=0.0,
plane=[0, 0, 1],
normal=True,
)
self.assertTrue(
np.all(-np.finfo(np.float32).eps <= gb_cu_010_conv1.frac_coords)
)
self.assertTrue(
np.all(1 + np.finfo(np.float32).eps >= gb_cu_010_conv1.frac_coords)
)
        # from fcc conventional cell, axis [1,2,3], sigma 9
gb_cu_123_conv1 = self.GB_Cu_conv.gb_from_parameters(
[1, 2, 3], 123.74898859588858, expand_times=4, vacuum_thickness=1.5
)
lat_mat1 = gb_cu_123_conv1.lattice.matrix
self.assertAlmostEqual(np.dot(lat_mat1[0], [1, 2, 3]), 0)
self.assertAlmostEqual(np.dot(lat_mat1[1], [1, 2, 3]), 0)
# test plane
gb_cu_123_conv2 = self.GB_Cu_conv.gb_from_parameters(
[1, 2, 3],
123.74898859588858,
expand_times=2,
vacuum_thickness=1.5,
normal=False,
plane=[1, 3, 1],
)
lat_mat2 = gb_cu_123_conv2.lattice.matrix
self.assertAlmostEqual(np.dot(lat_mat2[0], [1, 3, 1]), 0)
self.assertAlmostEqual(np.dot(lat_mat2[1], [1, 3, 1]), 0)
# from hex cell,axis [1,1,1], sigma 21
gb_Be_111_1 = self.GB_Be.gb_from_parameters(
[1, 1, 1],
147.36310249644626,
ratio=[5, 2],
expand_times=4,
vacuum_thickness=1.5,
plane=[1, 2, 1],
)
lat_priv = self.Be.lattice.matrix
lat_mat1 = np.matmul(gb_Be_111_1.lattice.matrix, np.linalg.inv(lat_priv))
self.assertAlmostEqual(np.dot(lat_mat1[0], [1, 2, 1]), 0)
self.assertAlmostEqual(np.dot(lat_mat1[1], [1, 2, 1]), 0)
# test volume associated with sigma value
gb_Be_111_2 = self.GB_Be.gb_from_parameters(
[1, 1, 1], 147.36310249644626, ratio=[5, 2], expand_times=4
)
vol_ratio = gb_Be_111_2.volume / self.Be.volume
self.assertAlmostEqual(vol_ratio, 19 * 2 * 4)
# test ratio = None, axis [0,0,1], sigma 7
gb_Be_111_3 = self.GB_Be.gb_from_parameters(
[0, 0, 1], 21.786789298261812, ratio=[5, 2], expand_times=4
)
gb_Be_111_4 = self.GB_Be.gb_from_parameters(
[0, 0, 1], 21.786789298261812, ratio=None, expand_times=4
)
self.assertTupleEqual(gb_Be_111_3.lattice.abc, gb_Be_111_4.lattice.abc)
self.assertTupleEqual(gb_Be_111_3.lattice.angles, gb_Be_111_4.lattice.angles)
gb_Be_111_5 = self.GB_Be.gb_from_parameters(
[3, 1, 0], 180.0, ratio=[5, 2], expand_times=4
)
gb_Be_111_6 = self.GB_Be.gb_from_parameters(
[3, 1, 0], 180.0, ratio=None, expand_times=4
)
self.assertTupleEqual(gb_Be_111_5.lattice.abc, gb_Be_111_6.lattice.abc)
self.assertTupleEqual(gb_Be_111_5.lattice.angles, gb_Be_111_6.lattice.angles)
# gb from tetragonal cell, axis[1,1,1], sigma 15
gb_Pa_111_1 = self.GB_Pa.gb_from_parameters(
[1, 1, 1], 151.92751306414706, ratio=[2, 3], expand_times=4, max_search=10
)
vol_ratio = gb_Pa_111_1.volume / self.Pa.volume
self.assertAlmostEqual(vol_ratio, 17 * 2 * 4)
# gb from orthorhombic cell, axis[1,1,1], sigma 83
gb_Br_111_1 = self.GB_Br.gb_from_parameters(
[1, 1, 1],
131.5023374652235,
ratio=[21, 20, 5],
expand_times=4,
max_search=10,
)
vol_ratio = gb_Br_111_1.volume / self.Br.volume
self.assertAlmostEqual(vol_ratio, 83 * 2 * 4)
        # gb from rhombohedral cell, axis[1,2,0], sigma 63
gb_Bi_120_1 = self.GB_Bi.gb_from_parameters(
[1, 2, 0], 63.310675060280246, ratio=[19, 5], expand_times=4, max_search=5
)
vol_ratio = gb_Bi_120_1.volume / self.Bi.volume
self.assertAlmostEqual(vol_ratio, 59 * 2 * 4)
def test_get_ratio(self):
        # hexagonal
Be_ratio = self.GB_Be.get_ratio(max_denominator=2)
self.assertListEqual(Be_ratio, [5, 2])
Be_ratio = self.GB_Be.get_ratio(max_denominator=5)
self.assertListEqual(Be_ratio, [12, 5])
# tetragonal
Pa_ratio = self.GB_Pa.get_ratio(max_denominator=5)
self.assertListEqual(Pa_ratio, [2, 3])
        # orthorhombic
Br_ratio = self.GB_Br.get_ratio(max_denominator=5)
self.assertListEqual(Br_ratio, [21, 20, 5])
        # orthorhombic
Bi_ratio = self.GB_Bi.get_ratio(max_denominator=5)
self.assertListEqual(Bi_ratio, [19, 5])
def test_enum_sigma_cubic(self):
true_100 = [5, 13, 17, 25, 29, 37, 41]
true_110 = [3, 9, 11, 17, 19, 27, 33, 41, 43]
true_111 = [3, 7, 13, 19, 21, 31, 37, 39, 43, 49]
sigma_100 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [1, 0, 0]).keys())
sigma_110 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [1, 1, 0]).keys())
sigma_111 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [1, 1, 1]).keys())
sigma_222 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [2, 2, 2]).keys())
sigma_888 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [8, 8, 8]).keys())
self.assertListEqual(sorted(true_100), sorted(sigma_100))
self.assertListEqual(sorted(true_110), sorted(sigma_110))
self.assertListEqual(sorted(true_111), sorted(sigma_111))
self.assertListEqual(sorted(true_111), sorted(sigma_222))
self.assertListEqual(sorted(true_111), sorted(sigma_888))
def test_enum_sigma_hex(self):
true_100 = [17, 18, 22, 27, 38, 41]
true_001 = [7, 13, 19, 31, 37, 43, 49]
true_210 = [10, 11, 14, 25, 35, 49]
sigma_100 = list(
GrainBoundaryGenerator.enum_sigma_hex(50, [1, 0, 0], [8, 3]).keys()
)
sigma_001 = list(
GrainBoundaryGenerator.enum_sigma_hex(50, [0, 0, 1], [8, 3]).keys()
)
sigma_210 = list(
GrainBoundaryGenerator.enum_sigma_hex(50, [2, 1, 0], [8, 3]).keys()
)
sigma_420 = list(
GrainBoundaryGenerator.enum_sigma_hex(50, [4, 2, 0], [8, 3]).keys()
)
sigma_840 = list(
GrainBoundaryGenerator.enum_sigma_hex(50, [8, 4, 0], [8, 3]).keys()
)
self.assertListEqual(sorted(true_100), sorted(sigma_100))
self.assertListEqual(sorted(true_001), sorted(sigma_001))
self.assertListEqual(sorted(true_210), sorted(sigma_210))
self.assertListEqual(sorted(true_210), sorted(sigma_420))
self.assertListEqual(sorted(true_210), sorted(sigma_840))
def test_enum_sigma_tet(self):
true_100 = [5, 37, 41, 13, 3, 15, 39, 25, 17, 29]
true_331 = [9, 3, 21, 39, 7, 31, 43, 13, 19, 37, 49]
sigma_100 = list(
GrainBoundaryGenerator.enum_sigma_tet(50, [1, 0, 0], [9, 1]).keys()
)
sigma_331 = list(
GrainBoundaryGenerator.enum_sigma_tet(50, [3, 3, 1], [9, 1]).keys()
)
self.assertListEqual(sorted(true_100), sorted(sigma_100))
self.assertListEqual(sorted(true_331), sorted(sigma_331))
def test_enum_sigma_ort(self):
true_100 = [41, 37, 39, 5, 15, 17, 13, 3, 25, 29]
sigma_100 = list(
GrainBoundaryGenerator.enum_sigma_ort(50, [1, 0, 0], [270, 30, 29]).keys()
)
self.assertListEqual(sorted(true_100), sorted(sigma_100))
def test_enum_sigma_rho(self):
true_100 = [7, 11, 43, 13, 41, 19, 47, 31]
sigma_100 = list(
GrainBoundaryGenerator.enum_sigma_rho(50, [1, 0, 0], [15, 4]).keys()
)
self.assertListEqual(sorted(true_100), sorted(sigma_100))
def test_enum_possible_plane_cubic(self):
all_plane = GrainBoundaryGenerator.enum_possible_plane_cubic(4, [1, 1, 1], 60)
self.assertEqual(len(all_plane["Twist"]), 1)
self.assertEqual(len(all_plane["Symmetric tilt"]), 6)
self.assertEqual(len(all_plane["Normal tilt"]), 12)
def test_get_trans_mat(self):
mat1, mat2 = GrainBoundaryGenerator.get_trans_mat(
[1, 1, 1],
95.55344419565849,
lat_type="o",
ratio=[10, 20, 21],
surface=[21, 20, 10],
normal=True,
)
self.assertAlmostEqual(np.dot(mat1[0], [21, 20, 10]), 0)
self.assertAlmostEqual(np.dot(mat1[1], [21, 20, 10]), 0)
self.assertAlmostEqual(np.linalg.det(mat1), np.linalg.det(mat2))
ab_len1 = np.linalg.norm(np.cross(mat1[2], [1, 1, 1]))
self.assertAlmostEqual(ab_len1, 0)
def test_get_rotation_angle_from_sigma(self):
true_angle = [12.680383491819821, 167.3196165081802]
angle = GrainBoundaryGenerator.get_rotation_angle_from_sigma(
41, [1, 0, 0], lat_type="o", ratio=[270, 30, 29]
)
self.assertArrayAlmostEqual(true_angle, angle)
close_angle = [36.86989764584403, 143.13010235415598]
angle = GrainBoundaryGenerator.get_rotation_angle_from_sigma(
6, [1, 0, 0], lat_type="o", ratio=[270, 30, 29]
)
self.assertArrayAlmostEqual(close_angle, angle)
if __name__ == "__main__":
unittest.main()
```
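For quick reference, the snippet below condenses the call pattern these tests exercise into a standalone sketch. It is illustrative only (not part of pymatgen's test suite) and assumes the same `Cu_mp-30_conventional_standard.cif` file used above is available in the working directory.
```python
# Build the same Sigma 9 [1, 2, 3] Cu grain boundary exercised in Test_GrainBoundary.
from pymatgen import Structure
from pymatgen.analysis.gb.grain import GrainBoundaryGenerator

cu_conv = Structure.from_file("Cu_mp-30_conventional_standard.cif")  # assumed local copy
generator = GrainBoundaryGenerator(cu_conv)
gb = generator.gb_from_parameters(
    [1, 2, 3],              # rotation axis
    123.74898859588858,     # rotation angle giving Sigma 9
    expand_times=4,
    vacuum_thickness=1.5,
    ab_shift=[0.0, 0.0],
    rm_ratio=0.0,
)
print(gb.sigma, gb.rotation_angle, gb.num_sites)
```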
#### File: ext/tests/test_jhu.py
```python
import os
import unittest
from pymatgen.ext.jhu import get_kpoints
from pymatgen.io.vasp.inputs import Incar
from pymatgen.io.vasp.sets import MPRelaxSet
from pymatgen.util.testing import PymatgenTest
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, The Materials Project"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "June 22, 2017"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_files")
class JhuTest(PymatgenTest):
_multiprocess_shared_ = True
def test_get_kpoints(self):
si = PymatgenTest.get_structure("Si")
input_set = MPRelaxSet(si)
kpoints = get_kpoints(si, incar=input_set.incar)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jiqialin/comet.fms",
"score": 2
} |
#### File: comet/util/runAndUpload.py
```python
import os
import requests
from httprunner.api import HttpRunner
from comet.util.parserIni import getConfigValue
from comet.util.DataCombing import dataManipulation
class RunCaseAndUpload(object):
def __init__(self, **kwargs):
self.runner = HttpRunner()
        self.section = kwargs.get('section')
        self.build_id = kwargs.get('buildId')
        # section/build_id must be set before running, since _runTestCase reads self.section
        self.summary = self._runTestCase()
# noinspection SpellCheckingInspection
def _runTestCase(self, dot_env_path=None, maping=None):
"""
Run the test case in the specified directory based on the parameters.
        :param dot_env_path: if None, run against the default environment; otherwise load the given .env file
        :param maping: optional variable mapping forwarded to HttpRunner.run
:return: test case result.
"""
path = getConfigValue(self.section, 'autoTest.casePath')
if not path:
print("test path nonentity or not find files 'properties.ini'")
raise FileNotFoundError
elif os.getenv("TEST_ENV") == 'txy':
self.runner.run(path, dot_env_path=dot_env_path, mapping=maping)
return self.runner.summary
elif os.getenv("TEST_ENV") == 'docker':
self.runner.run(path, dot_env_path='doc.env', mapping=maping)
return self.runner.summary
def uploadDataToStargazing(self):
"""
        After processing is completed, the results are uploaded to the Stargazing platform.
:return:
"""
headers = {'Content-Type': 'application/json'}
url = getConfigValue('stargazing', 'total')
data = dataManipulation(self.summary, self.section, buildId=self.build_id)
# noinspection PyBroadException
try:
response = requests.post(url, json=data, headers=headers)
print("upload success: {}".format(response.json() if response.status_code == 200 else "upload failed:",
response.content))
except Exception as e:
print(f'Upload Exception:{e}')
```
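A minimal driver sketch for the class above, assuming `properties.ini` defines the referenced section and the `stargazing`/`total` URL; the section name and build id below are placeholders.
```python
from comet.util.runAndUpload import RunCaseAndUpload

# Placeholder values -- they must match entries in the project's properties.ini
# and the build id supplied by the CI job.
job = RunCaseAndUpload(section="autoTest", buildId="101")
job.uploadDataToStargazing()  # the test summary is collected in __init__ via _runTestCase()
```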
#### File: jiqialin/comet.fms/setup.py
```python
import os, sys
from shutil import rmtree
from setuptools import setup, find_packages, Command
APP_NAME = 'comet'
VERSION = '0.1.2'
DESCRIPTION = """
Used to integrate the httpRunner framework, join up CI/CD to the platform, and upload the results to the platform
"""
PROJECTS = 'https://github.com/jiqialin/comet.fms'
HERE = os.path.abspath(os.path.dirname(__file__))
__version__ = None
about = dict()
if not VERSION:
    with open(os.path.join(HERE, APP_NAME, 'models', '__version__.py'), 'rb') as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
# Get the long description from the README file
with open(os.path.join(HERE, "README.md"), encoding="utf-8") as f:
long_description = f.read()
class UploadCommand(Command):
"""Support setup.py upload."""
...
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(HERE, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name='comet.fms',
version=VERSION,
description=DESCRIPTION,
long_description=f'Just Enjoy:{long_description}',
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
# supported python versions
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries",
],
keywords='python httpRunner jenkins comet.fms terminal CI/CD',
author='Angst',
author_email='<EMAIL>',
url=PROJECTS,
license='Apache-2.0',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
install_requires=[
'requests',
'httprunner',
'pymysql',
'requests-toolbelt'
],
entry_points={
'console_scripts': [
'comet.fms = comet.comet:MainRun'
]
},
)
print("\nWelcome to Comet!")
print("If you have any questions, please visit our documentation page: {}\n".format(PROJECTS))
``` |
{
"source": "JiqianDong/BDD-100-CV",
"score": 2
} |
#### File: JiqianDong/BDD-100-CV/decision_generator_model.py
```python
from imports import *
class MHSA(nn.Module):
def __init__(self,
emb_dim,
kqv_dim,
num_heads=1):
super(MHSA, self).__init__()
self.emb_dim = emb_dim
self.kqv_dim = kqv_dim
self.num_heads = num_heads
self.w_k = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False)
self.w_q = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False)
self.w_v = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False)
self.w_out = nn.Linear(kqv_dim * num_heads, emb_dim)
def forward(self, x):
b, t, _ = x.shape
e = self.kqv_dim
h = self.num_heads
keys = self.w_k(x).view(b, t, h, e)
values = self.w_v(x).view(b, t, h, e)
queries = self.w_q(x).view(b, t, h, e)
keys = keys.transpose(2, 1)
queries = queries.transpose(2, 1)
values = values.transpose(2, 1)
dot = queries @ keys.transpose(3, 2)
dot = dot / np.sqrt(e)
dot = nn.functional.softmax(dot, dim=3)
out = dot @ values
out = out.transpose(1,2).contiguous().view(b, t, h * e)
out = self.w_out(out)
return out
class MHSA2(nn.Module):
def __init__(self,
emb_dim,
kqv_dim,
output_dim=10,
num_heads=8):
super(MHSA2, self).__init__()
self.emb_dim = emb_dim
self.kqv_dim = kqv_dim
self.num_heads = num_heads
self.w_k = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False)
self.w_q = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False)
self.w_v = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False)
self.w_out = nn.Linear(kqv_dim * num_heads, output_dim)
def forward(self, x):
b, t, _ = x.shape
e = self.kqv_dim
h = self.num_heads
keys = self.w_k(x).view(b, t, h, e)
values = self.w_v(x).view(b, t, h, e)
queries = self.w_q(x).view(b, t, h, e)
keys = keys.transpose(2, 1)
queries = queries.transpose(2, 1)
values = values.transpose(2, 1)
dot = queries @ keys.transpose(3, 2)
dot = dot / np.sqrt(e)
dot = nn.functional.softmax(dot, dim=3)
out = dot @ values
out = out.transpose(1,2).contiguous().view(b, t, h * e)
out = self.w_out(out)
return out
class DecisionGenerator(nn.Module):
def __init__(self,faster_rcnn_model,device,batch_size,select_k=1,action_num=4,explanation_num=21,freeze_rcnn=True):
super().__init__()
self.rcnn = faster_rcnn_model
self.batch_size = batch_size
if freeze_rcnn:
for param in self.rcnn.parameters():
param.requires_grad = False
self.rcnn.eval()
self.object_attention = MHSA(1024, kqv_dim=10, num_heads=5)
self.roi_pooling_conv = nn.Conv1d(in_channels=1000,out_channels=select_k,kernel_size=1)
self.action_branch = nn.Sequential(
nn.Linear(select_k*1024, 1024),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(1024, action_num))
self.explanation_branch = nn.Sequential(
nn.Linear(select_k*1024, 1024),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(1024, explanation_num))
self.action_loss_fn, self.reason_loss_fn = self.loss_fn(device)
def loss_fn(self,device):
class_weights = [1, 1, 2, 2]
w = torch.FloatTensor(class_weights).to(device)
action_loss = nn.BCEWithLogitsLoss(pos_weight=w).to(device)
explanation_loss = nn.BCEWithLogitsLoss().to(device)
return action_loss,explanation_loss
def forward(self,images,targets=None):
if self.training:
assert targets is not None
target_reasons = torch.stack([t['reason'] for t in targets])
target_actions = torch.stack([t['action'] for t in targets])
with torch.no_grad():
self.rcnn.eval()
batch_size = len(images)
images,_ = self.rcnn.transform(images)
features = self.rcnn.backbone(images.tensors)
proposals, _ = self.rcnn.rpn(images, features)
box_features = self.rcnn.roi_heads.box_roi_pool(features,proposals,images.image_sizes)
box_features = self.rcnn.roi_heads.box_head(box_features).view(batch_size, -1, 1024) #(B, num_proposal, 1024)
box_features = self.object_attention(box_features) #(B, num_proposal, 1024)
# feature_polled,_ = torch.max(box_features,1)
feature_polled = self.roi_pooling_conv(box_features)
# print(feature_polled.shape)
feature_polled = torch.flatten(feature_polled,start_dim=1)
# print(feature_polled.shape)
actions = self.action_branch(feature_polled)
reasons = self.explanation_branch(feature_polled)
if self.training:
action_loss = self.action_loss_fn(actions, target_actions)
reason_loss = self.reason_loss_fn(reasons, target_reasons)
loss_dic = {"action_loss":action_loss, "reason_loss":reason_loss}
return loss_dic
else:
return {"action":torch.sigmoid(actions),"reasons":torch.sigmoid(reasons)}
class DecisionGenerator_v1(nn.Module):
def __init__(self,faster_rcnn_model,device,batch_size,action_num=4,explanation_num=21,freeze_rcnn=True):
super().__init__()
self.rcnn = faster_rcnn_model
self.batch_size = batch_size
if freeze_rcnn:
for param in self.rcnn.parameters():
param.requires_grad = False
self.rcnn.eval()
self.object_attention = MHSA(1024, kqv_dim=10, num_heads=5)
self.action_branch = nn.Linear(1024,action_num)
self.explanation_branch = nn.Linear(1024, explanation_num)
self.action_loss_fn, self.reason_loss_fn = self.loss_fn(device)
def loss_fn(self,device):
class_weights = [1, 1, 2, 2]
w = torch.FloatTensor(class_weights).to(device)
action_loss = nn.BCEWithLogitsLoss(pos_weight=w).to(device)
explanation_loss = nn.BCEWithLogitsLoss().to(device)
return action_loss,explanation_loss
def forward(self,images,targets=None):
if self.training:
target_reasons = torch.stack([t['reason'] for t in targets])
target_actions = torch.stack([t['action'] for t in targets])
with torch.no_grad():
self.rcnn.eval()
batch_size = len(images)
images,_ = self.rcnn.transform(images)
features = self.rcnn.backbone(images.tensors)
proposals, _ = self.rcnn.rpn(images, features)
box_features = self.rcnn.roi_heads.box_roi_pool(features,proposals,images.image_sizes)
box_features = self.rcnn.roi_heads.box_head(box_features).view(batch_size, -1, 1024) #(B, num_proposal, 1024)
box_features = self.object_attention(box_features) #(B, num_proposal, 1024)
feature_polled,_ = torch.max(box_features,1)
actions = self.action_branch(feature_polled)
reasons = self.explanation_branch(feature_polled)
if self.training:
action_loss = self.action_loss_fn(actions, target_actions)
reason_loss = self.reason_loss_fn(reasons, target_reasons)
loss_dic = {"action_loss":action_loss, "reason_loss":reason_loss}
return loss_dic
else:
return {"action":torch.sigmoid(actions),"reasons":torch.sigmoid(reasons)}
class DecisionGenerator_v3(nn.Module): # hard attention
    def __init__(self,faster_rcnn_model,device,batch_size,select_k=5,action_num=4,explanation_num=21,freeze_rcnn=True):
super().__init__()
self.rcnn = faster_rcnn_model
self.batch_size = batch_size
self.select_k = select_k
if freeze_rcnn:
for param in self.rcnn.parameters():
param.requires_grad = False
self.rcnn.eval()
self.attention_score = nn.Sequential(nn.Linear(1024,512),
nn.ReLU(),
nn.Linear(512,1),
nn.Softmax(dim=1))
self.roi_pooling_conv = nn.Conv1d(in_channels=1000,out_channels=select_k,kernel_size=1)
self.action_branch = nn.Sequential(
nn.Linear(select_k*1024, 1024),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(1024, action_num))
self.explanation_branch = nn.Sequential(
nn.Linear(select_k*1024, 1024),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(1024, explanation_num))
self.action_loss_fn, self.reason_loss_fn = self.loss_fn(device)
def loss_fn(self,device):
class_weights = [1, 1, 2, 2]
w = torch.FloatTensor(class_weights).to(device)
action_loss = nn.BCEWithLogitsLoss(pos_weight=w).to(device)
explanation_loss = nn.BCEWithLogitsLoss().to(device)
return action_loss,explanation_loss
def forward(self,images,targets=None):
if self.training:
assert targets is not None
target_reasons = torch.stack([t['reason'] for t in targets])
target_actions = torch.stack([t['action'] for t in targets])
with torch.no_grad():
self.rcnn.eval()
batch_size = len(images)
images,_ = self.rcnn.transform(images)
features = self.rcnn.backbone(images.tensors)
proposals, _ = self.rcnn.rpn(images, features)
box_features = self.rcnn.roi_heads.box_roi_pool(features,proposals,images.image_sizes)
box_features = self.rcnn.roi_heads.box_head(box_features).view(batch_size, -1, 1024) #(B, num_proposal, 1024)
        score = self.attention_score(box_features)  # (B, num_proposal, 1)
_,ind = torch.topk(score,k=self.select_k,dim=1)
### cnn for dimensional reduction
# box_features = box_features * score
# feature_polled = self.roi_pooling_conv(box_features)
feature_polled = torch.gather(box_features,1,ind.expand(ind.size(0),ind.size(1),box_features.size(2))) #select_top_k
feature_polled = torch.flatten(feature_polled,start_dim=1)
actions = self.action_branch(feature_polled)
reasons = self.explanation_branch(feature_polled)
if self.training:
action_loss = self.action_loss_fn(actions, target_actions)
reason_loss = self.reason_loss_fn(reasons, target_reasons)
loss_dic = {"action_loss":action_loss, "reason_loss":reason_loss}
return loss_dic
else:
return {"action":torch.sigmoid(actions),"reasons":torch.sigmoid(reasons)}
class DecisionGenerator_v4(nn.Module):
def __init__(self,faster_rcnn_model,device,batch_size,select_k=2,action_num=4,explanation_num=21,freeze_rcnn=True):
super().__init__()
self.rcnn = faster_rcnn_model
self.batch_size = batch_size
if freeze_rcnn:
for param in self.rcnn.parameters():
param.requires_grad = False
self.rcnn.eval()
self.object_attention = MHSA2(1024, kqv_dim=10, num_heads=8, output_dim=10)
self.action_branch = nn.Sequential(
nn.Linear(10000, 1024),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(1024, action_num))
self.explanation_branch = nn.Sequential(
nn.Linear(10000, 1024),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(1024, explanation_num))
self.action_loss_fn, self.reason_loss_fn = self.loss_fn(device)
def loss_fn(self,device):
class_weights = [1, 1, 2, 2]
w = torch.FloatTensor(class_weights).to(device)
action_loss = nn.BCEWithLogitsLoss(pos_weight=w).to(device)
explanation_loss = nn.BCEWithLogitsLoss().to(device)
return action_loss,explanation_loss
def forward(self,images,targets=None):
if self.training:
assert targets is not None
target_reasons = torch.stack([t['reason'] for t in targets])
target_actions = torch.stack([t['action'] for t in targets])
with torch.no_grad():
self.rcnn.eval()
batch_size = len(images)
images,_ = self.rcnn.transform(images)
features = self.rcnn.backbone(images.tensors)
proposals, _ = self.rcnn.rpn(images, features)
box_features = self.rcnn.roi_heads.box_roi_pool(features,proposals,images.image_sizes)
box_features = self.rcnn.roi_heads.box_head(box_features).view(batch_size, -1, 1024) #(B, num_proposal, 1024)
box_features = self.object_attention(box_features) #(B, num_proposal, 10)
# feature_polled,_ = torch.max(box_features,1)
# print(feature_polled.shape)
feature_polled = torch.flatten(box_features,start_dim=1)
# print(feature_polled.shape)
actions = self.action_branch(feature_polled)
reasons = self.explanation_branch(feature_polled)
if self.training:
action_loss = self.action_loss_fn(actions, target_actions)
reason_loss = self.reason_loss_fn(reasons, target_reasons)
loss_dic = {"action_loss":action_loss, "reason_loss":reason_loss}
return loss_dic
else:
return {"action":torch.sigmoid(actions),"reasons":torch.sigmoid(reasons)}
class DecisionGenerator_whole_attention(nn.Module):
def __init__(self, encoder, encoder_dims, device, num_heads=8, \
attention_out_dim=10, action_num=4, explanation_num=21):
super().__init__()
"""
encoder_dims = (F,H,W)
        F: number of feature channels (1280 for MobileNetV2, 2048 for ResNet-50)
        H, W: spatial height and width of the encoder's feature map
"""
self.encoder = encoder
assert len(encoder_dims) == 3, "encoder_dims has to be a triplet with shape (F,H,W)"
F,H,W = encoder_dims
self.MHSA = MHSA2(emb_dim=F,kqv_dim=10,output_dim=attention_out_dim,num_heads=num_heads)
T = H*W
self.action_branch = nn.Sequential(
nn.Linear(attention_out_dim*T,64),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(64,action_num))
self.explanation_branch = nn.Sequential(
nn.Linear(attention_out_dim*T,64),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(64, explanation_num))
self.action_loss_fn, self.reason_loss_fn = self.loss_fn(device)
def loss_fn(self,device):
class_weights = [1, 1, 2, 2]
w = torch.FloatTensor(class_weights).to(device)
action_loss = nn.BCEWithLogitsLoss(pos_weight=w).to(device)
explanation_loss = nn.BCEWithLogitsLoss().to(device)
return action_loss,explanation_loss
def forward(self,images,targets=None):
images = torch.stack(images)
if self.training:
assert targets is not None
target_reasons = torch.stack([t['reason'] for t in targets])
target_actions = torch.stack([t['action'] for t in targets])
# print(images.shape)
features = self.encoder(images) #
# print(features.shape)
B,F,H,W = features.shape
# print(features.view(B,F,H*W).transpose(1,2).shape)
        transformed_feature = self.MHSA(features.view(B,F,H*W).transpose(1,2))  # (B, T, attention_out_dim), where T = H*W
# print(transformed_feature.shape)
feature_polled = torch.flatten(transformed_feature,start_dim=1)
# print(feature_polled.shape)
actions = self.action_branch(feature_polled)
reasons = self.explanation_branch(feature_polled)
if self.training:
action_loss = self.action_loss_fn(actions, target_actions)
reason_loss = self.reason_loss_fn(reasons, target_reasons)
loss_dic = {"action_loss":action_loss, "reason_loss":reason_loss}
return loss_dic
else:
return {"action":torch.sigmoid(actions),"reasons":torch.sigmoid(reasons)}
class DecisionGenerator_no_attention(nn.Module):
def __init__(self, encoder, encoder_dims, device, action_num=4, explanation_num=21):
super().__init__()
"""
encoder_dims = (F,H,W)
        F: number of feature channels (1280 for MobileNetV2, 2048 for ResNet-50)
        H, W: spatial height and width of the encoder's feature map
"""
self.encoder = encoder
assert len(encoder_dims) == 3, "encoder_dims has to be a triplet with shape (F,H,W)"
F,H,W = encoder_dims
in_dim = H*W*F
self.action_branch = nn.Sequential(
nn.Linear(in_dim,12),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(12,action_num))
self.explanation_branch = nn.Sequential(
nn.Linear(in_dim,12),
nn.ReLU(),
# nn.Dropout(),
nn.Linear(12, explanation_num))
self.action_loss_fn, self.reason_loss_fn = self.loss_fn(device)
def loss_fn(self,device):
class_weights = [1, 1, 2, 2]
w = torch.FloatTensor(class_weights).to(device)
action_loss = nn.BCEWithLogitsLoss(pos_weight=w).to(device)
explanation_loss = nn.BCEWithLogitsLoss().to(device)
return action_loss,explanation_loss
def forward(self,images,targets=None):
images = torch.stack(images)
if self.training:
assert targets is not None
target_reasons = torch.stack([t['reason'] for t in targets])
target_actions = torch.stack([t['action'] for t in targets])
# print(images.shape)
features = self.encoder(images) #
# print(features.shape)
B,F,H,W = features.shape
# print(features.view(B,F,H*W).transpose(1,2).shape)
# print(transformed_feature.shape)
feature_polled = torch.flatten(features,start_dim=1)
# print(feature_polled.shape)
# print(feature_polled.shape)
actions = self.action_branch(feature_polled)
reasons = self.explanation_branch(feature_polled)
if self.training:
action_loss = self.action_loss_fn(actions, target_actions)
reason_loss = self.reason_loss_fn(reasons, target_reasons)
loss_dic = {"action_loss":action_loss, "reason_loss":reason_loss}
return loss_dic
else:
return {"action":torch.sigmoid(actions),"reasons":torch.sigmoid(reasons)}
```
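As a sanity check on the attention blocks defined above, the sketch below runs a dummy tensor through `MHSA` to confirm it preserves the `(batch, tokens, emb_dim)` shape; the sizes are arbitrary and the repository's `imports` module is assumed to be importable.
```python
import torch
from decision_generator_model import MHSA

attn = MHSA(emb_dim=1024, kqv_dim=10, num_heads=5)
x = torch.randn(2, 1000, 1024)   # (batch, num_proposals, feature_dim), as produced by the RoI head
out = attn(x)
print(out.shape)                 # torch.Size([2, 1000, 1024]) -- same shape as the input
```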
#### File: JiqianDong/BDD-100-CV/evaluation_baseline.py
```python
import pickle
import time
from cfg import *
from coco_eval import CocoEvaluator
from coco_utils import get_coco_api_from_dataset
from datasets.bdd import *
from datasets.idd import *
from imports import *
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print("Loading files")
if ds in ["idd_non_hq", "idd_hq"]:
print("Evaluation on India Driving dataset")
with open("datalists/idd_images_path_list.txt", "rb") as fp:
idd_image_path_list = pickle.load(fp)
with open("datalists/idd_anno_path_list.txt", "rb") as fp:
idd_anno_path_list = pickle.load(fp)
val_img_paths = []
with open(idd_path + "val.txt") as f:
val_img_paths = f.readlines()
for i in range(len(val_img_paths)):
val_img_paths[i] = val_img_paths[i].strip("\n")
val_img_paths[i] = val_img_paths[i] + ".jpg"
val_img_paths[i] = os.path.join(idd_path + "JPEGImages", val_img_paths[i])
val_anno_paths = []
for i in range(len(val_img_paths)):
val_anno_paths.append(val_img_paths[i].replace("JPEGImages", "Annotations"))
val_anno_paths[i] = val_anno_paths[i].replace(".jpg", ".xml")
val_img_paths, val_anno_paths = sorted(val_img_paths), sorted(val_anno_paths)
assert len(val_img_paths) == len(val_anno_paths)
val_img_paths = val_img_paths[:10]
val_anno_paths = val_anno_paths[:10]
val_dataset = IDD(val_img_paths, val_anno_paths, None)
val_dl = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=4,
collate_fn=utils.collate_fn,
)
if ds == "bdd100k":
print("Evaluation on Berkeley Deep Drive")
root_img_path = os.path.join(bdd_path,"bdd100k", "images", "100k")
root_anno_path = os.path.join(bdd_path, "bdd100k", "labels")
val_img_path = root_img_path + "/val/"
val_anno_json_path = root_anno_path + "/bdd100k_labels_images_val.json"
with open("datalists/bdd100k_val_images_path.txt", "rb") as fp:
bdd_img_path_list = pickle.load(fp)
# bdd_img_path_list = bdd_img_path_list[:10]
val_dataset = BDD(bdd_img_path_list, val_anno_json_path)
val_dl = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0,
collate_fn=utils.collate_fn,
pin_memory=True,
)
if ds == "Cityscapes":
with open("datalists/cityscapes_val_images_path.txt", "rb") as fp:
images = pickle.load(fp)
with open("datalists/cityscapes_val_targets_path.txt", "rb") as fp:
targets = pickle.load(fp)
val_dataset = Cityscapes(images, targets)
val_dl = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=4,
collate_fn=utils.collate_fn,
)
###################################################################################################
def get_model(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = torchvision.models.detection.faster_rcnn.FastRCNNPredictor(
in_features, num_classes
) # replace the pre-trained head with a new one
return model.cuda()
model = get_model(12)
model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
checkpoint = torch.load("saved_models/" + 'download.pth')
model.load_state_dict(checkpoint["model"])
print("Model Loaded successfully")
print("##### Dataloader is ready #######")
print("Getting coco api from dataset")
coco = get_coco_api_from_dataset(val_dl.dataset)
print("Done")
print("Evaluation in progress")
evaluate(model, val_dl, device=device)
```
#### File: JiqianDong/BDD-100-CV/train_decision_generator.py
```python
from imports import *
from datasets.bdd_oia import BDD_OIA
from decision_generator_model import *
def get_encoder(model_name):
if model_name == 'mobile_net':
md = torchvision.models.mobilenet_v2(pretrained=True)
encoder = nn.Sequential(*list(md.children())[:-1])
elif model_name == 'resnet':
md = torchvision.models.resnet50(pretrained=True)
encoder = nn.Sequential(*list(md.children())[:-2])
return encoder
def get_model(num_classes,image_mean=None,image_std=None):
# model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True,
# image_mean=image_mean,
# image_std=image_std)
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = torchvision.models.detection.faster_rcnn.FastRCNNPredictor(in_features,num_classes)
return model
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def train_one_epoch2(model, optimizer, data_loader, device, epoch, print_freq):
global num_iters
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
header = "Epoch: [{}]".format(epoch)
for images, targets in metric_logger.log_every(data_loader, print_freq, header):
if len(images)!=len(targets):
continue
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
loss_dict = model(images, targets)
num_iters += 1
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
loss_value = losses_reduced.item()
writer.add_scalar("Loss/train", loss_value, num_iters)
writer.add_scalar("Learning rate", optimizer.param_groups[0]["lr"], num_iters)
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
optimizer.step()
metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
return loss_value
def get_loader(version):
## Data loader
image_dir = './data/bdd_oia/lastframe/data/'
label_dir = './data/bdd_oia/lastframe/labels/'
if version == "whole_attention" or "no_attention":
bdd_oia_dataset = BDD_OIA(image_dir,label_dir+'train_25k_images_actions.json',
label_dir+'train_25k_images_reasons.json',
image_min_size=180)
else:
bdd_oia_dataset = BDD_OIA(image_dir,label_dir+'train_25k_images_actions.json',
label_dir+'train_25k_images_reasons.json')
training_loader = DataLoader(bdd_oia_dataset,
shuffle=True,
batch_size=batch_size,
num_workers=0,
drop_last=True,
collate_fn=utils.collate_fn)
return training_loader
if __name__ == "__main__":
# model_name = "whole_attention_v1_"
# version = "whole_attention"
# encoder_name = "mobile_net"
# encoder_dims=(1280,6,10)
##########################################################################
# model_name = "whole_attention_resnet_"
# version = "whole_attention"
# encoder_name = "resnet"
# encoder_dims=(2048,6,10)
##########################################################################
model_name = "no_attention_resnet_"
version = "no_attention"
encoder_name = "resnet"
encoder_dims = (2048,6,10)
##########################################################################
# model_name = 'v4_mhsa_test'
# version = 'v4'
# sel_k = 10
##########################################################################
device = torch.device("cuda:0")
batch_size = 10
training_loader = get_loader(version)
num_iters = 0
if version == 'whole_attention':
encoder = get_encoder(encoder_name)
decision_generator = DecisionGenerator_whole_attention(encoder,
encoder_dims=encoder_dims,
device=device)
writer = SummaryWriter('./runs/whole_attention/'+model_name+'/')
elif version == "no_attention":
encoder = get_encoder(encoder_name)
decision_generator = DecisionGenerator_no_attention(encoder,
encoder_dims=encoder_dims,
device=device)
writer = SummaryWriter('./runs/whole_attention/'+model_name+'/')
else:
fastercnn = get_model(10)
checkpoint = torch.load('saved_models/bdd100k_24.pth')
fastercnn.load_state_dict(checkpoint['model'])
writer = SummaryWriter('./runs/'+model_name+'/')
if version == 'v3':
decision_generator = DecisionGenerator_v3(fastercnn,device,batch_size, select_k=sel_k)
elif version == 'v1':
decision_generator = DecisionGenerator_v1(fastercnn,device, batch_size)
elif version == 'v4':
decision_generator = DecisionGenerator_v4(fastercnn,device, batch_size)
else:
decision_generator = DecisionGenerator(fastercnn,device,batch_size, select_k=sel_k)
decision_generator = decision_generator.to(device)
#### continue training
# checkpoint = torch.load("/home/ai/Desktop/Jiqian work/work4/saved_models/v3_hard_sel_1039.pth")
# decision_generator.load_state_dict(checkpoint["model"])
params = [p for p in decision_generator.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.001, momentum=0.9, weight_decay=0.0005)
# optimizer = torch.optim.Adam(params,lr=0.001, weight_decay=5e-5)
# lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=1e-3, max_lr=6e-3)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
num_epochs = 40
for epoch in tqdm(range(num_epochs)):
# try:
loss_value = train_one_epoch2(decision_generator, optimizer, training_loader, device, epoch, print_freq=200)
lr_scheduler.step(loss_value)
# except Exception as e:
# print(e)
# train_one_epoch2(decision_generator, optimizer, training_loader, device, epoch, print_freq=200)
if (epoch+1)%20==0:
if version == "whole_attention":
save_name = "../saved_models/whole_attention/%s"%model_name + str(epoch) + ".pth"
else:
save_name = "../saved_models/%s"%model_name + str(epoch) + ".pth"
torch.save(
{"model": decision_generator.state_dict(), "optimizer": optimizer.state_dict(),},
save_name,
)
print("Saved model", save_name)
```
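Checkpoints are written as a dict with `"model"` and `"optimizer"` keys, so reloading one for evaluation can follow the commented-out "continue training" lines above. A hedged sketch (the path and constructor arguments are placeholders and must match the training run):
```python
import torch
from decision_generator_model import DecisionGenerator_no_attention
from train_decision_generator import get_encoder  # __main__ guard keeps the import side-effect free

device = torch.device("cuda:0")
encoder = get_encoder("resnet")
model = DecisionGenerator_no_attention(encoder, encoder_dims=(2048, 6, 10), device=device)

checkpoint = torch.load("../saved_models/no_attention_resnet_39.pth")  # placeholder path
model.load_state_dict(checkpoint["model"])
model.to(device).eval()
```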
#### File: JiqianDong/BDD-100-CV/train_soft_attention.py
```python
from imports import *
from datasets.bdd_oia import BDD_OIA_NLP
from soft_attention_model import *
def get_encoder(model_name):
if model_name == 'mobile_net':
md = torchvision.models.mobilenet_v2(pretrained=True)
encoder = nn.Sequential(*list(md.children())[:-1])
elif model_name == 'resnet':
md = torchvision.models.resnet50(pretrained=True)
encoder = nn.Sequential(*list(md.children())[:-2])
return encoder
def train_one_epoch2(encoder, decoder, decoder_optimizer, data_loader, device, epoch, print_freq, encoder_optimizer=None):
global num_iters
encoder.train()
decoder.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
header = "Epoch: [{}]".format(epoch)
for image_batch, labels_batch in metric_logger.log_every(data_loader, print_freq, header):
if len(image_batch)!=len(labels_batch):
continue
reasons_batch = [l['reason'].to(device) for l in labels_batch]
image_features = encoder(torch.stack(image_batch).to(device))
loss , scores, attention_weights, hs = decoder(image_features, reasons_batch)
num_iters += 1
loss_value = loss.item()
# print(loss_value)
# reduce losses over all GPUs for logging purposes
writer.add_scalar("Loss/train", loss_value, num_iters)
writer.add_scalar("Decoder earning rate", decoder_optimizer.param_groups[0]["lr"], num_iters)
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_value)
sys.exit(1)
decoder_optimizer.zero_grad()
loss.backward()
decoder_optimizer.step()
metric_logger.update(loss=loss_value)
metric_logger.update(lr=decoder_optimizer.param_groups[0]["lr"])
return loss_value
def get_loader(batch_size):
## Data loader
image_dir = './data/bdd_oia/lastframe/data/'
label_dir = './data/bdd_oia/lastframe/labels/'
bdd_oia_dataset = BDD_OIA_NLP(image_dir, label_dir+'no_train.pkl', label_dir+'ind_to_word.pkl',image_min_size=180)
training_loader = DataLoader(bdd_oia_dataset,
shuffle=True,
batch_size=batch_size,
num_workers=0,
drop_last=True,
collate_fn=utils.collate_fn)
NULL_INDEX = bdd_oia_dataset.word_to_ind['NULL']
DICT_SIZE = len(bdd_oia_dataset.word_to_ind.keys())
return training_loader,NULL_INDEX,DICT_SIZE
if __name__ == "__main__":
encoder_model = 'resnet'
image_f_dim = 2048
using_gate = False
model_name = 'soft_attention_resnet_nogate'
# encoder_model = 'mobile_net'
# image_f_dim = 1280
# model_name = 'soft_attention'
version = 'v1'
num_epochs = 15
DEVICE = torch.device("cuda:1")
batch_size = 10
training_loader,NULL_INDEX,DICT_SIZE = get_loader(batch_size)
writer = SummaryWriter('./runs/soft_attention/'+model_name+'/')
encoder = get_encoder(encoder_model)
decoder = ReasonDecoder(image_f_dim=image_f_dim,\
embedding_dim=128, \
hidden_dim=128, \
dict_size=DICT_SIZE, \
device=DEVICE,\
null_index=NULL_INDEX, \
using_gate=using_gate)
encoder.to(DEVICE)
decoder.to(DEVICE)
#### continue training
# checkpoint = torch.load("/home/ai/Desktop/Jiqian work/work4/saved_models/v3_hard_sel_1039.pth")
# decision_generator.load_state_dict(checkpoint["model"])
decoder_params = [p for p in decoder.parameters() if p.requires_grad]
# print(len(decoder_params))
decoder_optimizer = torch.optim.SGD(decoder_params, lr=0.001, momentum=0.9, weight_decay=0.0005)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(decoder_optimizer)
for epoch in tqdm(range(num_epochs)):
# try:
loss_value = train_one_epoch2(encoder,
decoder,
decoder_optimizer,
training_loader,
DEVICE,
epoch,
print_freq=200)
lr_scheduler.step(loss_value)
# except Exception as e:
# print(e)
# train_one_epoch2(decision_generator, optimizer, training_loader, device, epoch, print_freq=200)
if (epoch+1)%5==0:
save_name = "../saved_models/soft_attention/%s"%model_name + str(epoch) + ".pth"
torch.save(
{"encoder": encoder.state_dict(),
"decoder":decoder.state_dict(),
"decoder_optimizer": decoder_optimizer.state_dict()},
save_name,
)
print("Saved model", save_name)
``` |
{
"source": "jiqirenno1/centerpoint123",
"score": 2
} |
#### File: det3d/datasets/dataset_factory.py
```python
from .nuscenes import NuScenesDataset
from .waymo import WaymoDataset
from .kitti import KittiDataset
dataset_factory = {
"KITTI": KittiDataset,
"NUSC": NuScenesDataset,
"WAYMO": WaymoDataset
}
def get_dataset(dataset_name):
return dataset_factory[dataset_name]
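
# Example (illustrative): get_dataset("NUSC") returns the NuScenesDataset class,
# which the caller then instantiates with that dataset's own constructor arguments.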
``` |
{
"source": "jiqiujia/DeepCTR-Torch",
"score": 2
} |
#### File: DeepCTR-Torch/deepctr_torch/inputs.py
```python
from collections import OrderedDict, namedtuple
from itertools import chain
import torch
from .layers.utils import concat_fun
class SparseFeat(namedtuple('SparseFeat', ['name', 'dimension', 'use_hash', 'dtype', 'embedding_name', 'embedding'])):
__slots__ = ()
def __new__(cls, name, dimension, use_hash=False, dtype="int32", embedding_name=None, embedding=True):
if embedding and embedding_name is None:
embedding_name = name
return super(SparseFeat, cls).__new__(cls, name, dimension, use_hash, dtype, embedding_name, embedding)
class DenseFeat(namedtuple('DenseFeat', ['name', 'dimension', 'dtype'])):
__slots__ = ()
def __new__(cls, name, dimension=1, dtype="float32"):
return super(DenseFeat, cls).__new__(cls, name, dimension, dtype)
class VarLenSparseFeat(namedtuple('VarLenFeat',
['name', 'dimension', 'maxlen', 'combiner', 'use_hash', 'dtype', 'embedding_name',
'embedding'])):
__slots__ = ()
def __new__(cls, name, dimension, maxlen, combiner="mean", use_hash=False, dtype="float32", embedding_name=None,
embedding=True):
if embedding_name is None:
embedding_name = name
return super(VarLenSparseFeat, cls).__new__(cls, name, dimension, maxlen, combiner, use_hash, dtype,
embedding_name, embedding)
def get_feature_names(feature_columns):
features = build_input_features(feature_columns)
return list(features.keys())
def get_inputs_list(inputs):
return list(chain(*list(map(lambda x: x.values(), filter(lambda x: x is not None, inputs)))))
def build_input_features(feature_columns):
features = OrderedDict()
start = 0
for feat in feature_columns:
feat_name = feat.name
if feat_name in features:
continue
if isinstance(feat, SparseFeat):
features[feat_name] = (start, start + 1)
start += 1
elif isinstance(feat, DenseFeat):
features[feat_name] = (start, start + feat.dimension)
start += feat.dimension
elif isinstance(feat,VarLenSparseFeat):
features[feat_name] = (start, start + feat.maxlen)
start += feat.maxlen
else:
raise TypeError("Invalid feature column type,got",type(feat))
return features
def get_dense_input(features, feature_columns):
dense_feature_columns = list(filter(lambda x: isinstance(
x, DenseFeat), feature_columns)) if feature_columns else []
dense_input_list = []
for fc in dense_feature_columns:
dense_input_list.append(features[fc.name])
return dense_input_list
def combined_dnn_input(sparse_embedding_list, dense_value_list):
if len(sparse_embedding_list) > 0 and len(dense_value_list) > 0:
sparse_dnn_input = torch.flatten(
torch.cat(sparse_embedding_list, dim=-1), start_dim=1)
dense_dnn_input = torch.flatten(
torch.cat(dense_value_list, dim=-1), start_dim=1)
return concat_fun([sparse_dnn_input, dense_dnn_input])
elif len(sparse_embedding_list) > 0:
return torch.flatten(torch.cat(sparse_embedding_list, dim=-1), start_dim=1)
elif len(dense_value_list) > 0:
return torch.flatten(torch.cat(dense_value_list, dim=-1), start_dim=1)
else:
raise NotImplementedError
```
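To make the column-slice bookkeeping in `build_input_features` concrete, here is a small illustrative example (feature names and vocabulary sizes are invented):
```python
from deepctr_torch.inputs import SparseFeat, DenseFeat, build_input_features

columns = [
    SparseFeat("user_id", 1000),   # one input column, embedded later
    SparseFeat("item_id", 500),
    DenseFeat("price", 1),         # one raw numeric column
]
print(build_input_features(columns))
# OrderedDict([('user_id', (0, 1)), ('item_id', (1, 2)), ('price', (2, 3))])
```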
#### File: deepctr_torch/models/fibinet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from .basemodel import BaseModel
from ..inputs import combined_dnn_input, SparseFeat, DenseFeat, VarLenSparseFeat
from ..layers import SENETLayer,BilinearInteraction,DNN
class FiBiNET(BaseModel):
"""Instantiates the Feature Importance and Bilinear feature Interaction NETwork architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
:param embedding_size: positive integer,sparse feature embedding_size
:param bilinear_type: str,bilinear function type used in Bilinear Interaction Layer,can be ``'all'`` , ``'each'`` or ``'interaction'``
:param reduction_ratio: integer in [1,inf), reduction ratio used in SENET Layer
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
:param l2_reg_linear: float. L2 regularizer strength applied to wide part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:param device: str, ``"cpu"`` or ``"cuda:0"``
:return: A PyTorch model instance.
"""
def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=8, bilinear_type='interaction',
reduction_ratio=3, dnn_hidden_units=(128, 128), l2_reg_linear=1e-5,
l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation=F.relu,
task='binary', device='cpu'):
super(FiBiNET, self).__init__(linear_feature_columns, dnn_feature_columns, embedding_size=embedding_size,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
seed=seed,
dnn_dropout=dnn_dropout, dnn_activation=dnn_activation,
task=task, device=device)
self.linear_feature_columns = linear_feature_columns
self.dnn_feature_columns = dnn_feature_columns
self.filed_size = len(self.embedding_dict)
self.SE = SENETLayer(self.filed_size, reduction_ratio, seed, device)
self.Bilinear = BilinearInteraction(self.filed_size,embedding_size, bilinear_type, seed, device)
self.dnn = DNN(self.compute_input_dim(dnn_feature_columns, embedding_size), dnn_hidden_units,
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=False,
init_std=init_std, device=device)
self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
def compute_input_dim(self, feature_columns, embedding_size, include_sparse=True, include_dense=True):
sparse_feature_columns = list(
filter(lambda x: isinstance(x, (SparseFeat,VarLenSparseFeat)), feature_columns)) if len(feature_columns) else []
dense_feature_columns = list(
filter(lambda x: isinstance(x, DenseFeat), feature_columns)) if len(feature_columns) else []
field_size = len(sparse_feature_columns)
dense_input_dim = sum(map(lambda x: x.dimension, dense_feature_columns))
sparse_input_dim = field_size * (field_size - 1) * embedding_size
input_dim = 0
if include_sparse:
input_dim += sparse_input_dim
if include_dense:
input_dim += dense_input_dim
return input_dim
def forward(self, X):
sparse_embedding_list, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns,
self.embedding_dict)
sparse_embedding_input = torch.cat(sparse_embedding_list, dim=1)
senet_output = self.SE(sparse_embedding_input)
senet_bilinear_out = self.Bilinear(senet_output)
bilinear_out = self.Bilinear(sparse_embedding_input)
linear_logit = self.linear_model(X)
temp = torch.split(torch.cat((senet_bilinear_out,bilinear_out), dim = 1), 1, dim = 1)
dnn_input = combined_dnn_input(temp, dense_value_list)
dnn_output = self.dnn(dnn_input)
dnn_logit = self.dnn_linear(dnn_output)
if len(self.linear_feature_columns) > 0 and len(self.dnn_feature_columns) > 0: # linear + dnn
final_logit = linear_logit + dnn_logit
elif len(self.linear_feature_columns) == 0:
final_logit = dnn_logit
elif len(self.dnn_feature_columns) == 0:
final_logit = linear_logit
else:
raise NotImplementedError
y_pred = self.out(final_logit)
return y_pred
```
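As a rough usage sketch (assuming the repository is installed as the `deepctr_torch` package; the feature names, vocabulary sizes and hyper-parameters below are invented, and real training would normally go through the `compile`/`fit` helpers of `BaseModel` rather than a bare forward pass):
```python
import torch
from deepctr_torch.inputs import SparseFeat, DenseFeat, build_input_features
from deepctr_torch.models import FiBiNET

# three toy categorical features plus one dense feature
feature_columns = [SparseFeat('user_id', dimension=10),
                   SparseFeat('item_id', dimension=20),
                   SparseFeat('cate_id', dimension=5),
                   DenseFeat('price', dimension=1)]

model = FiBiNET(linear_feature_columns=feature_columns,
                dnn_feature_columns=feature_columns,
                embedding_size=4, dnn_hidden_units=(16, 16),
                task='binary', device='cpu')

# each row of X follows the (start, end) slices assigned by build_input_features
print(build_input_features(feature_columns))
batch = 8
X = torch.cat([torch.randint(0, 10, (batch, 1)).float(),  # user_id indices
               torch.randint(0, 20, (batch, 1)).float(),  # item_id indices
               torch.randint(0, 5, (batch, 1)).float(),   # cate_id indices
               torch.rand(batch, 1)], dim=-1)             # price values
y_pred = model(X)                  # sigmoid outputs, shape (batch, 1)
print(y_pred.shape)
```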
#### File: tests/models/ONN_test.py
```python
import pytest
from deepctr_torch.models import ONN
from ..utils import check_model, get_test_data, SAMPLE_SIZE
@pytest.mark.parametrize(
'hidden_size,sparse_feature_num',
[((8,), 2)]
)
def test_NFFM(hidden_size, sparse_feature_num):
model_name = "NFFM"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
sample_size, sparse_feature_num, sparse_feature_num, hash_flag=True)
model = ONN(feature_columns, feature_columns, embedding_size=4,
dnn_hidden_units=[32, 32], dnn_dropout=0.5)
check_model(model, model_name, x, y)
if __name__ == "__main__":
pass
``` |
{
"source": "JiQThon/Syllabus_Embedding_Network",
"score": 3
} |
#### File: Syllabus_Embedding_Network/function/AvgSimilarity.py
```python
import sys
import operator
import time
import getopt
## Regex Modules
import re
## File I/O Modules
import csv
from string import digits
import copy
import json
import math
import networkx
## Encoding UTF-8 Setting
reload(sys)
sys.setdefaultencoding('UTF8')
def cal7_recal_simil():
ids_file = open("./ids.txt", 'r')
mjs_file = open("./mjs.txt", 'r')
mat_file = open("./mat.txt", 'r')
std_file = open("./mjs_standards.txt", 'r')
ids = []
mjs = []
mat = []
std = []
for chunk in iter(lambda: ids_file.readline(), ''):
ids.append(chunk.replace("\n",""))
for chunk in iter(lambda: mjs_file.readline(), ''):
mjs.append(chunk.replace("\n","").replace("u","").replace("\'", ""))
for chunk in iter(lambda: mat_file.readline(), ''):
mat.append( [round(float(f),5) for f in chunk.replace("\n","").split(",")] )
for chunk in iter(lambda: std_file.readline(), ''):
std.append( chunk.replace("\n","") )
print "read"
print std
out_within = open("./recal_simil_within.txt", 'w')
out_across = open("./recal_simil_across.txt", 'w')
for i in range(0, len(ids)):
for j in range(i+1, len(ids)):
if (str(mjs[i]) in std) and (str(mjs[j]) in std) and (mjs[i] == mjs[j]):
out_within.write( repr(mat[i][j])+"\n")
elif (str(mjs[i]) in std) and (str(mjs[j]) in std) and (mjs[i] != mjs[j]):
out_across.write( repr(mat[i][j])+"\n")
#out.write(repr(k) + "," + ",".join(str(x) for x in ratios) + "\n")
#out2.write(repr(k) + "," + ",".join(str(x) for x in coeffs) + "\n")
def main(argv=None):
if argv is None:
argv = sys.argv
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
"""
## give documentation
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
"""
# process arguments
cal7_recal_simil()
if __name__ == "__main__":
main()
``` |
{
"source": "jir322/sea_cli",
"score": 2
} |
#### File: sea_cli/sea_cli/sea.py
```python
__author__ = 'mnowotka'
import os
import sys
import re
import hashlib
from optparse import OptionParser
import requests
import BeautifulSoup
#-------------------------------------------------------------------------------
PAGE_REGEX = re.compile(r"<<\s?<\s?(?P<current_page>\d+) of (?P<total_pages>\d+)\s?>\s?>>")
REQUESTS_HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:30.0) Gecko/20100101 Firefox/30.0',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Connection': 'keep-alive',
'Host': 'sea.bkslab.org',
}
#-------------------------------------------------------------------------------
def meta_redirect(content):
soup = BeautifulSoup.BeautifulSoup(content)
result = soup.meta['content']
if result:
wait, text = result.split(";")
if text.lower().startswith("url="):
url = text[4:]
return url
return None
#-------------------------------------------------------------------------------
def get_similarity(smitxt, descriptor, reference, orderby, sort):
s = requests.session()
s.headers.update(REQUESTS_HEADERS)
payload = {'descriptor': descriptor, 'reference': reference,
'smitxt': smitxt}
url = 'http://sea.bkslab.org/search/index.php'
r = s.get(url, allow_redirects=True)
if r.ok:
redirected = r.url
else:
redirected = url
s.headers.update({'Referer': redirected,
'Content-Type': 'application/x-www-form-urlencoded'})
res = s.post(url, data=payload, allow_redirects=True)
if not res.ok:
print "Error searching similarity, url = %s" % url
print "payload: %s" % payload
print "Error code: %s" % res.status_code
print "Message: %s" % res.text
return
table_url = meta_redirect(res.content)
if not table_url:
print "no url for table..."
return
res = s.get(table_url)
soup = BeautifulSoup.BeautifulSoup(res.content)
m = PAGE_REGEX.search(soup.text)
if not m:
print "can't find page number in %s" % soup.text
return
total_pages = int(m.groupdict().get('total_pages', '1'))
return scrape_table(table_url, total_pages, s, orderby, sort)
#-------------------------------------------------------------------------------
def scrape_table(table_url, total_pages, session, orderby, sort):
table = []
for i in range(total_pages):
payload = {'page': i, 'orderby': orderby, 'sort': sort}
r = session.get(table_url, params=payload)
if not r.ok:
print "Error retrieving page %s, base url = %s" % (i, table_url)
print "payload: %s" % payload
print "Error code: %s" % r.status_code
print "Message: %s" % r.text
continue
soup = BeautifulSoup.BeautifulSoup(r.content)
tab = soup.find("table", {"class": "main"})
for row in tab.findAll('tr')[1:]:
col = row.findAll('td')
if len(col) >= 7:
num = col[1].text
code = col[2].text
num_ligands = col[3].text
ref_name = col[4].text
e_value = col[5].text
max_tc = col[6].text
table.append((num, code, num_ligands, ref_name, e_value, max_tc))
return table
#-------------------------------------------------------------------------------
def get_file_descriptor(path, mode):
filename, file_extension = os.path.splitext(path)
if file_extension == '.gz':
import gzip
return gzip.open(path, mode + 'b' if mode in ('r', 'w') else mode)
elif file_extension == '.bz2':
import bz2
return bz2.BZ2File(path, mode + 'b' if mode in ('r', 'w') else mode)
elif file_extension == '.zip':
import zipfile
        return zipfile.ZipFile(path, mode)  # ZipFile only accepts plain 'r'/'w'/'a'/'x' modes, no 'b' suffix
else:
return open(path, mode + 'U' if mode == 'r' else mode)
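# Illustrative behaviour (file names are assumptions): the opener is chosen purely by extension,
# e.g. get_file_descriptor('ligands.smi.gz', 'r') uses gzip.open(..., 'rb'),
# get_file_descriptor('ligands.smi', 'r') uses open(..., 'rU'),
# and '.zip' inputs come back wrapped in a ZipFile object rather than a plain file handle.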
#-------------------------------------------------------------------------------
def write_table(smiles, table, o):
if o:
o.write('\n%s\n\n' % smiles)
else:
print '\n%s\n' % smiles
for row in table:
if o:
o.write('\t'.join(row) + '\n')
else:
print '\t'.join(row)
if o:
o.write('\n####\n')
else:
print '\n####'
#-------------------------------------------------------------------------------
def get_smiles_id_pair(smiles):
m = hashlib.md5()
m.update(smiles)
return "%s %s\n" % (smiles, m.hexdigest())
#-------------------------------------------------------------------------------
def main():
usage = "usage: %prog [options] SMILES"
parser = OptionParser(usage)
parser.add_option("-d", "--descriptor", dest="descriptor",
default='ecfp4', help="molecular descriptor")
parser.add_option("-r", "--reference",
default='chembl16', dest="reference",
help="Database to search against")
parser.add_option("-i", "--input", default=None,
dest="input", help="Input file with smiles")
parser.add_option("-o", "--output", default=None,
dest="output", help="Output file")
parser.add_option("-b", "--order_by", default='zscore',
dest="order_by", help="Column to order by")
parser.add_option("-s", "--sort", default='desc',
dest="sort", help="Sorting order (asc/desc)")
(options, args) = parser.parse_args()
if options.input:
if not os.path.isfile(options.input):
print "%s is not a file" % options.input
sys.exit(1)
i = get_file_descriptor(options.input, 'r')
else:
i = None
if options.output:
o = get_file_descriptor(options.output, 'w')
else:
o = None
descriptor = options.descriptor
reference = options.reference
orderby = options.order_by
sort = options.sort
if i:
smitxt = ''
for line in i:
if line.strip():
chunk = line.strip().split()[0]
smitxt += get_smiles_id_pair(chunk)
else:
table = get_similarity(smitxt, descriptor, reference, orderby, sort)
write_table(chunk, table, o)
smitxt = ''
if smitxt:
table = get_similarity(smitxt, descriptor, reference, orderby, sort)
write_table(chunk, table, o)
elif len(args) == 1:
smitxt = get_smiles_id_pair(args[0])
table = get_similarity(smitxt, descriptor, reference, orderby, sort)
write_table(args[0], table, o)
else:
parser.print_help()
sys.exit(1)
if i:
i.close()
if o:
o.close()
#-------------------------------------------------------------------------------
if __name__ == "__main__":
main()
#-------------------------------------------------------------------------------
``` |
{
"source": "Jirapongs55/PythonBootcamp2021",
"score": 4
} |
#### File: Jirapongs55/PythonBootcamp2021/01_TestGUItranslator.py
```python
from tkinter import *
from tkinter import ttk
# Googletrans Library
from googletrans import Translator
translator = Translator()
GUI = Tk()
GUI.geometry("500x300")
GUI.title("โปรแกรมแปลคำศัพท์ by K'Jirapong")
# ------Config-------
font_1 = ("Century Gothic", 12)
font_2 = ("Century Gothic", 24)
# ------Label 1------
L1 = ttk.Label(GUI, text = "กรุณากรอกคำศัพท์ที่ต้องการ", font = font_1)
L1.pack()
# ------Entry (box where the user types the word to translate)------
v_vocab = StringVar()
E1 = ttk.Entry(GUI, textvariable = v_vocab, font = font_1, width = 24)
E1.pack(pady = 10)
def translate():
    vocab = v_vocab.get() # read the word the user typed into the entry box
    meaning = translator.translate(vocab, dest = "th") # translate it with googletrans
    v_result.set(meaning.text) # put the translated text into the variable shown in Label 2
# ------Button (press to run the translation)------
B1 = ttk.Button(GUI, text = "Translate", command = translate)
B1.pack(ipadx = 20, ipady = 10)
# ------Label 2------
v_result = StringVar()
L2 = ttk.Label(GUI, textvariable = v_result, font = font_2, foreground = "crimson" )
L2.pack()
GUI.mainloop() # keeps the GUI event loop running (used as the final line)
```
#### File: Jirapongs55/PythonBootcamp2021/02_GUIWiki.py
```python
import wikipedia
# python to docx
from docx import Document
def wiki(keyword, lang = "th"):
wikipedia.set_lang(lang)
    # summary: fetch only the article summary
    data = wikipedia.summary(keyword)
    # page + content: fetch the full article content
    data2 = wikipedia.page(keyword)
    data2 = data2.content
    # write the data into a docx file
    docx = Document() # create the Word document
docx.add_heading(keyword,0)
docx.add_paragraph(data2)
docx.save(keyword + ".docx")
print("Completed generating",keyword,"file")
# change the language
wikipedia.set_lang("th")
# GUI
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
GUI = Tk()
GUI.title("โปรแกรม wikipedia by K'Jirapong")
GUI.geometry("600x300")
# Config
font_1 = ("TH Sarabun New",16)
# description label
L1 = ttk.Label(GUI, text = "กรุณากรอกสิ่งที่ต้องการค้นหา", font = font_1)
L1.pack()
# search input box
v_search = StringVar() # holds the search keyword
E1 = ttk.Entry(GUI, textvariable = v_search, width = 42, font = font_1)
E1.pack(pady = 10)
def search():
    keyword = v_search.get() # .get() pulls the value the user typed from the StringVar
    try: # try the search; if it succeeds, fall through to the else branch
        lang = v_radio.get() # th/en/zh
        wiki(keyword, lang)
    except: # if it fails or anything goes wrong, warn the user with a message box
        messagebox.showwarning("Alert message", "please try keyword again")
    else: # on success, show a message box saying the file has been generated
messagebox.showinfo("Completed message", "Completed generating the " + keyword + " file")
'''
print(wikipedia.search(keyword))
result = wikipedia.summary(keyword)
print(result)
'''
# search button
B1 = ttk.Button(GUI, text = "Search", command = search)
B1.pack(ipadx = 20, ipady = 10, pady =10)
# language selection menu
F1 = Frame(GUI)
F1.pack()
v_radio = StringVar() # stores the selected language code
RB1 = ttk.Radiobutton(F1, text = "ภาษาไทย", variable = v_radio, value = "th")
RB2 = ttk.Radiobutton(F1, text = "ภาษาอังกฤษ", variable = v_radio, value = "en")
RB3 = ttk.Radiobutton(F1, text = "ภาษาจีน", variable = v_radio, value = "zh")
RB1.invoke()
RB1.grid(row = 0, column = 0)
RB2.grid(row = 0, column = 1)
RB3.grid(row = 0, column = 2)
GUI.mainloop()
``` |
{
"source": "jiratmanasuksanti/Funkinv.0.01",
"score": 3
} |
#### File: jiratmanasuksanti/Funkinv.0.01/Funkin.py
```python
import os,sys
import pygame
import math
import time
#################
pygame.init()
Width = 800
Height = 600
screen = pygame.display.set_mode((Width,Height), pygame.FULLSCREEN)
mainloop = True
current_selected_menu = 1
############################Game Object Storage##############################
bopeebo_inst = pygame.mixer.music.load("./assets/music/Bopeebo_Inst.ogg")
bopeebo_voices = pygame.mixer.music.load("./assets/music/Bopeebo_Voices.ogg")
exitbutton_image = pygame.image.load("assets/images/exit_button.png")
pressed_exitbutton_image = pygame.image.load("assets/images/pressed_exit_button.png")
menu_music = pygame.mixer.music.load("./assets/music/freakyMenu.ogg")
confirmMenu_sounds = pygame.mixer.Sound("./assets/sounds/confirmMenu.ogg")
maximumMenu = 1
minimumMenu = 1
menu_music_play = False  # whether the menu music is currently playing
################Main Game Loop#################
def create_button(name):
    if name == "exit":
        screen.blit(exitbutton_image,(Width/2,Height/2))
def confirmMenu():
confirmMenu_sounds.play()
def BacktoMenu():
    global menu_music_play
    if menu_music_play == False:
        menu_music_play = True
        pygame.mixer.music.stop()
        pygame.mixer.music.play()  # the mixer holds the last loaded track (freakyMenu.ogg)
def press_Exit():
    global menu_music_play
    for counter in range(1,10):
screen.blit(pressed_exitbutton_image,(Width/2,Height/2))
screen.blit(exitbutton_image,(Width/2,Height/2))
pygame.display.update()
menu_music_play = False
BacktoMenu()
while mainloop:
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_DOWN:
if not maximumMenu == current_selected_menu:
current_selected_menu = current_selected_menu + 1
if event.key == pygame.K_UP:
if not minimumMenu == current_selected_menu:
current_selected_menu = current_selected_menu - 1
            if event.key == pygame.K_RETURN:  # pygame names the Enter key K_RETURN
if current_selected_menu == 1:
confirmMenu()
press_Exit()
exit()
create_button("exit")
pygame.display.update()
``` |
{
"source": "jiratmanasuksanti/Land-Scape",
"score": 3
} |
#### File: jiratmanasuksanti/Land-Scape/menu_data.py
```python
import pygame
start_button_image = pygame.image.load("lib/assets/images/start_button.png")
start_buttonx = 680
start_buttony = 400
def show_menu(screen):
    # the display surface is created by the main game module and passed in here
    screen.blit(start_button_image,(start_buttonx,start_buttony))
``` |
{
"source": "jiratQ/discord-bot",
"score": 3
} |
#### File: src/utils/utils.py
```python
from difflib import SequenceMatcher
commands = ['$hello', '$poker', '$voice', '$disconnect']
async def guess_command(client, message):
similar_commands = []
    if message.content.startswith('$') and (message.content.split()[0] not in commands):
        print(f'{message.author}: {message.content}')
        for command in commands:
            similar_ratio = SequenceMatcher(
                None, message.content, command).ratio()
            if similar_ratio >= 0.6:
                similar_commands.append([command, similar_ratio])
        similar_commands = sorted(
            similar_commands, key=lambda l: l[1], reverse=True)
        if similar_commands:  # avoid an IndexError when nothing is close enough
            # the reply asks "did you mean to type <command>?"
            await message.channel.send(f'คุณกำลังจะพิมพ์ {similar_commands[0][0]} หรือเปล่า')
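# Rough illustration of the 0.6 cutoff (ratios are approximate):
#   SequenceMatcher(None, '$helo', '$hello').ratio()  -> ~0.91  (suggested)
#   SequenceMatcher(None, '$pkr', '$poker').ratio()   -> ~0.8   (suggested)
#   SequenceMatcher(None, '$abc', '$voice').ratio()   -> ~0.4   (ignored)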
``` |
{
"source": "Jireh012/SignInSetForPython",
"score": 2
} |
#### File: Jireh012/SignInSetForPython/run.py
```python
import os
import sys
import time
import traceback
try:
from utils.config import load_config
from utils.log import get_logger
from utils.ftqq import send_to_ftqq
from utils.email import send_to_email
from utils import cliwait
from utils.version import check_script_update
from utils.version import SCRIPT_VERSION
from signin.WuAiPoJie import variable_52pj
from signin.WuAiPoJie import conventional_52pj
from signin.HouQiJun import variable_hqj
from signin.HouQiJun import conventional_hqj
from signin.WuAiProgrammer import variable_52cxy
from signin.WuAiProgrammer import conventional_52cxy
from signin.Smzdm import variable_smzdm
from signin.Smzdm import conventional_smzdm
except ImportError as e:
print(e)
print('导入模块出错,请执行 pip install -r requirements.txt 安装所需的依赖库')
cliwait()
exit()
logger = get_logger('Run')
def conventional():
pojie52 = CFG['52pojie']
houqijun = CFG['houqijun']
wuaicxy = CFG['52cxy']
smzdm = CFG['smzdm']
mcfg = CFG['main']
ftqq = CFG['ftqq']
    # run the sign-in routines for each site
conventional_52pj(pojie52, ftqq['skey'])
conventional_hqj(houqijun, ftqq['skey'])
conventional_52cxy(wuaicxy, ftqq['skey'])
conventional_smzdm(smzdm, ftqq['skey'])
data = []
logger.info(f'脚本版本:[{SCRIPT_VERSION}]')
data.append(f'#### {"=" * 30}\n'
f'#### 脚本版本:[{SCRIPT_VERSION}]')
end_time = time.time()
logger.info(f'脚本耗时:[{round(end_time - start_time, 4)}]s')
data.append(f'#### 任务耗时:[{round(end_time - start_time, 4)}]s')
message = '\n'.join(data)
title = '自动签到脚本'
if mcfg['check_update']:
logger.info('检查脚本更新……')
result = check_script_update()
if result:
latest_version, detail, download_url = result
logger.info(f'-->脚本有更新<--'
f'最新版本[{latest_version}]'
f'更新内容[{detail}]'
f'下载地址[{download_url}]')
data.append('')
            data.append(f'### 脚本有更新\n'
f'#### 最新版本[{latest_version}]\n'
f'#### 下载地址:[GitHub]({download_url})\n'
f'#### 更新内容\n'
f'{detail}\n'
f'> 如果碰到问题欢迎联系QQ**814046228**')
title += '【有更新】'
else:
logger.info(f'脚本已是最新,当前版本{SCRIPT_VERSION}')
else:
logger.info(f'检查脚本更新已禁用,当前版本{SCRIPT_VERSION}')
    logger.info('推送统计信息……')
    message = '\n'.join(data)  # re-join so anything appended after the first join (e.g. the update notice) is pushed too
    message_push(title, message, True)
logger.info('脚本执行完毕')
def variable():
SCKEY = os.environ.get('SCKEY')
mcfg = CFG['main']
data = []
cookie_52pj = os.environ.get('cookie_52pj')
username_houqijun = os.environ.get('username_houqijun')
    password_houqijun = os.environ.get('password_houqijun')
cookie_52cxy = os.environ.get('cookie_52cxy')
cookie_smzdm = os.environ.get('cookie_smzdm')
    # run the sign-in routines for each site
variable_52pj(cookie_52pj, SCKEY)
variable_hqj(username_houqijun, password_houqijun, SCKEY)
variable_52cxy(cookie_52cxy, SCKEY)
variable_smzdm(cookie_smzdm, SCKEY)
end_time = time.time()
logger.info(f'脚本耗时:[{round(end_time - start_time, 4)}]s')
data.append(f'#### 任务耗时:[{round(end_time - start_time, 4)}]s')
message = '\n'.join(data)
title = '自动签到脚本'
if mcfg['check_update']:
logger.info('检查脚本更新……')
result = check_script_update()
if result:
latest_version, detail, download_url = result
logger.info(f'-->脚本有更新<--'
f'最新版本[{latest_version}]'
f'更新内容[{detail}]'
f'下载地址[{download_url}]')
data.append('')
            data.append(f'### 脚本有更新\n'
f'#### 最新版本[{latest_version}]\n'
f'#### 下载地址:[GitHub]({download_url})\n'
f'#### 更新内容\n'
f'{detail}\n'
f'> 如果碰到问题欢迎加QQ**814046228**')
title += '【有更新】'
else:
logger.info(f'脚本已是最新,当前版本{SCRIPT_VERSION}')
else:
        logger.info(f'检查脚本更新已禁用,当前版本{SCRIPT_VERSION}')
    logger.info('推送统计信息……')
    message = '\n'.join(data)  # re-join so the update notice appended above is included in the push
    message_push(title, message, True)
logger.info('脚本执行完毕')
def message_push(title: str, message: str, error: bool = False):
'''
    Push a notification via ServerChan (ftqq) and/or email, honouring the only_on_error flags.
'''
ftqq = CFG['ftqq']
email = CFG['email']
if ftqq['enable']:
if (ftqq['only_on_error'] == True and error) or (ftqq['only_on_error'] == False):
result = send_to_ftqq(title, message, ftqq)
if result:
logger.info('FTQQ推送成功')
else:
logger.warning('[*] FTQQ推送失败')
if email['enable']:
if (email['only_on_error'] == True and error) or (email['only_on_error'] == False):
result = send_to_email(title, message, email)
if result:
logger.info('邮件推送成功')
else:
logger.warning('[*] 邮件推送失败')
if __name__ == '__main__':
run_type = sys.argv[1]
start_time = time.time()
if "1" in run_type:
try:
logger.info('载入配置文件')
CFG = load_config()
except FileNotFoundError:
logger.error('[*] 配置文件[config.toml]不存在,请参考[README.md]生成配置')
cliwait()
except ValueError:
logger.error('[*] 尚未配置有效的账户凭据,请添加到[config.toml]中')
cliwait()
except Exception as e:
logger.error(f'[*] 载入配置文件出错,请检查[config.toml] [{e}]')
cliwait()
exit()
try:
conventional()
except KeyboardInterrupt:
logger.info('[*] 手动终止运行')
cliwait()
except Exception as e:
logger.error(f'遇到未知错误 [{e}]', exc_info=True)
title = '脚本执行遇到未知错误'
message = (f'#### 脚本版本:[{SCRIPT_VERSION}]\n'
f'#### 系统信息:[{os.name}]\n'
f'#### Python版本: [{sys.version}]\n'
f'#### {"=" * 30}\n'
f'#### 错误信息: {traceback.format_exc()}\n'
f'#### {"=" * 30}\n'
'#### 联系信息:\n'
'* QQ: 814046228\n'
'* TG群: https://t.me/joinchat/HTtNrSJLz7s2A-0N\n'
'* 邮箱: <EMAIL>\n'
'> 如果需要帮助请附带上错误信息')
message_push(title, message, True)
cliwait()
elif "2" in run_type:
try:
variable()
except KeyboardInterrupt:
logger.info('[*] 手动终止运行')
cliwait()
except Exception as e:
logger.error(f'遇到未知错误 [{e}]', exc_info=True)
title = '脚本执行遇到未知错误'
message = (f'#### 脚本版本:[{SCRIPT_VERSION}]\n'
f'#### 系统信息:[{os.name}]\n'
f'#### Python版本: [{sys.version}]\n'
f'#### {"=" * 30}\n'
f'#### 错误信息: {traceback.format_exc()}\n'
f'#### {"=" * 30}\n'
'#### 联系信息:\n'
'* QQ: 814046228\n'
'* TG群: https://t.me/joinchat/HTtNrSJLz7s2A-0N\n'
'* 邮箱: <EMAIL>\n'
'> 如果需要帮助请附带上错误信息')
message_push(title, message, True)
cliwait()
else:
print('该启动方式不存在:', run_type)
```
#### File: SignInSetForPython/signin/HouQiJun.py
```python
import requests, os
import json
import random
import string
class randoms():
    # all 52 upper- and lower-case ASCII letters
    letters = string.ascii_letters
    # the 26 lower-case letters
    Lowercase_letters = string.ascii_lowercase
    # the 26 upper-case letters
    Capital = string.ascii_uppercase
    # the decimal digits 0-9
    digits = string.digits
def sign_in_hqj(username: str, password: str, SCKEY: str):
msg = ""
try:
s = requests.Session()
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4298.4 Safari/537.36',
'Cookie': 'PHPSESSID=' + code(),
'ContentType': 'text/html;charset=gbk',
'DNT': '1'
}
url = "http://www.houqijun.vip/Passport_runLogin.html"
loginData = {
"from": "http://www.houqijun.vip/",
"username": username,
"password": password,
}
login = s.post(url=url, data=loginData, headers=headers)
msg += "登录成功!,"
a = s.get('http://www.houqijun.vip/Center_runQiandao.html',
headers=headers)
data = json.loads(a.text)
if data['code'] == 1:
msg += username + " 后期菌签到成功!,"
print(username + " 后期菌签到成功")
elif data['code'] == 0:
msg += username + " 后期菌重复签到!,"
print(username + " 后期菌重复签到")
else:
if SCKEY:
scurl = f"https://sc.ftqq.com/{SCKEY}.send"
data = {
"text": username + " 后期菌签到异常",
"desp": data
}
requests.post(scurl, data=data)
print(data)
except Exception as e:
print('repr(e):', repr(e))
msg += '运行出错,repr(e):' + repr(e)
return msg + "\n"
def code():
    # s is the pool of lower-case letters and digits
    s = randoms.Lowercase_letters + randoms.digits
    # sample 28 characters from the pool and join the list into a string
    code = ''.join(random.sample(s, 28))
return code
def variable_hqj(usernames, passwords, SCKEY):
msg = ""
ulist = usernames.split("\n")
plist = passwords.split("\n")
if len(ulist) == len(plist):
print("----------后期菌开始尝试签到----------")
i = 0
while i < len(ulist):
msg += f"第 {i + 1} 个账号开始执行任务\n"
username = ulist[i]
password = plist[i]
msg += sign_in_hqj(username, password, SCKEY)
i += 1
else:
msg = "账号密码个数不相符"
print(msg)
print("----------后期菌签到执行完毕----------")
def conventional_hqj(houqijun: dict, SCKEY):
if houqijun:
print("----------后期菌开始尝试签到----------")
msg = ""
for i, item in enumerate(houqijun, 1):
msg += f"第 {i + 1} 个账号开始执行任务\n"
msg += sign_in_hqj(item['username'], item['password'], SCKEY)
print("----------后期菌签到执行完毕----------")
```
#### File: SignInSetForPython/utils/config.py
```python
import os
import toml
import chardet
from utils.log import get_logger, init_logger
logger = get_logger('Setting')
SCRIPT_PATH = f'{os.path.split(os.path.realpath(__file__))[0][:-5]}'
DEFAULT_PATH = f'{SCRIPT_PATH}config.toml'
CFG = {}
def get_script_path() -> str:
'''
    Get the directory the script lives in.
    Returns:
        str: script directory
'''
return (SCRIPT_PATH)
def get_config(key: str) -> dict:
'''
    Get a single configuration section.
    Args:
        key: name of the setting to fetch
    Returns:
        dict: configuration dictionary
'''
return (CFG.get(key))
def get_all_config() -> dict:
'''
    Get the full configuration.
    Returns:
        dict: configuration dictionary
'''
return (CFG)
def load_config(path: str = DEFAULT_PATH) -> dict:
'''
    Read and validate the configuration.
    Args:
        [path]: config file path, defaults to config.toml
    Returns:
        dict: validated configuration dictionary
'''
global CFG
try:
logger.debug('开始读取配置')
with open(path, 'rb') as f:
content = f.read()
detect = chardet.detect(content)
            encode = detect.get('encoding', 'utf-8')  # chardet reports the charset under the 'encoding' key
raw_cfg = dict(toml.loads(content.decode(encode)))
CFG = verify_config(raw_cfg)
debug = os.environ.get('mode', 'release').lower()
level = 0 if debug == 'debug' else 20
init_logger(level)
logger.debug('配置验证通过')
return (CFG)
except FileNotFoundError:
logger.error(f'[*] 配置文件[{path}]不存在')
raise FileNotFoundError(f'[*] 配置文件[{path}]不存在')
except ValueError as e:
logger.error(f'[*] 配置文件验证失败 [{e}]', exc_info=True)
def verify_config(cfg: dict) -> dict:
'''
    Validate the configuration.
    Args:
        cfg: configuration dictionary
    Returns:
        dict: validated configuration, with invalid and unneeded items removed
'''
vcfg = {'main': {'check_update': False, 'debug': False},
'ftqq': {'enable': False, 'skey': '', 'only_on_error': False},
'email': {'port': 465, 'server': '', 'password': '', 'user': '',
'recvaddr': '', 'sendaddr': '', 'only_on_error': False},
'52pojie': [],
'houqijun': [],
'52cxy': [],
'smzdm': [],
}
wuaipojie = cfg.get('52pojie', {})
houqijun = cfg.get('houqijun', {})
wuaicxy = cfg.get('52cxy', {})
smzdm = cfg.get('smzdm', {})
vcfg['52pojie'] = wuaipojie
vcfg['houqijun'] = houqijun
vcfg['52cxy'] = wuaicxy
vcfg['smzdm'] = smzdm
main = cfg.get('main', {})
if main and isinstance(main, dict):
debug = bool(main.get('debug', False))
check_update = bool(main.get('check_update', True))
vcfg['main'] = {'check_update': check_update, 'debug': debug}
else:
logger.debug('[main]节配置有误或者未配置,将使用默认配置')
ftqq = cfg.get('ftqq', {})
if ftqq and isinstance(ftqq, dict):
enable = bool(ftqq.get('enable', False))
skey = ftqq.get('skey', "")
only_on_error = bool(ftqq.get('only_on_error', False))
if enable and not skey:
raise ValueError('开启了FTQQ模块,但是未指定SKEY,请检查配置文件')
vcfg['ftqq'] = {'enable': enable, 'skey': skey,
'only_on_error': only_on_error}
else:
logger.debug('[ftqq]节配置有误或者未配置,将使用默认配置')
email = cfg.get('email', {})
if email and isinstance(email, dict):
enable = bool(email.get('enable', False))
try:
port = int(email.get('port', 0))
except ValueError:
port = 465
logger.warning('[*] [email]节port必须为数字')
server = email.get('server', '')
password = email.get('password', '')
user = email.get('user', '')
recvaddr = email.get('recvaddr', '')
sendaddr = email.get('sendaddr', '')
only_on_error = bool(email.get('only_on_error', False))
if enable and not (port and server
and password and user and recvaddr and sendaddr):
raise ValueError('开启了email模块,但是配置不完整,请检查配置文件')
vcfg['email'] = {'enable': enable, 'port': port, 'server': server,
'password': password, 'user': user,
'recvaddr': recvaddr, 'sendaddr': sendaddr,
'only_on_error': only_on_error}
else:
logger.debug('[email]节配置有误或者未配置,将使用默认配置')
return (vcfg)
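# Illustrative shape of a config.toml accepted by verify_config (all values are placeholders):
#   [main]
#   check_update = true
#   debug = false
#   [ftqq]
#   enable = true
#   skey = "your-serverchan-key"
#   only_on_error = false
#   [[houqijun]]
#   username = "user"
#   password = "pass"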
``` |
{
"source": "jirehbak/subway-place",
"score": 3
} |
#### File: subway-place/appointment/appointment.py
```python
import pandas as pd
import numpy as np
import re
#%%
class basic():
def __init__(self, df):
self.df = df
    #% subway route helper methods
def intersect(self, a, b):
return list(set(a) & set(b))
def union(self, a, b):
return list(set(a) | set(b))
def a_not_in_b(self, a, b):
for i in self.intersect(a , b):
a.remove(i)
def transfer(self, code):
if type(code)==list:
pass
else:
code = [code]
trans_list = []
for i in code:
nm = self.df[self.df['station_cd']== i]['station_nm'].unique()[0]
trns = self.df[self.df['station_nm']== nm]['station_cd'].unique().tolist()
if len(trns) > 1:
for j in trns:
if i != j:
trans_list.append([i, j])
else:
pass
else:
pass
return trans_list
def code_to_name(self, code):
if type(code)==list:
pass
else:
code = [code]
return self.df[self.df['station_cd'].isin(code)]['station_nm'].unique().tolist()
def extract_station(self, text):
# stn = re.compile('\w+')
stn = re.compile('[ㄱ-ㅎ가-힣]+')
st1 = stn.findall(text)[0]
st2 = stn.findall(text)[1]
return st1, st2
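    # Illustrative behaviour: the regex keeps only runs of Hangul, so a text such as
    # extract_station("합정 삼성") returns ('합정', '삼성'); only the first two words found are used.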
class route_between_sub: # df = sub_dist; start = '합정'; destination = '삼성'
def __init__(self, df, sub_index, start, destination):
self.df = df
self.sub_index = sub_index
self.start = start
self.destination = destination
self.start_cd_list = self.df[self.df['statnfnm'] == self.start]['statnf_cd'].unique()
self.dest_cd_list = self.df[self.df['statntnm'] == self.destination]['statnt_cd'].unique().tolist()
self.start_line_list = self.df[self.df['statnfnm'] == self.start]['statnf_line'].unique()
self.dest_line_list = self.df[self.df['statntnm'] == self.destination]['statnt_line'].unique()
self.step = 0
self.route_list = []
self.basic = basic(self.df)
def common_lines(self):
return self.basic.intersect(self.start_line_list, self.dest_line_list)
def union_lines(self):
return self.basic.union(self.start_line_list, self.dest_line_list)
def next_stations(self, sub_list):
couple = self.df[self.df['statnf_cd'].isin(sub_list)][['statnf_cd', 'statnt_cd']]
couple.columns = [self.step, self.step+1]
return couple
def expand_route(self, route_df, sub_list):
self.step += 1
route_df = pd.merge(route_df, self.next_stations(sub_list),
how = 'left', on = self.step)
route_df = route_df.dropna()
sub_list = self.df[self.df['statnf_cd'].isin(sub_list)]['statnt_cd'].unique().tolist()
        # remove duplicates (avoid revisiting the previous stations)
for ss in range(2):
self.basic.a_not_in_b(sub_list, route_df[self.step - ss])
return route_df, sub_list
def extract_code_with_samelines(self, code_list,line_list):
stations = self.sub_index[self.sub_index['station_cd'].isin(code_list)]
stations_in_lines = stations[stations['line_num'].isin(line_list)]
return stations_in_lines['station_cd'].tolist()
def expand_route_to_commonline(self, route_df, sub_list):
self.step += 1
next_stnt = self.next_stations(sub_list)
next_stations_in_lines = self.extract_code_with_samelines(next_stnt[self.step+1], self.common_lines())
couple_in_lines = next_stnt[next_stnt[self.step+1].isin(next_stations_in_lines)]
route_df = pd.merge(route_df, couple_in_lines,
how = 'left', on = self.step)
route_df = route_df.dropna()
sub_list = self.df[self.df['statnf_cd'].isin(sub_list)]['statnt_cd'].unique().tolist()
# 중복 제거
for ss in range(2):
self.basic.a_not_in_b(sub_list, route_df[self.step - ss])
return route_df, sub_list
def expand_route_to_union_line(self, route_df, sub_list):
self.step += 1
next_stnt = self.next_stations(sub_list)
next_stations_in_lines = self.extract_code_with_samelines(next_stnt[self.step+1], self.union_lines())
couple_in_lines = next_stnt[next_stnt[self.step+1].isin(next_stations_in_lines)]
route_df = pd.merge(route_df, couple_in_lines,
how = 'left', on = self.step)
route_df = route_df.dropna()
sub_list = self.df[self.df['statnf_cd'].isin(sub_list)]['statnt_cd'].unique().tolist()
# 중복 제거
for ss in range(2):
self.basic.a_not_in_b(sub_list, route_df[self.step - ss])
return route_df, sub_list
def extract_route_for_destination(self, route_df):
        # keep only the routes that reach the destination
route_df = route_df.reset_index(drop = True)
route_a = route_df[route_df[self.step+1].isin(self.dest_cd_list)]
for jj in route_a.index:
route_a = route_df.loc[jj].tolist()
self.route_list.append(route_a)
def routes_to_destination_without_transfer(self, route_df, sub_list):
self.step = 0
t = 0
if len(self.common_lines()) > 0:
while len(self.route_list) <= 1:
if t >= 1:
break
else:
if len(self.route_list) == 0:
                        while len(self.basic.intersect(route_df[self.step+1], self.dest_cd_list)) == 0: # check whether the final destination has been reached
route_df, sub_list = self.expand_route_to_commonline(route_df, sub_list)
if self.step > 70:
t+= 1
break
if t == 0:
                            # keep only the routes that reach the destination
self.extract_route_for_destination(route_df)
else:
pass
else:
route_df, sub_list = self.expand_route_to_commonline(route_df, sub_list)
while len(self.basic.intersect(route_df[self.step+1], self.dest_cd_list)) == 0:
route_df, sub_list = self.expand_route_to_commonline(route_df, sub_list)
if self.step > 70:
t+= 1
break
if t == 0:
                        # keep only the routes that reach the destination
self.extract_route_for_destination(route_df)
else:
pass
else:
pass
def routes_to_destination_with_1_transfer(self, route_df, sub_list):
self.step = 0
t = 0
while len(self.route_list) <= 1:
if t >= 1:
break
else:
if len(self.route_list) == 0:
                    while len(self.basic.intersect(route_df[self.step+1], self.dest_cd_list)) == 0: # check whether the final destination has been reached
route_df, sub_list = self.expand_route_to_union_line(route_df, sub_list)
if self.step > 70:
t += 1
break
                    # keep only the routes that reach the destination
if t == 0:
self.extract_route_for_destination(route_df)
else:
pass
else:
route_df, sub_list = self.expand_route_to_union_line(route_df, sub_list)
while len(self.basic.intersect(route_df[self.step+1], self.dest_cd_list)) == 0:
route_df, sub_list = self.expand_route_to_union_line(route_df, sub_list)
if self.step > 70:
t += 1
break
                    # keep only the routes that reach the destination
if t == 0:
self.extract_route_for_destination(route_df)
else:
pass
def routes_to_destination(self, route_df, sub_list):
self.step = 0
t = 0
while len(self.route_list) <= 5:
if t>= 1:
break
else:
if len(self.route_list) == 0:
                    while len(self.basic.intersect(route_df[self.step+1], self.dest_cd_list)) == 0: # check whether the final destination has been reached
route_df, sub_list = self.expand_route(route_df, sub_list)
if self.step > 50:
t+=1
break
if t == 0:
                            # keep only the routes that reach the destination
self.extract_route_for_destination(route_df)
else:
pass
else:
route_df, sub_list = self.expand_route(route_df, sub_list)
while len(self.basic.intersect(route_df[self.step+1], self.dest_cd_list)) == 0:
route_df, sub_list = self.expand_route(route_df, sub_list)
if self.step > 50:
t += 1
break
if t== 0:
                        # keep only the routes that reach the destination
self.extract_route_for_destination(route_df)
else:
pass
#start = '삼성'; destination = '성수'
def calculate_route_between_sub(df, sub_index, start, destination):
sub_dist = df
R = route_between_sub(sub_dist, sub_index, start, destination)
    if len(R.start_cd_list) >= 1: # check that the departure station was entered correctly
route_df = R.next_stations(R.start_cd_list)
sub_list = route_df[1].unique().tolist()
        # routes with no transfer
R3 = route_between_sub(sub_dist, sub_index, start, destination)
R3.routes_to_destination_without_transfer(route_df, sub_list)
route_list_b = R3.route_list
        # routes with a single transfer
R4 = route_between_sub(sub_dist, sub_index, start, destination)
R4.routes_to_destination_with_1_transfer(route_df, sub_list)
route_list_c = R4.route_list
        # routes that may include transfers
R2 = route_between_sub(sub_dist, sub_index, start, destination)
R2.routes_to_destination(route_df, sub_list)
route_list_a = R2.route_list
for jj in route_list_b:
route_list_a.append(jj)
for jj in route_list_c:
route_list_a.append(jj)
route_list = route_list_a
rst_route = []
rst = pd.DataFrame()
        # pick the best route from route_list
try:
            # build a dataframe for each route
for ind in range(len(route_list)): # ind = 1
route_b = route_list[ind]
df_route = pd.DataFrame(columns = sub_dist.columns)
for k in range(len(route_b)-1):
st = route_b[k]
d = route_b[k+1]
rt = sub_dist[(sub_dist['statnt_cd'] == d) & (sub_dist['statnf_cd'] == st)]
if len(rt) == 1:
pass
elif len(rt) > 1:
rt = rt[rt['statnf_cd'].isin(df_route['statnt_cd'])==False]
df_route = df_route.append(rt)
            # remove duplicate segments, keeping the fastest one
df_route = df_route.reset_index(drop = True)
df_route = df_route.sort_values(by = 'sctntime', ascending = True)
df_route = df_route.drop_duplicates(['statnfnm', 'statntnm'], keep = 'first').reset_index(drop = False)
df_route = df_route.sort_values(by = 'index', ascending = True).drop('index', axis = 1)
rst_route.append(df_route)
# rst
num_station = len(route_b)
num_transfer = len(df_route['statnt_line'].unique())-1
time = df_route['sctntime'].astype(int).sum()
time_tot = time
rst.loc[ind, '정류장수'] = num_station
rst.loc[ind, '환승횟수'] = num_transfer
rst.loc[ind, '총 소요시간'] = time_tot
except (IndexError):
pass
# print(step)
try:
least_time = rst.loc[rst.sort_values('총 소요시간', ascending = True)[:1].index]
least_transfer = rst.loc[rst.sort_values(['환승횟수', '총 소요시간'], ascending = [True, True])[:1].index]
least_time_ = rst_route[rst.sort_values('총 소요시간', ascending = True)[:1].index[0]]
least_transfer_ = rst_route[rst.sort_values(['환승횟수', '총 소요시간'], ascending = [True, True])[:1].index[0]]
rst = least_time.append(least_transfer)
rst_route = []
rst_route.append(least_time_)
rst_route.append(least_transfer_)
return rst, rst_route
except:
return "입력 오류", "입력 오류"
else:
print("출발지 입력 오류")
return "입력 오류", "입력 오류"
def interactive_route(df, sub_index, x,y):
# x = input("출발: ")
# y = input("도착: ")
j, k = calculate_route_between_sub(df,sub_index, x, y)
if type(j) != str:
try:
t = j['총 소요시간'].min()
k[0] = k[0][k[0]['transfer'] == '0']
stnt = len(k[0]) + 1
trans = j['환승횟수'].min()
route = k[0]['statnfnm'].tolist() + [y]
# route.reverse()
# print("총 소요시간: %d분" %(t))
# print("환승 횟수: %d회" %(trans))
# print("경로: %s" %(route))
return t, stnt, trans, route
except KeyError:
return "입력 오류", "입력 오류", "입력 오류", "입력 오류"
else:
return "입력 오류", "입력 오류", "입력 오류", "입력 오류"
#% function to find a midpoint meeting station
def center_point(df, sub_index, s1, s2, dest_list):
t_df = pd.DataFrame()
it = 0
for dest_ in dest_list:
it += 1
print(it, dest_)
rst1, route1 = calculate_route_between_sub(df, sub_index, start = s1, destination = dest_)
time1 = rst1['총 소요시간'].min()
num_transfer1 = len(route1[0]['statnt_line'].unique())-1
rst2, route2 = calculate_route_between_sub(df, sub_index, start = s2, destination = dest_)
time2 = rst2['총 소요시간'].min()
num_transfer2 = len(route2[0]['statnt_line'].unique())-1
t_dif = np.abs(time1-time2)
t_sum = np.abs(time1+time2)
t_df.loc[dest_, '시간_' + s1] = time1
t_df.loc[dest_, '시간_' + s2] = time2
t_df.loc[dest_, '시간차이'] = t_dif
t_df.loc[dest_, '시간합'] = t_sum
a = t_df[t_df['시간차이'] <= 10]
a = a.sort_values(by = ['시간합'], ascending = True)
center = a[:5].reset_index(drop = False)
return center
```
#### File: subway-place/pre-build/integrate_route_from_gcs.py
```python
import pandas as pd
import os
from tqdm import tqdm
from google.cloud import storage
def main():
# gcs 인증
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))+ '/key/level-district.json'
# 이름 설정
bucket_name = 'j-first-bucket'
save_path = 'route/'
file_name = 'route_all.txt'
# blob list
blobs = list_blob(bucket_name)
# blobs to dataframe
data = blobs_to_dataframe(blobs)
# upload data to gcs
upload_blob(bucket_name, save_path + file_name, data)
def list_blob(bucket_name):
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blobs = list(bucket.list_blobs())
return blobs
def blobs_to_dataframe(blobs):
df = pd.DataFrame()
for bl in blobs:
if 'rev' in bl.name:
with open("tmp.txt", "wb") as file_obj:
bl.download_to_file(file_obj)
df = df.append(pd.read_csv("tmp.txt"))
print(bl.name)
else:
pass
return df
def upload_blob(bucket_name, destination_blob_name, df):
global credentials
"""Uploads a file to the bucket.
bucket_name = "your-bucket-name"
destination_blob_name = "storage-object-name"
df = dataframe to save
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB
blob.chunk_size = 5 * 1024 * 1024 # Set 5 MB blob size
df.to_csv("route_all.txt", encoding = 'utf-8', index = False)
blob.upload_from_filename("route_all.txt", content_type='text/csv')
# blob.upload_from_filename(source_file_name)
print(
"File uploaded to {}.".format(
destination_blob_name
)
)
if __name__ == '__main__':
main()
``` |
{
"source": "jireh-father/automl",
"score": 2
} |
#### File: automl/efficientdet/afp_to_coco_ori.py
```python
import json
import argparse
import sys
import glob
import os
import datetime
from pycocotools import mask
import imagesize
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument('--image_dir', dest='image_dir', default=None, type=str)
parser.add_argument('--output_path', dest='output_path', default=None, type=str)
parser.add_argument('--label_dir', dest='label_dir', default=None, type=str)
parser.add_argument('--start_idx', dest='start_idx', default=1, type=int)
parser.add_argument('--train_ratio', dest='train_ratio', default=0.9, type=float)
# parser.add_argument('--output_image_dir', dest='output_image_dir', default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def init_coco_annotation(label_dir, start_idx):
coco_output = {}
coco_output["info"] = {
"description": "This is stable 1.0 version of the 2014 MS COCO dataset.",
"url": "http://mscoco.org",
"version": "1.0",
"year": 2014,
"contributor": "Microsoft COCO group",
"date_created": "2015-01-27 09:11:52.357475"
}
coco_output["type"] = "instances"
coco_output["license"] = [{
"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/",
"id": 1,
"name": "Attribution-NonCommercial-ShareAlike License"
}]
coco_output["images"] = []
coco_output["annotations"] = []
if label_dir is None:
coco_output["categories"] = [{
"supercategory": "eye",
"id": 1,
"name": "eye"
}, ]
else:
label_dirs = glob.glob(os.path.join(label_dir, "*"))
label_dirs.sort()
coco_output["categories"] = [
{"supercategory": os.path.basename(dname), "id": i + start_idx, "name": os.path.basename(dname)} for
i, dname
in enumerate(label_dirs)]
return coco_output
if __name__ == '__main__':
args = parse_args()
os.makedirs(os.path.dirname(args.output_path), exist_ok=True)
# os.makedirs(args.output_image_dir, exist_ok=True)
coco_output = init_coco_annotation(args.label_dir, args.start_idx)
image_id_map = {}
bbox_id_map = {}
anno_sample = {"segmentation": [],
"area": 100.0,
"iscrowd": 0,
"image_id": 1,
"bbox": [1, 1, 1, 1],
"category_id": 1,
"id": 1}
polygon_files = []
if args.image_dir:
polygon_files = glob.glob(os.path.join(args.image_dir, "*.xy"))
for i, polygon_file in enumerate(polygon_files):
image_file = os.path.splitext(polygon_file)[0]
if not os.path.isfile(image_file):
print("not image file", image_file)
continue
segmentation = [
[float(coord.strip()) for coord in open(polygon_file).readline().split(" ")[1:] if coord and coord.strip()]]
width, height = imagesize.get(image_file)
rles = mask.frPyObjects(segmentation, height, width)
rle = mask.merge(rles)
bbox = mask.toBbox(rle)
area = mask.area(rle)
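    # frPyObjects/merge encode the polygon as an RLE mask; toBbox then gives an
    # [x, y, width, height] box and area the enclosed pixel count for the COCO record.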
image_fn = os.path.basename(image_file)
if image_fn not in image_id_map:
image_id_map[image_fn] = len(image_id_map) + 1
coco_output["images"].append({
"license": 1,
"url": "",
"file_name": image_fn,
"height": height,
"width": width,
"date_captured": datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
"id": image_id_map[image_fn]
})
coco_output["annotations"].append({
"segmentation": segmentation,
"area": int(area),
"iscrowd": 0,
"image_id": i + 1,
"bbox": list(bbox),
"category_id": 1,
"id": image_id_map[image_fn]
})
json.dump(coco_output, open(args.output_path, "w+"))
print("complete")
```
#### File: automl/efficientdet/infer_and_extract_bbox_tflite.py
```python
import numpy as np
import tensorflow as tf
import cv2  # needed by resize_and_crop_image below
from absl import flags
from absl import app
import glob
import time
from PIL import Image
import os
import shutil
import json
import random
flags.DEFINE_string('input_image', None, 'Input image path for inference.')
flags.DEFINE_string('label_dir', None, 'Input image path for inference.')
flags.DEFINE_string('output_path', None, 'Output dir for inference.')
flags.DEFINE_string('output_image_dir', None, 'Output dir for inference.')
flags.DEFINE_string('vis_image_dir', None, 'Output dir for inference.')
flags.DEFINE_string('real_image_dir', None, 'Output dir for inference.')
flags.DEFINE_float('min_score_thresh', 0.3, 'Score threshold to show box.')
flags.DEFINE_integer('target_label_idx', None, 'Score threshold to show box.')
flags.DEFINE_string('tflite_path', None, 'Path for exporting tflite file.')
flags.DEFINE_integer('start_index', 1, 'Path for exporting tflite file.')
flags.DEFINE_integer('random_seed', 1, 'Path for exporting tflite file.')
flags.DEFINE_boolean('use_bbox_aug', False, 'Path for exporting tflite file.')
flags.DEFINE_float('bbox_aug_ratio', 0.1, 'Path for exporting tflite file.')
FLAGS = flags.FLAGS
def resize_and_crop_image(img, output_size):
height, width = img.shape[:2]
scale = output_size / float(max(width, height))
if scale != 1.0:
height = int(round(height * scale))
width = int(round(width * scale))
interpolation = cv2.INTER_LINEAR
img = cv2.resize(img, (width, height), interpolation=interpolation)
img = cv2.copyMakeBorder(img, 0, output_size - height, 0, output_size - width, cv2.BORDER_CONSTANT, value=0)
return img, width, height
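# Illustrative numbers: with output_size=512 and a 640x480 input, scale is 0.8, the image is
# resized to 512x384 and padded at the bottom to 512x512; the returned (width, height) of
# (512, 384) describe the resized, unpadded region.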
def main(_):
model_path = FLAGS.tflite_path
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
if FLAGS.random_seed:
random.seed(FLAGS.random_seed)
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
image_file_list = glob.glob(FLAGS.input_image)
label_dirs = glob.glob(os.path.join(FLAGS.label_dir, "*"))
label_dirs.sort()
label_dict = {os.path.basename(dname): i + FLAGS.start_index for i, dname in enumerate(label_dirs)}
image_file_list.sort()
if os.path.dirname(FLAGS.output_path):
os.makedirs(os.path.dirname(FLAGS.output_path), exist_ok=True)
os.makedirs(FLAGS.output_image_dir, exist_ok=True)
os.makedirs(FLAGS.vis_image_dir, exist_ok=True)
real_image_dict = {}
for image_file in image_file_list:
splitext = os.path.splitext(image_file)
ext = splitext[1]
fp = splitext[0]
real_file_name = "_".join(os.path.basename(fp).split("_")[1:-1])
searched = glob.glob(os.path.join(FLAGS.real_image_dir, real_file_name + ".*"))
if len(searched) == 1:
real_file_path = searched[0]
else:
real_file_name = "_".join(os.path.basename(fp).split("_")[:-1])
real_file_path = os.path.join(FLAGS.real_image_dir, real_file_name + ext)
if not os.path.isfile(real_file_path):
real_file_path = os.path.join(FLAGS.real_image_dir, real_file_name + ".jpeg")
if not os.path.isfile(real_file_path):
real_file_path = os.path.join(FLAGS.real_image_dir, real_file_name + ".png")
if not os.path.isfile(real_file_path):
real_file_path = os.path.join(FLAGS.real_image_dir, real_file_name + ".bmp")
if real_file_path not in real_image_dict:
real_image_dict[real_file_path] = []
bbox_idx = int(os.path.basename(fp).split("_")[-1])
label_dir = os.path.basename(os.path.dirname(image_file))
real_image_dict[real_file_path].append([bbox_idx, label_dict[label_dir]])
annotations = {}
total_exec_time = 0.
for i, image_file in enumerate(real_image_dict):
bbox_idx_and_labels = real_image_dict[image_file]
print(image_file, bbox_idx_and_labels)
bbox_idxs = [item[0] for item in bbox_idx_and_labels]
labels = [item[1] for item in bbox_idx_and_labels]
pil_im = Image.open(image_file).convert("RGB")
o_w, o_h = pil_im.size
image_fn = os.path.basename(image_file)
annotations[image_fn] = {"width": o_w, "height": o_h, "bbox": []}
# im = np.array(pil_im.resize((input_shape[2], input_shape[1])))
im = np.array(pil_im)
# im = normalize_image(np.array(pil_im))
im, r_w, r_h = resize_and_crop_image(im, input_shape[1])
im = np.expand_dims(im, axis=0)
interpreter.set_tensor(input_details[0]['index'], im)
start = time.time()
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
exec_time = time.time() - start
print(i, len(real_image_dict), image_file, exec_time)
total_exec_time += exec_time
r_h = input_shape[1]
r_w = input_shape[2]
if FLAGS.target_label_idx is not None:
eye_indexes = np.squeeze(np.argwhere(output_data[0, :, 6] == FLAGS.target_label_idx), 1)
eye_indexes = np.squeeze(np.argwhere(output_data[0][eye_indexes][:, 5] > 0.), 1)
else:
eye_indexes = np.squeeze(np.argwhere(output_data[0, :, 5] > 0.), 1)
if len(eye_indexes) > 0:
top_k = 30
top_k_indexes = output_data[0][eye_indexes][:, 5].argsort()[::-1][:top_k]
scores = output_data[0][top_k_indexes][:, 5]
bboxes = output_data[0][top_k_indexes][:, 1:5]
eyes_bboxes = []
for j, score in enumerate(scores):
if score < FLAGS.min_score_thresh:
print("skip", score, "<", FLAGS.min_score_thresh)
break
r_y1, r_x1, r_y2, r_x2 = bboxes[j]
y1 = r_y1 / r_h * o_h
y2 = r_y2 / r_h * o_h
x1 = r_x1 / r_w * o_w
x2 = r_x2 / r_w * o_w
print(o_w, o_h, (x1, y1, x2, y2))
eyes_bboxes.append({"x1": x1, "y1": y1, "x2": x2, "y2": y2})
crop_im = pil_im.crop((x1, y1, x2, y2))
output_filename = "{}_{}.jpg".format(os.path.splitext(os.path.basename(image_file))[0], j)
output_path = os.path.join(FLAGS.vis_image_dir, output_filename)
crop_im.save(output_path)
print(eyes_bboxes)
if eyes_bboxes:
if not os.path.isfile(os.path.join(FLAGS.output_image_dir, os.path.basename(image_file))):
shutil.copy(image_file, FLAGS.output_image_dir)
dup_idx = False
if len(set(bbox_idxs)) < len(bbox_idxs):
dup_idx = True
bbox_idx_map = {}
for j, bbox_idx in enumerate(bbox_idxs):
if bbox_idx >= len(eyes_bboxes):
raise Exception("invalid bbox index!", bbox_idx, eyes_bboxes)
bbox = eyes_bboxes[bbox_idx]
bbox["label"] = labels[j]
if dup_idx and FLAGS.use_bbox_aug and bbox_idx in bbox_idx_map:
w = bbox["x2"] - bbox["x1"]
h = bbox["y2"] - bbox["y1"]
w_aug_max = round(w * FLAGS.bbox_aug_ratio)
h_aug_max = round(h * FLAGS.bbox_aug_ratio)
x1_aug = random.randint(0, w_aug_max) - (w_aug_max // 2)
x2_aug = random.randint(0, w_aug_max) - (w_aug_max // 2)
y1_aug = random.randint(0, h_aug_max) - (h_aug_max // 2)
y2_aug = random.randint(0, h_aug_max) - (h_aug_max // 2)
bbox["x1"] += x1_aug
bbox["x2"] += x2_aug
bbox["y1"] += y1_aug
bbox["y2"] += y2_aug
bbox_idx_map[bbox_idx] = True
annotations[image_fn]["bbox"].append(bbox)
else:
print("no eyes")
json.dump(annotations, open(FLAGS.output_path, "w+"))
print("avg exec time", total_exec_time / len(real_image_dict), "total images", len(real_image_dict))
if __name__ == '__main__':
# tf.disable_eager_execution()
app.run(main)
``` |
{
"source": "jireh-father/Detectron",
"score": 2
} |
#### File: jireh-father/Detectron/split_hash_dict.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import sys
import json
from itertools import islice
def chunks(data, SIZE=10000):
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k: data[k] for k in islice(it, SIZE)}
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--image_hash_dict_file',
dest='image_hash_dict_file',
help='image_hash_dict_file',
type=str
)
parser.add_argument(
'--num_processes',
dest='num_processes',
help='num_processes',
default=4,
type=int
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
image_hash_dict = json.load(open(args.image_hash_dict_file))
num_chunks = len(image_hash_dict) // args.num_processes
hash_dict_chunks = list(chunks(image_hash_dict, num_chunks))
if len(hash_dict_chunks) > args.num_processes:
hash_dict_chunks[0].update(hash_dict_chunks[-1])
hash_dict_chunks = hash_dict_chunks[:-1]
output_dir = os.path.dirname(args.image_hash_dict_file)
for i, hash_dict_chunk in enumerate(hash_dict_chunks):
json.dump(hash_dict_chunk, open(os.path.join(output_dir, "image_hash_dict_chunk_%d.json" % i), "w+"))
```
#### File: Detectron/tools/merge_dicts.py
```python
import json
import argparse
import pickle
import sys
import glob
import os
def merge_dict(dict1, dict2):
''' Merge dictionaries and keep values of common keys in list'''
dict3 = {**dict1, **dict2}
for key, value in dict3.items():
if key in dict1 and key in dict2:
dict3[key] = value + dict1[key]
return dict3
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--dict_dir',
dest='dict_dir',
default=None,
type=str
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
detection_json_files = ""
json_files = glob.glob(os.path.join(args.dict_dir, "*.json"))
image_hash_map = {}
for i, json_file in enumerate(json_files):
tmp_hash_map = json.load(open(json_file, "r"))
image_hash_map = merge_dict(image_hash_map, tmp_hash_map)
json.dump(image_hash_map, open(os.path.join(args.dict_dir, "merged_dict.json"), "w+"))
``` |
{
"source": "jireh-father/InsightFace_Pytorch",
"score": 2
} |
#### File: jireh-father/InsightFace_Pytorch/inference_dog_eyes_verification.py
```python
import argparse
import cv2
from PIL.JpegImagePlugin import JpegImageFile
cv2.ocl.setUseOpenCL(False)
import albumentations as al
from albumentations.pytorch import ToTensorV2
import numpy as np
import torch
from backbone import Backbone
def get_test_transforms(input_size=None, use_random_crop=False, use_gray=False,
use_center_crop=False,
center_crop_ratio=0.8):
if use_random_crop:
compose = [al.Resize(int(input_size * 1.1), int(input_size * 1.1)),
al.CenterCrop(input_size, input_size)]
elif use_center_crop:
compose = [al.Resize(int(input_size * (2.0 - center_crop_ratio)), int(input_size * (2.0 - center_crop_ratio))),
al.CenterCrop(input_size, input_size)]
else:
compose = [al.Resize(input_size, input_size)]
return al.Compose(compose + [
al.ToGray(p=1.0 if use_gray else 0.0),
al.Normalize(),
ToTensorV2()
])
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
class DogEyesVerifier(object):
def __init__(self, input_size=112, model_path=None, pos_thr=None, neg_thr=None, drop_ratio=0.6,
net_depth=50, use_random_crop=False,
                 use_gray=False, use_center_crop=False, center_crop_ratio=0.8, device='cpu', use_onnx=False):  # torch expects a lower-case device string
assert model_path is not None
self.pos_thr = pos_thr
self.neg_thr = neg_thr
self.input_size = input_size
self.transforms = get_test_transforms(input_size, use_random_crop=use_random_crop, use_gray=use_gray,
use_center_crop=use_center_crop, center_crop_ratio=center_crop_ratio)
self.use_onnx = use_onnx
if use_onnx:
import onnxruntime
self.model = onnxruntime.InferenceSession(model_path)
else:
self.model = Backbone(net_depth, drop_ratio, 'ir_se').to(device)
self.model.eval()
if device == 'cpu':
self.model.load_state_dict(torch.load(model_path, map_location=device), strict=True)
else:
self.model.load_state_dict(torch.load(model_path), strict=True)
self.use_cuda = device == "cuda"
def is_same(self, img1, img2, is_same_side, pos_thr=None, neg_thr=None, use_pos_low_thr=False):
if pos_thr is None:
pos_thr = self.pos_thr
if neg_thr is None:
neg_thr = self.neg_thr
if pos_thr is None and neg_thr is None:
raise ValueError("pos_thr and neg_thr are None.")
if isinstance(img1, str):
img1 = cv2.imread(img1, cv2.IMREAD_COLOR)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
elif isinstance(img1, JpegImageFile):
if img1.mode != "RGB":
img1 = img1.convert("RGB")
img1 = np.array(img1)
if isinstance(img2, str):
img2 = cv2.imread(img2, cv2.IMREAD_COLOR)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
elif isinstance(img2, JpegImageFile):
if img2.mode != "RGB":
img2 = img2.convert("RGB")
img2 = np.array(img2)
img1 = self.transforms(image=img1)['image'].unsqueeze(0)
img2 = self.transforms(image=img2)['image'].unsqueeze(0)
if self.use_cuda:
img1 = img1.cuda()
img2 = img2.cuda()
if self.use_onnx:
input1 = {self.model.get_inputs()[0].name: to_numpy(img1)}
input2 = {self.model.get_inputs()[0].name: to_numpy(img2)}
embedding1 = self.model.run(None, input1)[0]
embedding2 = self.model.run(None, input2)[0]
else:
with torch.set_grad_enabled(False):
embedding1 = self.model(img1).cpu().data.numpy()
embedding2 = self.model(img2).cpu().data.numpy()
dist = np.sum(np.square(np.subtract(embedding1, embedding2)), 1)[0]
if is_same_side:
return dist < pos_thr
else:
if use_pos_low_thr:
return dist < neg_thr
else:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='for common image metric learning')
parser.add_argument("-depth", "--net_depth", help="how many layers [50,100,152]", default=50, type=int)
parser.add_argument('--input_size', type=int, default=112)
parser.add_argument('--use_random_crop', default=False, action="store_true")
parser.add_argument('--model_path', default=None, type=str)
parser.add_argument('--use_gpu', default=False, action="store_true")
    # whether to run inference with an ONNX model instead of the PyTorch backbone
parser.add_argument('--use_onnx', default=False, action="store_true")
    # the two eye images must first be checked with a left-vs-right eye model to decide whether they show the same side.
parser.add_argument('--is_same_side', default=False, action="store_true")
    # even when the left-vs-right model misclassifies the side, treat the two eyes as the same
    # if their embedding distance is small; this raises overall accuracy and is recommended.
parser.add_argument('--use_pos_low_thr', default=False, action="store_true")
parser.add_argument('--img1', default=None, type=str)
parser.add_argument('--img2', default=None, type=str)
    # distance threshold for positive pairs (is_same_side is True)
parser.add_argument('--pos_thr', default=None, type=float)
    # for negative pairs (is_same_side is False) with use_pos_low_thr enabled, this threshold is used for the decision
parser.add_argument('--neg_thr', default=None, type=float)
args = parser.parse_args()
verifier = DogEyesVerifier(input_size=args.input_size, model_path=args.model_path,
pos_thr=args.pos_thr, neg_thr=args.neg_thr, use_random_crop=args.use_random_crop,
device="cuda" if args.use_gpu else "cpu", use_onnx=args.use_onnx)
result = verifier.is_same(args.img1, args.img2, args.is_same_side, pos_thr=args.pos_thr, neg_thr=args.neg_thr,
use_pos_low_thr=args.use_pos_low_thr)
print("result", result)
```
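A minimal usage sketch for the `DogEyesVerifier` class above, outside the CLI entry point; the checkpoint path and thresholds below are placeholders, and the checkpoint is assumed to match the default ir_se-50 backbone:

```python
from inference_dog_eyes_verification import DogEyesVerifier

# Hypothetical path and thresholds, for illustration only.
verifier = DogEyesVerifier(model_path="weights/dog_eyes_ir_se50.pth",
                           pos_thr=1.2, neg_thr=0.9, device="cpu")

# Both images are assumed to show the same-side eye (e.g. two left eyes),
# so the positive threshold is applied to the embedding distance.
same = verifier.is_same("dog_a_left.jpg", "dog_b_left.jpg", is_same_side=True)
print("same eye:", same)
```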
#### File: jireh-father/InsightFace_Pytorch/make_font_val_dataset.py
```python
import os
import argparse
import glob
import json
import random
from multiprocessing import Pool
import traceback
from PIL import Image
from data.online_dataset import OnlineFontDataset
import gen_params, transforms
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_dir', type=str, default="/home/irelin/resource/font_recognition/val_dataset")
parser.add_argument('-f', '--font_dir', type=str, default="/home/irelin/resource/font_recognition/font")
parser.add_argument('--font_list', type=str, default="./db/val_font_list.json")
parser.add_argument('--bg_dir', type=str, default="/home/irelin/resource/font_recognition/val_bg")
parser.add_argument('--train_dataset_param_func', type=str, default='get_train_params', # get_params_noraml
required=False) # func or gen_param.json
parser.add_argument('--train_transform_func_name', type=str, default='get_train_transforms',
# get_train_transforms_normal
required=False) # func or gen_param.json
parser.add_argument('--num_sample_each_class', type=int, default=10)
parser.add_argument('--min_num_chars', type=int, default=1)
parser.add_argument('--max_num_chars', type=int, default=6)
parser.add_argument('--han_unicode_file', type=str, default="db/union_korean_unicodes.json")
parser.add_argument('--eng_unicode_file', type=str, default="db/eng_unicodes.json")
parser.add_argument('--num_unicode_file', type=str, default="db/number_unicodes.json")
parser.add_argument('--han_prob', type=float, default=0.4)
parser.add_argument('--eng_prob', type=float, default=0.3)
parser.add_argument('--num_prob', type=float, default=0.3)
parser.add_argument('--mix_prob', type=float, default=0.25)
parser.add_argument('--simple_img_prob', type=float, default=0.2)
parser.add_argument('--font_size_range', type=str, default='10,220')
parser.add_argument('--same_text_in_batch_prob', default=0., type=float)
parser.add_argument('--same_font_size_in_batch_prob', default=0., type=float)
parser.add_argument('--same_text_params_in_batch_prob', default=0., type=float)
parser.add_argument('--use_text_persp_trans_prob', default=0.1, type=float)
parser.add_argument('--use_img_persp_trans_prob', default=0.4, type=float)
parser.add_argument('-w', '--num_processes', type=int, default=8)
parser.add_argument('--seed', type=int, default=None)
args = parser.parse_args()
for arg in vars(args):
print(arg, getattr(args, arg))
random.seed(args.seed)
bg_list = glob.glob(os.path.join(args.bg_dir, "*"))
generation_params = getattr(gen_params, args.train_dataset_param_func)()
train_transform_func = getattr(transforms, args.train_transform_func_name)
train_transforms = train_transform_func(use_online=False)
han_unicodes = json.load(open(args.han_unicode_file))
eng_unicodes = json.load(open(args.eng_unicode_file))
num_unicodes = json.load(open(args.num_unicode_file))
font_size_range = args.font_size_range.split(",")
font_size_range = list(range(int(font_size_range[0]), int(font_size_range[1]) + 1))
font_list = [os.path.join(args.font_dir, font_name) for font_name in json.load(open(args.font_list))]
font_list.sort()
feed_data = []
for i in range(len(font_list)):
font_name = os.path.splitext(os.path.basename(font_list[i]))[0]
class_dir = os.path.join(args.output_dir, font_name)
file_cnt = len(glob.glob(os.path.join(class_dir, "*")))
if file_cnt >= args.num_sample_each_class:
print(font_name, "skip")
continue
for j in range(args.num_sample_each_class):
feed_data.append([i, j])
dataset = OnlineFontDataset(font_list, transform=train_transforms, generation_params=generation_params,
bg_list=bg_list,
num_sample_each_class=args.num_sample_each_class,
min_chars=args.min_num_chars, max_chars=args.max_num_chars,
hangul_unicodes=han_unicodes, eng_unicodes=eng_unicodes,
number_unicodes=num_unicodes,
hangul_prob=args.han_prob, eng_prob=args.eng_prob,
num_prob=args.num_prob, mix_prob=args.mix_prob,
simple_img_prob=args.simple_img_prob,
font_size_range=font_size_range,
same_text_in_batch_prob=args.same_text_in_batch_prob,
same_font_size_in_batch_prob=args.same_font_size_in_batch_prob,
same_text_params_in_batch_prob=args.same_text_params_in_batch_prob,
use_text_persp_trans_prob=args.use_text_persp_trans_prob,
use_img_persp_trans_prob=args.use_img_persp_trans_prob,
skip_exception=True,
use_debug=True,
change_font_in_error=False,
use_random_idx=False
)
def _main(feed_item):
font_idx = feed_item[0]
text_idx = feed_item[1]
font_path = font_list[font_idx]
font_name = os.path.splitext(os.path.basename(font_path))[0]
class_dir = os.path.join(args.output_dir, font_name)
print(font_idx, text_idx, font_name)
os.makedirs(class_dir, exist_ok=True)
tried = 0
try:
im = None
while True:
try:
dataset._sampling_text()
im = dataset.create_text_image(font_idx)
break
except Exception as e:
tried += 1
if tried > 6:
raise e
pass
# im, _ = dataset.__getitem__(font_idx)
# print("debug", _, font_idx)
if len(glob.glob(os.path.join(class_dir, "*"))) >= args.num_sample_each_class:
print(font_name, "skip")
return False
fp = os.path.join(class_dir, "%09d.jpg" % text_idx)
while os.path.isfile(fp):
text_idx += 1
fp = os.path.join(class_dir, "%09d.jpg" % text_idx)
            Image.fromarray(im).save(fp, quality=100)
except Exception:
print("gen error", font_name, font_idx)
traceback.print_exc()
with Pool(args.num_processes) as pool: # ThreadPool(8) as pool:
print("start multi processing")
pool.map(_main, feed_data)
print("end of making")
```
#### File: jireh-father/InsightFace_Pytorch/test_landmark_dacon.py
```python
from config import get_config
from Learner import face_learner
import argparse
import random
import transforms
from pathlib import Path
import torch
from PIL import Image
import os
import glob
import csv
from torch.nn import functional as F
import numpy as np
# python train.py -net mobilefacenet -b 200 -w 4
class CustomDataset(torch.utils.data.Dataset):
"""__init__ and __len__ functions are the same as in TorchvisionDataset"""
def __init__(self, root, transform=None):
self.transform = transform
self.samples = []
for image_dir in glob.glob(os.path.join(root, "*")):
for image_file in glob.glob(os.path.join(image_dir, "*")):
self.samples.append(image_file)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path = self.samples[index]
sample = np.array(Image.open(path).convert("RGB"))
if self.transform is not None:
sample = self.transform(image=sample)['image']
return sample, os.path.splitext(os.path.basename(path))[0]
def __len__(self):
return len(self.samples)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='for common image metric learning')
parser.add_argument("-e", "--epochs", help="training epochs", default=20, type=int)
parser.add_argument("-net", "--net_mode", help="which network, [ir, ir_se, mobilefacenet]", default='ir_se',
type=str)
parser.add_argument("-depth", "--net_depth", help="how many layers [50,100,152]", default=50, type=int)
parser.add_argument('-lr', '--lr', help='learning rate', default=1e-3, type=float)
parser.add_argument("-b", "--batch_size", help="batch_size", default=96, type=int)
parser.add_argument("-w", "--num_workers", help="workers number", default=3, type=int)
parser.add_argument("-d", "--data_mode", help="use which database, [vgg, ms1m, emore, concat]", default='common',
type=str)
parser.add_argument("--embedding_size", help="embedding_size", default=512, type=int)
parser.add_argument("-t", "--test_img_dir", default=None, type=str)
parser.add_argument("--label_file", default=None, type=str)
parser.add_argument("--max_positive_cnt", default=1000, type=int)
parser.add_argument("--val_batch_size", default=256, type=int)
parser.add_argument('--pin_memory', default=False, action="store_true")
parser.add_argument("--not_use_pos", default=False, action='store_true')
parser.add_argument("--not_use_neg", default=False, action='store_true')
parser.add_argument('--work_path', type=str, default=None, required=False)
parser.add_argument('--seed', type=int, default=None)
parser.add_argument('--benchmark', default=False, action="store_true")
parser.add_argument('--val_img_dirs', type=str,
default='{"val":"path"}',
required=False)
parser.add_argument('--train_transform_func_name', type=str, default='get_train_common_transforms',
# get_train_transforms_normal
required=False) # func or gen_param.json
parser.add_argument('--val_transform_func_name', type=str, default='get_val_common_transforms',
# get_train_transforms_normal
required=False) # func or gen_param.json
parser.add_argument('--input_size', type=int, default=112)
parser.add_argument('--use_random_crop', default=False, action="store_true")
parser.add_argument('--use_center_crop', default=False, action="store_true")
parser.add_argument('--center_crop_ratio', default=0.8, type=float)
parser.add_argument('--use_gray', default=False, action="store_true")
parser.add_argument('--only_use_pixel_transform', default=False, action="store_true")
parser.add_argument('--use_blur', default=False, action="store_true")
parser.add_argument('--use_flip', default=False, action="store_true")
parser.add_argument('--optimizer', default='sgd', type=str)
parser.add_argument('--pooling', default='GeM', type=str)
parser.add_argument('--last_fc_dropout', type=float, default=0.0)
parser.add_argument('--pretrained', default=False, action="store_true")
parser.add_argument('--loss_module', default='arcface', type=str)
parser.add_argument('--s', type=float, default=30.0)
parser.add_argument('--margin', type=float, default=0.3)
parser.add_argument('--ls_eps', type=float, default=0.0)
parser.add_argument('--theta_zero', type=float, default=1.25)
parser.add_argument('--wd', type=float, default=1e-5)
parser.add_argument('--restore_suffix', default=None, type=str)
parser.add_argument('--train', default=False, action="store_true")
parser.add_argument('--no_transforms', default=False, action="store_true")
parser.add_argument('--ft_model_path', default=None, type=str)
    parser.add_argument('--no_strict', default=False, action="store_true")
    parser.add_argument('--log_step_interval', default=10, type=int)
    parser.add_argument('--output_csv_path', default='submission.csv', type=str)
args = parser.parse_args()
conf = get_config()
for arg in vars(args):
print(arg, getattr(args, arg))
setattr(conf, arg, getattr(args, arg))
conf.work_path = Path(conf.work_path)
conf.model_path = conf.work_path / 'models'
conf.log_path = conf.work_path / 'log'
conf.save_path = conf.work_path / 'save'
if args.net_mode == 'mobilefacenet':
conf.use_mobilfacenet = True
else:
conf.net_mode = args.net_mode
conf.net_depth = args.net_depth
    if args.seed is not None:
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
    # use_center_crop drives the cropping behaviour of the test transforms; keep the
    # composed transform under its own name to avoid shadowing the transforms module.
    test_transforms = transforms.get_test_transforms_v2(args.input_size, use_crop=args.use_center_crop,
                                                        center_crop_ratio=args.center_crop_ratio,
                                                        use_gray=args.use_gray)
    dataset = CustomDataset(args.test_img_dir, test_transforms)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                             shuffle=False, num_workers=args.num_workers, pin_memory=args.pin_memory)
    learner = face_learner(conf, inference=True)
device = 'cuda'
total_scores = []
total_indices = []
total_file_names = []
for step, (imgs, file_names) in enumerate(dataloader):
if step > 0 and step % args.log_step_interval == 0:
print(step, len(dataloader))
imgs = imgs.to(device)
total_file_names += list(file_names)
        with torch.set_grad_enabled(False):
            embeddings = learner.model(imgs)
            # no labels exist at test time, so score classes by cosine similarity against the
            # head's class weights (the attribute name differs between head implementations)
            kernel = getattr(learner.head, 'kernel', None)
            if kernel is not None:
                outputs = torch.mm(F.normalize(embeddings), F.normalize(kernel, dim=0))
            else:
                outputs = F.linear(F.normalize(embeddings), F.normalize(learner.head.weight))
            scores, indices = torch.max(F.softmax(outputs, 1), dim=1)
total_indices += list(indices.cpu().numpy())
total_scores += list(scores.cpu().numpy())
rows = zip(total_file_names, total_indices, total_scores)
output_dir = os.path.dirname(args.output_csv_path)
if output_dir:
os.makedirs(output_dir, exist_ok=True)
with open(args.output_csv_path, "w") as f:
writer = csv.writer(f)
writer.writerow(["id", "landmark_id", "conf"])
for row in rows:
writer.writerow(row)
print("done")
```
#### File: jireh-father/InsightFace_Pytorch/transforms.py
```python
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import albumentations as al
from albumentations.augmentations import functional as F
from albumentations.pytorch import ToTensorV2
def get_common_train_transforms():
return [
# al.OneOf([
# al.RandomRotate90(),
# al.Rotate(limit=90, border_mode=cv2.BORDER_CONSTANT),
# ], p=0.1),
al.ShiftScaleRotate(border_mode=cv2.BORDER_CONSTANT, rotate_limit=30, p=0.05),
al.OpticalDistortion(border_mode=cv2.BORDER_CONSTANT, distort_limit=5.0, shift_limit=0.1, p=0.05),
al.ElasticTransform(border_mode=cv2.BORDER_CONSTANT, alpha_affine=15, p=0.05),
al.OneOf([
al.RandomGamma(),
al.HueSaturationValue(),
al.RGBShift(),
al.CLAHE(),
al.ChannelShuffle(),
al.InvertImg(),
], p=0.1),
al.RandomSnow(p=0.05),
al.RandomRain(p=0.05),
# al.RandomFog(p=0.05),
# al.RandomSunFlare(p=0.05),
al.RandomShadow(p=0.05),
al.RandomBrightnessContrast(p=0.2),
al.GaussNoise(p=0.2),
al.ISONoise(p=0.05),
al.ToGray(p=0.05),
al.OneOf([
# al.MotionBlur(blur_limit=4),
al.Blur(blur_limit=2),
# al.MedianBlur(blur_limit=4),
# al.GaussianBlur(blur_limit=4),
], p=0.05),
al.CoarseDropout(p=0.05),
al.Downscale(p=0.05),
al.ImageCompression(quality_lower=60, p=0.2),
]
def _get_train_pixel_transforms(use_gray=False, use_blur=False):
return [
al.RandomGamma(p=0.05),
al.HueSaturationValue(p=0.05),
al.RGBShift(p=0.05),
al.CLAHE(p=0.05),
al.ChannelShuffle(p=0.05),
al.InvertImg(p=0.05),
al.RandomSnow(p=0.05),
al.RandomRain(p=0.05),
al.RandomFog(p=0.05),
al.RandomSunFlare(p=0.05, num_flare_circles_lower=1, num_flare_circles_upper=2, src_radius=110),
al.RandomShadow(p=0.05),
al.RandomBrightnessContrast(p=0.05),
al.GaussNoise(p=0.05),
al.ISONoise(p=0.05),
al.MultiplicativeNoise(p=0.05),
al.ToGray(p=1.0 if use_gray else 0.05),
al.ToSepia(p=0.05),
al.Solarize(p=0.05),
al.Equalize(p=0.05),
al.Posterize(p=0.05),
al.FancyPCA(p=0.05),
al.OneOf([
al.MotionBlur(blur_limit=1),
al.Blur(blur_limit=1),
al.MedianBlur(blur_limit=1),
al.GaussianBlur(blur_limit=1),
], p=0.05 if use_blur else 0.),
al.CoarseDropout(p=0.05),
al.Cutout(p=0.05),
al.GridDropout(p=0.05),
al.ChannelDropout(p=0.05),
al.Downscale(p=0.1),
al.ImageCompression(quality_lower=60, p=0.1),
]
def _get_train_transforms(use_gray=False, only_use_pixel_transform=False, use_flip=False, use_blur=False):
pixel_transforms = _get_train_pixel_transforms(use_gray, use_blur)
if only_use_pixel_transform:
return pixel_transforms
else:
return [
al.Flip(p=0.5 if use_flip else 0.),
al.OneOf([
al.Rotate(limit=90, border_mode=cv2.BORDER_CONSTANT),
], p=0.05),
al.ShiftScaleRotate(border_mode=cv2.BORDER_CONSTANT, rotate_limit=30, p=0.05),
al.OpticalDistortion(border_mode=cv2.BORDER_CONSTANT, distort_limit=5.0, shift_limit=0.1, p=0.05),
al.GridDistortion(border_mode=cv2.BORDER_CONSTANT, p=0.05),
al.ElasticTransform(border_mode=cv2.BORDER_CONSTANT, alpha_affine=15, p=0.05),
al.RandomGridShuffle(p=0.05),
] + pixel_transforms
def get_train_transforms(input_size=None, use_random_crop=False, use_gray=False, use_online=True,
use_same_random_crop_in_batch=False, use_normalize=True, only_use_pixel_transform=False,
use_flip=False, use_blur=False):
if not use_online:
return al.Compose(_get_train_transforms(use_gray, only_use_pixel_transform, use_flip, use_blur))
def resize_image(img, interpolation=cv2.INTER_LINEAR, **params):
height, width = img.shape[:2]
if width >= height:
img = F.smallest_max_size(img, max_size=input_size, interpolation=interpolation)
else:
img = F.longest_max_size(img, max_size=input_size, interpolation=interpolation)
pad_width = input_size - img.shape[:2][1]
left = pad_width // 2
right = pad_width - left
img = F.pad_with_params(img, 0, 0, left, right, border_mode=cv2.BORDER_CONSTANT, value=0)
return img
def left_crop(img, **params):
height, width = img.shape[:2]
if width > input_size:
img = img[:, :input_size, :]
return img
if use_same_random_crop_in_batch:
compose = _get_train_transforms(use_gray, only_use_pixel_transform, use_flip, use_blur)
else:
if use_random_crop:
crop = al.RandomCrop(input_size, input_size)
else:
crop = al.Lambda(left_crop)
compose = [ # al.SmallestMaxSize(input_size),
al.Lambda(resize_image),
crop] + _get_train_transforms(use_gray, only_use_pixel_transform, use_flip, use_blur)
if use_normalize:
return al.Compose(compose +
[
al.Normalize(),
ToTensorV2()
])
else:
return al.Compose(compose)
def get_train_common_transforms(input_size=None, use_random_crop=False, use_gray=False, only_use_pixel_transform=False,
use_flip=False, use_blur=False, no_transforms=False,
use_center_crop=False,
center_crop_ratio=0.8):
if use_random_crop:
compose = [al.Resize(int(input_size * 1.1), int(input_size * 1.1)),
al.RandomCrop(input_size, input_size)]
elif use_center_crop:
compose = [al.Resize(int(input_size * (2.0 - center_crop_ratio)), int(input_size * (2.0 - center_crop_ratio))),
al.CenterCrop(input_size, input_size)]
else:
compose = [al.Resize(input_size, input_size)]
if no_transforms:
return al.Compose(compose +
[
al.Normalize(),
ToTensorV2()
])
return al.Compose(compose + _get_train_transforms(use_gray, only_use_pixel_transform, use_flip, use_blur) +
[
al.Normalize(),
ToTensorV2()
])
def get_train_transforms_simple(input_size=None, use_random_crop=False, use_gray=False, only_use_pixel_transform=False,
use_flip=False, use_blur=False, no_transforms=False,
use_center_crop=False,
center_crop_ratio=0.8):
return al.Compose(
[
al.Resize(input_size, input_size, p=1.0),
al.HorizontalFlip(p=0.5),
al.Normalize(),
ToTensorV2()
])
def get_train_transforms_mmdetection(input_size=None, use_random_crop=False, use_gray=False,
only_use_pixel_transform=False,
use_flip=False, use_blur=False, no_transforms=False,
use_center_crop=False,
center_crop_ratio=0.8):
return al.Compose(
[
al.RandomResizedCrop(height=input_size,
width=input_size,
scale=(0.4, 1.0),
interpolation=0,
p=0.5),
al.Resize(input_size, input_size, p=1.0),
al.HorizontalFlip(p=0.5),
al.OneOf([
al.ShiftScaleRotate(border_mode=0,
shift_limit=(-0.2, 0.2), scale_limit=(-0.2, 0.2),
rotate_limit=(-20, 20)),
al.OpticalDistortion(border_mode=0,
distort_limit=[-0.5, 0.5], shift_limit=[-0.5, 0.5]),
al.GridDistortion(num_steps=5, distort_limit=[-0., 0.3], border_mode=0),
al.ElasticTransform(border_mode=0),
al.IAAPerspective(),
al.RandomGridShuffle()
], p=0.1),
al.Rotate(limit=(-25, 25), border_mode=0, p=0.1),
al.OneOf([
al.RandomBrightnessContrast(
brightness_limit=(-0.2, 0.2),
contrast_limit=(-0.2, 0.2)),
al.HueSaturationValue(hue_shift_limit=(-20, 20),
sat_shift_limit=(-30, 30),
val_shift_limit=(-20, 20)),
al.RandomGamma(gamma_limit=(30, 150)),
al.RGBShift(),
al.CLAHE(clip_limit=(1, 15)),
al.ChannelShuffle(),
al.InvertImg(),
], p=0.1),
al.RandomSnow(p=0.05),
al.RandomRain(p=0.05),
al.RandomFog(p=0.05),
al.RandomSunFlare(num_flare_circles_lower=1, num_flare_circles_upper=2, src_radius=110, p=0.05),
al.RandomShadow(p=0.05),
al.GaussNoise(var_limit=(10, 20), p=0.05),
al.ISONoise(color_shift=(0, 15), p=0.05),
al.MultiplicativeNoise(p=0.05),
al.OneOf([
al.ToGray(p=1. if use_gray else 0.05),
al.ToSepia(p=0.05),
al.Solarize(p=0.05),
al.Equalize(p=0.05),
al.Posterize(p=0.05),
al.FancyPCA(p=0.05),
], p=0.05),
al.OneOf([
al.MotionBlur(blur_limit=(3, 7)),
al.Blur(blur_limit=(3, 7)),
al.MedianBlur(blur_limit=3),
al.GaussianBlur(blur_limit=3),
], p=0.05),
al.CoarseDropout(p=0.05),
al.Cutout(num_holes=30, max_h_size=37, max_w_size=37, fill_value=0, p=0.05),
al.GridDropout(p=0.05),
al.ChannelDropout(p=0.05),
al.Downscale(scale_min=0.5, scale_max=0.9, p=0.1),
al.ImageCompression(quality_lower=60, p=0.2),
al.Normalize(),
ToTensorV2()
])
def get_val_common_transforms(input_size=None, use_random_crop=False, use_gray=False,
use_center_crop=False,
center_crop_ratio=0.8):
if use_random_crop:
compose = [al.Resize(int(input_size * 1.1), int(input_size * 1.1)),
al.CenterCrop(input_size, input_size)]
elif use_center_crop:
compose = [al.Resize(int(input_size * (2.0 - center_crop_ratio)), int(input_size * (2.0 - center_crop_ratio))),
al.CenterCrop(input_size, input_size)]
else:
compose = [al.Resize(input_size, input_size)]
return al.Compose(compose + [
al.ToGray(p=1.0 if use_gray else 0.0),
al.Normalize(),
ToTensorV2()
])
def get_simple_transforms(input_size=224, use_random_crop=False, use_same_random_crop_in_batch=False, use_gray=False):
def resize_image(img, interpolation=cv2.INTER_LINEAR, **params):
height, width = img.shape[:2]
if width >= height:
img = F.smallest_max_size(img, max_size=input_size, interpolation=interpolation)
else:
img = F.longest_max_size(img, max_size=input_size, interpolation=interpolation)
pad_width = input_size - img.shape[:2][1]
left = pad_width // 2
right = pad_width - left
img = F.pad_with_params(img, 0, 0, left, right, border_mode=cv2.BORDER_CONSTANT, value=0)
return img
def left_crop(img, **params):
height, width = img.shape[:2]
if width > input_size:
img = img[:, :input_size, :]
return img
if use_same_random_crop_in_batch:
compose = []
else:
if use_random_crop:
crop = al.RandomCrop(input_size, input_size)
else:
crop = al.Lambda(left_crop)
compose = [ # al.SmallestMaxSize(input_size),
al.Lambda(resize_image),
crop]
if use_gray:
return al.Compose(compose +
[
al.ToGray(p=1.0 if use_gray else 0.05),
al.Normalize(),
ToTensorV2()
])
else:
return al.Compose(compose +
[
al.Normalize(),
ToTensorV2()
])
def get_val_transforms(input_size=224, use_gray=False):
def resize_image(img, interpolation=cv2.INTER_LINEAR, **params):
height, width = img.shape[:2]
if width >= height:
img = F.smallest_max_size(img, max_size=input_size, interpolation=interpolation)
else:
img = F.longest_max_size(img, max_size=input_size, interpolation=interpolation)
pad_width = input_size - img.shape[:2][1]
left = pad_width // 2
right = pad_width - left
img = F.pad_with_params(img, 0, 0, left, right, border_mode=cv2.BORDER_CONSTANT, value=0)
return img
def left_crop(img, **params):
height, width = img.shape[:2]
if width > input_size:
img = img[:, :input_size, :]
return img
crop = al.Lambda(left_crop)
compose = [ # al.SmallestMaxSize(input_size),
al.Lambda(resize_image),
crop]
if use_gray:
return al.Compose(compose +
[
al.ToGray(p=1.0 if use_gray else 0.05),
al.Normalize(),
ToTensorV2()
])
else:
return al.Compose(compose +
[
al.Normalize(),
ToTensorV2()
])
def get_test_transforms(input_size, use_gray=False):
# return transforms.Compose([
# transforms.Resize(input_size),
# transforms.ToTensor(),
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# ])
return al.Compose([
al.SmallestMaxSize(input_size),
al.ToGray(p=1.0 if use_gray else 0.0),
al.Normalize(),
ToTensorV2()
])
def get_test_transforms_v2(input_size, use_crop=False, center_crop_ratio=0.9, use_gray=False):
if use_crop:
resize = [al.Resize(int(input_size * (2 - center_crop_ratio)),
int(input_size * (2 - center_crop_ratio))),
al.CenterCrop(height=input_size, width=input_size)]
else:
resize = [al.Resize(input_size, input_size)]
return al.Compose(resize + [
al.ToGray(p=1. if use_gray else 0.),
al.Normalize(),
ToTensorV2()
])
``` |
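A short sketch of how these albumentations pipelines are applied: each composed transform takes the image as the `image=` keyword and returns a dict whose `'image'` entry is the normalized tensor (assuming the albumentations version these helpers were written for):

```python
import numpy as np
from transforms import get_train_common_transforms, get_val_common_transforms

# stand-in for a decoded RGB image (H, W, 3) as produced by cv2/PIL
img = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)

train_tf = get_train_common_transforms(input_size=112, use_center_crop=True, center_crop_ratio=0.8)
val_tf = get_val_common_transforms(input_size=112)

x_train = train_tf(image=img)['image']  # torch.Tensor of shape (3, 112, 112)
x_val = val_tf(image=img)['image']
print(x_train.shape, x_val.shape)
```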
{
"source": "jireh-father/keras",
"score": 2
} |
#### File: keras/dtensor/optimizers.py
```python
from keras.dtensor import dtensor_api as dtensor
from keras.optimizers.optimizer_experimental import adadelta
from keras.optimizers.optimizer_experimental import adagrad
from keras.optimizers.optimizer_experimental import adam
from keras.optimizers.optimizer_experimental import optimizer as optimizer_lib
from keras.optimizers.optimizer_experimental import rmsprop
from keras.optimizers.optimizer_experimental import sgd
from keras.optimizers.schedules import learning_rate_schedule
import tensorflow.compat.v2 as tf
# pylint: disable=protected-access,missing-class-docstring
class Optimizer(optimizer_lib._BaseOptimizer):
"""DTensor specific optimizers.
The major changes for this class are:
All the variable init logic will be mesh/layout aware.
Note that we didn't subclass optimizer_lib.Optimizer since it contains the
extra logic of handling distribution strategy, which we don't need for DTensor
"""
def __init__(self, name, mesh=None):
"""Create a new Optimizer.
Args:
name: String. The name of the optimizer, which will appear in all the
state variables created by this optimizer.
      mesh: dtensor.Mesh. The optional Mesh used to create the state variables.
        Note that the state variables usually take their layout from the
        corresponding model variables; this mesh is only used for global
        variables such as the global step and the learning rate.
"""
# TODO(scottzhu): Skip the gradients_clip_option and ema_option for now, and
# will cover them in future if really needed.
# TODO(scottzhu): We might want to make mesh to be required in future.
self._mesh = mesh
super().__init__(name=name)
def _create_iteration_variable(self):
init_val = tf.constant(0, dtype=tf.int64)
if self._mesh:
init_val = dtensor.copy_to_mesh(
init_val, dtensor.Layout.replicated(self._mesh, rank=0))
with tf.init_scope():
# Lift the variable creation to init scope to avoid environment issue.
self._iterations = dtensor.DVariable(init_val, name='iteration')
################## Override methods from keras.Optimizer ################
def add_variable_from_reference(self,
model_variable,
variable_name,
initial_value=None):
"""Create an optimizer variable from model variable.
Create an optimizer variable based on the information of model variable.
    For example, for SGD with momentum, a corresponding momentum variable is
    created with the same shape and dtype for each model variable.
Args:
model_variable: The corresponding model variable to the optimizer variable
to be created.
variable_name: The name prefix of the optimizer variable to be created.
        The created variable's name will follow the pattern
        `{variable_name}/{model_variable.name}`, e.g., `momentum/dense_1`.
initial_value: The initial value of the optimizer variable, if None, the
value will be default to 0.
Returns:
An optimizer variable.
"""
if initial_value is None:
# Use tf.zeros_like which will propagate the layout information from the
# model weights if any.
initial_value = tf.zeros_like(model_variable)
elif isinstance(initial_value, tf.Tensor):
initial_value = dtensor.copy_to_mesh(
initial_value,
dtensor.Layout.replicated(self._mesh, rank=initial_value.shape.rank))
return dtensor.DVariable(
initial_value=initial_value,
name=f'{variable_name}/{model_variable._shared_name}',
dtype=model_variable.dtype,
trainable=False)
def aggregate_gradients(self, grads_and_vars):
# Hide the aggregate_gradients from Optimizer.aggregate_gradients
raise NotImplementedError(
'Dtensor doesn\'t need to manually aggregate gradients')
def _var_key(self, variable):
"""Get a unique identifier of the given variable."""
return optimizer_lib._BaseOptimizer._var_key(self, variable)
def apply_gradients(self, grads_and_vars):
"""Apply gradients to variables.
Args:
grads_and_vars: List of (gradient, variable) pairs.
Returns:
None
Raises:
TypeError: If `grads_and_vars` is malformed.
"""
# Explicitly call the _BaseOptimizer to avoid any chance of using
# Optimizers.apply_gradients which contains distribution strategy logic.
optimizer_lib._BaseOptimizer.apply_gradients(self, grads_and_vars)
def _internal_apply_gradients(self, grads_and_vars):
"""Helper function of apply gradients.
This is required for separating out distributed training logic.
Args:
grads_and_vars: List of (gradient, variable) pairs.
"""
# Explicitly call the _BaseOptimizer to avoid any chance of using
# Optimizers.apply_gradients which contains distribution strategy logic.
optimizer_lib._BaseOptimizer._internal_apply_gradients(self, grads_and_vars)
def _overwrite_model_variables_with_average_value_helper(self, var_list):
"""Helper function to _overwrite_model_variables_with_average_value."""
(optimizer_lib._BaseOptimizer.
_overwrite_model_variables_with_average_value_helper(self, var_list))
def _build_learning_rate(self, learning_rate):
if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule):
# Create a variable to hold the current learning rate.
# Note that the init value `learning_rate(self.iterations)` should have
# the correct layout information from self.iterations.
self._current_learning_rate = dtensor.DVariable(
learning_rate(self.iterations),
name='learning_rate',
dtype=tf.float32)
return learning_rate
init_val = tf.constant(learning_rate, dtype=tf.float32)
if self._mesh:
init_val = dtensor.copy_to_mesh(
init_val, dtensor.Layout.replicated(self._mesh, rank=0))
return dtensor.DVariable(init_val, name='learning_rate')
class Adadelta(Optimizer, adadelta.Adadelta):
def __init__(self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
gradients_clip_option=None,
ema_option=None,
name='Adadelta',
mesh=None):
    # Skip adadelta.Adadelta.__init__ and only call Optimizer.__init__ above;
    # this avoids keras.Optimizer.__init__, which contains the distribution
    # strategy logic. The same pattern applies to all the optimizer subclasses below.
Optimizer.__init__(self, name=name, mesh=mesh)
self._learning_rate = self._build_learning_rate(learning_rate)
self.rho = rho
self.epsilon = epsilon
class Adagrad(Optimizer, adagrad.Adagrad):
def __init__(self,
learning_rate=0.001,
initial_accumulator_value=0.1,
epsilon=1e-7,
gradients_clip_option=None,
ema_option=None,
name='Adagrad',
mesh=None):
Optimizer.__init__(self, name=name, mesh=mesh)
self._learning_rate = self._build_learning_rate(learning_rate)
self.initial_accumulator_value = initial_accumulator_value
self.epsilon = epsilon
class Adam(Optimizer, adam.Adam):
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
gradients_clip_option=None,
ema_option=None,
name='Adam',
mesh=None):
Optimizer.__init__(self, name=name, mesh=mesh)
self._learning_rate = self._build_learning_rate(learning_rate)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.amsgrad = amsgrad
class RMSprop(Optimizer, rmsprop.RMSprop):
def __init__(self,
learning_rate=0.001,
rho=0.9,
momentum=0.0,
epsilon=1e-7,
centered=False,
gradients_clip_option=None,
ema_option=None,
jit_compile=False,
name='RMSprop',
mesh=None):
Optimizer.__init__(self, name=name, mesh=mesh)
self._learning_rate = self._build_learning_rate(learning_rate)
self.rho = rho
self.momentum = momentum
self.epsilon = epsilon
self.centered = centered
class SGD(Optimizer, sgd.SGD):
def __init__(self,
learning_rate=0.01,
momentum=0.0,
nesterov=False,
amsgrad=False,
gradients_clip_option=None,
ema_option=None,
jit_compile=False,
name='SGD',
mesh=None):
Optimizer.__init__(self, name=name, mesh=mesh)
self._learning_rate = self._build_learning_rate(learning_rate)
self.momentum = momentum
self.nesterov = nesterov
if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):
raise ValueError('`momentum` must be between [0, 1].')
``` |
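The explicit `Optimizer.__init__` calls above rely on a plain-Python multiple-inheritance pattern: invoking one base initializer directly skips the other base's `__init__`, while that base's methods stay available through the MRO. A standalone toy illustration with made-up class names:

```python
class StrategyAwareBase:
    def __init__(self):
        print("distribution-strategy setup")  # the unwanted, heavyweight init

    def helper(self):
        return "method from StrategyAwareBase is still usable"


class DTensorBase:
    def __init__(self, name):
        self.name = name  # lightweight, mesh/layout-aware init only


class MyOpt(DTensorBase, StrategyAwareBase):
    def __init__(self, name):
        # Call DTensorBase.__init__ directly, so StrategyAwareBase.__init__ never runs,
        # mirroring how Adadelta/Adam above call Optimizer.__init__ explicitly.
        DTensorBase.__init__(self, name)


opt = MyOpt("demo")
print(opt.name, "|", opt.helper())  # no "distribution-strategy setup" line is printed
```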
{
"source": "jireh-father/KoBERT",
"score": 2
} |
#### File: jireh-father/KoBERT/ht_train_extractive_summary.py
```python
import argparse
import random
from kobert.pytorch_kobert import get_pytorch_kobert_model
import jsonlines
from torch.utils import data
from gluonnlp.data import SentencepieceTokenizer
from kobert.utils import get_tokenizer
from torch.nn.utils.rnn import pad_sequence
from torch.optim import lr_scheduler
import datetime
import time
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score
import matplotlib.pyplot as plt
import itertools
from functools import partial
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler
from model import ExtractiveModel
default_config = {
"optimizer": tune.choice(['adam', 'sgd']), # tune.grid_search(['adam', 'sgd']),
"lr": tune.loguniform(1e-4, 1e-1), # tune.loguniform(1e-4, 1e-1),
"weight_decay": tune.loguniform(1e-6, 1e-3),
"scheduler": tune.choice(['step', 'cosine']), # tune.grid_search(['cosine', 'step']),
"max_word_dropout_ratio": tune.quniform(0.1, 0.5, 0.05), # tune.choice([0.1, 0.2, 0.3]),
"word_dropout_prob": tune.quniform(0.0, 1.0, 0.1),
"label_smoothing": tune.choice([0.1, 0.0]), # tune.grid_search([0.1, 0.0]),
"use_multi_class": False, # tune.grid_search([True, False]),
"freeze_bert": tune.choice([False, True]),
"use_bert_sum_words": tune.choice([True, False]), # tune.grid_search([True, False]),
"use_pos": tune.choice([True, False]), # True, # tune.grid_search([True, False]),
"use_media": tune.choice([True, False]), # tune.grid_search([True, False]),
"simple_model": tune.choice([False, True]), # tune.grid_search([True, False]),
"max_token_cnt": tune.choice([200, 300, 400, 500]),
"dim_feedforward": tune.choice([512, 768, 1024]),
"dropout": tune.choice([0.0, 0.1, 0.2, 0.3, 0.4]),
"weighted_loss": tune.quniform(1.0, 10.0, 1.0)
}
def freeze_params(model):
"""Set requires_grad=False for each of model.parameters()"""
for par in model.parameters():
par.requires_grad = False
def non_freeze_params(model):
    """Set requires_grad=True for each of model.parameters()"""
    for par in model.parameters():
        par.requires_grad = True
def init_optimizer(optimizer_name, model, lr, wd, lr_restart_step=1, lr_decay_gamma=0.9,
scheduler="step", nesterov=False, num_epochs=None, steps_per_epoch=None):
if optimizer_name == "sgd":
optimizer_ft = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=wd, nesterov=nesterov)
elif optimizer_name == "adam":
optimizer_ft = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
elif optimizer_name == "adamp":
from adamp import AdamP
optimizer_ft = AdamP(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=wd) # 1e-2)
elif optimizer_name == "sgdp":
from adamp import SGDP
optimizer_ft = SGDP(model.parameters(), lr=lr, weight_decay=wd, momentum=0.9, nesterov=nesterov)
# else:
# opt_attr = getattr(toptim, optimizer_name)
# if opt_attr:
# optimizer_ft = opt_attr(model.parameters())
# else:
    #         raise Exception("unknown optimizer name", optimizer_name)
    else:
        raise ValueError("unknown optimizer name: %s" % optimizer_name)
if scheduler == "cosine":
exp_lr_scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer_ft, lr_restart_step)
use_lr_schedule_steps = True
elif scheduler == "cycle":
exp_lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer_ft, max_lr=lr, steps_per_epoch=steps_per_epoch,
epochs=num_epochs, pct_start=0.1)
use_lr_schedule_steps = False
elif scheduler == "step":
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=lr_restart_step, gamma=lr_decay_gamma)
use_lr_schedule_steps = False
return optimizer_ft, exp_lr_scheduler, use_lr_schedule_steps
def reduce_loss(loss, reduction='mean'):
return loss.mean() if reduction == 'mean' else loss.sum() if reduction == 'sum' else loss
def linear_combination(x, y, epsilon):
return epsilon * x + (1 - epsilon) * y
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self, epsilon=0.1, reduction='mean'):
super().__init__()
self.epsilon = epsilon
self.reduction = reduction
def forward(self, preds, target):
n = preds.size()[-1]
log_preds = F.log_softmax(preds, dim=-1)
loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction)
nll = F.nll_loss(log_preds, target, reduction=self.reduction)
return linear_combination(loss / n, nll, self.epsilon)
class SentenceDataset(data.Dataset):
"""__init__ and __len__ functions are the same as in TorchvisionDataset"""
def __init__(self, samples, vocab, media_map, word_dropout_prob=0.0, max_word_dropout_ratio=0.0, max_token_cnt=300):
self.tokenizer = SentencepieceTokenizer(get_tokenizer())
self.vocab = vocab
self.samples = samples
self.targets = [s[1] for s in samples]
self.media_map = media_map
self.word_dropout_prob = word_dropout_prob
self.max_word_dropout_ratio = max_word_dropout_ratio
self.max_token_cnt = max_token_cnt
# self.classes = classes
# self.class_to_idx = class_to_idx
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
sentence, target, pos_idx, media = self.samples[index]
media = self.media_map[media]
tokens = self.tokenizer(sentence)
token_ids = self.vocab.to_indices(tokens)
if random.random() < self.word_dropout_prob:
dropout_cnt = round(self.max_word_dropout_ratio * len(token_ids))
for i in range(dropout_cnt):
dropout_idx = random.randint(0, len(token_ids) - 1)
del token_ids[dropout_idx]
if len(token_ids) > self.max_token_cnt:
token_ids = token_ids[:self.max_token_cnt]
return torch.tensor(token_ids, dtype=torch.long), target, pos_idx, media
def __len__(self):
return len(self.samples)
def pad_collate(batch):
token_ids_batch, target_batch, pos_idx_batch, media_batch = zip(*batch)
token_ids_batch = pad_sequence(token_ids_batch, batch_first=True, padding_value=0)
return token_ids_batch, torch.tensor(target_batch, dtype=torch.long), \
torch.tensor(pos_idx_batch, dtype=torch.long), torch.tensor(media_batch, dtype=torch.long),
def save_model(model, model_path):
if hasattr(model, 'module'):
model = model.module
print("save model", model_path)
torch.save(model.state_dict(), model_path)
def train(config, args):
    # articles contain at most 100 sentences
# import unicodedata
# unicodedata.normalize('NFKC', '한국어로는 안되?ㅋ')
samples_dict = {}
medias = set()
with jsonlines.open(args.train_file) as f:
for line in f.iter():
media = line['media']
medias.add(media)
extractive = line['extractive']
for i, sentence in enumerate(line['article_original']):
if i in extractive:
if config['use_multi_class']:
label = extractive.index(i)
else:
label = 0
else:
if config['use_multi_class']:
label = 3
else:
label = 1
if label not in samples_dict:
samples_dict[label] = []
samples_dict[label].append([sentence.replace('\n', '').strip(), label, i, media])
medias = list(medias)
medias.sort()
media_map = {m: i for i, m in enumerate(medias)}
print("medias", media_map)
os.makedirs(os.path.join(args.work_dir, "saved_models"), exist_ok=True)
train_samples = []
val_samples = []
class_cnt = []
num_classes = 4 if config['use_multi_class'] else 2
for label in range(num_classes):
random.shuffle(samples_dict[label])
val_cnt = round(len(samples_dict[label]) * args.val_ratio)
val_samples += samples_dict[label][:val_cnt]
tmp_train_samples = samples_dict[label][val_cnt:]
class_cnt.append(len(tmp_train_samples))
if args.use_all_train:
train_samples += samples_dict[label]
elif args.train_val_data:
train_samples += val_samples
else:
train_samples += tmp_train_samples
print('class_cnt', class_cnt)
random.shuffle(train_samples)
train_targets = [t[1] for t in train_samples]
print("total samples", len(train_samples) + len(val_samples))
print("train samples", len(train_samples))
print("val samples", len(val_samples))
bert_model, vocab = get_pytorch_kobert_model()
if config['freeze_bert']:
freeze_params(bert_model.embeddings)
freeze_params(bert_model.encoder)
freeze_params(bert_model.pooler)
else:
non_freeze_params(bert_model.embeddings)
non_freeze_params(bert_model.encoder)
non_freeze_params(bert_model.pooler)
train_dataset = SentenceDataset(train_samples, vocab, media_map, word_dropout_prob=config['word_dropout_prob'],
max_word_dropout_ratio=config['max_word_dropout_ratio'],
max_token_cnt=config['max_token_cnt'])
val_dataset = SentenceDataset(val_samples, vocab, media_map, max_token_cnt=config['max_token_cnt'])
weights = 1. / torch.tensor(class_cnt, dtype=torch.float)
print('weights', weights)
samples_weights = weights[train_targets]
sampler = torch.utils.data.sampler.WeightedRandomSampler(samples_weights, len(train_samples))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size,
num_workers=args.num_workers, pin_memory=args.train_pin_memory,
collate_fn=pad_collate,
sampler=sampler
)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.val_batch_size * 2,
num_workers=args.num_workers,
shuffle=False, pin_memory=args.val_pin_memory, collate_fn=pad_collate)
model = ExtractiveModel(bert_model, 100, 11, 768,
use_bert_sum_words=config["use_bert_sum_words"],
use_pos=config["use_pos"],
use_media=config['use_media'],
simple_model=config['simple_model'],
num_classes=num_classes,
dim_feedforward=config['dim_feedforward'],
dropout=config['dropout'])
if args.checkpoint_path is not None and os.path.isfile(args.checkpoint_path):
state_dict = torch.load(args.checkpoint_path)
model.load_state_dict(state_dict)
if torch.cuda.device_count() > 1 and args.data_parallel:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model)
device = "cuda"
model.to(device)
steps_per_epoch = len(train_samples) // args.train_batch_size
if len(train_samples) % args.train_batch_size > 0:
steps_per_epoch += 1
optimizer, scheduler, use_lr_schedule_steps = init_optimizer(config['optimizer'], model,
config['lr'], config['weight_decay'],
args.lr_restart_step,
args.lr_decay_gamma,
config['scheduler'],
nesterov=args.nesterov,
num_epochs=args.num_epochs,
steps_per_epoch=steps_per_epoch)
if config['weighted_loss'] > 0:
weights = [config['weighted_loss'], 1.]
class_weights = torch.FloatTensor(weights).to(device)
criterion = torch.nn.CrossEntropyLoss(weight=class_weights)
elif config['label_smoothing'] > 0:
criterion = LabelSmoothingCrossEntropy(epsilon=config['label_smoothing'])
else:
criterion = torch.nn.CrossEntropyLoss()
criterion = criterion.to(device)
os.makedirs(args.work_dir, exist_ok=True)
print('train_loader', len(train_loader))
for epoch in range(args.num_epochs):
if args.train:
print("Epoch %d/%d, LR: %f" % (epoch, args.num_epochs, np.array(scheduler.get_lr()).mean()))
epoch_start_time = time.time()
model.train()
epoch_labels = []
epoch_preds = []
epoch_loss = 0.
for step, (token_ids_batch, labels, pos_idx_batch, media_batch) in enumerate(train_loader):
batch_start_time = time.time()
epoch_labels += list(labels.numpy())
labels = labels.to(device)
token_ids_batch = token_ids_batch.to(device)
pos_idx_batch = pos_idx_batch.to(device)
media_batch = media_batch.to(device)
if use_lr_schedule_steps:
scheduler.step(epoch - 1 + step / len(train_loader))
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(True):
outputs = model(token_ids_batch, pos_idx_batch, media_batch)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
epoch_preds += list(preds.cpu().numpy())
# backward + optimize only if in training phase
loss.backward()
optimizer.step()
epoch_loss += loss.item() * token_ids_batch.size(0)
batch_elapsed_time = time.time() - batch_start_time
if step >= 0 and (step + 1) % args.log_step_interval == 0:
current_datetime = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
f1 = f1_score(labels.cpu().numpy(), preds.cpu().numpy(), average='macro')
acc = accuracy_score(labels.cpu().numpy(), preds.cpu().numpy())
print("[train-epoch:%02d/%02d,step:%d/%d,%s] batch_elapsed: %f" %
(epoch, args.num_epochs, step, len(train_loader), current_datetime, batch_elapsed_time))
print("loss: %f, acc: %f, f1: %f, lr: %f" % (
loss.item(), acc, f1, np.array(scheduler.get_lr()).mean()))
if not use_lr_schedule_steps:
scheduler.step()
epoch_loss = epoch_loss / len(train_loader.dataset)
epoch_elapsed_time = time.time() - epoch_start_time
current_datetime = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
epoch_f1 = f1_score(epoch_labels, epoch_preds, average='macro')
epoch_acc = accuracy_score(epoch_labels, epoch_preds)
print(
"[result:train-epoch:%02d/%02d,%s] epoch_elapsed: %s, loss: %f, acc: %f, f1: %f, lr: %f" % (
epoch, args.num_epochs, current_datetime, epoch_elapsed_time, epoch_loss, epoch_acc, epoch_f1,
scheduler.get_lr()[0]))
if args.val:
model.eval() # Set model to evaluate mode
epoch_start_time = time.time()
epoch_preds = []
epoch_labels = []
epoch_loss = 0.
for step, (token_ids_batch, labels, pos_idx_batch, media_batch) in enumerate(val_loader):
batch_start_time = time.time()
epoch_labels += list(labels.numpy())
labels = labels.to(device)
token_ids_batch = token_ids_batch.to(device)
pos_idx_batch = pos_idx_batch.to(device)
media_batch = media_batch.to(device)
# forward
# track history if only in train
with torch.set_grad_enabled(False):
start = time.time()
outputs = model(token_ids_batch, pos_idx_batch, media_batch)
# print("batch speed", time.time() - start)
_, preds = torch.max(outputs, 1)
epoch_preds += list(preds.cpu().numpy())
loss = criterion(outputs, labels)
# statistics
epoch_loss += loss.item() * token_ids_batch.size(0)
batch_elapsed_time = time.time() - batch_start_time
if step >= 0 and (step + 1) % args.log_step_interval == 0:
current_datetime = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
f1 = f1_score(labels.cpu().numpy(), preds.cpu().numpy(), average='macro')
acc = accuracy_score(labels.cpu().numpy(), preds.cpu().numpy())
print("[val-epoch:%d, step:%d/%d,%s] batch_elapsed: %f" %
(epoch, step, len(val_loader), current_datetime, batch_elapsed_time))
print("loss: %f, acc: %f, f1: %f" % (loss.item(), acc, f1))
epoch_loss = epoch_loss / len(val_loader.dataset)
current_datetime = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
epoch_acc = accuracy_score(epoch_labels, epoch_preds)
epoch_f1 = f1_score(epoch_labels, epoch_preds, pos_label=0)
epoch_precision = precision_score(epoch_labels, epoch_preds, pos_label=0)
epoch_recall = recall_score(epoch_labels, epoch_preds, pos_label=0)
epoch_elapsed = time.time() - epoch_start_time
with tune.checkpoint_dir(epoch) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save((model.state_dict(), optimizer.state_dict()), path)
            epoch_cm = confusion_matrix(epoch_labels, epoch_preds)
            print("confusion matrix")
            print(epoch_cm)
            # report per-class accuracies (normalized confusion-matrix diagonal) as scalars
            per_class_acc = (epoch_cm.astype('float') / epoch_cm.sum(axis=1)[:, np.newaxis]).diagonal()
            tune.report(loss=epoch_loss, f1=epoch_f1, acc=epoch_acc, pos_acc=per_class_acc[0],
                        neg_acc=per_class_acc[1], precision=epoch_precision, recall=epoch_recall)
print(
"[result_val-epoch:%d,%s] epoch_elapsed: %s, loss: %f, acc: %f, f1: %f, pre: %f, recall: %f" % (
epoch, current_datetime, epoch_elapsed, epoch_loss, epoch_acc, epoch_f1, epoch_precision,
epoch_recall))
cls_report = classification_report(epoch_labels, epoch_preds) # , target_names=classes)
print(cls_report)
# np.save(os.path.join(log_dir, "confusion_matrix_%s_epoch_%d.npy" % (val_name, epoch)), epoch_cm)
epoch_cm = epoch_cm.astype('float') / epoch_cm.sum(axis=1)[:, np.newaxis]
epoch_cm = epoch_cm.diagonal()
print("each accuracies")
print(epoch_cm)
if not args.train and args.val:
print("The end of evaluation.")
break
def test_accuracy(model, use_multi_class, max_token_cnt, device, args):
samples_dict = {}
medias = set()
with jsonlines.open(args.train_file) as f:
for line in f.iter():
media = line['media']
medias.add(media)
extractive = line['extractive']
for i, sentence in enumerate(line['article_original']):
if i in extractive:
if use_multi_class:
label = extractive.index(i)
else:
label = 0
else:
if use_multi_class:
label = 3
else:
label = 1
if label not in samples_dict:
samples_dict[label] = []
samples_dict[label].append([sentence.replace('\n', '').strip(), label, i, media])
medias = list(medias)
medias.sort()
media_map = {m: i for i, m in enumerate(medias)}
print("medias", media_map)
train_samples = []
val_samples = []
class_cnt = []
num_classes = 4 if use_multi_class else 2
for label in range(num_classes):
random.shuffle(samples_dict[label])
val_cnt = round(len(samples_dict[label]) * args.val_ratio)
val_samples += samples_dict[label][:val_cnt]
tmp_train_samples = samples_dict[label][val_cnt:]
class_cnt.append(len(tmp_train_samples))
if args.use_all_train:
train_samples += samples_dict[label]
elif args.train_val_data:
train_samples += val_samples
else:
train_samples += tmp_train_samples
print('class_cnt', class_cnt)
random.shuffle(train_samples)
train_targets = [t[1] for t in train_samples]
print("total samples", len(train_samples) + len(val_samples))
print("train samples", len(train_samples))
print("val samples", len(val_samples))
_, vocab = get_pytorch_kobert_model()
val_dataset = SentenceDataset(val_samples, vocab, media_map, max_token_cnt=max_token_cnt)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.val_batch_size * 2,
num_workers=args.num_workers,
shuffle=False, pin_memory=args.val_pin_memory, collate_fn=pad_collate)
model.eval() # Set model to evaluate mode
epoch_start_time = time.time()
epoch_preds = []
epoch_labels = []
for step, (token_ids_batch, labels, pos_idx_batch, media_batch) in enumerate(val_loader):
batch_start_time = time.time()
epoch_labels += list(labels.numpy())
token_ids_batch = token_ids_batch.to(device)
pos_idx_batch = pos_idx_batch.to(device)
media_batch = media_batch.to(device)
# forward
# track history if only in train
with torch.set_grad_enabled(False):
start = time.time()
outputs = model(token_ids_batch, pos_idx_batch, media_batch)
# print("batch speed", time.time() - start)
_, preds = torch.max(outputs, 1)
epoch_preds += list(preds.cpu().numpy())
current_datetime = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
epoch_acc = accuracy_score(epoch_labels, epoch_preds)
epoch_f1 = f1_score(epoch_labels, epoch_preds, average='macro')
epoch_elapsed = time.time() - epoch_start_time
print(
"[result_val,%s] epoch_elapsed: %s, acc: %f, f1: %f" % (
current_datetime, epoch_elapsed, epoch_acc, epoch_f1))
cls_report = classification_report(epoch_labels, epoch_preds) # , target_names=classes)
print(cls_report)
epoch_cm = confusion_matrix(epoch_labels, epoch_preds)
print("confusion matrix")
print(epoch_cm)
# np.save(os.path.join(log_dir, "confusion_matrix_%s_epoch_%d.npy" % (val_name, epoch)), epoch_cm)
epoch_cm = epoch_cm.astype('float') / epoch_cm.sum(axis=1)[:, np.newaxis]
epoch_cm = epoch_cm.diagonal()
print("each accuracies")
print(epoch_cm)
return epoch_f1
def main(args=None):
# config = {
# "optimizer": 'adam', # tune.grid_search(['adam', 'sgd']),
# "lr": 0.001, # tune.loguniform(1e-4, 1e-1),
# "scheduler": 'step', # tune.grid_search(['cosine', 'step']),
# "max_word_dropout_ratio": 0.0,
# "word_dropout_prob": 0.0,
# "label_smoothing": tune.grid_search([0.1, 0.0]),
# "use_multi_class": False, # tune.grid_search([True, False]),
# "freeze_bert": tune.grid_search([True, False]),
# "use_bert_sum_words": tune.grid_search([True, False]),
# "use_pos": tune.grid_search([True, False]),
# "use_media": tune.grid_search([True, False]),
# "simple_model": tune.grid_search([True, False])
# }
    config = {
        "optimizer": tune.grid_search(['adam', 'sgd']),
        "lr": tune.grid_search([0.1, 0.01, 0.001, 0.0001]),  # tune.loguniform(1e-4, 1e-1),
        "weight_decay": 1e-5,  # fixed here; see default_config above for a tunable range
        "scheduler": tune.grid_search(['step', 'cosine']),
        "max_word_dropout_ratio": tune.grid_search([0.1, 0.2, 0.3]),
        "word_dropout_prob": tune.grid_search([0.5, 0.0, 0.25, 0.75, 1.0]),
        "label_smoothing": 0.0,  # tune.grid_search([0.1, 0.0]),
        "use_multi_class": False,  # tune.grid_search([True, False]),
        "freeze_bert": tune.grid_search([True, False]),
        "use_bert_sum_words": False,  # tune.grid_search([True, False]),
        "use_pos": True,  # tune.grid_search([True, False]),
        "use_media": False,  # tune.grid_search([True, False]),
        "simple_model": True,  # tune.grid_search([True, False])
        # train() reads the keys below, so they must be present; fixed values chosen here
        "max_token_cnt": 300,
        "dim_feedforward": 768,
        "dropout": 0.1,
        "weighted_loss": 0.0,  # 0 disables the class-weighted loss
    }
# config = {
# "optimizer": tune.grid_search(['adam', 'sgd']), # tune.grid_search(['adam', 'sgd']),
# "lr": tune.grid_search([0.001, 0.0001, 0.1, 0.01]), # tune.loguniform(1e-4, 1e-1),
# "scheduler": tune.grid_search(['cosine', 'step']), # tune.grid_search(['cosine', 'step']),
# "max_word_dropout_ratio": tune.grid_search([0.3, 0.2, 0.1]),
# "word_dropout_prob": tune.grid_search([1.0, 0.75, 0.25, 0.0, 0.5]),
# "label_smoothing": tune.grid_search([0.0, 0.1]),
# "use_multi_class": False, # tune.grid_search([True, False]),
# "freeze_bert": tune.grid_search([False, True]),
# "use_bert_sum_words": False, # tune.grid_search([True, False]),
# "use_pos": tune.grid_search([False, True]),
# "use_media": tune.grid_search([False, True]),
# "simple_model": tune.grid_search([False, True])
# }
scheduler = ASHAScheduler(
metric="f1",
mode="max",
max_t=args.num_epochs,
grace_period=1,
reduction_factor=2)
reporter = CLIReporter(
# parameter_columns=["l1", "l2", "lr", "batch_size"],
metric_columns=["loss", "f1", "training_iteration"])
result = tune.run(
partial(train, args=args),
resources_per_trial={"cpu": args.num_workers, "gpu": args.gpus_per_trial},
config=config,
num_samples=args.num_tune_samples,
scheduler=scheduler,
progress_reporter=reporter,
local_dir=args.work_dir)
best_trial = result.get_best_trial("f1", "max", "last")
print("Best trial config: {}".format(best_trial.config))
print("Best trial final validation loss: {}".format(
best_trial.last_result["loss"]))
print("Best trial final validation accuracy: {}".format(
best_trial.last_result["accuracy"]))
bert_model, vocab = get_pytorch_kobert_model()
num_classes = 4 if best_trial.config["use_multi_class"] else 2
best_trained_model = ExtractiveModel(bert_model, 100, 11, 768,
use_bert_sum_words=best_trial.config["use_bert_sum_words"],
use_pos=best_trial.config["use_pos"],
use_media=best_trial.config['use_media'],
simple_model=best_trial.config['simple_model'],
num_classes=num_classes,
dim_feedforward=best_trial.config['dim_feedforward'],
dropout=best_trial.config['dropout'])
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # if args.gpus_per_trial > 1:
    #     best_trained_model = nn.DataParallel(best_trained_model)
    best_trained_model.to(device)
best_checkpoint_dir = best_trial.checkpoint.value
model_state, optimizer_state = torch.load(os.path.join(
best_checkpoint_dir, "checkpoint"))
best_trained_model.load_state_dict(model_state)
    test_f1 = test_accuracy(best_trained_model, best_trial.config["use_multi_class"],
                            best_trial.config["max_token_cnt"], device, args)
    print("Best trial test set f1: {}".format(test_f1))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--work_dir', type=str, default='./log')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None, required=False, help='checkpoint path')
parser.add_argument('--train_file', default='/media/irelin/data_disk/dataset/dacon_summury/abstractive/train.jsonl',
type=str)
parser.add_argument('--val_ratio', type=float, default=0.1)
parser.add_argument('--lr_restart_step', type=int, default=1)
parser.add_argument('-e', '--num_epochs', type=int, default=100)
parser.add_argument('--log_step_interval', type=int, default=100)
parser.add_argument('--train_batch_size', type=int, default=32)
parser.add_argument('--val_batch_size', type=int, default=64)
parser.add_argument('-w', '--num_workers', type=int, default=8)
parser.add_argument('--max_token_cnt', type=int, default=300)
parser.add_argument('--lr_decay_gamma', type=float, default=0.9)
# parser.add_argument('-d', '--weight_decay', type=float, default=1e-5)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('-t', '--train', default=False, action="store_true")
parser.add_argument('-v', '--val', default=False, action="store_true")
parser.add_argument('--use_all_train', default=False, action="store_true")
parser.add_argument('--train_val_data', default=False, action="store_true")
parser.add_argument('--data_parallel', default=False, action="store_true")
parser.add_argument('--train_pin_memory', default=False, action="store_true")
parser.add_argument('--val_pin_memory', default=False, action="store_true")
parser.add_argument('--use_benchmark', default=False, action="store_true")
parser.add_argument('--nesterov', default=False, action="store_true")
parser.add_argument('--gpus_per_trial', type=int, default=2)
parser.add_argument('--num_tune_samples', type=int, default=1)
args = parser.parse_args()
for arg in vars(args):
print(arg, getattr(args, arg))
if args.seed:
np.random.seed(args.seed)
random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
if args.use_benchmark:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
main(args)
``` |
{
"source": "jireh-father/mmdetection_210507",
"score": 3
} |
#### File: mmdetection_210507/tools/crop_center_bbox.py
```python
import argparse
import os
from PIL import Image
import glob
import numpy as np
import time
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('--output_dir', type=str, default=None)
parser.add_argument('--image_dir', type=str, default=None)
args = parser.parse_args()
return args
def main():
args = parse_args()
os.makedirs(args.output_dir, exist_ok=True)
image_files = glob.glob(os.path.join(args.image_dir, "*"))
total_w = []
total_h = []
total_time = 0.
for i, image_file in enumerate(image_files):
if i % 10 == 0:
print(i, len(image_files), image_file)
start = time.time()
im = Image.open(image_file)
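        # getbbox() returns the bounding box of the non-zero (non-black) region,
        # so cropping to it trims uniform black borders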
bbox = im.getbbox()
im = im.crop(bbox)
exec_time = time.time() - start
w, h = im.size
if w == 1280 and h == 720:
continue
total_time += exec_time
total_w.append(w)
total_h.append(h)
im.save(os.path.join(args.output_dir, os.path.splitext(os.path.basename(image_file))[0] + ".jpg"))
total_h = np.array(total_h)
total_w = np.array(total_w)
print("total_w.mean(), total_w.max(), total_w.min(), total_w.var(), total_w.std()")
print(total_w.mean(), total_w.max(), total_w.min(), total_w.var(), total_w.std())
print("total_h.mean(), total_h.max(), total_h.min(), total_h.var(), total_h.std()")
print(total_h.mean(), total_h.max(), total_h.min(), total_h.var(), total_h.std())
print("cropped cnt", len(total_h))
print("avg time", total_time / len(total_h))
print("done")
if __name__ == '__main__':
main()
```
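A tiny, self-contained illustration of the `getbbox()`-based border cropping used in the script above (synthetic image and values, not part of the repo):
```python
from PIL import Image

# all-black canvas with a colored rectangle in the middle
im = Image.new("RGB", (100, 100))
im.paste((255, 0, 0), (20, 30, 80, 70))

print(im.getbbox())            # (20, 30, 80, 70) - bbox of the non-black pixels
cropped = im.crop(im.getbbox())
print(cropped.size)            # (60, 40)
```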
#### File: tools/model_converters/convert_pytorch_to_android.py
```python
import torch
import torch.nn as nn
import argparse
import os
from mmdet.apis import inference_detector, init_detector
model_classifier_map = {
'alexnet': ['classifier', 6],
'vgg': ['classifier', 6],
'mobilenet': ['classifier', 1],
'mnasnet': ['classifier', 6],
'resnet': ['fc'],
'inception': ['fc'],
'googlenet': ['fc'],
'shufflenet': ['fc'],
'densenet': ['classifier'],
'resnext': ['fc'],
'wide_resnet': ['fc'],
'efficientnet': ['_fc'],
'bagnet': ['fc'],
'rexnet': ['output', 1],
}
def init_model(model_name, num_classes):
if model_name.startswith("efficientnet"):
from efficientnet_pytorch import EfficientNet
model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
return model
from torchvision import models
for m_key in model_classifier_map:
if m_key in model_name:
model_fn = getattr(models, model_name)
cls_layers = model_classifier_map[m_key]
if model_name.startswith("inception"):
# input_size = 299
model = model_fn(aux_logits=False)
else:
# input_size = 224
model = model_fn()
if len(cls_layers) == 1:
in_features = getattr(model, cls_layers[0]).in_features
setattr(model, cls_layers[0], nn.Linear(in_features, num_classes))
else:
classifier = getattr(model, cls_layers[0])
in_features = classifier[cls_layers[1]].in_features
classifier[cls_layers[1]] = nn.Linear(in_features, num_classes)
return model
def main(args):
device = 'cpu'
model = init_model(args.model_name, args.num_classes)
if args.model_name.startswith("efficientnet"):
model.set_swish(memory_efficient=False)
checkpoint_dict = torch.load(args.model_path, map_location=device)
pretrained_dict = checkpoint_dict['state_dict']
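    # checkpoints saved from a DataParallel model prefix every key with "module.";
    # if the plain load fails, retry through a DataParallel wrapper and then unwrap it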
try:
model.load_state_dict(pretrained_dict)
except:
model = torch.nn.DataParallel(model)
model.load_state_dict(pretrained_dict)
model = model.module
    # init_detector(args.config, args.checkpoint, device=args.device) is left disabled:
    # this script's argparse defines no config/checkpoint/device arguments, and the call
    # would overwrite the classification model loaded above.
    # model = init_detector(args.config, args.checkpoint, device=args.device)
model = model.to(device)
model.eval()
example = torch.rand(1, 3, args.input_size, args.input_size)
ret = model(example)
print(ret, ret.shape)
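    # torch.jit.trace records one forward pass and emits a TorchScript module,
    # which the PyTorch Mobile runtime can load on Android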
traced_script_module = torch.jit.trace(model, example)
os.makedirs(os.path.dirname(args.output_path), exist_ok=True)
traced_script_module.save(args.output_path)
print("done")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str)
parser.add_argument('--output_path', default=None, type=str)
parser.add_argument('--model_name', default=None, type=str)
parser.add_argument('--num_classes', default=None, type=int)
parser.add_argument('--input_size', default=224, type=int)
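    # Example invocation (hypothetical paths/values):
    #   python convert_pytorch_to_android.py --model_path ./ckpt/best.pth \
    #       --output_path ./android/model.pt --model_name efficientnet-b0 \
    #       --num_classes 2 --input_size 224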
main(parser.parse_args())
``` |
{
"source": "jireh-father/pymetric",
"score": 2
} |
#### File: modeling/layers/soa.py
```python
import torch
import torch.nn as nn
import metric.core.net as net
from metric.core.config import cfg
class SOABlock(nn.Module):
def __init__(self, in_ch):
super(SOABlock, self).__init__()
self.in_ch = in_ch
self.out_ch = in_ch
self.mid_ch = in_ch // 2
print('Num channels: in out mid')
print(' {:>4d} {:>4d} {:>4d}'.format(self.in_ch, self.out_ch, self.mid_ch))
self.f = nn.Sequential(
nn.Conv2d(self.in_ch, self.mid_ch, (1, 1), (1, 1)),
nn.BatchNorm2d(self.mid_ch, eps=cfg.BN.EPS, momentum=cfg.BN.MOM),
nn.ReLU())
self.g = nn.Sequential(
nn.Conv2d(self.in_ch, self.mid_ch, (1, 1), (1, 1)),
nn.BatchNorm2d(self.mid_ch, eps=cfg.BN.EPS, momentum=cfg.BN.MOM),
nn.ReLU())
self.h = nn.Conv2d(self.in_ch, self.mid_ch, (1, 1), (1, 1))
        self.v = nn.Conv2d(self.mid_ch, self.out_ch, (1, 1), (1, 1))
self.softmax = nn.Softmax(dim=-1)
for conv in [self.f, self.g, self.h]: #, self.v]:
conv.apply(net.init_weights)
self.v.apply(net.init_weights_classifier)
def forward(self, x):
B, C, H, W = x.shape
f_x = self.f(x).view(B, self.mid_ch, H * W) # B * mid_ch * N, where N = H*W
g_x = self.g(x).view(B, self.mid_ch, H * W) # B * mid_ch * N, where N = H*W
h_x = self.h(x).view(B, self.mid_ch, H * W) # B * mid_ch * N, where N = H*W
z = torch.bmm(f_x.permute(0, 2, 1), g_x) # B * N * N, where N = H*W
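        # scaled dot-product attention over the H*W spatial positions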
attn = self.softmax((self.mid_ch ** -.50) * z)
z = torch.bmm(attn, h_x.permute(0, 2, 1)) # B * N * mid_ch, where N = H*W
z = z.permute(0, 2, 1).view(B, self.mid_ch, H, W) # B * mid_ch * H * W
z = self.v(z)
z = z + x
return z
```
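The block above depends on the project's `cfg` and `net` helpers; the sketch below mirrors the same spatial self-attention computation with plain `torch.nn` layers so the shape flow can be checked in isolation (illustrative only, not the author's exact module):
```python
import torch
import torch.nn as nn

class TinySOA(nn.Module):
    """Minimal spatial self-attention: softmax(f(x)^T g(x)) applied to h(x), plus a residual."""
    def __init__(self, in_ch):
        super().__init__()
        mid = in_ch // 2
        self.f = nn.Conv2d(in_ch, mid, 1)
        self.g = nn.Conv2d(in_ch, mid, 1)
        self.h = nn.Conv2d(in_ch, mid, 1)
        self.v = nn.Conv2d(mid, in_ch, 1)

    def forward(self, x):
        B, C, H, W = x.shape
        mid = C // 2
        f_x = self.f(x).view(B, mid, H * W)
        g_x = self.g(x).view(B, mid, H * W)
        h_x = self.h(x).view(B, mid, H * W)
        attn = torch.softmax((mid ** -0.5) * torch.bmm(f_x.permute(0, 2, 1), g_x), dim=-1)  # B x N x N
        z = torch.bmm(attn, h_x.permute(0, 2, 1)).permute(0, 2, 1).view(B, mid, H, W)
        return self.v(z) + x  # residual, as in SOABlock

if __name__ == "__main__":
    out = TinySOA(64)(torch.randn(2, 64, 16, 16))
    print(out.shape)  # torch.Size([2, 64, 16, 16])
```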
#### File: metric/eval/clustering_classes_ssl.py
```python
import os
import sys
import numpy as np
import torch
import metric.core.config as config
from metric.core.config import cfg
import glob
from sklearn import cluster
from sklearn import mixture
import shutil
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial import distance
from PIL import Image
def extract(feature_dir):
feature_files = glob.glob(os.path.join(feature_dir, "*res5avg_features.npy"))
feature_list = []
for ff in feature_files:
embeds = np.load(open(ff, "rb"))
embeds = np.squeeze(embeds)
feature_list += list(embeds)
return feature_list
# im = cv2.imread(imgpath)
# im = im.astype(np.float32, copy=False)
# im = preprocess(im)
# im_array = np.asarray(im, dtype=np.float32)
# input_data = torch.from_numpy(im_array)
# if torch.cuda.is_available():
# input_data = input_data.cuda()
# fea = model(input_data, targets=None)
# fea = pool_layer(fea)
# embedding = to_numpy(fea.squeeze())
# # print("fea_shape: ", embedding.shape)
# return embedding
cluster_algos = {
"KMeans": cluster.KMeans,
"SpectralClustering": cluster.SpectralClustering,
"MeanShift": cluster.MeanShift,
"AffinityPropagation": cluster.AffinityPropagation,
"AgglomerativeClustering": cluster.AgglomerativeClustering,
"FeatureAgglomeration": cluster.FeatureAgglomeration,
"MiniBatchKMeans": cluster.MiniBatchKMeans,
"DBSCAN": cluster.DBSCAN,
"OPTICS": cluster.OPTICS,
"SpectralBiclustering": cluster.SpectralBiclustering,
"SpectralCoclustering": cluster.SpectralCoclustering,
"Birch": cluster.Birch,
"GaussianMixture": mixture.GaussianMixture,
"BayesianGaussianMixture": mixture.BayesianGaussianMixture
}
def main(model_path, output_dir, image_root, use_pca, pool_layer, use_norm, num_clusters, num_pca_comps, random_state,
feature_dir):
total_embeddings = extract(feature_dir)
embed_idx = 0
class_dirs = glob.glob(os.path.join(image_root, "*"))
class_dirs.sort()
os.makedirs(output_dir, exist_ok=True)
for i, class_dir in enumerate(class_dirs):
print(i, class_dir)
image_files = glob.glob(os.path.join(class_dir, '*'))
image_files.sort()
        from_idx = embed_idx
        # keep only images that can actually be opened, so that file indices stay
        # aligned with the embeddings counted below
        valid_image_files = []
        for image_file in image_files:
            try:
                Image.open(image_file)
            except:
                continue
            valid_image_files.append(image_file)
            embed_idx += 1
to_idx = embed_idx
embeddings = np.array(total_embeddings[from_idx: to_idx])
# print(embeddings.shape)
# if use_norm:
# embeddings = np.linalg.norm(embeddings, ord=2)
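        # pairwise Euclidean distances between all embeddings of this class,
        # written to an Excel sheet for manual inspection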
cdist = distance.cdist(embeddings, embeddings, 'euclidean')
cdist_exlfile = os.path.join(output_dir, "{}.xlsx".format(os.path.basename(class_dir)))
        df = pd.DataFrame(cdist, columns=[os.path.basename(f).split("_")[-1].split(".")[0] for f in valid_image_files])
df = df.set_index(df.columns)
df.to_excel(cdist_exlfile)
print(embeddings.shape)
for j, key in enumerate(cluster_algos):
print(j, len(cluster_algos), key)
try:
if key in ["AffinityPropagation", "GaussianMixture", "BayesianGaussianMixture"]:
clustered = cluster_algos[key](random_state=random_state)
elif key in ["MeanShift", "DBSCAN", "OPTICS"]:
clustered = cluster_algos[key]()
elif key in ["AgglomerativeClustering", "FeatureAgglomeration", "Birch"]:
clustered = cluster_algos[key](n_clusters=num_clusters)
else:
clustered = cluster_algos[key](n_clusters=num_clusters, random_state=random_state)
                if use_pca:
                    pca = PCA(n_components=num_pca_comps, random_state=random_state)
                    algo_input = pca.fit_transform(embeddings)
                else:
                    algo_input = embeddings
                if key in ["GaussianMixture", "BayesianGaussianMixture"]:
                    labels = clustered.fit_predict(algo_input)
                elif key in ["SpectralBiclustering", "SpectralCoclustering"]:
                    clustered.fit(algo_input)
                    labels = clustered.row_labels_
                else:
                    clustered.fit(algo_input)
                    labels = clustered.labels_
# kmeans.fit(embeddings)
                for k, label in enumerate(labels):
                    cur_output_dir = os.path.join(output_dir,
                                                  "{}_{}".format(os.path.basename(class_dir), key),
                                                  "{}".format(label))
                    os.makedirs(cur_output_dir, exist_ok=True)
                    shutil.copy(valid_image_files[k], cur_output_dir)
                # project to 2D only for plotting; keep the clustering input untouched
                if not use_pca:
                    pca_2d = PCA(n_components=2, random_state=random_state)
                    plot_embeddings = pca_2d.fit_transform(embeddings)
                else:
                    plot_embeddings = algo_input
                plt.figure()
                plt.scatter(plot_embeddings[:, 0], plot_embeddings[:, 1],
                            edgecolor='none', alpha=0.5)
plt.xlabel('component 1')
plt.ylabel('component 2')
                # plt.colorbar()  # disabled: the scatter has no color mapping (no c= argument)
output_file = os.path.join(output_dir, "{}_{}.png".format(os.path.basename(class_dir), key))
plt.savefig(output_file)
except:
import traceback
traceback.print_exc()
print("done")
def load_checkpoint(checkpoint_file, model, optimizer=None):
"""Loads the checkpoint from the given file."""
err_str = "Checkpoint '{}' not found"
assert os.path.exists(checkpoint_file), err_str.format(checkpoint_file)
# Load the checkpoint on CPU to avoid GPU mem spike
checkpoint = torch.load(checkpoint_file, map_location="cpu")
try:
state_dict = checkpoint["model_state"]
except KeyError:
state_dict = checkpoint
# Account for the DDP wrapper in the multi-gpu setting
ms = model
model_dict = ms.state_dict()
pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
if len(pretrained_dict) == len(state_dict):
print('All params loaded')
else:
        print('constructed model has {} keys, pretrained checkpoint has {} keys.'.format(len(model_dict), len(state_dict)))
        print('{} pretrained keys loaded successfully.'.format(len(pretrained_dict)))
        not_loaded_keys = [k for k in state_dict.keys() if k not in pretrained_dict.keys()]
        print(', '.join(not_loaded_keys))
model_dict.update(pretrained_dict)
ms.load_state_dict(model_dict)
# ms.load_state_dict(checkpoint["model_state"])
# Load the optimizer state (commonly not done when fine-tuning)
if optimizer:
optimizer.load_state_dict(checkpoint["optimizer_state"])
# return checkpoint["epoch"]
return checkpoint
if __name__ == '__main__':
print(sys.argv)
args = config.load_cfg_and_args("Extract feature.")
config.assert_and_infer_cfg()
cfg.freeze()
# if args.pool == "maxpool":
# pool_layer = nn.AdaptiveMaxPool2d(1)
# else:
# pool_layer = GeneralizedMeanPoolingP()
# pool_layer.cuda()
main(cfg.INFER.MODEL_WEIGHTS, cfg.INFER.OUTPUT_DIR, args.image_root, args.use_pca, None, args.use_norm,
args.num_clusters,
args.num_pca_comps,
args.random_state,
args.feature_dir)
``` |